This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit 9a36b8b8231ea777b43174d59690dada05658416
Author: hujun5 <[email protected]>
AuthorDate: Wed May 8 19:56:22 2024 +0800

    csection: Save execution time by removing redundant init-state checks.
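    
    The shape of the change, as a minimal before/after sketch (the real
    code is in the diff below; the elided branch bodies are placeholders,
    not the actual logic):
    
    /* Before: every call paid for an OS-initialization check. */
    
    ret = up_irq_save();
    if (nxsched_get_initstate() >= OSINIT_TASKLISTS)
      {
        if (up_interrupt_context())
          {
            /* ... interrupt-context path ... */
          }
        else
          {
            /* ... task-context path ... */
          }
      }
    
    /* After: the check is gone; only the context test remains. */
    
    ret = up_irq_save();
    if (up_interrupt_context())
      {
        /* ... interrupt-context path ... */
      }
    else
      {
        /* ... task-context path ... */
      }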
    
    test:
    We can use QEMU for testing:
    
    compiling:
    make distclean -j20; ./tools/configure.sh -l qemu-armv8a:nsh_smp; make -j20
    running:
    qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic \
      -machine virt,virtualization=on,gic-version=3 -net none \
      -chardev stdio,id=con,mux=on -serial chardev:con \
      -mon chardev=con,mode=readline -kernel ./nuttx
    
    or
    compiling:
    make distclean -j20; ./tools/configure.sh -l sabre-6quad:smp; make -j20
    running:
    qemu-system-arm -semihosting -M sabrelite -m 1024 -smp 4 \
      -kernel nuttx/nuttx -nographic
    
    Signed-off-by: hujun5 <[email protected]>
---
 sched/irq/irq_csection.c | 470 +++++++++++++++++++++++------------------------
 1 file changed, 227 insertions(+), 243 deletions(-)

diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index d4525d4760..c42a8b2d25 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -188,212 +188,205 @@ irqstate_t enter_critical_section(void)
 try_again:
   ret = up_irq_save();
 
-  /* Verify that the system has sufficiently initialized so that the task
-   * lists are valid.
+  /* If called from an interrupt handler, then just take the spinlock.
+   * If we are already in a critical section, this will lock the CPU
+   * in the interrupt handler.  Sounds worse than it is.
    */
 
-  if (nxsched_get_initstate() >= OSINIT_TASKLISTS)
+  if (up_interrupt_context())
     {
-      /* If called from an interrupt handler, then just take the spinlock.
-       * If we are already in a critical section, this will lock the CPU
-       * in the interrupt handler.  Sounds worse than it is.
+      /* We are in an interrupt handler.  How can this happen?
+       *
+       *   1. We were not in a critical section when the interrupt
+       *      occurred.  In this case, the interrupt was entered with:
+       *
+       *      g_cpu_irqlock = SP_UNLOCKED.
+       *      g_cpu_nestcount = 0
+       *      All CPU bits in g_cpu_irqset should be zero
+       *
+       *   2. We were in a critical section and interrupts on
+       *      this CPU were disabled -- this is an impossible case.
+       *
+       *   3. We were in a critical section, but up_irq_save() only
+       *      disabled local interrupts on a different CPU;
+       *      interrupts could still be enabled on this CPU.
+       *
+       *      g_cpu_irqlock = SP_LOCKED.
+       *      g_cpu_nestcount = 0
+       *      The bit in g_cpu_irqset for this CPU should be zero
+       *
+       *   4. An extension of 3 is that we may be re-entered numerous
+       *      times from the same interrupt handler.  In that case:
+       *
+       *      g_cpu_irqlock = SP_LOCKED.
+       *      g_cpu_nestcount > 0
+       *      The bit in g_cpu_irqset for this CPU should be zero
+       *
+       * NOTE: However, the interrupt entry conditions can change due
+       * to previous processing by the interrupt handler that may
+       * instantiate a new thread that has irqcount > 0 and may then
+       * set the bit in g_cpu_irqset and g_cpu_irqlock = SP_LOCKED
        */
 
-      if (up_interrupt_context())
+      /* Handle nested calls to enter_critical_section() from the same
+       * interrupt.
+       */
+
+      cpu = this_cpu();
+      if (g_cpu_nestcount[cpu] > 0)
         {
-          /* We are in an interrupt handler.  How can this happen?
-           *
-           *   1. We were not in a critical section when the interrupt
-           *      occurred.  In this case, the interrupt was entered with:
-           *
-           *      g_cpu_irqlock = SP_UNLOCKED.
-           *      g_cpu_nestcount = 0
-           *      All CPU bits in g_cpu_irqset should be zero
-           *
-           *   2. We were in a critical section and interrupts on
-           *      this CPU were disabled -- this is an impossible case.
-           *
-           *   3. We were in a critical section, but up_irq_save() only
-           *      disabled local interrupts on a different CPU;
-           *      interrupts could still be enabled on this CPU.
-           *
-           *      g_cpu_irqlock = SP_LOCKED.
-           *      g_cpu_nestcount = 0
-           *      The bit in g_cpu_irqset for this CPU should be zero
-           *
-           *   4. An extension of 3 is that we may be re-entered numerous
-           *      times from the same interrupt handler.  In that case:
-           *
-           *      g_cpu_irqlock = SP_LOCKED.
-           *      g_cpu_nestcount > 0
-           *      The bit in g_cpu_irqset for this CPU should be zero
-           *
-           * NOTE: However, the interrupt entry conditions can change due
-           * to previous processing by the interrupt handler that may
-           * instantiate a new thread that has irqcount > 0 and may then
-           * set the bit in g_cpu_irqset and g_cpu_irqlock = SP_LOCKED
-           */
+          DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
+                      g_cpu_nestcount[cpu] < UINT8_MAX);
+          g_cpu_nestcount[cpu]++;
+        }
 
-          /* Handle nested calls to enter_critical_section() from the same
-           * interrupt.
-           */
+      /* This is the first call to enter_critical_section from the
+       * interrupt handler.
+       */
 
-          cpu = this_cpu();
-          if (g_cpu_nestcount[cpu] > 0)
-            {
-              DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
-                          g_cpu_nestcount[cpu] < UINT8_MAX);
-              g_cpu_nestcount[cpu]++;
-            }
+      else
+        {
+          int paused = false;
 
-          /* This is the first call to enter_critical_section from the
-           * interrupt handler.
+          /* Make sure that the g_cpu_irqset was not already set
+           * by previous logic on this CPU that was executed by the
+           * interrupt handler.  We know that the bit in g_cpu_irqset
+           * for this CPU was zero on entry into the interrupt handler,
+           * so if it is non-zero now then we know that was the case.
            */
 
-          else
+          if ((g_cpu_irqset & (1 << cpu)) == 0)
             {
-              int paused = false;
-
-              /* Make sure that the g_cpu_irqset was not already set
-               * by previous logic on this CPU that was executed by the
-               * interrupt handler.  We know that the bit in g_cpu_irqset
-               * for this CPU was zero on entry into the interrupt handler,
-               * so if it is non-zero now then we know that was the case.
+              /* Wait until we can get the spinlock (meaning that we are
+               * no longer blocked by the critical section).
                */
 
-              if ((g_cpu_irqset & (1 << cpu)) == 0)
+try_again_in_irq:
+              if (!irq_waitlock(cpu))
                 {
-                  /* Wait until we can get the spinlock (meaning that we are
-                   * no longer blocked by the critical section).
+                  /* We are in a deadlock condition due to a pending
+                   * pause request interrupt.  Break the deadlock by
+                   * handling the pause request now.
                    */
 
-try_again_in_irq:
-                  if (!irq_waitlock(cpu))
+                  if (!paused)
                     {
-                      /* We are in a deadlock condition due to a pending
-                       * pause request interrupt.  Break the deadlock by
-                       * handling the pause request now.
-                       */
-
-                      if (!paused)
-                        {
-                          up_cpu_paused_save();
-                        }
-
-                      DEBUGVERIFY(up_cpu_paused(cpu));
-                      paused = true;
-
-                      /* NOTE: As a result of up_cpu_paused(cpu), this CPU
-                       * might set g_cpu_irqset in nxsched_resume_scheduler().
-                       * However, another CPU might hold g_cpu_irqlock.
-                       * To avoid this situation, release g_cpu_irqlock first.
-                       */
-
-                      if ((g_cpu_irqset & (1 << cpu)) != 0)
-                        {
-                          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                                      &g_cpu_irqlock);
-                        }
-
-                      /* NOTE: Here, this CPU does not hold g_cpu_irqlock,
-                       * so call irq_waitlock(cpu) to acquire g_cpu_irqlock.
-                       */
-
-                      goto try_again_in_irq;
+                      up_cpu_paused_save();
                     }
-                }
 
-              /* In any event, the nesting count is now one */
+                  DEBUGVERIFY(up_cpu_paused(cpu));
+                  paused = true;
 
-              g_cpu_nestcount[cpu] = 1;
+                  /* NOTE: As a result of up_cpu_paused(cpu), this CPU
+                   * might set g_cpu_irqset in nxsched_resume_scheduler().
+                   * However, another CPU might hold g_cpu_irqlock.
+                   * To avoid this situation, release g_cpu_irqlock first.
+                   */
 
-              /* Also set the CPU bit so that other CPUs will be aware that
-               * this CPU holds the critical section.
-               */
+                  if ((g_cpu_irqset & (1 << cpu)) != 0)
+                    {
+                      spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                                  &g_cpu_irqlock);
+                    }
 
-              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                          &g_cpu_irqlock);
-              if (paused)
-                {
-                  up_cpu_paused_restore();
+                  /* NOTE: Here, this CPU does not hold g_cpu_irqlock,
+                   * so call irq_waitlock(cpu) to acquire g_cpu_irqlock.
+                   */
+
+                  goto try_again_in_irq;
                 }
             }
+
+          /* In any event, the nesting count is now one */
+
+          g_cpu_nestcount[cpu] = 1;
+
+          /* Also set the CPU bit so that other CPUs will be aware that
+           * this CPU holds the critical section.
+           */
+
+          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                      &g_cpu_irqlock);
+          if (paused)
+            {
+              up_cpu_paused_restore();
+            }
         }
-      else
+    }
+  else
+    {
+      /* Normal tasking environment.
+       *
+       * Get the TCB of the currently executing task on this CPU (avoid
+       * using this_task(), which can recurse).
+       */
+
+      cpu  = this_cpu();
+      rtcb = current_task(cpu);
+      DEBUGASSERT(rtcb != NULL);
+
+      /* Do we already have interrupts disabled? */
+
+      if (rtcb->irqcount > 0)
         {
-          /* Normal tasking environment.
+          /* Yes... make sure that the spinlock is set and increment the
+           * IRQ lock count.
            *
-           * Get the TCB of the currently executing task on this CPU (avoid
-           * using this_task(), which can recurse).
+           * NOTE: If irqcount > 0 then (1) we are in a critical section,
+           * and (2) this CPU should hold the lock.
            */
 
-          cpu  = this_cpu();
-          rtcb = current_task(cpu);
-          DEBUGASSERT(rtcb != NULL);
+          DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
+                      (g_cpu_irqset & (1 << this_cpu())) != 0 &&
+                      rtcb->irqcount < INT16_MAX);
+          rtcb->irqcount++;
+        }
+      else
+        {
+          /* If we get here with irqcount == 0, then we know that the
+           * current task running on this CPU is not in a critical
+           * section.  However other tasks on other CPUs may be in a
+           * critical section.  If so, we must wait until they release
+           * the spinlock.
+           */
 
-          /* Do we already have interrupts disabled? */
+          DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);
 
-          if (rtcb->irqcount > 0)
+          if (!irq_waitlock(cpu))
             {
-              /* Yes... make sure that the spinlock is set and increment the
-               * IRQ lock count.
-               *
-               * NOTE: If irqcount > 0 then (1) we are in a critical section,
-               * and (2) this CPU should hold the lock.
+              /* We are in a deadlock condition due to a pending pause
+               * request interrupt.  Re-enable interrupts on this CPU
+               * and try again.  Briefly re-enabling interrupts should
+               * be sufficient to permit processing the pending pause
+               * request.
                */
 
-              DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
-                          (g_cpu_irqset & (1 << this_cpu())) != 0 &&
-                          rtcb->irqcount < INT16_MAX);
-              rtcb->irqcount++;
+              up_irq_restore(ret);
+              goto try_again;
             }
-          else
-            {
-              /* If we get here with irqcount == 0, then we know that the
-               * current task running on this CPU is not in a critical
-               * section.  However other tasks on other CPUs may be in a
-               * critical section.  If so, we must wait until they release
-               * the spinlock.
-               */
-
-              DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);
-
-              if (!irq_waitlock(cpu))
-                {
-                  /* We are in a deadlock condition due to a pending pause
-                   * request interrupt.  Re-enable interrupts on this CPU
-                   * and try again.  Briefly re-enabling interrupts should
-                   * be sufficient to permit processing the pending pause
-                   * request.
-                   */
 
-                  up_irq_restore(ret);
-                  goto try_again;
-                }
-
-              /* Then set the lock count to 1.
-               *
-               * Interrupt disables must follow a stacked order.  We
-               * cannot allow context switches to re-order the enabling/
-               * disabling of interrupts.
-               *
-               * The scheduler accomplishes this by treating the irqcount
-               * like lockcount:  Both will disable pre-emption.
-               */
+          /* Then set the lock count to 1.
+           *
+           * Interrupt disables must follow a stacked order.  We
+           * cannot allow context switches to re-order the enabling/
+           * disabling of interrupts.
+           *
+           * The scheduler accomplishes this by treating the irqcount
+           * like lockcount:  Both will disable pre-emption.
+           */
 
-              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                          &g_cpu_irqlock);
-              rtcb->irqcount = 1;
+          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                      &g_cpu_irqlock);
+          rtcb->irqcount = 1;
 
-              /* Note that we have entered the critical section */
+          /* Note that we have entered the critical section */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-              nxsched_critmon_csection(rtcb, true);
+          nxsched_critmon_csection(rtcb, true);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-              sched_note_csection(rtcb, true);
+          sched_note_csection(rtcb, true);
 #endif
-            }
         }
     }
 
@@ -412,11 +405,9 @@ irqstate_t enter_critical_section(void)
 
   ret = up_irq_save();
 
-  /* Check if we were called from an interrupt handler and that the task
-   * lists have been initialized.
-   */
+  /* Check if we were called from an interrupt handler */
 
-  if (!up_interrupt_context() && nxsched_get_initstate() >= OSINIT_TASKLISTS)
+  if (!up_interrupt_context())
     {
       FAR struct tcb_s *rtcb = this_task();
       DEBUGASSERT(rtcb != NULL);
@@ -459,108 +450,101 @@ void leave_critical_section(irqstate_t flags)
 {
   int cpu;
 
-  /* Verify that the system has sufficiently initialized so that the task
-   * lists are valid.
+  /* If called from an interrupt handler, then just release the
+   * spinlock.  The interrupt handling logic should already hold the
+   * spinlock if enter_critical_section() has been called.  Unlocking
+   * the spinlock will allow interrupt handlers on other CPUs to execute
+   * again.
    */
 
-  if (nxsched_get_initstate() >= OSINIT_TASKLISTS)
+  if (up_interrupt_context())
     {
-      /* If called from an interrupt handler, then just release the
-       * spinlock.  The interrupt handling logic should already hold the
-       * spinlock if enter_critical_section() has been called.  Unlocking
-       * the spinlock will allow interrupt handlers on other CPUs to execute
-       * again.
+      /* We are in an interrupt handler. Check if the last call to
+       * enter_critical_section() was nested.
        */
 
-      if (up_interrupt_context())
+      cpu = this_cpu();
+      if (g_cpu_nestcount[cpu] > 1)
         {
-          /* We are in an interrupt handler. Check if the last call to
-           * enter_critical_section() was nested.
-           */
+          /* Yes.. then just decrement the nesting count */
 
-          cpu = this_cpu();
-          if (g_cpu_nestcount[cpu] > 1)
-            {
-              /* Yes.. then just decrement the nesting count */
-
-              DEBUGASSERT(spin_is_locked(&g_cpu_irqlock));
-              g_cpu_nestcount[cpu]--;
-            }
-          else
-            {
-              /* No, not nested. Restore the g_cpu_irqset for this CPU
-               * and release the spinlock (if necessary).
-               */
-
-              DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
-                          g_cpu_nestcount[cpu] == 1);
+          DEBUGASSERT(spin_is_locked(&g_cpu_irqlock));
+          g_cpu_nestcount[cpu]--;
+        }
+      else
+        {
+          /* No, not nested. Restore the g_cpu_irqset for this CPU
+           * and release the spinlock (if necessary).
+           */
 
-              FAR struct tcb_s *rtcb = current_task(cpu);
-              DEBUGASSERT(rtcb != NULL);
+          DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
+                      g_cpu_nestcount[cpu] == 1);
 
-              if (rtcb->irqcount <= 0)
-                {
-                  spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                              &g_cpu_irqlock);
-                }
+          FAR struct tcb_s *rtcb = current_task(cpu);
+          DEBUGASSERT(rtcb != NULL);
 
-              g_cpu_nestcount[cpu] = 0;
+          if (rtcb->irqcount <= 0)
+            {
+              spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                          &g_cpu_irqlock);
             }
+
+          g_cpu_nestcount[cpu] = 0;
         }
-      else
-        {
-          FAR struct tcb_s *rtcb;
+    }
+  else
+    {
+      FAR struct tcb_s *rtcb;
 
-          /* Get the TCB of the currently executing task on this CPU (avoid
-           * using this_task(), which can recurse).
-           */
+      /* Get the TCB of the currently executing task on this CPU (avoid
+       * using this_task(), which can recurse).
+       */
 
-          cpu  = this_cpu();
-          rtcb = current_task(cpu);
-          DEBUGASSERT(rtcb != NULL && rtcb->irqcount > 0);
+      cpu  = this_cpu();
+      rtcb = current_task(cpu);
+      DEBUGASSERT(rtcb != NULL && rtcb->irqcount > 0);
 
-          /* Normal tasking context.  We need to coordinate with other
-           * tasks.
-           *
-           * Will we still have interrupts disabled after decrementing the
-           * count?
-           */
+      /* Normal tasking context.  We need to coordinate with other
+       * tasks.
+       *
+       * Will we still have interrupts disabled after decrementing the
+       * count?
+       */
 
-          if (rtcb->irqcount > 1)
-            {
-              /* Yes... the spinlock should remain set */
+      if (rtcb->irqcount > 1)
+        {
+          /* Yes... the spinlock should remain set */
 
-              DEBUGASSERT(spin_is_locked(&g_cpu_irqlock));
-              rtcb->irqcount--;
-            }
-          else
-            {
-              /* No.. Note that we have left the critical section */
+          DEBUGASSERT(spin_is_locked(&g_cpu_irqlock));
+          rtcb->irqcount--;
+        }
+      else
+        {
+          /* No.. Note that we have left the critical section */
 
 #ifdef CONFIG_SCHED_CRITMONITOR
-              nxsched_critmon_csection(rtcb, false);
+          nxsched_critmon_csection(rtcb, false);
 #endif
 #ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-              sched_note_csection(rtcb, false);
+          sched_note_csection(rtcb, false);
 #endif
-              /* Decrement our count on the lock.  If all CPUs have
-               * released, then unlock the spinlock.
-               */
+          /* Decrement our count on the lock.  If all CPUs have
+           * released, then unlock the spinlock.
+           */
 
-              DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
-                          (g_cpu_irqset & (1 << cpu)) != 0);
+          DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
+                      (g_cpu_irqset & (1 << cpu)) != 0);
 
-              /* Now, possibly on return from a context switch, clear our
-               * count on the lock.  If all CPUs have released the lock,
-               * then unlock the global IRQ spinlock.
-               */
+          /* Now, possibly on return from a context switch, clear our
+           * count on the lock.  If all CPUs have released the lock,
+           * then unlock the global IRQ spinlock.
+           */
 
-              rtcb->irqcount = 0;
-              spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
-                          &g_cpu_irqlock);
+          rtcb->irqcount = 0;
+          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
+                      &g_cpu_irqlock);
 
-              /* Have all CPUs released the lock? */
-            }
+          /* Have all CPUs released the lock? */
         }
     }
 
@@ -579,7 +563,7 @@ void leave_critical_section(irqstate_t flags)
    * lists have been initialized.
    */
 
-  if (!up_interrupt_context() && nxsched_get_initstate() >= OSINIT_TASKLISTS)
+  if (!up_interrupt_context())
     {
       FAR struct tcb_s *rtcb = this_task();
       DEBUGASSERT(rtcb != NULL);
