This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit 684ddc6ada40415e6c3083f04391eed8f10e52a6
Author: hujun5 <[email protected]>
AuthorDate: Mon Jan 13 15:14:02 2025 +0800

    irq: enter_critical_section_wo_note/leave_critical_section_wo_note
    
    Signed-off-by: hujun5 <[email protected]>
---
 include/nuttx/irq.h      | 42 +++++++++++++++++++++
 include/nuttx/sched.h    | 26 -------------
 sched/irq/irq_csection.c | 95 ++++++++++++++++++++++++++----------------------
 3 files changed, 94 insertions(+), 69 deletions(-)

diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h
index 0c94f71a3a..ea4de3faf8 100644
--- a/include/nuttx/irq.h
+++ b/include/nuttx/irq.h
@@ -93,6 +93,32 @@
 
 #define IRQ_WAKE_THREAD 1
 
+/* Scheduling monitor */
+
+#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_THREAD
+#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_THREAD -1
+#endif
+
+#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE
+#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE -1
+#endif
+
+#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION
+#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION -1
+#endif
+
+#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION
+#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION -1
+#endif
+
+#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_IRQ
+#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_IRQ -1
+#endif
+
+#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_WDOG
+#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_WDOG -1
+#endif
+
 /****************************************************************************
  * Public Types
  ****************************************************************************/
@@ -258,9 +284,17 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg);
  ****************************************************************************/
 
 #ifdef CONFIG_IRQCOUNT
+#  if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0 || \
+      defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
 irqstate_t enter_critical_section(void) noinstrument_function;
+#  else
+#    define enter_critical_section() enter_critical_section_wo_note()
+#  endif
+
+irqstate_t enter_critical_section_wo_note(void) noinstrument_function;
 #else
 #  define enter_critical_section() up_irq_save()
+#  define enter_critical_section_wo_note() up_irq_save()
 #endif
 
 /****************************************************************************
@@ -288,9 +322,17 @@ irqstate_t enter_critical_section(void) noinstrument_function;
  ****************************************************************************/
 
 #ifdef CONFIG_IRQCOUNT
+#  if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0 || \
+      defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
 void leave_critical_section(irqstate_t flags) noinstrument_function;
+#  else
+#    define leave_critical_section(f) leave_critical_section_wo_note(f)
+#  endif
+
+void leave_critical_section_wo_note(irqstate_t flags) noinstrument_function;
 #else
 #  define leave_critical_section(f) up_irq_restore(f)
+#  define leave_critical_section_wo_note(f) up_irq_restore(f)
 #endif
 
 /****************************************************************************
diff --git a/include/nuttx/sched.h b/include/nuttx/sched.h
index 68284461a6..42b2f02e48 100644
--- a/include/nuttx/sched.h
+++ b/include/nuttx/sched.h
@@ -75,32 +75,6 @@
 #  define CONFIG_SCHED_SPORADIC_MAXREPL 3
 #endif
 
-/* Scheduling monitor */
-
-#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_THREAD
-#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_THREAD -1
-#endif
-
-#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE
-#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_WQUEUE -1
-#endif
-
-#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION
-#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION -1
-#endif
-
-#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION
-#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION -1
-#endif
-
-#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_IRQ
-#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_IRQ -1
-#endif
-
-#ifndef CONFIG_SCHED_CRITMONITOR_MAXTIME_WDOG
-#  define CONFIG_SCHED_CRITMONITOR_MAXTIME_WDOG -1
-#endif
-
 /* Task Management Definitions **********************************************/
 
 /* Special task IDS.  Any negative PID is invalid. */
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index b8277c758f..30e7f8820f 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -37,7 +37,6 @@
 #include "sched/sched.h"
 #include "irq/irq.h"
 
-#ifdef CONFIG_IRQCOUNT
 /****************************************************************************
  * Pre-processor Definitions
  ****************************************************************************/
@@ -80,7 +79,7 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
  ****************************************************************************/
 
 /****************************************************************************
- * Name: enter_critical_section
+ * Name: enter_critical_section_wo_note
  *
  * Description:
  *   Take the CPU IRQ lock and disable interrupts on all CPUs.  A thread-
@@ -90,7 +89,7 @@ volatile uint8_t g_cpu_nestcount[CONFIG_SMP_NCPUS];
  ****************************************************************************/
 
 #ifdef CONFIG_SMP
-irqstate_t enter_critical_section(void)
+irqstate_t enter_critical_section_wo_note(void)
 {
   FAR struct tcb_s *rtcb;
   irqstate_t ret;
@@ -246,15 +245,6 @@ irqstate_t enter_critical_section(void)
 
           cpu_irqlock_set(cpu);
           rtcb->irqcount = 1;
-
-          /* Note that we have entered the critical section */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
-          nxsched_critmon_csection(rtcb, true, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-          sched_note_csection(rtcb, true);
-#endif
         }
     }
 
@@ -265,7 +255,7 @@ irqstate_t enter_critical_section(void)
 
 #else
 
-irqstate_t enter_critical_section(void)
+irqstate_t enter_critical_section_wo_note(void)
 {
   irqstate_t ret;
 
@@ -285,10 +275,28 @@ irqstate_t enter_critical_section(void)
        */
 
       DEBUGASSERT(rtcb->irqcount >= 0 && rtcb->irqcount < INT16_MAX);
-      if (++rtcb->irqcount == 1)
-        {
-          /* Note that we have entered the critical section */
+      rtcb->irqcount++;
+    }
 
+  /* Return interrupt status */
+
+  return ret;
+}
+#endif
+
+#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0 || \
+    defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
+irqstate_t enter_critical_section(void)
+{
+  FAR struct tcb_s *rtcb;
+  irqstate_t flags;
+  flags = enter_critical_section_wo_note();
+
+  if (!up_interrupt_context())
+    {
+      rtcb = this_task();
+      if (rtcb->irqcount == 1)
+        {
 #if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
           nxsched_critmon_csection(rtcb, true, return_address(0));
 #endif
@@ -298,14 +306,12 @@ irqstate_t enter_critical_section(void)
         }
     }
 
-  /* Return interrupt status */
-
-  return ret;
+  return flags;
 }
 #endif
 
 /****************************************************************************
- * Name: leave_critical_section
+ * Name: leave_critical_section_wo_note
  *
  * Description:
  *   Decrement the IRQ lock count and if it decrements to zero then release
@@ -314,7 +320,7 @@ irqstate_t enter_critical_section(void)
  ****************************************************************************/
 
 #ifdef CONFIG_SMP
-void leave_critical_section(irqstate_t flags)
+void leave_critical_section_wo_note(irqstate_t flags)
 {
   int cpu;
 
@@ -388,14 +394,6 @@ void leave_critical_section(irqstate_t flags)
         }
       else
         {
-          /* No.. Note that we have left the critical section */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
-          nxsched_critmon_csection(rtcb, false, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
-          sched_note_csection(rtcb, false);
-#endif
           /* Decrement our count on the lock.  If all CPUs have
            * released, then unlock the spinlock.
            */
@@ -421,10 +419,8 @@ void leave_critical_section(irqstate_t flags)
 
   up_irq_restore(flags);
 }
-
 #else
-
-void leave_critical_section(irqstate_t flags)
+void leave_critical_section_wo_note(irqstate_t flags)
 {
   /* Check if we were called from an interrupt handler and that the tasks
    * lists have been initialized.
@@ -440,22 +436,35 @@ void leave_critical_section(irqstate_t flags)
        */
 
       DEBUGASSERT(rtcb->irqcount > 0);
-      if (--rtcb->irqcount <= 0)
-        {
-          /* Note that we have left the critical section */
+      --rtcb->irqcount;
+    }
 
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
-          nxsched_critmon_csection(rtcb, false, return_address(0));
+  /* Restore the previous interrupt state. */
+
+  up_irq_restore(flags);
+}
 #endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
+
+#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0 || \
+    defined(CONFIG_SCHED_INSTRUMENTATION_CSECTION)
+void leave_critical_section(irqstate_t flags)
+{
+  FAR struct tcb_s *rtcb;
+
+  if (!up_interrupt_context())
+    {
+      rtcb = this_task();
+      if (rtcb->irqcount == 1)
+        {
+#  if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
+          nxsched_critmon_csection(rtcb, false, return_address(0));
+#  endif
+#  ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
           sched_note_csection(rtcb, false);
-#endif
+#  endif
         }
     }
 
-  /* Restore the previous interrupt state. */
-
-  up_irq_restore(flags);
+  leave_critical_section_wo_note(flags);
 }
 #endif
-#endif /* CONFIG_IRQCOUNT */

Reply via email to