This is an automated email from the ASF dual-hosted git repository.
xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git
The following commit(s) were added to refs/heads/master by this push:
new 1c5a0bf6cc irq: add [enter|leave]_critical_section_nonirq
1c5a0bf6cc is described below
commit 1c5a0bf6ccf7500150c9a0a29a49a2f98bfdf224
Author: hujun5 <[email protected]>
AuthorDate: Thu Mar 14 10:42:30 2024 +0800
irq: add [enter|leave]_critical_section_nonirq
Configure and compile NuttX:
$ ./tools/configure.sh -l qemu-armv8a:nsh_smp
$ make
Run with QEMU:
$ qemu-system-aarch64 -cpu cortex-a53 -smp 4 -nographic \
-machine virt,virtualization=on,gic-version=3 \
-net none -chardev stdio,id=con,mux=on -serial chardev:con \
-mon chardev=con,mode=readline -kernel ./nuttx
Reason:
In some non-IRQ scenarios we can simplify the critical
section implementation to improve performance.
Signed-off-by: hujun5 <[email protected]>
---
include/nuttx/irq.h | 4 +
sched/irq/irq_csection.c | 335 +++++++++++++++++++++++-----------------
sched/mqueue/mq_receive.c | 4 +-
sched/mqueue/mq_timedreceive.c | 4 +-
sched/mqueue/mq_timedsend.c | 2 -
sched/semaphore/sem_clockwait.c | 4 +-
sched/semaphore/sem_tickwait.c | 4 +-
sched/semaphore/sem_wait.c | 4 +-
8 files changed, 209 insertions(+), 152 deletions(-)
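For context, a minimal sketch of the intended call pattern, assuming a
code path that is known to run only in task context (the helper below is
hypothetical, not part of this patch):

  /* Hypothetical task-context helper: because this code can never run
   * from an interrupt handler, it may use the _nonirq variants, which
   * skip the up_interrupt_context() check and the in-IRQ lock path.
   */

  static void sample_task_helper(void)
  {
    irqstate_t flags;

    flags = enter_critical_section_nonirq();

    /* ... access state shared with interrupt handlers ... */

    leave_critical_section_nonirq(flags);
  }

The converted call sites below (mq_receive, sem_wait, etc.) follow this
same pattern: they run on the task side of the queue/semaphore wait, so
the interrupt-context branch is dead code for them.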
diff --git a/include/nuttx/irq.h b/include/nuttx/irq.h
index 09fa6438ea..b281eb9a41 100644
--- a/include/nuttx/irq.h
+++ b/include/nuttx/irq.h
@@ -258,8 +258,10 @@ int irqchain_detach(int irq, xcpt_t isr, FAR void *arg);
****************************************************************************/
#ifdef CONFIG_IRQCOUNT
+irqstate_t enter_critical_section_nonirq(void) noinstrument_function;
irqstate_t enter_critical_section(void) noinstrument_function;
#else
+# define enter_critical_section_nonirq() up_irq_save()
# define enter_critical_section() up_irq_save()
#endif
@@ -288,8 +290,10 @@ irqstate_t enter_critical_section(void) noinstrument_function;
****************************************************************************/
#ifdef CONFIG_IRQCOUNT
+void leave_critical_section_nonirq(irqstate_t flags) noinstrument_function;
void leave_critical_section(irqstate_t flags) noinstrument_function;
#else
+# define leave_critical_section_nonirq(f) up_irq_restore(f)
# define leave_critical_section(f) up_irq_restore(f)
#endif
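Since both variants maintain the same per-task rtcb->irqcount under
CONFIG_IRQCOUNT, they should nest freely with one another in task
context. A sketch of that assumption (an inference from the shared
irqcount handling below, not a documented guarantee):

  irqstate_t outer = enter_critical_section();        /* irqcount 0 -> 1 */
  irqstate_t inner = enter_critical_section_nonirq(); /* irqcount 1 -> 2 */

  leave_critical_section_nonirq(inner);               /* irqcount 2 -> 1 */
  leave_critical_section(outer);                      /* irqcount 1 -> 0 */

Without CONFIG_IRQCOUNT, both names collapse to plain up_irq_save() and
up_irq_restore(), so the _nonirq spelling is a drop-in alias there.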
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index 24f6a98904..7909f58cd7 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -180,24 +180,13 @@ static inline_function bool irq_waitlock(int cpu)
#ifdef CONFIG_SMP
irqstate_t enter_critical_section(void)
{
- FAR struct tcb_s *rtcb;
- irqstate_t ret;
int cpu;
- /* Disable interrupts.
- *
- * NOTE 1: Ideally this should disable interrupts on all CPUs, but most
- * architectures only support disabling interrupts on the local CPU.
- * NOTE 2: Interrupts may already be disabled, but we call up_irq_save()
- * unconditionally because we need to return valid interrupt status in any
- * event.
- * NOTE 3: We disable local interrupts BEFORE taking the spinlock in order
- * to prevent possible waits on the spinlock from interrupt handling on
- * the local CPU.
+ /* Verify that the system has sufficiently initialized so that the task
+ * lists are valid.
*/
-try_again:
- ret = up_irq_save();
+ DEBUGASSERT(g_nx_initstate >= OSINIT_TASKLISTS);
/* If called from an interrupt handler, then just take the spinlock.
* If we are already in a critical section, this will lock the CPU
@@ -312,81 +301,114 @@ try_again_in_irq:
DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
(g_cpu_irqset & (1 << cpu)) != 0);
}
+
+ return 0;
}
else
{
- /* Normal tasking environment.
+ return enter_critical_section_nonirq();
+ }
+}
+
+inline_function irqstate_t enter_critical_section_nonirq(void)
+{
+ FAR struct tcb_s *rtcb;
+ irqstate_t ret;
+ int cpu;
+
+ /* Disable interrupts.
+ *
+ * NOTE 1: Ideally this should disable interrupts on all CPUs, but most
+ * architectures only support disabling interrupts on the local CPU.
+ * NOTE 2: Interrupts may already be disabled, but we call up_irq_save()
+ * unconditionally because we need to return valid interrupt status in any
+ * event.
+ * NOTE 3: We disable local interrupts BEFORE taking the spinlock in order
+ * to prevent possible waits on the spinlock from interrupt handling on
+ * the local CPU.
+ */
+
+try_again:
+ ret = up_irq_save();
+
+ /* Verify that the system has sufficiently initialized so that the task
+ * lists are valid.
+ */
+
+ DEBUGASSERT(g_nx_initstate >= OSINIT_TASKLISTS);
+ DEBUGASSERT(!up_interrupt_context());
+
+ /* Normal tasking environment.
+ *
+ * Get the TCB of the currently executing task on this CPU (avoiding
+ * this_task(), which can recurse).
+ */
+
+ cpu = this_cpu();
+ rtcb = current_task(cpu);
+ DEBUGASSERT(rtcb != NULL);
+
+ /* Do we already have interrupts disabled? */
+
+ if (rtcb->irqcount > 0)
+ {
+ /* Yes... make sure that the spinlock is set and increment the
+ * IRQ lock count.
*
- * Get the TCB of the currently executing task on this CPU (avoid
- * using this_task() which can recurse.
+ * NOTE: If irqcount > 0 then (1) we are in a critical section,
+ * and (2) this CPU should hold the lock.
*/
- cpu = this_cpu();
- rtcb = current_task(cpu);
- DEBUGASSERT(rtcb != NULL);
+ DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
+ (g_cpu_irqset & (1 << this_cpu())) != 0 &&
+ rtcb->irqcount < INT16_MAX);
+ rtcb->irqcount++;
+ }
+ else
+ {
+ /* If we get here with irqcount == 0, then we know that the
+ * current task running on this CPU is not in a critical
+ * section. However other tasks on other CPUs may be in a
+ * critical section. If so, we must wait until they release
+ * the spinlock.
+ */
- /* Do we already have interrupts disabled? */
+ DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);
- if (rtcb->irqcount > 0)
+ if (!irq_waitlock(cpu))
{
- /* Yes... make sure that the spinlock is set and increment the
- * IRQ lock count.
- *
- * NOTE: If irqcount > 0 then (1) we are in a critical section,
- * and (2) this CPU should hold the lock.
+ /* We are in a deadlock condition due to a pending pause
+ * request interrupt. Re-enable interrupts on this CPU
+ * and try again. Briefly re-enabling interrupts should
+ * be sufficient to permit processing the pending pause
+ * request.
*/
- DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
- (g_cpu_irqset & (1 << this_cpu())) != 0 &&
- rtcb->irqcount < INT16_MAX);
- rtcb->irqcount++;
+ up_irq_restore(ret);
+ goto try_again;
}
- else
- {
- /* If we get here with irqcount == 0, then we know that the
- * current task running on this CPU is not in a critical
- * section. However other tasks on other CPUs may be in a
- * critical section. If so, we must wait until they release
- * the spinlock.
- */
-
- DEBUGASSERT((g_cpu_irqset & (1 << cpu)) == 0);
-
- if (!irq_waitlock(cpu))
- {
- /* We are in a deadlock condition due to a pending pause
- * request interrupt. Re-enable interrupts on this CPU
- * and try again. Briefly re-enabling interrupts should
- * be sufficient to permit processing the pending pause
- * request.
- */
- up_irq_restore(ret);
- goto try_again;
- }
-
- /* Then set the lock count to 1.
- *
- * Interrupts disables must follow a stacked order. We
- * cannot other context switches to re-order the enabling
- * disabling of interrupts.
- *
- * The scheduler accomplishes this by treating the irqcount
- * like lockcount: Both will disable pre-emption.
- */
+ /* Then set the lock count to 1.
+ *
+ * Interrupt disables must follow a stacked order.  We cannot
+ * allow other context switches to re-order the enabling and
+ * disabling of interrupts.
+ *
+ * The scheduler accomplishes this by treating the irqcount
+ * like lockcount: Both will disable pre-emption.
+ */
- cpu_irqlock_set(cpu);
- rtcb->irqcount = 1;
+ cpu_irqlock_set(cpu);
+ rtcb->irqcount = 1;
- /* Note that we have entered the critical section */
+ /* Note that we have entered the critical section */
#ifdef CONFIG_SCHED_CRITMONITOR
- nxsched_critmon_csection(rtcb, true);
+ nxsched_critmon_csection(rtcb, true);
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
- sched_note_csection(rtcb, true);
+ sched_note_csection(rtcb, true);
#endif
- }
}
/* Return interrupt status */
@@ -398,35 +420,56 @@ try_again_in_irq:
irqstate_t enter_critical_section(void)
{
+ /* Verify that the system has sufficiently initialized so that the task
+ * lists are valid.
+ */
+
+ DEBUGASSERT(g_nx_initstate >= OSINIT_TASKLISTS);
+
+ /* Check if we were called from an interrupt handler */
+
+ if (!up_interrupt_context())
+ {
+ return enter_critical_section_nonirq();
+ }
+
+ /* Return interrupt status */
+
+ return 0;
+}
+
+inline_function irqstate_t enter_critical_section_nonirq(void)
+{
+ FAR struct tcb_s *rtcb;
irqstate_t ret;
/* Disable interrupts */
ret = up_irq_save();
- /* Check if we were called from an interrupt handler */
+ /* Verify that the system has sufficiently initialized so that the task
+ * lists are valid.
+ */
- if (!up_interrupt_context())
- {
- FAR struct tcb_s *rtcb = this_task();
- DEBUGASSERT(rtcb != NULL);
+ DEBUGASSERT(g_nx_initstate >= OSINIT_TASKLISTS);
+ DEBUGASSERT(!up_interrupt_context());
- /* Have we just entered the critical section? Or is this a nested
- * call to enter_critical_section.
- */
+ rtcb = this_task();
+ DEBUGASSERT(rtcb != NULL);
- DEBUGASSERT(rtcb->irqcount >= 0 && rtcb->irqcount < INT16_MAX);
- if (++rtcb->irqcount == 1)
- {
- /* Note that we have entered the critical section */
+ /* Have we just entered the critical section? Or is this a nested
+ * call to enter_critical_section?
+ */
+ DEBUGASSERT(rtcb->irqcount >= 0 && rtcb->irqcount < INT16_MAX);
+ if (++rtcb->irqcount == 1)
+ {
#ifdef CONFIG_SCHED_CRITMONITOR
- nxsched_critmon_csection(rtcb, true);
+ nxsched_critmon_csection(rtcb, true);
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
- sched_note_csection(rtcb, true);
+ sched_note_csection(rtcb, true);
#endif
- }
}
/* Return interrupt status */
@@ -447,6 +490,7 @@ irqstate_t enter_critical_section(void)
#ifdef CONFIG_SMP
void leave_critical_section(irqstate_t flags)
{
+ FAR struct tcb_s *rtcb;
int cpu;
/* If called from an interrupt handler, then just release the
@@ -479,7 +523,7 @@ void leave_critical_section(irqstate_t flags)
DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
g_cpu_nestcount[cpu] == 1);
- FAR struct tcb_s *rtcb = current_task(cpu);
+ rtcb = current_task(cpu);
DEBUGASSERT(rtcb != NULL);
DEBUGASSERT((g_cpu_irqset & (1 << cpu)) != 0);
@@ -493,63 +537,68 @@ void leave_critical_section(irqstate_t flags)
}
else
{
- FAR struct tcb_s *rtcb;
+ leave_critical_section_nonirq(flags);
+ }
+}
- /* Get the TCB of the currently executing task on this CPU (avoid
- * using this_task() which can recurse.
- */
+inline_function void leave_critical_section_nonirq(irqstate_t flags)
+{
+ FAR struct tcb_s *rtcb;
+ int cpu;
- cpu = this_cpu();
- rtcb = current_task(cpu);
- DEBUGASSERT(rtcb != NULL && rtcb->irqcount > 0);
+ DEBUGASSERT(g_nx_initstate >= OSINIT_TASKLISTS);
+ DEBUGASSERT(!up_interrupt_context());
- /* Normal tasking context. We need to coordinate with other
- * tasks.
- *
- * Will we still have interrupts disabled after decrementing the
- * count?
- */
+ /* Get the TCB of the currently executing task on this CPU (avoiding
+ * this_task(), which can recurse).
+ */
- if (rtcb->irqcount > 1)
- {
- /* Yes... the spinlock should remain set */
+ cpu = this_cpu();
+ rtcb = current_task(cpu);
+ DEBUGASSERT(rtcb != NULL && rtcb->irqcount > 0);
- DEBUGASSERT(spin_is_locked(&g_cpu_irqlock));
- rtcb->irqcount--;
- }
- else
- {
- /* No.. Note that we have left the critical section */
+ /* Normal tasking context. We need to coordinate with other
+ * tasks.
+ *
+ * Will we still have interrupts disabled after decrementing the
+ * count?
+ */
+
+ if (rtcb->irqcount > 1)
+ {
+ /* Yes... the spinlock should remain set */
+
+ DEBUGASSERT(spin_is_locked(&g_cpu_irqlock));
+ rtcb->irqcount--;
+ }
+ else
+ {
+ /* No.. Note that we have left the critical section */
#ifdef CONFIG_SCHED_CRITMONITOR
- nxsched_critmon_csection(rtcb, false);
+ nxsched_critmon_csection(rtcb, false);
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
- sched_note_csection(rtcb, false);
+ sched_note_csection(rtcb, false);
#endif
- /* Decrement our count on the lock. If all CPUs have
- * released, then unlock the spinlock.
- */
+ /* Decrement our count on the lock. If all CPUs have
+ * released, then unlock the spinlock.
+ */
- DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
- (g_cpu_irqset & (1 << cpu)) != 0);
+ DEBUGASSERT(spin_is_locked(&g_cpu_irqlock) &&
+ (g_cpu_irqset & (1 << cpu)) != 0);
- /* Now, possibly on return from a context switch, clear our
- * count on the lock. If all CPUs have released the lock,
- * then unlock the global IRQ spinlock.
- */
+ /* Now, possibly on return from a context switch, clear our
+ * count on the lock. If all CPUs have released the lock,
+ * then unlock the global IRQ spinlock.
+ */
- rtcb->irqcount = 0;
- cpu_irqlock_clear();
+ rtcb->irqcount = 0;
+ cpu_irqlock_clear();
- /* Have all CPUs released the lock? */
- }
+ /* Have all CPUs released the lock? */
}
- /* Restore the previous interrupt state which may still be interrupts
- * disabled (but we don't have a mechanism to verify that now)
- */
-
up_irq_restore(flags);
}
@@ -563,29 +612,35 @@ void leave_critical_section(irqstate_t flags)
if (!up_interrupt_context())
{
- FAR struct tcb_s *rtcb = this_task();
- DEBUGASSERT(rtcb != NULL);
+ leave_critical_section_nonirq(flags);
+ }
+}
- /* Have we left entered the critical section? Or are we still
- * nested.
- */
+inline_function void leave_critical_section_nonirq(irqstate_t flags)
+{
+ FAR struct tcb_s *rtcb = this_task();
- DEBUGASSERT(rtcb->irqcount > 0);
- if (--rtcb->irqcount <= 0)
- {
- /* Note that we have left the critical section */
+ DEBUGASSERT(g_nx_initstate >= OSINIT_TASKLISTS);
+ DEBUGASSERT(!up_interrupt_context());
+ DEBUGASSERT(rtcb != NULL);
+
+ /* Have we left the critical section? Or are we still
+ * nested?
+ */
+
+ DEBUGASSERT(rtcb->irqcount > 0);
+ if (--rtcb->irqcount <= 0)
+ {
+ /* Note that we have left the critical section */
#ifdef CONFIG_SCHED_CRITMONITOR
- nxsched_critmon_csection(rtcb, false);
+ nxsched_critmon_csection(rtcb, false);
#endif
#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
- sched_note_csection(rtcb, false);
+ sched_note_csection(rtcb, false);
#endif
- }
}
- /* Restore the previous interrupt state. */
-
up_irq_restore(flags);
}
#endif
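Note the DEBUGASSERT(!up_interrupt_context()) in both new functions: a
call site is only eligible for conversion if it can never execute from
an interrupt handler. A hypothetical counter-example:

  /* NOT eligible: an interrupt handler must keep using the full
   * enter_critical_section(), which handles the in-IRQ lock path.
   */

  static int sample_isr(int irq, FAR void *context, FAR void *arg)
  {
    irqstate_t flags = enter_critical_section(); /* not _nonirq */

    /* ... */

    leave_critical_section(flags);
    return OK;
  }

This is presumably also why mq_timedsend.c below can drop its explicit
DEBUGASSERT(up_interrupt_context() == false): the converted entry points
now perform an equivalent check internally.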
diff --git a/sched/mqueue/mq_receive.c b/sched/mqueue/mq_receive.c
index eecae0c480..16770d88b9 100644
--- a/sched/mqueue/mq_receive.c
+++ b/sched/mqueue/mq_receive.c
@@ -96,7 +96,7 @@ ssize_t file_mq_receive(FAR struct file *mq, FAR char *msg, size_t msglen,
* because messages can be sent from interrupt level.
*/
- flags = enter_critical_section();
+ flags = enter_critical_section_nonirq();
/* Get the message from the message queue */
@@ -114,7 +114,7 @@ ssize_t file_mq_receive(FAR struct file *mq, FAR char *msg, size_t msglen,
ret = nxmq_do_receive(msgq, mqmsg, msg, prio);
}
- leave_critical_section(flags);
+ leave_critical_section_nonirq(flags);
return ret;
}
diff --git a/sched/mqueue/mq_timedreceive.c b/sched/mqueue/mq_timedreceive.c
index db5833fe31..583f4714d9 100644
--- a/sched/mqueue/mq_timedreceive.c
+++ b/sched/mqueue/mq_timedreceive.c
@@ -156,7 +156,7 @@ file_mq_timedreceive_internal(FAR struct file *mq, FAR char *msg,
* because messages can be sent from interrupt level.
*/
- flags = enter_critical_section();
+ flags = enter_critical_section_nonirq();
/* Check if the message queue is empty. If it is NOT empty, then we
* will not need to start timer.
@@ -231,7 +231,7 @@ file_mq_timedreceive_internal(FAR struct file *mq, FAR char *msg,
/* We can now restore interrupts */
errout_in_critical_section:
- leave_critical_section(flags);
+ leave_critical_section_nonirq(flags);
return ret;
}
diff --git a/sched/mqueue/mq_timedsend.c b/sched/mqueue/mq_timedsend.c
index abe0ab4705..5961cb2b80 100644
--- a/sched/mqueue/mq_timedsend.c
+++ b/sched/mqueue/mq_timedsend.c
@@ -144,8 +144,6 @@ file_mq_timedsend_internal(FAR struct file *mq, FAR const char *msg,
irqstate_t flags;
int ret;
- DEBUGASSERT(up_interrupt_context() == false);
-
/* Verify the input parameters on any failures to verify. */
ret = nxmq_verify_send(mq, msg, msglen, prio);
diff --git a/sched/semaphore/sem_clockwait.c b/sched/semaphore/sem_clockwait.c
index 5f1a1401c3..55ce90d5d3 100644
--- a/sched/semaphore/sem_clockwait.c
+++ b/sched/semaphore/sem_clockwait.c
@@ -107,7 +107,7 @@ int nxsem_clockwait(FAR sem_t *sem, clockid_t clockid,
* enabled while we are blocked waiting for the semaphore.
*/
- flags = enter_critical_section();
+ flags = enter_critical_section_nonirq();
/* Try to take the semaphore without waiting. */
@@ -173,7 +173,7 @@ int nxsem_clockwait(FAR sem_t *sem, clockid_t clockid,
/* We can now restore interrupts and delete the watchdog */
out:
- leave_critical_section(flags);
+ leave_critical_section_nonirq(flags);
return ret;
}
diff --git a/sched/semaphore/sem_tickwait.c b/sched/semaphore/sem_tickwait.c
index c5cc43c1ce..a15d87b9fd 100644
--- a/sched/semaphore/sem_tickwait.c
+++ b/sched/semaphore/sem_tickwait.c
@@ -80,7 +80,7 @@ int nxsem_tickwait(FAR sem_t *sem, uint32_t delay)
* enabled while we are blocked waiting for the semaphore.
*/
- flags = enter_critical_section();
+ flags = enter_critical_section_nonirq();
/* Try to take the semaphore without waiting. */
@@ -118,7 +118,7 @@ int nxsem_tickwait(FAR sem_t *sem, uint32_t delay)
/* We can now restore interrupts */
out:
- leave_critical_section(flags);
+ leave_critical_section_nonirq(flags);
return ret;
}
diff --git a/sched/semaphore/sem_wait.c b/sched/semaphore/sem_wait.c
index c4a2415af3..f4f06bc837 100644
--- a/sched/semaphore/sem_wait.c
+++ b/sched/semaphore/sem_wait.c
@@ -84,7 +84,7 @@ int nxsem_wait(FAR sem_t *sem)
* handler.
*/
- flags = enter_critical_section();
+ flags = enter_critical_section_nonirq();
/* Make sure we were supplied with a valid semaphore. */
@@ -215,7 +215,7 @@ int nxsem_wait(FAR sem_t *sem)
#endif
}
- leave_critical_section(flags);
+ leave_critical_section_nonirq(flags);
return ret;
}