xiaoxiang781216 commented on code in PR #14578:
URL: https://github.com/apache/nuttx/pull/14578#discussion_r1845964106
##########
sched/sched/sched.h:
##########
@@ -37,6 +37,7 @@
#include <nuttx/queue.h>
#include <nuttx/kmalloc.h>
#include <nuttx/spinlock.h>
+#include <nuttx/init.h>
Review Comment:
Why add this include?
##########
sched/sched/sched.h:
##########
@@ -421,13 +422,18 @@ void nxsched_update_critmon(FAR struct tcb_s *tcb);
#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
void nxsched_critmon_preemption(FAR struct tcb_s *tcb, bool state,
FAR void *caller);
+#else
+# define nxsched_critmon_preemption(t, s, c)
#endif
#if CONFIG_SCHED_CRITMONITOR_MAXTIME_CSECTION >= 0
void nxsched_critmon_csection(FAR struct tcb_s *tcb, bool state,
FAR void *caller);
#endif
+void sched_rschedule(FAR struct tcb_s *tcb);
Review Comment:
Remove the declaration and change sched_rschedule() to a static function.
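For example (just a sketch, assuming sched_unlock.c is the only caller):

    /* sched/sched/sched.h */
    -void sched_rschedule(FAR struct tcb_s *tcb);

    /* sched/sched/sched_unlock.c */
    -void sched_rschedule(FAR struct tcb_s *tcb)
    +static void sched_rschedule(FAR struct tcb_s *tcb)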
##########
sched/sched/sched_lock.c:
##########
@@ -46,6 +46,30 @@
* Public Functions
****************************************************************************/
+/****************************************************************************
+ * Name: sched_lock_wo_note
+ *
+ * Description:
+ * This function disables context switching.
+ * It does not perform instrumentation logic.
+ *
+ ****************************************************************************/
+
+bool sched_lock_wo_note(void)
Review Comment:
Make this static inline_function.
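i.e. something like (sketch; inline_function is the qualifier from nuttx/compiler.h):

    -bool sched_lock_wo_note(void)
    +static inline_function bool sched_lock_wo_note(void)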
##########
include/nuttx/spinlock.h:
##########
@@ -592,6 +592,7 @@ irqstate_t spin_lock_irqsave_wo_note(FAR volatile spinlock_t *lock)
while (!spin_trylock_irqsave_wo_note(lock, ret));
}
+ sched_lock_wo_note();
Review Comment:
This should move into spin_trylock_wo_note(); spin_lock() without irqsave should
disable the scheduler too.
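A sketch of that direction (the memory barriers of the real implementation are
elided; it assumes the up_testset()/SP_LOCKED semantics of the existing
spin_trylock_wo_note()):

    static inline_function bool
    spin_trylock_wo_note(FAR volatile spinlock_t *lock)
    {
      /* Disable preemption first so the non-irqsave spin_lock() and
       * spin_trylock() paths also lock the scheduler.
       */

      sched_lock_wo_note();

      if (up_testset(lock) == SP_LOCKED)
        {
          /* The lock was already held: undo the preemption lock. */

          sched_unlock_wo_note();
          return false;
        }

      return true;
    }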
##########
sched/sched/sched_lock.c:
##########
@@ -64,112 +88,20 @@
*
****************************************************************************/
-#ifdef CONFIG_SMP
-
-int sched_lock(void)
+void sched_lock(void)
{
- FAR struct tcb_s *rtcb;
-
- /* If the CPU supports suppression of interprocessor interrupts, then
- * simple disabling interrupts will provide sufficient protection for
- * the following operation.
- */
-
- rtcb = this_task();
-
- /* Check for some special cases: (1) rtcb may be NULL only during early
- * boot-up phases, and (2) sched_lock() should have no effect if called
- * from the interrupt level.
- */
-
- if (rtcb != NULL && !up_interrupt_context())
+ if (sched_lock_wo_note())
{
+#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) ||\
+ defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
irqstate_t flags;
- /* Catch attempts to increment the lockcount beyond the range of the
- * integer type.
- */
-
- DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
flags = enter_critical_section();
Review Comment:
Merge the declaration and the assignment: irqstate_t flags = enter_critical_section();
##########
sched/sched/sched_lock.c:
##########
@@ -64,112 +88,20 @@
*
****************************************************************************/
-#ifdef CONFIG_SMP
-
-int sched_lock(void)
+void sched_lock(void)
{
- FAR struct tcb_s *rtcb;
-
- /* If the CPU supports suppression of interprocessor interrupts, then
- * simple disabling interrupts will provide sufficient protection for
- * the following operation.
- */
-
- rtcb = this_task();
-
- /* Check for some special cases: (1) rtcb may be NULL only during early
- * boot-up phases, and (2) sched_lock() should have no effect if called
- * from the interrupt level.
- */
-
- if (rtcb != NULL && !up_interrupt_context())
+ if (sched_lock_wo_note())
{
+#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) ||\
+ defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
irqstate_t flags;
- /* Catch attempts to increment the lockcount beyond the range of the
- * integer type.
- */
-
- DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
flags = enter_critical_section();
+ FAR struct tcb_s *tcb = this_task();
Review Comment:
Should we merge this into sched_lock_wo_note() to avoid fetching the TCB twice?
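One hypothetical way to do that: have the helper return the TCB it already
fetched, so sched_lock() never calls this_task() a second time (names and
placement are illustrative only):

    static inline_function FAR struct tcb_s *sched_lock_wo_note(void)
    {
      FAR struct tcb_s *tcb = NULL;

      if (OSINIT_TASK_READY() && !up_interrupt_context())
        {
          tcb = this_task();
          DEBUGASSERT(tcb->lockcount < MAX_LOCK_COUNT);
          tcb->lockcount++;
        }

      return tcb;
    }

    void sched_lock(void)
    {
      FAR struct tcb_s *tcb = sched_lock_wo_note();

      if (tcb != NULL)
        {
          /* ...the instrumentation path can reuse tcb here instead of
           * calling this_task() again inside the critical section...
           */
        }
    }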
##########
include/sched.h:
##########
@@ -265,8 +265,10 @@ int sched_cpucount(FAR const cpu_set_t *set);
/* Task Switching Interfaces (non-standard) */
-int sched_lock(void);
-int sched_unlock(void);
+bool sched_lock_wo_note(void);
+void sched_unlock_wo_note(void);
Review Comment:
Remove these declarations and change the functions to static.
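i.e. keep only the standard pair in the public header (sketch; assumes the PR
keeps void prototypes for sched_lock()/sched_unlock() in the part of the hunk
not shown here):

     /* Task Switching Interfaces (non-standard) */

    -bool sched_lock_wo_note(void);
    -void sched_unlock_wo_note(void);
     void sched_lock(void);
     void sched_unlock(void);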
##########
sched/sched/sched_unlock.c:
##########
@@ -40,306 +40,171 @@
* Public Functions
****************************************************************************/
-/****************************************************************************
- * Name: sched_unlock
- *
- * Description:
- * This function decrements the preemption lock count. Typically this
- * is paired with sched_lock() and concludes a critical section of
- * code. Preemption will not be unlocked until sched_unlock() has
- * been called as many times as sched_lock(). When the lockcount is
- * decremented to zero, any tasks that were eligible to preempt the
- * current task will execute.
- *
- ****************************************************************************/
-
-#ifdef CONFIG_SMP
-
-int sched_unlock(void)
+void sched_rschedule_wo_note(FAR struct tcb_s *tcb)
{
- FAR struct tcb_s *rtcb;
+ bool need_leave_csection = false;
+ irqstate_t flags;
- /* This operation is safe because the scheduler is locked and no context
- * switch may occur.
- */
+ if (g_pendingtasks.head != NULL)
+ {
+ flags = enter_critical_section();
+ need_leave_csection = true;
- rtcb = this_task();
+ if (nxsched_merge_pending())
+ {
+ up_switch_context(this_task(), tcb);
+ }
+ }
- /* Check for some special cases: (1) rtcb may be NULL only during
- * early boot-up phases, and (2) sched_unlock() should have no
- * effect if called from the interrupt level.
+#if CONFIG_RR_INTERVAL > 0
+ /* If (1) the task that was running supported round-robin
+ * scheduling and (2) if its time slice has already expired, but
+ * (3) it could not slice out because pre-emption was disabled,
+ * then we need to swap the task out now and reassess the interval
+ * timer for the next time slice.
*/
- if (rtcb != NULL && !up_interrupt_context())
+ if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
+ tcb->timeslice == 0)
{
- /* Prevent context switches throughout the following. */
-
- irqstate_t flags = enter_critical_section();
- int cpu = this_cpu();
-
- DEBUGASSERT(rtcb->lockcount > 0);
-
- /* Decrement the preemption lock counter */
-
- rtcb->lockcount--;
+ if (!need_leave_csection)
+ {
+ flags = enter_critical_section();
+ need_leave_csection = true;
+ }
- /* Check if the lock counter has decremented to zero. If so,
- * then pre-emption has been re-enabled.
+ /* Yes.. that is the situation. But one more thing. The call
+ * to nxsched_merge_pending() above may have actually replaced
+ * the task at the head of the ready-to-run list. In that
+ * case, we need only to reset the timeslice value back to the
+ * maximum.
*/
- if (rtcb->lockcount <= 0)
+ if (tcb != this_task())
{
- /* Note that we no longer have pre-emption disabled. */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
- nxsched_critmon_preemption(rtcb, false, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
- sched_note_premption(rtcb, false);
+ tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
+ }
+# ifdef CONFIG_SCHED_TICKLESS
+ else
+ {
+ nxsched_reassess_timer();
+ }
+# endif
+ }
#endif
- /* Set the lock count to zero */
+#ifdef CONFIG_SCHED_SPORADIC
+# if CONFIG_RR_INTERVAL > 0
+ else
+# endif
+ /* If (1) the task that was running supported sporadic scheduling
+ * and (2) if its budget slice has already expired, but (3) it
+ * could not slice out because pre-emption was disabled, then we
+ * need to swap the task out now and reassess the interval timer
+ * for the next time slice.
+ */
- rtcb->lockcount = 0;
+ if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
+ && tcb->timeslice < 0)
+ {
+ if (!need_leave_csection)
+ {
+ flags = enter_critical_section();
+ need_leave_csection = true;
+ }
- /* Release any ready-to-run tasks that have collected in
- * g_pendingtasks.
- *
- * NOTE: This operation has a very high likelihood of causing
- * this task to be switched out!
- */
+ /* Yes.. that is the situation. Force the low-priority state
+ * now
+ */
- /* In the SMP case, the tasks remains pend(1) if we are
- * in a critical section, i.e., g_cpu_irqlock is locked by other
- * CPUs, or (2) other CPUs still have pre-emption disabled, i.e.,
- * g_cpu_lockset is locked. In those cases, the release of the
- * pending tasks must be deferred until those conditions are met.
- *
- * There are certain conditions that we must avoid by preventing
- * releasing the pending tasks while within the critical section
- * of other CPUs. This logic does that and there is matching
- * logic in nxsched_add_readytorun to avoid starting new tasks
- * within the critical section (unless the CPU is the holder of
- * the lock).
- *
- * REVISIT: If this CPU is only one that holds the IRQ lock, then
- * we should go ahead and release the pending tasks. See the logic
- * leave_critical_section(): It will call nxsched_merge_pending()
- * BEFORE it clears IRQ lock.
- */
+ nxsched_sporadic_lowpriority(tcb);
- if (!nxsched_islocked_tcb(rtcb) &&
- list_pendingtasks()->head != NULL)
- {
- if (nxsched_merge_pending())
- {
- up_switch_context(this_task(), rtcb);
- }
- }
+# ifdef CONFIG_SCHED_TICKLESS
+ /* Make sure that the call to nxsched_merge_pending() did not
+ * change the currently active task.
+ */
-#if CONFIG_RR_INTERVAL > 0
- /* If (1) the task that was running supported round-robin
- * scheduling and (2) if its time slice has already expired, but
- * (3) it could not slice out because pre-emption was disabled,
- * then we need to swap the task out now and reassess the interval
- * timer for the next time slice.
- */
+ if (tcb == current_task(cpu))
+ {
+ nxsched_reassess_timer();
+ }
+# endif
+ }
+#endif
- if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
- rtcb->timeslice == 0)
- {
- /* Yes.. that is the situation. But one more thing. The call
- * to nxsched_merge_pending() above may have actually replaced
- * the task at the head of the ready-to-run list. In that
- * case, we need only to reset the timeslice value back to the
- * maximum.
- */
+ if (need_leave_csection)
+ {
+ leave_critical_section(flags);
+ }
+}
- if (rtcb != current_task(cpu))
- {
- rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
- }
-#ifdef CONFIG_SCHED_TICKLESS
- else
- {
- nxsched_reassess_timer();
- }
-#endif
- }
+void sched_rschedule(FAR struct tcb_s *tcb)
+{
+#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) ||\
+ defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
+ irqstate_t flags;
+
+ flags = enter_critical_section();
+ nxsched_critmon_preemption(tcb, false, return_address(0));
+ sched_note_premption(tcb, false);
+ leave_critical_section(flags);
#endif
-#ifdef CONFIG_SCHED_SPORADIC
-#if CONFIG_RR_INTERVAL > 0
- else
-#endif
- /* If (1) the task that was running supported sporadic scheduling
- * and (2) if its budget slice has already expired, but (3) it
- * could not slice out because pre-emption was disabled, then we
- * need to swap the task out now and reassess the interval timer
- * for the next time slice.
- */
+ sched_rschedule_wo_note(tcb);
+}
- if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
- && rtcb->timeslice < 0)
- {
- /* Yes.. that is the situation. Force the low-priority state
- * now
- */
+/****************************************************************************
+ * Name: sched_unlock_wo_note
+ *
+ * Description:
+ * This function decrements the preemption lock count.
+ * It does not perform instrumentation logic.
+ *
+ ****************************************************************************/
- nxsched_sporadic_lowpriority(rtcb);
+void sched_unlock_wo_note(void)
Review Comment:
Remove this; nobody calls it.
##########
sched/sched/sched_unlock.c:
##########
@@ -40,306 +40,171 @@
* Public Functions
****************************************************************************/
-/****************************************************************************
- * Name: sched_unlock
- *
- * Description:
- * This function decrements the preemption lock count. Typically this
- * is paired with sched_lock() and concludes a critical section of
- * code. Preemption will not be unlocked until sched_unlock() has
- * been called as many times as sched_lock(). When the lockcount is
- * decremented to zero, any tasks that were eligible to preempt the
- * current task will execute.
- *
- ****************************************************************************/
-
-#ifdef CONFIG_SMP
-
-int sched_unlock(void)
+void sched_rschedule_wo_note(FAR struct tcb_s *tcb)
{
- FAR struct tcb_s *rtcb;
+ bool need_leave_csection = false;
+ irqstate_t flags;
- /* This operation is safe because the scheduler is locked and no context
- * switch may occur.
- */
+ if (g_pendingtasks.head != NULL)
+ {
+ flags = enter_critical_section();
+ need_leave_csection = true;
- rtcb = this_task();
+ if (nxsched_merge_pending())
+ {
+ up_switch_context(this_task(), tcb);
+ }
+ }
- /* Check for some special cases: (1) rtcb may be NULL only during
- * early boot-up phases, and (2) sched_unlock() should have no
- * effect if called from the interrupt level.
+#if CONFIG_RR_INTERVAL > 0
+ /* If (1) the task that was running supported round-robin
+ * scheduling and (2) if its time slice has already expired, but
+ * (3) it could not slice out because pre-emption was disabled,
+ * then we need to swap the task out now and reassess the interval
+ * timer for the next time slice.
*/
- if (rtcb != NULL && !up_interrupt_context())
+ if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
+ tcb->timeslice == 0)
{
- /* Prevent context switches throughout the following. */
-
- irqstate_t flags = enter_critical_section();
- int cpu = this_cpu();
-
- DEBUGASSERT(rtcb->lockcount > 0);
-
- /* Decrement the preemption lock counter */
-
- rtcb->lockcount--;
+ if (!need_leave_csection)
+ {
+ flags = enter_critical_section();
+ need_leave_csection = true;
+ }
- /* Check if the lock counter has decremented to zero. If so,
- * then pre-emption has been re-enabled.
+ /* Yes.. that is the situation. But one more thing. The call
+ * to nxsched_merge_pending() above may have actually replaced
+ * the task at the head of the ready-to-run list. In that
+ * case, we need only to reset the timeslice value back to the
+ * maximum.
*/
- if (rtcb->lockcount <= 0)
+ if (tcb != this_task())
{
- /* Note that we no longer have pre-emption disabled. */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
- nxsched_critmon_preemption(rtcb, false, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
- sched_note_premption(rtcb, false);
+ tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
+ }
+# ifdef CONFIG_SCHED_TICKLESS
+ else
+ {
+ nxsched_reassess_timer();
+ }
+# endif
+ }
#endif
- /* Set the lock count to zero */
+#ifdef CONFIG_SCHED_SPORADIC
+# if CONFIG_RR_INTERVAL > 0
+ else
+# endif
+ /* If (1) the task that was running supported sporadic scheduling
+ * and (2) if its budget slice has already expired, but (3) it
+ * could not slice out because pre-emption was disabled, then we
+ * need to swap the task out now and reassess the interval timer
+ * for the next time slice.
+ */
- rtcb->lockcount = 0;
+ if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
+ && tcb->timeslice < 0)
+ {
+ if (!need_leave_csection)
+ {
+ flags = enter_critical_section();
+ need_leave_csection = true;
+ }
- /* Release any ready-to-run tasks that have collected in
- * g_pendingtasks.
- *
- * NOTE: This operation has a very high likelihood of causing
- * this task to be switched out!
- */
+ /* Yes.. that is the situation. Force the low-priority state
+ * now
+ */
- /* In the SMP case, the tasks remains pend(1) if we are
- * in a critical section, i.e., g_cpu_irqlock is locked by other
- * CPUs, or (2) other CPUs still have pre-emption disabled, i.e.,
- * g_cpu_lockset is locked. In those cases, the release of the
- * pending tasks must be deferred until those conditions are met.
- *
- * There are certain conditions that we must avoid by preventing
- * releasing the pending tasks while within the critical section
- * of other CPUs. This logic does that and there is matching
- * logic in nxsched_add_readytorun to avoid starting new tasks
- * within the critical section (unless the CPU is the holder of
- * the lock).
- *
- * REVISIT: If this CPU is only one that holds the IRQ lock, then
- * we should go ahead and release the pending tasks. See the logic
- * leave_critical_section(): It will call nxsched_merge_pending()
- * BEFORE it clears IRQ lock.
- */
+ nxsched_sporadic_lowpriority(tcb);
- if (!nxsched_islocked_tcb(rtcb) &&
- list_pendingtasks()->head != NULL)
- {
- if (nxsched_merge_pending())
- {
- up_switch_context(this_task(), rtcb);
- }
- }
+# ifdef CONFIG_SCHED_TICKLESS
+ /* Make sure that the call to nxsched_merge_pending() did not
+ * change the currently active task.
+ */
-#if CONFIG_RR_INTERVAL > 0
- /* If (1) the task that was running supported round-robin
- * scheduling and (2) if its time slice has already expired, but
- * (3) it could not slice out because pre-emption was disabled,
- * then we need to swap the task out now and reassess the interval
- * timer for the next time slice.
- */
+ if (tcb == current_task(cpu))
+ {
+ nxsched_reassess_timer();
+ }
+# endif
+ }
+#endif
- if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
- rtcb->timeslice == 0)
- {
- /* Yes.. that is the situation. But one more thing. The call
- * to nxsched_merge_pending() above may have actually replaced
- * the task at the head of the ready-to-run list. In that
- * case, we need only to reset the timeslice value back to the
- * maximum.
- */
+ if (need_leave_csection)
+ {
+ leave_critical_section(flags);
+ }
+}
- if (rtcb != current_task(cpu))
- {
- rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
- }
-#ifdef CONFIG_SCHED_TICKLESS
- else
- {
- nxsched_reassess_timer();
- }
-#endif
- }
+void sched_rschedule(FAR struct tcb_s *tcb)
+{
+#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) ||\
+ defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
+ irqstate_t flags;
+
+ flags = enter_critical_section();
+ nxsched_critmon_preemption(tcb, false, return_address(0));
+ sched_note_premption(tcb, false);
+ leave_critical_section(flags);
Review Comment:
Should we continue to hold the critical section here?
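A sketch of that alternative; since NuttX critical sections nest, the
enter/leave pair inside sched_rschedule_wo_note() would simply nest within
this one:

    void sched_rschedule(FAR struct tcb_s *tcb)
    {
    #if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) || \
        defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
      irqstate_t flags = enter_critical_section();

      nxsched_critmon_preemption(tcb, false, return_address(0));
      sched_note_premption(tcb, false);

      /* Hold the critical section across the reschedule so that the
       * preemption-off bookkeeping and the merge of g_pendingtasks
       * happen atomically.
       */

      sched_rschedule_wo_note(tcb);
      leave_critical_section(flags);
    #else
      sched_rschedule_wo_note(tcb);
    #endif
    }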
##########
sched/sched/sched_unlock.c:
##########
@@ -40,306 +40,171 @@
* Public Functions
****************************************************************************/
-/****************************************************************************
- * Name: sched_unlock
- *
- * Description:
- * This function decrements the preemption lock count. Typically this
- * is paired with sched_lock() and concludes a critical section of
- * code. Preemption will not be unlocked until sched_unlock() has
- * been called as many times as sched_lock(). When the lockcount is
- * decremented to zero, any tasks that were eligible to preempt the
- * current task will execute.
- *
- ****************************************************************************/
-
-#ifdef CONFIG_SMP
-
-int sched_unlock(void)
+void sched_rschedule_wo_note(FAR struct tcb_s *tcb)
{
- FAR struct tcb_s *rtcb;
+ bool need_leave_csection = false;
+ irqstate_t flags;
- /* This operation is safe because the scheduler is locked and no context
- * switch may occur.
- */
+ if (g_pendingtasks.head != NULL)
+ {
+ flags = enter_critical_section();
+ need_leave_csection = true;
- rtcb = this_task();
+ if (nxsched_merge_pending())
+ {
+ up_switch_context(this_task(), tcb);
+ }
+ }
- /* Check for some special cases: (1) rtcb may be NULL only during
- * early boot-up phases, and (2) sched_unlock() should have no
- * effect if called from the interrupt level.
+#if CONFIG_RR_INTERVAL > 0
+ /* If (1) the task that was running supported round-robin
+ * scheduling and (2) if its time slice has already expired, but
+ * (3) it could not slice out because pre-emption was disabled,
+ * then we need to swap the task out now and reassess the interval
+ * timer for the next time slice.
*/
- if (rtcb != NULL && !up_interrupt_context())
+ if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
+ tcb->timeslice == 0)
{
- /* Prevent context switches throughout the following. */
-
- irqstate_t flags = enter_critical_section();
- int cpu = this_cpu();
-
- DEBUGASSERT(rtcb->lockcount > 0);
-
- /* Decrement the preemption lock counter */
-
- rtcb->lockcount--;
+ if (!need_leave_csection)
+ {
+ flags = enter_critical_section();
+ need_leave_csection = true;
+ }
- /* Check if the lock counter has decremented to zero. If so,
- * then pre-emption has been re-enabled.
+ /* Yes.. that is the situation. But one more thing. The call
+ * to nxsched_merge_pending() above may have actually replaced
+ * the task at the head of the ready-to-run list. In that
+ * case, we need only to reset the timeslice value back to the
+ * maximum.
*/
- if (rtcb->lockcount <= 0)
+ if (tcb != this_task())
{
- /* Note that we no longer have pre-emption disabled. */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
- nxsched_critmon_preemption(rtcb, false, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
- sched_note_premption(rtcb, false);
+ tcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
+ }
+# ifdef CONFIG_SCHED_TICKLESS
+ else
+ {
+ nxsched_reassess_timer();
+ }
+# endif
+ }
#endif
- /* Set the lock count to zero */
+#ifdef CONFIG_SCHED_SPORADIC
+# if CONFIG_RR_INTERVAL > 0
+ else
+# endif
+ /* If (1) the task that was running supported sporadic scheduling
+ * and (2) if its budget slice has already expired, but (3) it
+ * could not slice out because pre-emption was disabled, then we
+ * need to swap the task out now and reassess the interval timer
+ * for the next time slice.
+ */
- rtcb->lockcount = 0;
+ if ((tcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
+ && tcb->timeslice < 0)
+ {
+ if (!need_leave_csection)
+ {
+ flags = enter_critical_section();
+ need_leave_csection = true;
+ }
- /* Release any ready-to-run tasks that have collected in
- * g_pendingtasks.
- *
- * NOTE: This operation has a very high likelihood of causing
- * this task to be switched out!
- */
+ /* Yes.. that is the situation. Force the low-priority state
+ * now
+ */
- /* In the SMP case, the tasks remains pend(1) if we are
- * in a critical section, i.e., g_cpu_irqlock is locked by other
- * CPUs, or (2) other CPUs still have pre-emption disabled, i.e.,
- * g_cpu_lockset is locked. In those cases, the release of the
- * pending tasks must be deferred until those conditions are met.
- *
- * There are certain conditions that we must avoid by preventing
- * releasing the pending tasks while within the critical section
- * of other CPUs. This logic does that and there is matching
- * logic in nxsched_add_readytorun to avoid starting new tasks
- * within the critical section (unless the CPU is the holder of
- * the lock).
- *
- * REVISIT: If this CPU is only one that holds the IRQ lock, then
- * we should go ahead and release the pending tasks. See the logic
- * leave_critical_section(): It will call nxsched_merge_pending()
- * BEFORE it clears IRQ lock.
- */
+ nxsched_sporadic_lowpriority(tcb);
- if (!nxsched_islocked_tcb(rtcb) &&
- list_pendingtasks()->head != NULL)
- {
- if (nxsched_merge_pending())
- {
- up_switch_context(this_task(), rtcb);
- }
- }
+# ifdef CONFIG_SCHED_TICKLESS
+ /* Make sure that the call to nxsched_merge_pending() did not
+ * change the currently active task.
+ */
-#if CONFIG_RR_INTERVAL > 0
- /* If (1) the task that was running supported round-robin
- * scheduling and (2) if its time slice has already expired, but
- * (3) it could not slice out because pre-emption was disabled,
- * then we need to swap the task out now and reassess the interval
- * timer for the next time slice.
- */
+ if (tcb == current_task(cpu))
+ {
+ nxsched_reassess_timer();
+ }
+# endif
+ }
+#endif
- if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
- rtcb->timeslice == 0)
- {
- /* Yes.. that is the situation. But one more thing. The call
- * to nxsched_merge_pending() above may have actually replaced
- * the task at the head of the ready-to-run list. In that
- * case, we need only to reset the timeslice value back to the
- * maximum.
- */
+ if (need_leave_csection)
+ {
+ leave_critical_section(flags);
+ }
+}
- if (rtcb != current_task(cpu))
- {
- rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
- }
-#ifdef CONFIG_SCHED_TICKLESS
- else
- {
- nxsched_reassess_timer();
- }
-#endif
- }
+void sched_rschedule(FAR struct tcb_s *tcb)
+{
+#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) ||\
+ defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
+ irqstate_t flags;
+
+ flags = enter_critical_section();
+ nxsched_critmon_preemption(tcb, false, return_address(0));
+ sched_note_premption(tcb, false);
+ leave_critical_section(flags);
#endif
-#ifdef CONFIG_SCHED_SPORADIC
-#if CONFIG_RR_INTERVAL > 0
- else
-#endif
- /* If (1) the task that was running supported sporadic scheduling
- * and (2) if its budget slice has already expired, but (3) it
- * could not slice out because pre-emption was disabled, then we
- * need to swap the task out now and reassess the interval timer
- * for the next time slice.
- */
+ sched_rschedule_wo_note(tcb);
+}
- if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
- && rtcb->timeslice < 0)
- {
- /* Yes.. that is the situation. Force the low-priority state
- * now
- */
+/****************************************************************************
+ * Name: sched_unlock_wo_note
+ *
+ * Description:
+ * This function decrements the preemption lock count.
+ * It does not perform instrumentation logic.
+ *
+ ****************************************************************************/
- nxsched_sporadic_lowpriority(rtcb);
+void sched_unlock_wo_note(void)
+{
+ FAR struct tcb_s *tcb;
-#ifdef CONFIG_SCHED_TICKLESS
- /* Make sure that the call to nxsched_merge_pending() did not
- * change the currently active task.
- */
+ if (OSINIT_TASK_READY() && !up_interrupt_context())
+ {
+ tcb = this_task();
- if (rtcb == current_task(cpu))
- {
- nxsched_reassess_timer();
- }
-#endif
- }
-#endif
+ tcb->lockcount--;
+ DEBUGASSERT(tcb->lockcount >= 0);
+ if (tcb->lockcount == 0)
+ {
+ sched_rschedule_wo_note(tcb);
}
-
- UNUSED(cpu);
- leave_critical_section(flags);
}
-
- return OK;
}
-#else /* CONFIG_SMP */
+/****************************************************************************
+ * Name: sched_unlock
+ *
+ * Description:
+ * This function decrements the preemption lock count. Typically this
+ * is paired with sched_lock() and concludes a critical section of
+ * code. Preemption will not be unlocked until sched_unlock() has
+ * been called as many times as sched_lock(). When the lockcount is
+ * decremented to zero, any tasks that were eligible to preempt the
+ * current task will execute.
+ *
+ ****************************************************************************/
-int sched_unlock(void)
+void sched_unlock(void)
{
- FAR struct tcb_s *rtcb = this_task();
+ FAR struct tcb_s *tcb;
- /* Check for some special cases: (1) rtcb may be NULL only during
- * early boot-up phases, and (2) sched_unlock() should have no
- * effect if called from the interrupt level.
- */
-
- if (rtcb != NULL && !up_interrupt_context())
+ if (OSINIT_TASK_READY() && !up_interrupt_context())
{
- /* Prevent context switches throughout the following. */
-
- irqstate_t flags = enter_critical_section();
-
- DEBUGASSERT(rtcb->lockcount > 0);
+ tcb = this_task();
- /* Decrement the preemption lock counter */
-
- rtcb->lockcount--;
-
- /* Check if the lock counter has decremented to zero. If so,
- * then pre-emption has been re-enabled.
- */
-
- if (rtcb->lockcount <= 0)
+ tcb->lockcount--;
+ DEBUGASSERT(tcb->lockcount >= 0);
+ if (tcb->lockcount == 0)
{
- /* Note that we no longer have pre-emption disabled. */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
- nxsched_critmon_preemption(rtcb, false, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
- sched_note_premption(rtcb, false);
-#endif
-
- /* Set the lock count to zero */
-
- rtcb->lockcount = 0;
-
- /* Release any ready-to-run tasks that have collected in
- * g_pendingtasks.
- *
- * NOTE: This operation has a very high likelihood of causing
- * this task to be switched out!
- *
- * In the single CPU case, decrementing lockcount to zero is
- * sufficient to release the pending tasks. Further, in that
- * configuration, critical sections and pre-emption can operate
- * fully independently.
- */
-
- if (list_pendingtasks()->head != NULL)
- {
- if (nxsched_merge_pending())
- {
- up_switch_context(this_task(), rtcb);
- }
- }
-
-#if CONFIG_RR_INTERVAL > 0
- /* If (1) the task that was running supported round-robin
- * scheduling and (2) if its time slice has already expired, but
- * (3) it could not be sliced out because pre-emption was disabled,
- * then we need to swap the task out now and reassess the interval
- * timer for the next time slice.
- */
-
- if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
- rtcb->timeslice == 0)
- {
- /* Yes.. that is the situation. But one more thing: The call
- * to nxsched_merge_pending() above may have actually replaced
- * the task at the head of the ready-to-run list. In that
- * case, we need only to reset the timeslice value back to the
- * maximum.
- */
-
- if (rtcb != this_task())
- {
- rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
- }
-#ifdef CONFIG_SCHED_TICKLESS
- else
- {
- nxsched_reassess_timer();
- }
-#endif
- }
-#endif
-
-#ifdef CONFIG_SCHED_SPORADIC
-#if CONFIG_RR_INTERVAL > 0
- else
-#endif
- /* If (1) the task that was running supported sporadic scheduling
- * and (2) if its budget slice has already expired, but (3) it
- * could not slice out because pre-emption was disabled, then we
- * need to swap the task out now and reassess the interval timer
- * for the next time slice.
- */
-
- if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_SPORADIC
- && rtcb->timeslice < 0)
- {
- /* Yes.. that is the situation. Force the low-priority state
- * now
- */
-
- nxsched_sporadic_lowpriority(rtcb);
-
-#ifdef CONFIG_SCHED_TICKLESS
- /* Make sure that the call to nxsched_merge_pending() did not
- * change the currently active task.
- */
-
- if (rtcb == this_task())
- {
- nxsched_reassess_timer();
- }
-#endif
- }
-#endif
+ sched_rschedule(tcb);
Review Comment:
Rename to sched_reschedule.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]