This is an automated email from the ASF dual-hosted git repository. jiuzhudong pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/nuttx.git
commit 142f32121a65e70deaeb205519e415965aa3b4c7 Author: Jukka Laitinen <jukka.laiti...@tii.ae> AuthorDate: Fri Jul 4 09:59:10 2025 +0300 sched: Remove pending list for CONFIG_SMP This fixes several places where the scheduler erroneously checks whether scheduling is locked on the current cpu/task, when it should check whether it is locked on the target cpu/task. The original code would sporadically cause a task to be added to the pending list and never taken out again, leading to a system halt. For SMP, there is no need for the pending list. Each CPU has its own running list (the assigned tasks list), and pending tasks can simply be kept in the unassigned (readytorun) list. In addition, SMP scheduling is changed so that every CPU picks up tasks from the ready-to-run list by itself; which task to pick is never dictated by another CPU. This also allows using up_send_smp_sched asynchronously for - re-prioritizing a running task - triggering a round robin scheduling switch In other words, no separate SMP call mechanism is needed for those, and the code can be simplified. Signed-off-by: Jukka Laitinen <jukka.laiti...@tii.ae> --- sched/init/nx_start.c | 14 +- sched/sched/CMakeLists.txt | 5 +- sched/sched/Make.defs | 5 +- sched/sched/sched.h | 72 +++++++++-- sched/sched/sched_addreadytorun.c | 235 +++++++++++++++++----------------- sched/sched/sched_mergepending.c | 133 ------------------- sched/sched/sched_mergeprioritized.c | 162 ----------------------- sched/sched/sched_process_delivered.c | 86 ++++--------- sched/sched/sched_removereadytorun.c | 122 ++---------------- sched/sched/sched_roundrobin.c | 11 +- sched/sched/sched_setpriority.c | 160 +++-------------------- sched/sched/sched_suspend.c | 13 +- sched/sched/sched_unlock.c | 18 ++- 13 files changed, 269 insertions(+), 767 deletions(-) diff --git a/sched/init/nx_start.c b/sched/init/nx_start.c index 5602caf9b0d..503580e3081 100644 --- a/sched/init/nx_start.c +++ b/sched/init/nx_start.c @@ -126,7 +126,7 @@ dq_queue_t g_readytorun; #ifdef CONFIG_SMP dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS]; -FAR struct tcb_s *g_delivertasks[CONFIG_SMP_NCPUS]; +enum task_deliver_e g_delivertasks[CONFIG_SMP_NCPUS]; #endif /* g_running_tasks[] holds a reference to the running task for each CPU. @@ -141,7 +141,9 @@ FAR struct tcb_s *g_running_tasks[CONFIG_SMP_NCPUS]; * currently active task has disabled pre-emption.
*/ +#ifndef CONFIG_SMP dq_queue_t g_pendingtasks; +#endif /* This is the list of all tasks that are blocked waiting for a signal */ @@ -242,11 +244,6 @@ static void tasklist_initialize(void) tlist[TSTATE_TASK_INVALID].list = NULL; tlist[TSTATE_TASK_INVALID].attr = 0; - /* TSTATE_TASK_PENDING */ - - tlist[TSTATE_TASK_PENDING].list = list_pendingtasks(); - tlist[TSTATE_TASK_PENDING].attr = TLIST_ATTR_PRIORITIZED; - #ifdef CONFIG_SMP /* TSTATE_TASK_READYTORUN */ @@ -269,6 +266,11 @@ static void tasklist_initialize(void) TLIST_ATTR_RUNNABLE; #else + /* TSTATE_TASK_PENDING */ + + tlist[TSTATE_TASK_PENDING].list = list_pendingtasks(); + tlist[TSTATE_TASK_PENDING].attr = TLIST_ATTR_PRIORITIZED; + /* TSTATE_TASK_READYTORUN */ tlist[TSTATE_TASK_READYTORUN].list = list_readytorun(); diff --git a/sched/sched/CMakeLists.txt b/sched/sched/CMakeLists.txt index 064ff0d523c..dc9d061be8e 100644 --- a/sched/sched/CMakeLists.txt +++ b/sched/sched/CMakeLists.txt @@ -24,8 +24,6 @@ set(SRCS sched_profil.c sched_addreadytorun.c sched_removereadytorun.c - sched_mergeprioritized.c - sched_mergepending.c sched_addblocked.c sched_removeblocked.c sched_gettcb.c @@ -48,7 +46,6 @@ set(SRCS sched_get_stackinfo.c sched_get_tls.c sched_sysinfo.c - sched_reprioritizertr.c sched_get_stateinfo.c) if(CONFIG_PRIORITY_INHERITANCE) @@ -58,6 +55,8 @@ endif() if(CONFIG_SMP) list(APPEND SRCS sched_getaffinity.c sched_setaffinity.c sched_process_delivered.c) +else() + list(APPEND SRCS sched_reprioritizertr.c sched_mergepending.c) endif() if(CONFIG_SIG_SIGSTOP_ACTION) diff --git a/sched/sched/Make.defs b/sched/sched/Make.defs index 7a1965a964d..3ee5b68033d 100644 --- a/sched/sched/Make.defs +++ b/sched/sched/Make.defs @@ -22,7 +22,6 @@ CSRCS += sched_getfiles.c sched_profil.c CSRCS += sched_addreadytorun.c sched_removereadytorun.c -CSRCS += sched_mergeprioritized.c sched_mergepending.c CSRCS += sched_addblocked.c sched_removeblocked.c CSRCS += sched_gettcb.c sched_verifytcb.c sched_releasetcb.c CSRCS += sched_setparam.c sched_setpriority.c sched_getparam.c @@ -30,7 +29,7 @@ CSRCS += sched_setscheduler.c sched_getscheduler.c CSRCS += sched_yield.c sched_rrgetinterval.c sched_foreach.c CSRCS += sched_lock.c sched_unlock.c sched_lockcount.c CSRCS += sched_idletask.c sched_self.c sched_get_stackinfo.c sched_get_tls.c -CSRCS += sched_sysinfo.c sched_reprioritizertr.c sched_get_stateinfo.c sched_getcpu.c +CSRCS += sched_sysinfo.c sched_get_stateinfo.c sched_getcpu.c ifeq ($(CONFIG_PRIORITY_INHERITANCE),y) CSRCS += sched_reprioritize.c @@ -39,6 +38,8 @@ endif ifeq ($(CONFIG_SMP),y) CSRCS += sched_process_delivered.c CSRCS += sched_getaffinity.c sched_setaffinity.c +else +CSRCS += sched_reprioritizertr.c sched_mergepending.c endif ifeq ($(CONFIG_SIG_SIGSTOP_ACTION),y) diff --git a/sched/sched/sched.h b/sched/sched/sched.h index 52156fa99bf..992edeedf77 100644 --- a/sched/sched/sched.h +++ b/sched/sched/sched.h @@ -53,7 +53,9 @@ */ #define list_readytorun() (&g_readytorun) +#ifndef CONFIG_SMP #define list_pendingtasks() (&g_pendingtasks) +#endif #define list_waitingforsignal() (&g_waitingforsignal) #define list_waitingforfill() (&g_waitingforfill) #define list_stoppedtasks() (&g_stoppedtasks) @@ -132,6 +134,17 @@ struct tasklist_s uint8_t attr; /* List attribute flags */ }; +/* This enumeration defines the SMP scheduling task switch rules */ + +enum task_deliver_e +{ + SWITCH_NONE = 0, /* No schedule switch pending */ + SWITCH_HIGHER = 1, /* Higher priority task needs to be scheduled in */ + SWITCH_EQUAL /* Higher or equal priority task needs to be
scheduled + * in + */ +}; + /**************************************************************************** * Public Data ****************************************************************************/ @@ -187,15 +200,13 @@ extern dq_queue_t g_readytorun; */ extern dq_queue_t g_assignedtasks[CONFIG_SMP_NCPUS]; -#endif -/* g_delivertasks is used to record the tcb that needs to be passed to - * another cpu for scheduling. When it is null, it means that there - * is no tcb that needs to be processed. When it is not null, - * it indicates that there is a tcb that needs to be processed. +/* g_delivertasks is used to indicate that a task switch has been scheduled + * for a cpu and is waiting to be processed by that cpu. */ -extern FAR struct tcb_s *g_delivertasks[CONFIG_SMP_NCPUS]; +extern enum task_deliver_e g_delivertasks[CONFIG_SMP_NCPUS]; +#endif /* This is the list of all tasks that are ready-to-run, but cannot be placed * in the g_readytorun list because: (1) They are higher priority than the @@ -203,7 +214,9 @@ extern FAR struct tcb_s *g_delivertasks[CONFIG_SMP_NCPUS]; * currently active task has disabled pre-emption. */ +#ifndef CONFIG_SMP extern dq_queue_t g_pendingtasks; +#endif /* This is the list of all tasks that are blocked waiting for a signal */ @@ -310,13 +323,13 @@ int nxthread_create(FAR const char *name, uint8_t ttype, int priority, bool nxsched_add_readytorun(FAR struct tcb_s *rtrtcb); bool nxsched_remove_readytorun(FAR struct tcb_s *rtrtcb); void nxsched_remove_self(FAR struct tcb_s *rtrtcb); -void nxsched_merge_prioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2, - uint8_t task_state); -bool nxsched_merge_pending(void); void nxsched_add_blocked(FAR struct tcb_s *btcb, tstate_t task_state); void nxsched_remove_blocked(FAR struct tcb_s *btcb); int nxsched_set_priority(FAR struct tcb_s *tcb, int sched_priority); +#ifndef CONFIG_SMP +bool nxsched_merge_pending(void); bool nxsched_reprioritize_rtr(FAR struct tcb_s *tcb, int priority); +#endif /* Priority inheritance support */ @@ -388,6 +401,7 @@ static inline_function FAR struct tcb_s *this_task(void) #endif #ifdef CONFIG_SMP +bool nxsched_switch_running(int cpu, bool switch_equal); void nxsched_process_delivered(int cpu); #else # define nxsched_select_cpu(a) (0) @@ -518,6 +532,37 @@ static inline_function bool nxsched_add_prioritized(FAR struct tcb_s *tcb, } # ifdef CONFIG_SMP + +/* Try to switch the head of the ready-to-run list to be the active task + * on "target_cpu". "cpu" is "this_cpu()", and passed only for optimization. + */ + +static inline_function bool +nxsched_deliver_task(int cpu, int target_cpu, + enum task_deliver_e priority) +{ + bool ret = false; + + /* If there is already a schedule interrupt pending, there is + * no need to do anything now.
+ */ + + if (g_delivertasks[target_cpu] != priority) + { + if (cpu == target_cpu) + { + ret = nxsched_switch_running(cpu, priority == SWITCH_EQUAL); + } + else + { + g_delivertasks[target_cpu] = priority; + up_send_smp_sched(target_cpu); + } + } + + return ret; +} + static inline_function int nxsched_select_cpu(cpu_set_t affinity) { uint8_t minprio; @@ -525,7 +570,7 @@ static inline_function int nxsched_select_cpu(cpu_set_t affinity) int i; minprio = SCHED_PRIORITY_MAX; - cpu = 0xff; + cpu = CONFIG_SMP_NCPUS; for (i = 0; i < CONFIG_SMP_NCPUS; i++) { @@ -533,8 +578,7 @@ static inline_function int nxsched_select_cpu(cpu_set_t affinity) if ((affinity & (1 << i)) != 0) { - FAR struct tcb_s *rtcb = (FAR struct tcb_s *) - g_assignedtasks[i].head; + FAR struct tcb_s *rtcb = current_task(i); /* If this CPU is executing its IDLE task, then use it. The * IDLE task is always the last task in the assigned task list. @@ -549,7 +593,8 @@ static inline_function int nxsched_select_cpu(cpu_set_t affinity) DEBUGASSERT(rtcb->sched_priority == 0); return i; } - else if (rtcb->sched_priority <= minprio) + else if (rtcb->sched_priority <= minprio && + !nxsched_islocked_tcb(rtcb)) { DEBUGASSERT(rtcb->sched_priority > 0); minprio = rtcb->sched_priority; @@ -558,7 +603,6 @@ static inline_function int nxsched_select_cpu(cpu_set_t affinity) } } - DEBUGASSERT(cpu != 0xff); return cpu; } # endif diff --git a/sched/sched/sched_addreadytorun.c b/sched/sched/sched_addreadytorun.c index ffcfcc9ae0c..b5d374f0b46 100644 --- a/sched/sched/sched_addreadytorun.c +++ b/sched/sched/sched_addreadytorun.c @@ -114,170 +114,169 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb) return ret; } -#endif /* !CONFIG_SMP */ + +#else /* !CONFIG_SMP */ /**************************************************************************** - * Name: nxsched_add_readytorun + * Name: nxsched_switch_running * * Description: - * This function adds a TCB to one of the ready to run lists. That might - * be: - * - * 1. The g_readytorun list if the task is ready-to-run but not running - * and not assigned to a CPU. - * 2. The g_assignedtask[cpu] list if the task is running or if has been - * assigned to a CPU. - * - * If the currently active task has preemption disabled and the new TCB - * would cause this task to be preempted, the new task is added to the - * g_pendingtasks list instead. The pending tasks will be made - * ready-to-run when preemption isunlocked. + * This function switches the head of the current CPU's assigned tasks + * list to the highest priority eligible task from the ready-to-run list. + * The idle task is never switched out; a task which is switched out is + * moved back to the ready-to-run list. * * Input Parameters: - * btcb - Points to the blocked TCB that is ready-to-run + * cpu - Always this_cpu(). Given as argument only for + * optimization + * switch_equal - When true, switch away a task of equal priority compared + * to the pending one * * Returned Value: - * true if the currently active task (the head of the ready-to-run list) - * has changed. + * true if the currently active task is switched * * Assumptions: - * - The caller has established a critical section before calling this - * function (calling sched_lock() first is NOT a good idea -- use - * enter_critical_section()). + * - The caller has established a critical section * - The caller has already removed the input rtcb from whatever list it * was in. * - The caller handles the condition that occurs if the head of the - * ready-to-run list has changed.
+ * assigned tasks list has changed. * ****************************************************************************/ -#ifdef CONFIG_SMP -bool nxsched_add_readytorun(FAR struct tcb_s *btcb) +bool nxsched_switch_running(int cpu, bool switch_equal) { - FAR struct tcb_s *rtcb; - FAR struct tcb_s *headtcb; - FAR dq_queue_t *tasklist; - bool doswitch; - int task_state; - int cpu; - int me; - - cpu = nxsched_select_cpu(btcb->affinity); - - /* Get the task currently running on the CPU (may be the IDLE task) */ + FAR struct tcb_s *rtcb = current_task(cpu); + int sched_priority = rtcb->sched_priority; + FAR struct tcb_s *btcb; + bool ret = false; - rtcb = current_task(cpu); + DEBUGASSERT(cpu == this_cpu()); - /* Determine the desired new task state. First, if the new task priority - * is higher then the priority of the lowest priority, running task, then - * the new task will be running and a context switch switch will be - * required. - */ - - if (rtcb->sched_priority < btcb->sched_priority) + if (nxsched_islocked_tcb(rtcb)) { - task_state = TSTATE_TASK_RUNNING; + return false; } - else + + if (switch_equal) { - task_state = TSTATE_TASK_READYTORUN; + sched_priority--; } - /* If the selected state is TSTATE_TASK_RUNNING, then we would like to - * start running the task. Be we cannot do that if pre-emption is - * disabled. If the selected state is TSTATE_TASK_READYTORUN, then it - * should also go to the pending task list so that it will have a chance - * to be restarted when the scheduler is unlocked. - * - * There is an interaction here with IRQ locking. Even if the pre- - * emption is enabled, tasks will be forced to pend if the IRQ lock - * is also set UNLESS the CPU starting the thread is also the holder of - * the IRQ lock. irq_cpu_locked() performs an atomic check for that - * situation. + /* If there is a task in the ready-to-run list which is eligible to run + * on this CPU, and has higher priority than the current task, + * switch the current task to that one. */ - if (nxsched_islocked_tcb(this_task())) + for (btcb = (FAR struct tcb_s *)dq_peek(list_readytorun()); + btcb && btcb->sched_priority > sched_priority; + btcb = btcb->flink) { - /* Add the new ready-to-run task to the g_pendingtasks task list for - * now. + /* Check if the task found in ready-to-run list is allowed to run on + * this CPU. TCB_FLAG_CPU_LOCKED may be used to override affinity. If + * the flag is set, assume that btcb->cpu is valid, and it is the only + * CPU on which the btcb can run. */ - nxsched_add_prioritized(btcb, list_pendingtasks()); - btcb->task_state = TSTATE_TASK_PENDING; - doswitch = false; - } - else if (task_state == TSTATE_TASK_READYTORUN) - { - /* The new btcb was added either (1) in the middle of the assigned - * task list (the btcb->cpu field is already valid) or (2) was - * added to the ready-to-run list (the btcb->cpu field does not - * matter). Either way, it won't be running. - * - * Add the task to the ready-to-run (but not running) task list - */ + if (CPU_ISSET(cpu, &btcb->affinity) && + ((btcb->flags & TCB_FLAG_CPU_LOCKED) == 0 || btcb->cpu == cpu)) + { + FAR dq_queue_t *tasklist = list_assignedtasks(cpu); - nxsched_add_prioritized(btcb, list_readytorun()); + /* Found a task, remove it from ready-to-run list */ - btcb->task_state = TSTATE_TASK_READYTORUN; - doswitch = false; - } - else /* (task_state == TSTATE_TASK_RUNNING) */ - { - /* If we are modifying some assigned task list other than our own, we - * will need to switch that CPU.
- */ + dq_rem((FAR struct dq_entry_s *)btcb, list_readytorun()); - me = this_cpu(); - if (cpu != me) - { - if (g_delivertasks[cpu] == NULL) + /* Remove the current task from the assigned tasks list and put + * it back to the ready-to-run list. But leave the idle task. + */ + + if (!is_idle_task(rtcb)) { - g_delivertasks[cpu] = btcb; - btcb->cpu = cpu; - btcb->task_state = TSTATE_TASK_ASSIGNED; - up_send_smp_sched(cpu); + dq_remfirst(tasklist); + rtcb->task_state = TSTATE_TASK_READYTORUN; + nxsched_add_prioritized(rtcb, list_readytorun()); + + /* We should now have only the idle task assigned */ + + DEBUGASSERT( + is_idle_task((FAR struct tcb_s *)dq_peek(tasklist))); } else { - rtcb = g_delivertasks[cpu]; - if (rtcb->sched_priority < btcb->sched_priority) - { - g_delivertasks[cpu] = btcb; - btcb->cpu = cpu; - btcb->task_state = TSTATE_TASK_ASSIGNED; - nxsched_add_prioritized(rtcb, &g_readytorun); - rtcb->task_state = TSTATE_TASK_READYTORUN; - } - else - { - nxsched_add_prioritized(btcb, &g_readytorun); - btcb->task_state = TSTATE_TASK_READYTORUN; - } + rtcb->task_state = TSTATE_TASK_ASSIGNED; } - return false; + dq_addfirst((FAR dq_entry_t *)btcb, tasklist); + up_update_task(btcb); + + btcb->cpu = cpu; + btcb->task_state = TSTATE_TASK_RUNNING; + ret = true; + break; } + } - tasklist = &g_assignedtasks[cpu]; + return ret; +} - /* Change "head" from TSTATE_TASK_RUNNING to TSTATE_TASK_ASSIGNED */ +/**************************************************************************** + * Name: nxsched_add_readytorun + * + * Description: + * This function adds a TCB to one of the ready to run lists. The list + * will be: + * + * 1. The g_readytorun list if the task is ready-to-run but not running + * and not assigned to a CPU. + * 2. The g_assignedtasks[cpu] list if the task is running or if it has + * been assigned to a CPU. + * + * If the currently active task has preemption disabled and the new TCB + * would cause this task to be preempted, the new task is kept in the + * ready-to-run list. It will be scheduled in when preemption is + * unlocked. + * + * Input Parameters: + * btcb - Points to the blocked TCB that is ready-to-run + * + * Returned Value: + * true if the currently active task (the head of the ready-to-run list) + * has changed. + * + * Assumptions: + * - The caller has established a critical section before calling this + * function (calling sched_lock() first is NOT a good idea -- use + * enter_critical_section()). + * - The caller has already removed the input rtcb from whatever list it + * was in. + * - The caller handles the condition that occurs if the head of the + * ready-to-run list has changed. + * + ****************************************************************************/ - headtcb = (FAR struct tcb_s *)tasklist->head; - DEBUGASSERT(headtcb->task_state == TSTATE_TASK_RUNNING); - headtcb->task_state = TSTATE_TASK_ASSIGNED; +bool nxsched_add_readytorun(FAR struct tcb_s *btcb) +{ + bool doswitch = false; + int target_cpu = btcb->flags & TCB_FLAG_CPU_LOCKED ?
btcb->cpu : + nxsched_select_cpu(btcb->affinity); - /* Add btcb to the head of the g_assignedtasks - * task list and mark it as running - */ + /* Add the btcb to the ready to run list, and try to run it on the target + * CPU + */ - dq_addfirst_nonempty((FAR dq_entry_t *)btcb, tasklist); - up_update_task(btcb); + btcb->task_state = TSTATE_TASK_READYTORUN; + nxsched_add_prioritized(btcb, list_readytorun()); - DEBUGASSERT(task_state == TSTATE_TASK_RUNNING); - btcb->cpu = cpu; - btcb->task_state = TSTATE_TASK_RUNNING; + if (target_cpu < CONFIG_SMP_NCPUS) + { + FAR struct tcb_s *tcb = current_task(target_cpu); - doswitch = true; + if (tcb->sched_priority < btcb->sched_priority) + { + doswitch = nxsched_deliver_task(this_cpu(), target_cpu, + SWITCH_HIGHER); + } } return doswitch; diff --git a/sched/sched/sched_mergepending.c b/sched/sched/sched_mergepending.c index a3064400e3d..d48e1ebc2f7 100644 --- a/sched/sched/sched_mergepending.c +++ b/sched/sched/sched_mergepending.c @@ -32,19 +32,9 @@ #include <nuttx/queue.h> -#ifdef CONFIG_SMP -# include <nuttx/spinlock.h> -#endif - #include "irq/irq.h" #include "sched/sched.h" -/**************************************************************************** - * Pre-processor Definitions - ****************************************************************************/ - -#define ALL_CPUS ((cpu_set_t)-1) - /**************************************************************************** * Public Functions ****************************************************************************/ @@ -71,7 +61,6 @@ * ****************************************************************************/ -#ifndef CONFIG_SMP bool nxsched_merge_pending(void) { FAR struct tcb_s *ptcb; @@ -161,125 +150,3 @@ bool nxsched_merge_pending(void) return ret; } -#endif /* !CONFIG_SMP */ - -/**************************************************************************** - * Name: nxsched_merge_pending - * - * Description: - * This function merges the prioritized g_pendingtasks list into the - * prioritized ready-to-run task list. - * - * Input Parameters: - * None - * - * Returned Value: - * true if the head of the ready-to-run task list has changed indicating - * a context switch is needed. - * - * Assumptions: - * - The caller has established a critical section before calling this - * function. - * - The caller handles the condition that occurs if the head of the - * ready-to-run task list is changed. - * - ****************************************************************************/ - -#ifdef CONFIG_SMP -bool nxsched_merge_pending(void) -{ - FAR struct tcb_s *rtcb; - FAR struct tcb_s *ptcb; - FAR struct tcb_s *tcb; - bool ret = false; - int cpu; - - /* Remove and process every TCB in the g_pendingtasks list. - * - * Do nothing if (1) pre-emption is still disabled (by any CPU), or (2) if - * some CPU other than this one is in a critical section. - */ - - if (!nxsched_islocked_tcb(this_task())) - { - /* Find the CPU that is executing the lowest priority task */ - - ptcb = (FAR struct tcb_s *)dq_peek(list_pendingtasks()); - if (ptcb == NULL) - { - /* The pending task list is empty */ - - return false; - } - - cpu = nxsched_select_cpu(ALL_CPUS); /* REVISIT: Maybe ptcb->affinity */ - rtcb = current_task(cpu); - - /* Loop while there is a higher priority task in the pending task list - * than in the lowest executing task. - * - * Normally, this loop should execute no more than CONFIG_SMP_NCPUS - * times. That number could be larger, however, if the CPU affinity - * sets do not include all CPUs. 
In that case, the excess TCBs will - * end up in the g_readytorun list. - */ - - while (ptcb->sched_priority > rtcb->sched_priority) - { - /* Remove the task from the pending task list */ - - tcb = (FAR struct tcb_s *)dq_remfirst(list_pendingtasks()); - - /* Add the pending task to the correct ready-to-run list. */ - - ret |= nxsched_add_readytorun(tcb); - - /* This operation could cause the scheduler to become locked. - * Check if that happened. - */ - - if (nxsched_islocked_tcb(this_task())) - { - /* Yes.. then we may have incorrectly placed some TCBs in the - * g_readytorun list (unlikely, but possible). We will have to - * move them back to the pending task list. - */ - - nxsched_merge_prioritized(list_readytorun(), - list_pendingtasks(), - TSTATE_TASK_PENDING); - - /* And return with the scheduler locked and tasks in the - * pending task list. - */ - - goto errout; - } - - /* Set up for the next time through the loop */ - - ptcb = (FAR struct tcb_s *)dq_peek(list_pendingtasks()); - if (ptcb == NULL) - { - /* The pending task list is empty */ - - goto errout; - } - - cpu = nxsched_select_cpu(ALL_CPUS); /* REVISIT: Maybe ptcb->affinity */ - rtcb = current_task(cpu); - } - - /* No more pending tasks can be made running. Move any remaining - * tasks in the pending task list to the ready-to-run task list. - */ - - nxsched_merge_prioritized(list_pendingtasks(), - list_readytorun(), - TSTATE_TASK_READYTORUN); - } - -errout: - return ret; -} -#endif /* CONFIG_SMP */ diff --git a/sched/sched/sched_mergeprioritized.c b/sched/sched/sched_mergeprioritized.c deleted file mode 100644 index e664f358761..00000000000 --- a/sched/sched/sched_mergeprioritized.c +++ /dev/null @@ -1,162 +0,0 @@ -/**************************************************************************** - * sched/sched/sched_mergeprioritized.c - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. The - * ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - * - ****************************************************************************/ - -/**************************************************************************** - * Included Files - ****************************************************************************/ - -#include <nuttx/config.h> - -#include <stdint.h> -#include <stdbool.h> -#include <assert.h> - -#include <nuttx/queue.h> - -#include "sched/sched.h" - -/**************************************************************************** - * Public Functions - ****************************************************************************/ - -/**************************************************************************** - * Name: nxsched_merge_prioritized - * - * Description: - * This function merges the content of the prioritized task list 'list1' - * into the prioritized task list, 'list2'. 
On return 'list2' will contain - * the prioritized content of both lists; 'list1' will be empty. - * - * Input Parameters: - * list1 - Points to the prioritized list to merge into list 1. This list - * will be empty upon return. - * list2 - That list that will contained the prioritized content of - * both lists upon return. - * task_state - The task state/list index associated with list2. - * - * Returned Value: - * None - * - * Assumptions: - * - The caller has established a critical section before calling this - * function (calling sched_lock() first is NOT a good idea -- use - * enter_critical_section()). - * - ****************************************************************************/ - -void nxsched_merge_prioritized(FAR dq_queue_t *list1, FAR dq_queue_t *list2, - uint8_t task_state) -{ - dq_queue_t clone; - FAR struct tcb_s *tcb1; - FAR struct tcb_s *tcb2; - FAR struct tcb_s *tmp; - - DEBUGASSERT(list1 != NULL && list2 != NULL); - - /* Get a private copy of list1, clearing list1. We do this early so that - * we can be assured that the list is stationary before we start any - * operations on it. - */ - - dq_move(list1, &clone); - - /* Get the TCB at the head of list1 */ - - tcb1 = (FAR struct tcb_s *)dq_peek(&clone); - if (tcb1 == NULL) - { - /* Special case.. list1 is empty. There is nothing to be done. */ - - return; - } - - /* Now the TCBs are no longer accessible and we can change the state on - * each TCB. We go through extra precaution to assure that a TCB is never - * in a list with the wrong state. - */ - - for (tmp = tcb1; - tmp != NULL; - tmp = (FAR struct tcb_s *)dq_next((FAR dq_entry_t *)tmp)) - { - tmp->task_state = task_state; - } - - /* Get the head of list2 */ - - tcb2 = (FAR struct tcb_s *)dq_peek(list2); - if (tcb2 == NULL) - { - /* Special case.. list2 is empty. Move list1 to list2. */ - - dq_move(&clone, list2); - return; - } - - /* Now loop until all entries from list1 have been merged into list2. tcb1 - * points at the TCB at the head of list1; tcb2 points to the TCB at the - * current working position in list2 - */ - - do - { - /* Are we at the end of list2 with TCBs remaining to be merged in - * list1? - */ - - if (tcb2 == NULL) - { - /* Yes.. Just append the remainder of list1 to the end of list2. */ - - dq_cat(&clone, list2); - break; - } - - /* Which TCB has higher priority? */ - - else if (tcb1->sched_priority > tcb2->sched_priority) - { - /* The TCB from list1 has higher priority than the TCB from list2. - * Remove the TCB from list1 and insert it before the TCB from - * list2. - */ - - tmp = (FAR struct tcb_s *)dq_remfirst(&clone); - DEBUGASSERT(tmp == tcb1); - - dq_addbefore((FAR dq_entry_t *)tcb2, (FAR dq_entry_t *)tmp, - list2); - - tcb1 = (FAR struct tcb_s *)dq_peek(&clone); - } - else - { - /* The TCB from list2 has higher (or same) priority as the TCB - * from list2. Skip to the next, lower priority TCB in list2. 
- */ - tcb2 = (FAR struct tcb_s *)dq_next((FAR dq_entry_t *)tcb2); - } - } - while (tcb1 != NULL); -} diff --git a/sched/sched/sched_process_delivered.c b/sched/sched/sched_process_delivered.c index 1e40d27adc9..c7b242b550b 100644 --- a/sched/sched/sched_process_delivered.c +++ b/sched/sched/sched_process_delivered.c @@ -62,11 +62,7 @@ void nxsched_process_delivered(int cpu) { - FAR dq_queue_t *tasklist; - FAR struct tcb_s *next; - FAR struct tcb_s *prev; - struct tcb_s *btcb = NULL; - struct tcb_s *tcb; + enum task_deliver_e priority; DEBUGASSERT(g_cpu_nestcount[cpu] == 0); DEBUGASSERT(up_interrupt_context()); @@ -74,71 +70,39 @@ void nxsched_process_delivered(int cpu) if ((g_cpu_irqset & (1 << cpu)) == 0) { spin_lock_notrace(&g_cpu_irqlock); - g_cpu_irqset |= (1 << cpu); } - tcb = current_task(cpu); - - if (g_delivertasks[cpu] == NULL) - { - if (tcb->irqcount <= 0) - { - cpu_irqlock_clear(); - } - - return; - } + priority = g_delivertasks[cpu]; + g_delivertasks[cpu] = SWITCH_NONE; - if (nxsched_islocked_tcb(tcb)) + if (priority != SWITCH_NONE && + !nxsched_switch_running(cpu, priority == SWITCH_EQUAL)) { - btcb = g_delivertasks[cpu]; - g_delivertasks[cpu] = NULL; - nxsched_add_prioritized(btcb, &g_pendingtasks); - btcb->task_state = TSTATE_TASK_PENDING; - if (tcb->irqcount <= 0) + /* Manage the (rare) case that task delivery to this CPU was not + * successful. This can happen in two cases: + * 1) The currently running task on this CPU just entered sched_lock + * 2) This CPU just picked a higher priority task to execute + * before this SMP call was executed + * To avoid schedule latency/priority inversion, just check once more + * if there is another CPU eligible to run the delivered task, and + * pass it forward. + */ + + FAR struct tcb_s *tcb = (FAR struct tcb_s *)dq_peek(list_readytorun()); + if (tcb) { - cpu_irqlock_clear(); + int target_cpu = tcb->flags & TCB_FLAG_CPU_LOCKED ?
+ tcb->cpu : nxsched_select_cpu(tcb->affinity); + if (target_cpu < CONFIG_SMP_NCPUS && target_cpu != cpu && + current_task(target_cpu)->sched_priority < tcb->sched_priority) + { + nxsched_deliver_task(cpu, target_cpu, priority); + } } - - return; - } - - btcb = g_delivertasks[cpu]; - - for (next = tcb; btcb->sched_priority <= next->sched_priority; - next = next->flink); - - DEBUGASSERT(next); - - prev = next->blink; - if (prev == NULL) - { - /* Special case: Insert at the head of the list */ - - tasklist = &g_assignedtasks[cpu]; - dq_addfirst_nonempty((FAR dq_entry_t *)btcb, tasklist); - btcb->cpu = cpu; - btcb->task_state = TSTATE_TASK_RUNNING; - up_update_task(btcb); - - DEBUGASSERT(btcb->flink != NULL); - DEBUGASSERT(next == btcb->flink); - next->task_state = TSTATE_TASK_ASSIGNED; } - else - { - /* Insert in the middle of the list */ - - dq_insert_mid(prev, btcb, next); - btcb->cpu = cpu; - btcb->task_state = TSTATE_TASK_ASSIGNED; - } - - g_delivertasks[cpu] = NULL; - tcb = current_task(cpu); - if (tcb->irqcount <= 0) + if (current_task(cpu)->irqcount <= 0) { cpu_irqlock_clear(); } diff --git a/sched/sched/sched_removereadytorun.c b/sched/sched/sched_removereadytorun.c index be7ebac2e29..513d8f13cfc 100644 --- a/sched/sched/sched_removereadytorun.c +++ b/sched/sched/sched_removereadytorun.c @@ -135,18 +135,19 @@ void nxsched_remove_self(FAR struct tcb_s *tcb) ****************************************************************************/ #ifdef CONFIG_SMP -void nxsched_remove_running(FAR struct tcb_s *tcb) +static void nxsched_remove_running(FAR struct tcb_s *tcb) { FAR dq_queue_t *tasklist; FAR struct tcb_s *nxttcb; - FAR struct tcb_s *rtrtcb = NULL; int cpu; /* Which CPU (if any) is the task running on? Which task list holds the * TCB? */ - DEBUGASSERT(tcb->task_state == TSTATE_TASK_RUNNING); + DEBUGASSERT(tcb->cpu == this_cpu() && + tcb->task_state == TSTATE_TASK_RUNNING); + cpu = tcb->cpu; tasklist = &g_assignedtasks[cpu]; @@ -174,145 +175,44 @@ void nxsched_remove_running(FAR struct tcb_s *tcb) */ nxttcb = tcb->flink; - DEBUGASSERT(nxttcb != NULL); + DEBUGASSERT(nxttcb != NULL && is_idle_task(nxttcb)); /* The task is running but the CPU that it was running on has been * paused. We can now safely remove its TCB from the running - * task list. In the SMP case this may be either the g_readytorun() - * or the g_assignedtasks[cpu] list. - */ - - dq_rem_head((FAR dq_entry_t *)tcb, tasklist); - - /* Find the highest priority non-running tasks in the g_assignedtasks - * list of other CPUs, and also non-idle tasks, place them in the - * g_readytorun list. so as to find the task with the highest priority, - * globally - */ - - for (int i = 0; i < CONFIG_SMP_NCPUS; i++) - { - if (i == cpu) - { - /* The highest priority task of the current - * CPU has been found, which is nxttcb. - */ - - continue; - } - - for (rtrtcb = (FAR struct tcb_s *)g_assignedtasks[i].head; - !is_idle_task(rtrtcb); rtrtcb = rtrtcb->flink) - { - if (rtrtcb->task_state != TSTATE_TASK_RUNNING && - CPU_ISSET(cpu, &rtrtcb->affinity)) - { - /* We have found the task with the highest priority whose - * CPU index is i. Since this task must be between the two - * tasks, we can use the dq_rem_mid macro to delete it. - */ - - dq_rem_mid(rtrtcb); - rtrtcb->task_state = TSTATE_TASK_READYTORUN; - - /* Add rtrtcb to g_readytorun to find - * the task with the highest global priority - */ - - nxsched_add_prioritized(rtrtcb, &g_readytorun); - break; - } - } - } - - /* Which task will go at the head of the list? 
It will be either the - * next tcb in the assigned task list (nxttcb) or a TCB in the - * g_readytorun list. We can only select a task from that list if - * the affinity mask includes the current CPU. - */ - - /* Search for the highest priority task that can run on this - * CPU. - */ - - for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head; - rtrtcb != NULL && !CPU_ISSET(cpu, &rtrtcb->affinity); - rtrtcb = rtrtcb->flink); - - /* Did we find a task in the g_readytorun list? Which task should - * we use? We decide strictly by the priority of the two tasks: - * Either (1) the task currently at the head of the - * g_assignedtasks[cpu] list (nexttcb) or (2) the highest priority - * task from the g_readytorun list with matching affinity (rtrtcb). + * task list. */ - if (rtrtcb != NULL && rtrtcb->sched_priority >= nxttcb->sched_priority) - { - /* The TCB rtrtcb has the higher priority and it can be run on - * target CPU. Remove that task (rtrtcb) from the g_readytorun - * list and add to the head of the g_assignedtasks[cpu] list. - */ - - dq_rem((FAR dq_entry_t *)rtrtcb, &g_readytorun); - dq_addfirst_nonempty((FAR dq_entry_t *)rtrtcb, tasklist); - - rtrtcb->cpu = cpu; - nxttcb = rtrtcb; - } - - /* NOTE: If the task runs on another CPU(cpu), adjusting global IRQ - * controls will be done in the pause handler on the new CPU(cpu). - * If the task is scheduled on this CPU(me), do nothing because - * this CPU already has a critical section - */ - - nxttcb->task_state = TSTATE_TASK_RUNNING; + dq_remfirst(tasklist); /* Since the TCB is no longer in any list, it is now invalid */ tcb->task_state = TSTATE_TASK_INVALID; + /* Activate the idle task */ + + nxttcb->task_state = TSTATE_TASK_RUNNING; up_update_task(nxttcb); } void nxsched_remove_self(FAR struct tcb_s *tcb) { nxsched_remove_running(tcb); - if (g_pendingtasks.head) - { - nxsched_merge_pending(); - } + nxsched_switch_running(tcb->cpu, false); } bool nxsched_remove_readytorun(FAR struct tcb_s *tcb) { if (tcb->task_state == TSTATE_TASK_RUNNING) { - DEBUGASSERT(tcb->cpu == this_cpu()); nxsched_remove_running(tcb); return true; } else { FAR dq_queue_t *tasklist; - int i; - - /* if tcb == g_delivertasks[i] we set NULL to g_delivertasks[i] */ - - for (i = 0; i < CONFIG_SMP_NCPUS; i++) - { - if (tcb == g_delivertasks[i]) - { - g_delivertasks[i] = NULL; - tcb->task_state = TSTATE_TASK_INVALID; - return false; - } - } tasklist = TLIST_HEAD(tcb, tcb->cpu); - DEBUGASSERT(tcb->task_state != TSTATE_TASK_RUNNING); - /* The task is not running. Just remove its TCB from the task * list. In the SMP case this may be either the g_readytorun() or the * g_assignedtasks[cpu] list. 
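A condensed, self-contained sketch may help while reading the remaining hunks: nxsched_deliver_task() records a switch request for the target CPU and pokes it with up_send_smp_sched(), and nxsched_process_delivered() then runs in that CPU's scheduler interrupt and lets it pick its own next task. The stand-ins below (NCPUS, send_sched_ipi(), switch_running()) are simplified placeholders invented for illustration; this is not code from the patch.

#include <stdbool.h>

#define NCPUS 4                        /* stand-in for CONFIG_SMP_NCPUS */

enum task_deliver_e
{
  SWITCH_NONE = 0,                     /* no switch request pending */
  SWITCH_HIGHER = 1,                   /* switch only for higher priority */
  SWITCH_EQUAL                         /* switch for equal priority too */
};

static enum task_deliver_e g_deliver[NCPUS];   /* like g_delivertasks[] */

extern void send_sched_ipi(int cpu);           /* like up_send_smp_sched() */
extern bool switch_running(int cpu, bool eq);  /* like nxsched_switch_running() */

/* Like nxsched_deliver_task(): record the request and poke the target
 * CPU, or switch immediately when the target is the calling CPU.
 */

static bool deliver_task(int cpu, int target, enum task_deliver_e prio)
{
  if (g_deliver[target] == prio)
    {
      return false;                    /* same request already pending */
    }

  if (cpu == target)
    {
      return switch_running(cpu, prio == SWITCH_EQUAL);
    }

  g_deliver[target] = prio;
  send_sched_ipi(target);              /* target runs the handler below */
  return false;
}

/* Like nxsched_process_delivered(): runs in the target CPU's scheduler
 * interrupt; the CPU consumes the request and picks its own next task
 * from the ready-to-run list.
 */

static void process_delivered(int cpu)
{
  enum task_deliver_e prio = g_deliver[cpu];

  g_deliver[cpu] = SWITCH_NONE;
  if (prio != SWITCH_NONE)
    {
      switch_running(cpu, prio == SWITCH_EQUAL);
    }
}

The key design point is that the sender never names a task: the flag only says how aggressively the target CPU should re-examine the ready-to-run list, and the target does its own picking.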
diff --git a/sched/sched/sched_roundrobin.c b/sched/sched/sched_roundrobin.c index 0d15f3e1f98..856cbe99982 100644 --- a/sched/sched/sched_roundrobin.c +++ b/sched/sched/sched_roundrobin.c @@ -81,7 +81,7 @@ static int nxsched_roundrobin_handler(FAR void *cookie) } if (tcb->task_state == TSTATE_TASK_RUNNING && tcb->cpu == this_cpu() && - nxsched_reprioritize_rtr(tcb, tcb->sched_priority)) + nxsched_switch_running(tcb->cpu, true)) { up_switch_context(this_task(), tcb); } @@ -191,17 +191,18 @@ uint32_t nxsched_process_roundrobin(FAR struct tcb_s *tcb, uint32_t ticks, */ #ifdef CONFIG_SMP - if (tcb->task_state == TSTATE_TASK_RUNNING && - tcb->cpu != this_cpu()) + DEBUGASSERT(tcb->task_state == TSTATE_TASK_RUNNING); + if (tcb->cpu != this_cpu()) { nxsched_smp_call_init(&g_call_data, nxsched_roundrobin_handler, (FAR void *)(uintptr_t)tcb->pid); nxsched_smp_call_single_async(tcb->cpu, &g_call_data); } - else -#endif + else if (nxsched_switch_running(tcb->cpu, true)) +#else if (nxsched_reprioritize_rtr(tcb, tcb->sched_priority)) +#endif { up_switch_context(this_task(), rtcb); } diff --git a/sched/sched/sched_setpriority.c b/sched/sched/sched_setpriority.c index d05e130543f..09132d8c039 100644 --- a/sched/sched/sched_setpriority.c +++ b/sched/sched/sched_setpriority.c @@ -37,114 +37,10 @@ #include "irq/irq.h" #include "sched/sched.h" -/**************************************************************************** - * Private Types - ****************************************************************************/ - -#ifdef CONFIG_SMP -struct reprioritize_arg_s -{ - pid_t pid; - cpu_set_t saved_affinity; - int sched_priority; - bool need_restore; -}; - /**************************************************************************** * Private Functions ****************************************************************************/ -static int reprioritize_handler(FAR void *cookie) -{ - FAR struct reprioritize_arg_s *arg = cookie; - FAR struct tcb_s *rtcb = this_task(); - FAR struct tcb_s *tcb; - irqstate_t flags; - - flags = enter_critical_section(); - - tcb = nxsched_get_tcb(arg->pid); - - if (!tcb || tcb->task_state == TSTATE_TASK_INVALID || - (tcb->flags & TCB_FLAG_EXIT_PROCESSING) != 0) - { - leave_critical_section(flags); - return OK; - } - - if (arg->need_restore) - { - tcb->affinity = arg->saved_affinity; - tcb->flags &= ~TCB_FLAG_CPU_LOCKED; - } - - if (nxsched_reprioritize_rtr(tcb, arg->sched_priority)) - { - up_switch_context(this_task(), rtcb); - } - - leave_critical_section(flags); - return OK; -} -#endif - -/**************************************************************************** - * Name: nxsched_nexttcb - * - * Description: - * Get the next highest priority ready-to-run task. - * - * Input Parameters: - * tcb - the TCB of task to reprioritize. - * - * Returned Value: - * TCB of the next highest priority ready-to-run task. - * - ****************************************************************************/ - -#ifdef CONFIG_SMP -static FAR struct tcb_s *nxsched_nexttcb(FAR struct tcb_s *tcb) -{ - FAR struct tcb_s *nxttcb = tcb->flink; - FAR struct tcb_s *rtrtcb; - - /* Which task should run next? It will be either the next tcb in the - * assigned task list (nxttcb) or a TCB in the g_readytorun list. We can - * only select a task from that list if the affinity mask includes the - * tcb->cpu. - * - * If pre-emption is locked or another CPU is in a critical section, - * then use the 'nxttcb' which will probably be the IDLE thread. 
- */ - - if (!nxsched_islocked_tcb(this_task())) - { - /* Search for the highest priority task that can run on tcb->cpu. */ - - for (rtrtcb = (FAR struct tcb_s *)list_readytorun()->head; - rtrtcb != NULL && !CPU_ISSET(tcb->cpu, &rtrtcb->affinity); - rtrtcb = rtrtcb->flink); - - /* Return the TCB from the readyt-to-run list if it is the next - * highest priority task. - */ - - if (rtrtcb != NULL && - rtrtcb->sched_priority >= nxttcb->sched_priority) - { - return rtrtcb; - } - } - - /* Otherwise, return the next TCB in the g_assignedtasks[] list... - * probably the TCB of the IDLE thread. - * REVISIT: What if it is not the IDLE thread? - */ - - return nxttcb; -} -#endif - /**************************************************************************** * Name: nxsched_running_setpriority * @@ -175,20 +71,25 @@ static inline void nxsched_running_setpriority(FAR struct tcb_s *tcb, /* Get the TCB of the next highest priority, ready to run task */ #ifdef CONFIG_SMP - nxttcb = nxsched_nexttcb(tcb); + nxttcb = (FAR struct tcb_s *)dq_peek(list_readytorun()); #else nxttcb = tcb->flink; #endif - DEBUGASSERT(nxttcb != NULL); - /* A context switch will occur if the new priority of the running * task becomes less than OR EQUAL TO the next highest priority * ready to run task. */ - if (sched_priority <= nxttcb->sched_priority) + if (nxttcb && sched_priority <= nxttcb->sched_priority) { +#ifdef CONFIG_SMP + tcb->sched_priority = (uint8_t)sched_priority; + if (nxsched_deliver_task(this_cpu(), tcb->cpu, SWITCH_EQUAL)) + { + up_switch_context(this_task(), tcb); + } +#else FAR struct tcb_s *rtcb = this_task(); if (nxsched_islocked_tcb(rtcb)) @@ -206,11 +107,7 @@ static inline void nxsched_running_setpriority(FAR struct tcb_s *tcb, nxsched_add_prioritized(nxttcb, list_pendingtasks()); nxttcb->task_state = TSTATE_TASK_PENDING; -#ifdef CONFIG_SMP - nxttcb = nxsched_nexttcb(tcb); -#else nxttcb = tcb->flink; -#endif } while (sched_priority < nxttcb->sched_priority); @@ -222,38 +119,12 @@ static inline void nxsched_running_setpriority(FAR struct tcb_s *tcb, { /* A context switch will occur. */ -#ifdef CONFIG_SMP - if (tcb->cpu != this_cpu() && - tcb->task_state == TSTATE_TASK_RUNNING) - { - struct reprioritize_arg_s arg; - - if ((tcb->flags & TCB_FLAG_CPU_LOCKED) != 0) - { - arg.pid = tcb->pid; - arg.need_restore = false; - } - else - { - arg.pid = tcb->pid; - arg.saved_affinity = tcb->affinity; - arg.need_restore = true; - - tcb->flags |= TCB_FLAG_CPU_LOCKED; - CPU_ZERO(&tcb->affinity); - CPU_SET(tcb->cpu, &tcb->affinity); - } - - arg.sched_priority = sched_priority; - nxsched_smp_call_single(tcb->cpu, reprioritize_handler, &arg); - } - else -#endif if (nxsched_reprioritize_rtr(tcb, sched_priority)) { up_switch_context(this_task(), rtcb); } } +#endif } /* Otherwise, we can just change priority since it has no effect */ @@ -290,9 +161,13 @@ static void nxsched_readytorun_setpriority(FAR struct tcb_s *tcb, rtcb = this_task(); - /* A context switch will occur. 
*/ - +#ifdef CONFIG_SMP + dq_rem((FAR struct dq_entry_s *)tcb, list_readytorun()); + tcb->sched_priority = sched_priority; + if (nxsched_add_readytorun(tcb)) +#else if (nxsched_reprioritize_rtr(tcb, sched_priority)) +#endif { up_switch_context(this_task(), rtcb); } @@ -414,9 +289,6 @@ int nxsched_set_priority(FAR struct tcb_s *tcb, int sched_priority) */ case TSTATE_TASK_READYTORUN: -#ifdef CONFIG_SMP - case TSTATE_TASK_ASSIGNED: -#endif nxsched_readytorun_setpriority(tcb, sched_priority); break; diff --git a/sched/sched/sched_suspend.c b/sched/sched/sched_suspend.c index 11cf431928f..d17956325d2 100644 --- a/sched/sched/sched_suspend.c +++ b/sched/sched/sched_suspend.c @@ -35,6 +35,7 @@ #include <nuttx/arch.h> #include "sched/sched.h" +#include "sched/queue.h" #ifdef CONFIG_SMP /**************************************************************************** @@ -131,6 +132,9 @@ void nxsched_suspend(FAR struct tcb_s *tcb) } else { +#ifdef CONFIG_SMP + int cpu = this_cpu(); +#endif FAR struct tcb_s *rtcb = this_task(); /* The task was running or runnable before being stopped. Simply @@ -145,7 +149,7 @@ void nxsched_suspend(FAR struct tcb_s *tcb) /* Remove the tcb task from the ready-to-run list. */ #ifdef CONFIG_SMP - if (tcb->task_state == TSTATE_TASK_RUNNING && tcb->cpu != this_cpu()) + if (tcb->task_state == TSTATE_TASK_RUNNING && tcb->cpu != cpu) { struct suspend_arg_s arg; @@ -172,9 +176,14 @@ void nxsched_suspend(FAR struct tcb_s *tcb) { switch_needed = nxsched_remove_readytorun(tcb); - if (list_pendingtasks()->head) + if (switch_needed || !nxsched_islocked_tcb(rtcb)) { +#ifdef CONFIG_SMP + switch_needed |= nxsched_deliver_task(cpu, tcb->cpu, + SWITCH_HIGHER); +#else switch_needed |= nxsched_merge_pending(); +#endif } /* Add the task to the specified blocked task list */ diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c index dabd07cbc80..ef760545de9 100644 --- a/sched/sched/sched_unlock.c +++ b/sched/sched/sched_unlock.c @@ -35,6 +35,7 @@ #include "irq/irq.h" #include "sched/sched.h" +#include "sched/queue.h" /**************************************************************************** * Public Functions @@ -72,6 +73,7 @@ void sched_unlock(void) if (rtcb != NULL && rtcb->lockcount == 1) { irqstate_t flags = enter_critical_section_wo_note(); + FAR struct tcb_s *ptcb; rtcb->lockcount = 0; @@ -81,18 +83,22 @@ void sched_unlock(void) sched_note_preemption(rtcb, false); /* Release any ready-to-run tasks that have collected in - * g_pendingtasks. + * g_pendingtasks (or in g_readytorun for SMP) * * NOTE: This operation has a very high likelihood of causing * this task to be switched out! */ - if (list_pendingtasks()->head != NULL) +#ifdef CONFIG_SMP + ptcb = (FAR struct tcb_s *)dq_peek(list_readytorun()); + if (ptcb && ptcb->sched_priority > rtcb->sched_priority && + nxsched_deliver_task(rtcb->cpu, rtcb->cpu, SWITCH_HIGHER)) +#else + ptcb = (FAR struct tcb_s *)dq_peek(list_pendingtasks()); + if (ptcb && nxsched_merge_pending()) +#endif { - if (nxsched_merge_pending()) - { - up_switch_context(this_task(), rtcb); - } + up_switch_context(this_task(), rtcb); } #if CONFIG_RR_INTERVAL > 0
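One detail worth spelling out: the switch_equal flag used by the round robin handler, nxsched_running_setpriority() and SWITCH_EQUAL delivery works by lowering the preemption threshold by one, so that a task of equal priority at the head of g_readytorun can rotate in. The sketch below uses a hypothetical should_switch() helper to stand in for the comparison at the top of nxsched_switch_running(); it is illustrative only, not code from the patch.

#include <stdbool.h>

struct tcb
{
  int sched_priority;                /* larger value == higher priority */
};

/* Hypothetical helper: should the candidate at the head of the
 * ready-to-run list replace the running task on this CPU?
 */

static bool should_switch(const struct tcb *running,
                          const struct tcb *candidate,
                          bool switch_equal)
{
  int threshold = running->sched_priority;

  if (switch_equal)
    {
      threshold--;                   /* an equal priority candidate wins */
    }

  return candidate->sched_priority > threshold;
}

With switch_equal == false (SWITCH_HIGHER) only a strictly higher priority candidate preempts; with switch_equal == true (SWITCH_EQUAL) an equal priority peer wins too, which is why round robin rotation and re-prioritizing a running task no longer need a separate SMP call mechanism.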