xiaoxiang781216 commented on code in PR #14578:
URL: https://github.com/apache/nuttx/pull/14578#discussion_r1925573021
##########
sched/sched/sched_lock.c:
##########

@@ -64,112 +64,23 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_lock(void)
+void sched_lock(void)
 {
   FAR struct tcb_s *rtcb;

Review Comment:
   Move this after line 72 and merge it with line 73.


##########
sched/sched/sched_lock.c:
##########

@@ -64,112 +64,23 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_lock(void)
+void sched_lock(void)
 {
   FAR struct tcb_s *rtcb;
 
-  /* If the CPU supports suppression of interprocessor interrupts, then
-   * simple disabling interrupts will provide sufficient protection for
-   * the following operation.
-   */
-
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
-    {
-      irqstate_t flags;
-
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
-      flags = enter_critical_section();
-
-      /* A counter is used to support locking. This allows nested lock
-       * operations on this thread
-       */
-
-      rtcb->lockcount++;
-
-      /* Check if we just acquired the lock */
-
-      if (rtcb->lockcount == 1)
-        {
-          /* Note that we have pre-emption locked */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
-          nxsched_critmon_preemption(rtcb, true, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
-          sched_note_preemption(rtcb, true);
-#endif
-        }
-
-      /* Move any tasks in the ready-to-run list to the pending task list
-       * where they will not be available to run until the scheduler is
-       * unlocked and nxsched_merge_pending() is called.
-       */
-
-      nxsched_merge_prioritized(list_readytorun(),
-                                list_pendingtasks(),
-                                TSTATE_TASK_PENDING);
-
-      leave_critical_section(flags);
-    }
-
-  return OK;
-}
-
-#else /* CONFIG_SMP */
-
-int sched_lock(void)
-{
-  FAR struct tcb_s *rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
+  if (!up_interrupt_context())
     {
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);

Review Comment:
   Let's keep the assert and its comment:
   ```
   DEBUGASSERT(rtcb == NULL || rtcb->lockcount < MAX_LOCK_COUNT);
   ```
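   Taken together with the earlier comment on the `FAR struct tcb_s *rtcb;`
   declaration, the locked path might end up looking roughly like the sketch
   below. This is only an illustration of the two suggestions applied to the
   new code quoted above (it assumes line 73 in the new file is the
   `rtcb = this_task();` assignment), not the final PR code:
   ```
   void sched_lock(void)
   {
     if (!up_interrupt_context())
       {
         /* Declaration merged with the assignment, per the first comment */

         FAR struct tcb_s *rtcb = this_task();

         /* Catch attempts to increment the lockcount beyond the range of
          * the integer type. rtcb may still be NULL during early boot-up.
          */

         DEBUGASSERT(rtcb == NULL || rtcb->lockcount < MAX_LOCK_COUNT);

         if (rtcb != NULL && rtcb->lockcount++ == 0)
           {
             /* Note that we have pre-emption locked */
           }
       }
   }
   ```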
##########
sched/sched/sched_unlock.c:
##########

@@ -53,60 +53,30 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_unlock(void)
+void sched_unlock(void)
 {
+  bool need_leave_csection = false;
+  irqstate_t flags;

Review Comment:
   Remove this; keep line 77.


##########
sched/sched/sched_unlock.c:
##########

@@ -53,60 +53,30 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_unlock(void)
+void sched_unlock(void)
 {
+  bool need_leave_csection = false;
+  irqstate_t flags;
   FAR struct tcb_s *rtcb;
 
-  /* This operation is safe because the scheduler is locked and no context
-   * switch may occur.
-   */
-
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during
-   * early boot-up phases, and (2) sched_unlock() should have no
-   * effect if called from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
+  if (!up_interrupt_context())
     {
-      /* Prevent context switches throughout the following. */
-
-      irqstate_t flags = enter_critical_section();
-      int cpu = this_cpu();
-
-      DEBUGASSERT(rtcb->lockcount > 0);
-
-      /* Decrement the preemption lock counter */
-
-      rtcb->lockcount--;
-
-      /* Check if the lock counter has decremented to zero. If so,
-       * then pre-emption has been re-enabled.
-       */
-
-      if (rtcb->lockcount <= 0)
+      rtcb = this_task();
+      if (rtcb != NULL && rtcb->lockcount-- == 1)
         {
-          /* Note that we no longer have pre-emption disabled. */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
+#if (CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0) || \
+    defined(CONFIG_SCHED_INSTRUMENTATION_PREEMPTION)
+          flags = enter_critical_section_wo_note();
           nxsched_critmon_preemption(rtcb, false, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
           sched_note_preemption(rtcb, false);
+          leave_critical_section_wo_note(flags);
 #endif
 
-          /* Release any ready-to-run tasks that have collected in

Review Comment:
   Why remove this comment?


##########
sched/sched/sched_lock.c:
##########

@@ -64,112 +64,23 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_lock(void)
+void sched_lock(void)
 {
   FAR struct tcb_s *rtcb;
 
-  /* If the CPU supports suppression of interprocessor interrupts, then
-   * simple disabling interrupts will provide sufficient protection for
-   * the following operation.
-   */
-
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
-    {
-      irqstate_t flags;
-
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
-      flags = enter_critical_section();
-
-      /* A counter is used to support locking. This allows nested lock
-       * operations on this thread
-       */
-
-      rtcb->lockcount++;
-
-      /* Check if we just acquired the lock */
-
-      if (rtcb->lockcount == 1)
-        {
-          /* Note that we have pre-emption locked */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
-          nxsched_critmon_preemption(rtcb, true, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
-          sched_note_preemption(rtcb, true);
-#endif
-        }
-
-      /* Move any tasks in the ready-to-run list to the pending task list
-       * where they will not be available to run until the scheduler is
-       * unlocked and nxsched_merge_pending() is called.
-       */
-
-      nxsched_merge_prioritized(list_readytorun(),

Review Comment:
   Why remove this?
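   For context, the deleted SMP-only block is half of a lock/unlock pair:
   while the scheduler was locked, sched_lock() parked every other ready task
   on the pending list, and the removed comment notes that
   nxsched_merge_pending() made them runnable again once the scheduler was
   unlocked. Roughly, from the removed lines:
   ```
   /* Move any tasks in the ready-to-run list to the pending task list
    * where they will not be available to run until the scheduler is
    * unlocked and nxsched_merge_pending() is called.
    */

   nxsched_merge_prioritized(list_readytorun(),    /* source list      */
                             list_pendingtasks(),  /* destination list */
                             TSTATE_TASK_PENDING); /* new task state   */
   ```
   With the call removed, whatever now keeps those tasks from running while
   `lockcount` is nonzero is not visible in the quoted hunk, which is
   presumably what this question is getting at.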
##########
sched/sched/sched_lock.c:
##########

@@ -64,112 +64,23 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_lock(void)
+void sched_lock(void)
 {
   FAR struct tcb_s *rtcb;
 
-  /* If the CPU supports suppression of interprocessor interrupts, then
-   * simple disabling interrupts will provide sufficient protection for
-   * the following operation.
-   */
-
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
-    {
-      irqstate_t flags;
-
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
-      flags = enter_critical_section();
-
-      /* A counter is used to support locking. This allows nested lock
-       * operations on this thread
-       */
-
-      rtcb->lockcount++;
-
-      /* Check if we just acquired the lock */
-
-      if (rtcb->lockcount == 1)
-        {
-          /* Note that we have pre-emption locked */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
-          nxsched_critmon_preemption(rtcb, true, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
-          sched_note_preemption(rtcb, true);
-#endif
-        }
-
-      /* Move any tasks in the ready-to-run list to the pending task list
-       * where they will not be available to run until the scheduler is
-       * unlocked and nxsched_merge_pending() is called.
-       */
-
-      nxsched_merge_prioritized(list_readytorun(),
-                                list_pendingtasks(),
-                                TSTATE_TASK_PENDING);
-
-      leave_critical_section(flags);
-    }
-
-  return OK;
-}
-
-#else /* CONFIG_SMP */
-
-int sched_lock(void)
-{
-  FAR struct tcb_s *rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
+  if (!up_interrupt_context())
     {
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
-      /* A counter is used to support locking. This allows nested lock

Review Comment:
   Keep this comment.


##########
sched/sched/sched_unlock.c:
##########

@@ -53,60 +53,30 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_unlock(void)
+void sched_unlock(void)
 {
+  bool need_leave_csection = false;
+  irqstate_t flags;
   FAR struct tcb_s *rtcb;
 
-  /* This operation is safe because the scheduler is locked and no context
-   * switch may occur.
-   */
-
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during
-   * early boot-up phases, and (2) sched_unlock() should have no
-   * effect if called from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
+  if (!up_interrupt_context())
    {
-      /* Prevent context switches throughout the following. */
-
-      irqstate_t flags = enter_critical_section();

Review Comment:
   Can we hold the critical section here once, to avoid the code duplication
   and remove `need_leave_csection`?
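   A rough sketch of what that could look like, with the critical section
   taken once for the whole locked path instead of per-branch. The placement
   and the use of the plain enter/leave variants are guesses based only on
   the quoted diff, not the actual PR code:
   ```
   void sched_unlock(void)
   {
     FAR struct tcb_s *rtcb = this_task();

     if (rtcb != NULL && !up_interrupt_context())
       {
         /* Enter the critical section once; the instrumentation calls and
          * any later list manipulation then run under the same section,
          * and need_leave_csection is no longer needed.
          */

         irqstate_t flags = enter_critical_section();

         DEBUGASSERT(rtcb->lockcount > 0);

         if (rtcb->lockcount-- == 1)
           {
             /* Note that we no longer have pre-emption disabled. */

   #if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
             nxsched_critmon_preemption(rtcb, false, return_address(0));
   #endif
   #ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
             sched_note_preemption(rtcb, false);
   #endif
           }

         leave_critical_section(flags);
       }
   }
   ```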
##########
sched/sched/sched_unlock.c:
##########

@@ -53,60 +53,30 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_unlock(void)
+void sched_unlock(void)
 {
+  bool need_leave_csection = false;
+  irqstate_t flags;
   FAR struct tcb_s *rtcb;
 
-  /* This operation is safe because the scheduler is locked and no context
-   * switch may occur.
-   */
-
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during
-   * early boot-up phases, and (2) sched_unlock() should have no
-   * effect if called from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
+  if (!up_interrupt_context())
     {
-      /* Prevent context switches throughout the following. */
-
-      irqstate_t flags = enter_critical_section();
-      int cpu = this_cpu();
-
-      DEBUGASSERT(rtcb->lockcount > 0);

Review Comment:
   Keep this assert.


##########
sched/sched/sched_lock.c:
##########

@@ -64,112 +64,23 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_lock(void)
+void sched_lock(void)
 {
   FAR struct tcb_s *rtcb;
 
-  /* If the CPU supports suppression of interprocessor interrupts, then
-   * simple disabling interrupts will provide sufficient protection for
-   * the following operation.
-   */
-
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
-    {
-      irqstate_t flags;
-
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
-      flags = enter_critical_section();
-
-      /* A counter is used to support locking. This allows nested lock
-       * operations on this thread
-       */
-
-      rtcb->lockcount++;
-
-      /* Check if we just acquired the lock */
-
-      if (rtcb->lockcount == 1)
-        {
-          /* Note that we have pre-emption locked */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
-          nxsched_critmon_preemption(rtcb, true, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
-          sched_note_preemption(rtcb, true);
-#endif
-        }
-
-      /* Move any tasks in the ready-to-run list to the pending task list
-       * where they will not be available to run until the scheduler is
-       * unlocked and nxsched_merge_pending() is called.
-       */
-
-      nxsched_merge_prioritized(list_readytorun(),
-                                list_pendingtasks(),
-                                TSTATE_TASK_PENDING);
-
-      leave_critical_section(flags);
-    }
-
-  return OK;
-}
-
-#else /* CONFIG_SMP */
-
-int sched_lock(void)
-{
-  FAR struct tcb_s *rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called

Review Comment:
   Keep the second comment.


##########
sched/sched/sched_unlock.c:
##########

@@ -53,60 +53,30 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_unlock(void)
+void sched_unlock(void)
 {
+  bool need_leave_csection = false;
+  irqstate_t flags;
   FAR struct tcb_s *rtcb;
 
-  /* This operation is safe because the scheduler is locked and no context
-   * switch may occur.
-   */
-
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during
-   * early boot-up phases, and (2) sched_unlock() should have no

Review Comment:
   Keep the second comment.


##########
sched/sched/sched_unlock.c:
##########

@@ -53,60 +53,30 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_unlock(void)
+void sched_unlock(void)
 {
+  bool need_leave_csection = false;
+  irqstate_t flags;
   FAR struct tcb_s *rtcb;
 
-  /* This operation is safe because the scheduler is locked and no context
-   * switch may occur.
-   */
-
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during
-   * early boot-up phases, and (2) sched_unlock() should have no
-   * effect if called from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
+  if (!up_interrupt_context())
    {
-      /* Prevent context switches throughout the following. */
-
-      irqstate_t flags = enter_critical_section();
-      int cpu = this_cpu();
-
-      DEBUGASSERT(rtcb->lockcount > 0);
-
-      /* Decrement the preemption lock counter */
-
-      rtcb->lockcount--;
-
-      /* Check if the lock counter has decremented to zero. If so,
-       * then pre-emption has been re-enabled.
-       */
-
-      if (rtcb->lockcount <= 0)
+      rtcb = this_task();

Review Comment:
   Move this before line 75 and merge it with line 60.
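   That is, assuming line 60 is the `FAR struct tcb_s *rtcb;` declaration,
   something along these lines:
   ```
   void sched_unlock(void)
   {
     FAR struct tcb_s *rtcb = this_task(); /* declaration and assignment merged */

     if (!up_interrupt_context())
       {
         if (rtcb != NULL && rtcb->lockcount-- == 1)
           {
             /* ... instrumentation and release of pending tasks ... */
           }
       }
   }
   ```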
##########
sched/sched/sched_lock.c:
##########

@@ -64,112 +64,23 @@
  *
  ****************************************************************************/
 
-#ifdef CONFIG_SMP
-
-int sched_lock(void)
+void sched_lock(void)
 {
   FAR struct tcb_s *rtcb;
 
-  /* If the CPU supports suppression of interprocessor interrupts, then
-   * simple disabling interrupts will provide sufficient protection for
-   * the following operation.
-   */
-
-  rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
-    {
-      irqstate_t flags;
-
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
-      flags = enter_critical_section();
-
-      /* A counter is used to support locking. This allows nested lock
-       * operations on this thread
-       */
-
-      rtcb->lockcount++;
-
-      /* Check if we just acquired the lock */
-
-      if (rtcb->lockcount == 1)
-        {
-          /* Note that we have pre-emption locked */
-
-#if CONFIG_SCHED_CRITMONITOR_MAXTIME_PREEMPTION >= 0
-          nxsched_critmon_preemption(rtcb, true, return_address(0));
-#endif
-#ifdef CONFIG_SCHED_INSTRUMENTATION_PREEMPTION
-          sched_note_preemption(rtcb, true);
-#endif
-        }
-
-      /* Move any tasks in the ready-to-run list to the pending task list
-       * where they will not be available to run until the scheduler is
-       * unlocked and nxsched_merge_pending() is called.
-       */
-
-      nxsched_merge_prioritized(list_readytorun(),
-                                list_pendingtasks(),
-                                TSTATE_TASK_PENDING);
-
-      leave_critical_section(flags);
-    }
-
-  return OK;
-}
-
-#else /* CONFIG_SMP */
-
-int sched_lock(void)
-{
-  FAR struct tcb_s *rtcb = this_task();
-
-  /* Check for some special cases: (1) rtcb may be NULL only during early
-   * boot-up phases, and (2) sched_lock() should have no effect if called
-   * from the interrupt level.
-   */
-
-  if (rtcb != NULL && !up_interrupt_context())
+  if (!up_interrupt_context())
     {
-      /* Catch attempts to increment the lockcount beyond the range of the
-       * integer type.
-       */
-
-      DEBUGASSERT(rtcb->lockcount < MAX_LOCK_COUNT);
-
-      /* A counter is used to support locking. This allows nested lock
-       * operations on this thread (on any CPU)
-       */
-
-      rtcb->lockcount++;
-
-      /* Check if we just acquired the lock */
-
-      if (rtcb->lockcount == 1)
+      rtcb = this_task();
+      if (rtcb != NULL && rtcb->lockcount++ == 0)
         {
-          /* Note that we have pre-emption locked */

Review Comment:
   Move this after line 79.


##########
sched/sched/sched_unlock.c:
##########

@@ -124,137 +94,13 @@ int sched_unlock(void)
       if ((rtcb->flags & TCB_FLAG_POLICY_MASK) == TCB_FLAG_SCHED_RR &&
           rtcb->timeslice == 0)
         {
-          /* Yes.. that is the situation. But one more thing. The call

Review Comment:
   Why remove this comment?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@nuttx.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org