This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git


The following commit(s) were added to refs/heads/master by this push:
     new 82acf6e6a7 irq: irq_cpu_locked is always called from within a critical 
section, and its parameter is the current CPU id, so it must always return 
false; it can be safely deleted.
82acf6e6a7 is described below

commit 82acf6e6a7b834978444671ac707efe48ce8ec2b
Author: hujun5 <huj...@xiaomi.com>
AuthorDate: Wed Nov 29 17:19:36 2023 +0800

    irq: irq_cpu_locked is always called from within a critical section, and 
its parameter is the current CPU id.
    It must therefore always return false, so it can be safely deleted.
    
    Signed-off-by: hujun5 <huj...@xiaomi.com>
---
 sched/irq/irq.h                   | 27 --------------
 sched/irq/irq_csection.c          | 74 ---------------------------------------
 sched/sched/sched_addreadytorun.c |  2 +-
 sched/sched/sched_mergepending.c  |  6 ++--
 sched/sched/sched_setpriority.c   |  2 +-
 sched/sched/sched_unlock.c        |  2 +-
 6 files changed, 5 insertions(+), 108 deletions(-)

diff --git a/sched/irq/irq.h b/sched/irq/irq.h
index 7088249c5d..b1be90ea85 100644
--- a/sched/irq/irq.h
+++ b/sched/irq/irq.h
@@ -150,33 +150,6 @@ void irq_initialize(void);
 
 int irq_unexpected_isr(int irq, FAR void *context, FAR void *arg);
 
-/****************************************************************************
- * Name:  irq_cpu_locked
- *
- * Description:
- *   Test if the IRQ lock set OR if this CPU holds the IRQ lock
- *   There is an interaction with pre-emption controls and IRQ locking:
- *   Even if the pre-emption is enabled, tasks will be forced to pend if
- *   the IRQ lock is also set UNLESS the CPU starting the task is the
- *   holder of the IRQ lock.
- *
- * Input Parameters:
- *   rtcb - Points to the blocked TCB that is ready-to-run
- *
- * Returned Value:
- *   true  - IRQs are locked by a different CPU.
- *   false - IRQs are unlocked OR if they are locked BUT this CPU
- *           is the holder of the lock.
- *
- *   Warning: This values are volatile at only valid at the instance that
- *   the CPU set was queried.
- *
- ****************************************************************************/
-
-#ifdef CONFIG_SMP
-bool irq_cpu_locked(int cpu);
-#endif
-
 /****************************************************************************
  * Name: irq_foreach
  *
diff --git a/sched/irq/irq_csection.c b/sched/irq/irq_csection.c
index 83c4a65f7b..d4525d4760 100644
--- a/sched/irq/irq_csection.c
+++ b/sched/irq/irq_csection.c
@@ -608,80 +608,6 @@ void leave_critical_section(irqstate_t flags)
 }
 #endif
 
-/****************************************************************************
- * Name:  irq_cpu_locked
- *
- * Description:
- *   Test if the IRQ lock set OR if this CPU holds the IRQ lock
- *   There is an interaction with pre-emption controls and IRQ locking:
- *   Even if the pre-emption is enabled, tasks will be forced to pend if
- *   the IRQ lock is also set UNLESS the CPU starting the task is the
- *   holder of the IRQ lock.
- *
- * Input Parameters:
- *   cpu - Points to which cpu
- *
- * Returned Value:
- *   true  - IRQs are locked by a different CPU.
- *   false - IRQs are unlocked OR if they are locked BUT this CPU
- *           is the holder of the lock.
- *
- *   Warning: This values are volatile at only valid at the instance that
- *   the CPU set was queried.
- *
- ****************************************************************************/
-
-#ifdef CONFIG_SMP
-bool irq_cpu_locked(int cpu)
-{
-  cpu_set_t irqset;
-
-  /* g_cpu_irqset is not valid in early phases of initialization */
-
-  if (nxsched_get_initstate() < OSINIT_OSREADY)
-    {
-      /* We are still single threaded.  In either state of g_cpu_irqlock,
-       * the correct return value should always be false.
-       */
-
-      return false;
-    }
-
-  /* Test if g_cpu_irqlock is locked.  We don't really need to use check
-   * g_cpu_irqlock to do this, we can use the g_cpu_set.
-   *
-   * Sample the g_cpu_irqset once.  That is an atomic operation.  All
-   * subsequent operations will operate on the sampled cpu set.
-   */
-
-  irqset = (cpu_set_t)g_cpu_irqset;
-  if (irqset != 0)
-    {
-      /* Some CPU holds the lock.  So g_cpu_irqlock should be locked.
-       * Return false if the 'cpu' is the holder of the lock; return
-       * true if g_cpu_irqlock is locked, but this CPU is not the
-       * holder of the lock.
-       */
-
-      return ((irqset & (1 << cpu)) == 0);
-    }
-
-  /* No CPU holds the lock */
-
-  else
-    {
-      /* In this case g_cpu_irqlock should be unlocked.  However, if
-       * the lock was established in the interrupt handler AND there are
-       * no bits set in g_cpu_irqset, that probably means only that
-       * critical section was established from an interrupt handler.
-       * Return false in either case.
-       */
-
-      return false;
-    }
-}
-#endif
-
 /****************************************************************************
  * Name: restore_critical_section
  *
diff --git a/sched/sched/sched_addreadytorun.c 
b/sched/sched/sched_addreadytorun.c
index eb0957c92a..b82a8d1b75 100644
--- a/sched/sched/sched_addreadytorun.c
+++ b/sched/sched/sched_addreadytorun.c
@@ -223,7 +223,7 @@ bool nxsched_add_readytorun(FAR struct tcb_s *btcb)
    */
 
   me = this_cpu();
-  if ((nxsched_islocked_global() || irq_cpu_locked(me)) &&
+  if ((nxsched_islocked_global()) &&
       task_state != TSTATE_TASK_ASSIGNED)
     {
       /* Add the new ready-to-run task to the g_pendingtasks task list for
diff --git a/sched/sched/sched_mergepending.c b/sched/sched/sched_mergepending.c
index e86969a73b..a8fc20e342 100644
--- a/sched/sched/sched_mergepending.c
+++ b/sched/sched/sched_mergepending.c
@@ -190,7 +190,6 @@ bool nxsched_merge_pending(void)
   FAR struct tcb_s *tcb;
   bool ret = false;
   int cpu;
-  int me;
 
   /* Remove and process every TCB in the g_pendingtasks list.
    *
@@ -198,8 +197,7 @@ bool nxsched_merge_pending(void)
    * some CPU other than this one is in a critical section.
    */
 
-  me = this_cpu();
-  if (!nxsched_islocked_global() && !irq_cpu_locked(me))
+  if (!nxsched_islocked_global())
     {
       /* Find the CPU that is executing the lowest priority task */
 
@@ -237,7 +235,7 @@ bool nxsched_merge_pending(void)
            * Check if that happened.
            */
 
-          if (nxsched_islocked_global() || irq_cpu_locked(me))
+          if (nxsched_islocked_global())
             {
               /* Yes.. then we may have incorrectly placed some TCBs in the
                * g_readytorun list (unlikely, but possible).  We will have to
diff --git a/sched/sched/sched_setpriority.c b/sched/sched/sched_setpriority.c
index a93f92978b..abe031242b 100644
--- a/sched/sched/sched_setpriority.c
+++ b/sched/sched/sched_setpriority.c
@@ -68,7 +68,7 @@ static FAR struct tcb_s *nxsched_nexttcb(FAR struct tcb_s 
*tcb)
    * then use the 'nxttcb' which will probably be the IDLE thread.
    */
 
-  if (!nxsched_islocked_global() && !irq_cpu_locked(this_cpu()))
+  if (!nxsched_islocked_global())
     {
       /* Search for the highest priority task that can run on tcb->cpu. */
 
diff --git a/sched/sched/sched_unlock.c b/sched/sched/sched_unlock.c
index fdbb291a82..87beda977f 100644
--- a/sched/sched/sched_unlock.c
+++ b/sched/sched/sched_unlock.c
@@ -137,7 +137,7 @@ int sched_unlock(void)
            * BEFORE it clears IRQ lock.
            */
 
-          if (!nxsched_islocked_global() && !irq_cpu_locked(cpu) &&
+          if (!nxsched_islocked_global() &&
               list_pendingtasks()->head != NULL)
             {
               if (nxsched_merge_pending())

Reply via email to