This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git


The following commit(s) were added to refs/heads/master by this push:
     new aa7ec1083b9 nuttx/spinlock.h: add rspin_trylock/_irqsave/_nopreempt support
aa7ec1083b9 is described below

commit aa7ec1083b9f7d3d7bcd82ed66144df4c8cfe5bf
Author: Bowen Wang <[email protected]>
AuthorDate: Thu Jul 24 20:23:03 2025 +0800

    nuttx/spinlock.h: add rspin_trylock/_irqsave/_nopreempt support
    
    Add try-lock variants for rspinlock_t to support non-blocking spinlock
    acquisition with optional IRQ save/restore and preemption control.
    These variants enable fine-grained synchronization patterns in nested
    contexts with maximum nesting depth support (UINT16_MAX).
    
    Signed-off-by: Bowen Wang <[email protected]>
---
 include/nuttx/spinlock.h | 70 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)

diff --git a/include/nuttx/spinlock.h b/include/nuttx/spinlock.h
index 506feb804c6..00005751bfd 100644
--- a/include/nuttx/spinlock.h
+++ b/include/nuttx/spinlock.h
@@ -617,6 +617,76 @@ irqstate_t rspin_lock_irqsave_nopreempt(FAR rspinlock_t *lock)
   return flags;
 }
 
+/****************************************************************************
+ * Name: rspin_trylock/rspin_trylock_irqsave/rspin_trylock_irqsave_nopreempt
+ *
+ * Description:
+ *   Nesting-capable spinlock: try once to acquire the rspinlock; supports
+ *   a maximum nesting depth of UINT16_MAX. Since IRQs should not stay
+ *   disabled for long, the _nopreempt variant also locks the scheduler.
+ *   Similar to enter_critical_section(), but isolated per lock instance.
+ *
+ *   If CONFIG_SPINLOCK is enabled:
+ *     The spinlock is taken on each CPU's first (outermost) call.
+ *
+ *   If CONFIG_SPINLOCK is not enabled:
+ *     Equivalent to up_irq_save() + sched_lock().
+ *     sched_lock() is called only on the first (outermost) call.
+ *
+ * Input Parameters:
+ *   lock - Caller-specific rspinlock_s. Must not be NULL.
+ *
+ * Returned Value:
+ *   true  - Success, the spinlock was acquired (or re-entered)
+ *   false - Failure, the spinlock is held by another CPU
+ *
+ ****************************************************************************/
+
+#ifdef CONFIG_SPINLOCK
+static inline_function bool rspin_trylock(FAR rspinlock_t *lock)
+{
+  rspinlock_t new_val;
+  rspinlock_t old_val = RSPINLOCK_INITIALIZER;
+  int         cpu     = this_cpu() + 1;
+
+  /* This CPU already owns the lock; just bump the nesting count. */
+
+  if (lock->owner == cpu)
+    {
+      lock->count += 1;
+      return true;
+    }
+
+  new_val.count = 1;
+  new_val.owner = cpu;
+
+  /* Try to seize ownership of the lock. */
+
+  return atomic_cmpxchg_acquire((FAR atomic_t *)&lock->val,
+                                (FAR atomic_t *)&old_val.val, new_val.val);
+}
+
+#  define rspin_trylock_irqsave(l, f) \
+    ({ \
+      (f) = up_irq_save(); \
+      rspin_trylock(l) ? \
+      true : ({ up_irq_restore(f); false; }); \
+    })
+#else
+#  define rspin_trylock_irqsave(l, f) \
+    ({ \
+      (void)(l); \
+      (f) = up_irq_save(); \
+      true; \
+    })
+#endif
+
+#define rspin_trylock_irqsave_nopreempt(l, f) \
+  ({ \
+    rspin_trylock_irqsave(l, f) ? \
+    ({ sched_lock(); true; }) : false; \
+  })
+
 /****************************************************************************
  * Name: spin_trylock_irqsave_notrace
  *

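A minimal usage sketch of the new API follows (not part of the patch). It
assumes a matching release helper rspin_unlock_irqrestore_nopreempt() exists
in the same header, mirroring rspin_lock_irqsave_nopreempt() shown above;
do_update() and defer_update() are placeholders for caller-specific work.

  #include <nuttx/spinlock.h>

  extern void do_update(void);    /* Placeholder: protected work */
  extern void defer_update(void); /* Placeholder: contended fallback */

  static rspinlock_t g_state_lock = RSPINLOCK_INITIALIZER;

  void try_update_shared_state(void)
  {
    irqstate_t flags;

    if (rspin_trylock_irqsave_nopreempt(&g_state_lock, flags))
      {
        /* IRQs are disabled, the scheduler is locked, and the rspinlock
         * is held (or re-entered on the owning CPU).
         */

        do_update();
        rspin_unlock_irqrestore_nopreempt(&g_state_lock, flags);
      }
    else
      {
        /* Another CPU holds the lock; the macro has already restored
         * the IRQ state, so simply defer the work instead of spinning.
         */

        defer_update();
      }
  }

Note that flags is passed without an ampersand: rspin_trylock_irqsave() is a
statement-expression macro that assigns the result of up_irq_save() directly
to its second argument.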