Module: xenomai-forge
Branch: rtdm-api-waitqueues
Commit: 729b1b0283e8a2d4ece7de305bb3875a990d5406
URL:    
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=729b1b0283e8a2d4ece7de305bb3875a990d5406

Author: Philippe Gerum <r...@xenomai.org>
Date:   Thu Apr 10 11:40:15 2014 +0200

cobalt/rtdm: introduce wait queues in API

---

 include/cobalt/kernel/rtdm/driver.h |  183 +++++++++++++++++++++++++++++++++--
 1 file changed, 176 insertions(+), 7 deletions(-)

diff --git a/include/cobalt/kernel/rtdm/driver.h b/include/cobalt/kernel/rtdm/driver.h
index 78ce5e4..9aa0637 100644
--- a/include/cobalt/kernel/rtdm/driver.h
+++ b/include/cobalt/kernel/rtdm/driver.h
@@ -603,6 +603,12 @@ static inline nanosecs_abs_t rtdm_clock_read_monotonic(void)
 }
 #endif /* !DOXYGEN_CPP */
 
+/* --- timeout sequences */
+
+typedef nanosecs_abs_t rtdm_toseq_t;
+
+void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
+
 /*!
  * @addtogroup rtdmsync
  * @{
@@ -618,7 +624,7 @@ int rtdm_select_bind(int fd, rtdm_selector_t *selector,
  */
 
 /**
- * @brief Execute code block atomically
+ * @brief Execute code block atomically (DEPRECATED)
  *
  * Generally, it is illegal to suspend the current task by calling
  * rtdm_task_sleep(), rtdm_event_wait(), etc. while holding a spinlock. In
@@ -648,6 +654,9 @@ int rtdm_select_bind(int fd, rtdm_selector_t *selector,
  * - User-space task (RT, non-RT)
  *
  * Rescheduling: possible, depends on functions called within @a code_block.
+ *
+ * @warning This construct is deprecated. Please use
+ * rtdm_atomic_enter/leave, or rtdm_waitqueue services instead.
  */
 #ifdef DOXYGEN_CPP /* Beautify doxygen output */
 #define RTDM_EXECUTE_ATOMICALLY(code_block)    \
@@ -668,6 +677,7 @@ int rtdm_select_bind(int fd, rtdm_selector_t *selector,
        xnlock_put_irqrestore(&nklock, __rtdm_s);       \
 }
 #endif
+
 /** @} Global Lock across Scheduler Invocation */
 
 /*!
@@ -843,8 +853,173 @@ void rtdm_lock_put_irqrestore(rtdm_lock_t *lock, rtdm_lockctx_t s)
  */
 #define rtdm_lock_irqrestore(context)  \
        splexit(context)
+
+/**
+ * @brief Enter atomic section
+ *
+ * This call opens a fully atomic section, serializing execution with
+ * respect to all interrupt handlers (including for real-time IRQs)
+ * and Xenomai threads running on all CPUs.
+ *
+ * @param context name of local variable to store the context in. This
+ * variable, updated by the real-time core, will hold the information
+ * required to leave the atomic section properly.
+ *
+ * @note Atomic sections may be nested.
+ *
+ * @note Since the strongest lock is acquired by this service, it can
+ * be used to synchronize real-time and non-real-time contexts.
+ */
+#define rtdm_atomic_enter(context)                     \
+       do {                                            \
+               xnlock_get_irqsave(&nklock, (context)); \
+               __xnsched_lock();                       \
+       } while (0)
+
+/**
+ * @brief Leave atomic section
+ *
+ * This call closes an atomic section previously opened by a call to
+ * rtdm_atomic_enter(), restoring the preemption and interrupt state
+ * which prevailed prior to entering the exited section.
+ *
+ * @param context name of local variable which stored the context.
+ */
+#define rtdm_atomic_leave(context)                             \
+       do {                                                    \
+               __xnsched_unlock();                             \
+               xnlock_put_irqrestore(&nklock, (context));      \
+       } while (0)
+
 /** @} Spinlock with Preemption Deactivation */
 
+/*!
+ * @name Signal, test and wait for a condition atomically
+ * @{
+ */
+struct rtdm_waitqueue {
+       struct xnsynch wait;
+};
+typedef struct rtdm_waitqueue rtdm_waitqueue_t;
+
+#define RTDM_WAITQUEUE_INITIALIZER(__name) {            \
+           .wait = XNSYNCH_WAITQUEUE_INITIALIZER((__name).wait), \
+       }
+
+#define DEFINE_RTDM_WAITQUEUE(__name)                          \
+       struct rtdm_waitqueue __name = RTDM_WAITQUEUE_INITIALIZER(__name)
+
+#define DEFINE_RTDM_WAITQUEUE_ONSTACK(__name)  \
+       DEFINE_RTDM_WAITQUEUE(__name)
+
+static inline void rtdm_waitqueue_init(struct rtdm_waitqueue *wq)
+{
+       *wq = (struct rtdm_waitqueue)RTDM_WAITQUEUE_INITIALIZER(*wq);
+}
+
+static inline void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq)
+{
+       xnsynch_destroy(&wq->wait);
+}
+
+static inline int __rtdm_timedwait(struct rtdm_waitqueue *wq,
+                                  nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+{
+       if (toseq && timeout > 0)
+               return xnsynch_sleep_on(&wq->wait, *toseq, XN_ABSOLUTE);
+
+       return xnsynch_sleep_on(&wq->wait, timeout, XN_RELATIVE);
+}
+
+#define rtdm_timedwait_condition_locked(__wq, __cond, __timeout, __toseq) \
+       ({                                                              \
+               int __ret = 0;                                          \
+               while (__ret == 0 && !(__cond))                         \
+                       __ret = __rtdm_timedwait(__wq, __timeout, __toseq); \
+               __ret;                                                  \
+       })
+
+#define rtdm_wait_condition_locked(__wq, __cond)                       \
+       ({                                                              \
+               int __ret = 0;                                          \
+               while (__ret == 0 && !(__cond))                         \
+                       __ret = xnsynch_sleep_on(&(__wq)->wait,         \
+                                                XN_INFINITE, XN_RELATIVE); \
+               __ret;                                                  \
+       })
+
+#define rtdm_timedwait_condition(__wq, __cond, __timeout, __toseq)     \
+       ({                                                              \
+               spl_t __s;                                              \
+               int __ret;                                              \
+               xnlock_get_irqsave(&nklock, __s);                       \
+               __ret = rtdm_timedwait_condition_locked(__wq, __cond,   \
+                                             __timeout, __toseq);      \
+               xnlock_put_irqrestore(&nklock, __s);                    \
+               __ret;                                                  \
+       })
+
+#define rtdm_timedwait(__wq, __timeout, __toseq)                       \
+       __rtdm_timedwait(__wq, __timeout, __toseq)
+
+#define rtdm_timedwait_locked(__wq, __timeout, __toseq)			\
+       rtdm_timedwait(__wq, __timeout, __toseq)
+
+#define rtdm_wait_condition(__wq, __cond)                              \
+       ({                                                              \
+               spl_t __s;                                              \
+               int __ret;                                              \
+               xnlock_get_irqsave(&nklock, __s);                       \
+               __ret = rtdm_wait_condition_locked(__wq, __cond);       \
+               xnlock_put_irqrestore(&nklock, __s);                    \
+               __ret;                                                  \
+       })
+
+#define rtdm_wait(__wq)						\
+       xnsynch_sleep_on(&(__wq)->wait, XN_INFINITE, XN_RELATIVE)
+
+#define rtdm_wait_locked(__wq)  rtdm_wait(__wq)
+
+#define rtdm_waitqueue_lock(__wq, __context)  rtdm_atomic_enter(__context)
+
+#define rtdm_waitqueue_unlock(__wq, __context)  rtdm_atomic_leave(__context)
+
+#define rtdm_waitqueue_signal(__wq)                                    \
+       ({                                                              \
+               struct xnthread *__waiter;                              \
+               __waiter = xnsynch_wakeup_one_sleeper(&(__wq)->wait);   \
+               xnsched_run();                                          \
+               __waiter != NULL;                                       \
+       })
+
+#define __rtdm_waitqueue_flush(__wq, __reason)                         \
+       ({                                                              \
+               int __ret;                                              \
+               __ret = xnsynch_flush(&(__wq)->wait, __reason);         \
+               xnsched_run();                                          \
+               __ret == XNSYNCH_RESCHED;                               \
+       })
+
+#define rtdm_waitqueue_broadcast(__wq) \
+       __rtdm_waitqueue_flush(__wq, 0)
+
+#define rtdm_waitqueue_flush(__wq)     \
+       __rtdm_waitqueue_flush(__wq, XNBREAK)
+
+#define rtdm_waitqueue_wakeup(__wq, __waiter)                          \
+       do {                                                            \
+               xnsynch_wakeup_this_sleeper(&(__wq)->wait, __waiter);   \
+               xnsched_run();                                          \
+       } while (0)
+
+#define rtdm_for_each_waiter(__pos, __wq)              \
+       xnsynch_for_each_sleeper(__pos, &(__wq)->wait)
+
+#define rtdm_for_each_waiter_safe(__pos, __tmp, __wq)  \
+       xnsynch_for_each_sleeper_safe(__pos, __tmp, &(__wq)->wait)
+
+/** @} Signal, test and wait for a condition atomically */
+
 /** @} rtdmsync */
 
 /* --- Interrupt management services --- */
@@ -1156,12 +1331,6 @@ static inline int __deprecated rtdm_task_sleep_until(nanosecs_abs_t wakeup_time)
 }
 #endif /* !DOXYGEN_CPP */
 
-/* --- timeout sequences */
-
-typedef nanosecs_abs_t rtdm_toseq_t;
-
-void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
-
 /* --- event services --- */
 
 typedef struct {


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to