Module: xenomai-forge
Branch: next
Commit: e29ee96807b58ea4c22f5112bb03cd06fe901494
URL:    
http://git.xenomai.org/?p=xenomai-forge.git;a=commit;h=e29ee96807b58ea4c22f5112bb03cd06fe901494

Author: Philippe Gerum <r...@xenomai.org>
Date:   Thu Apr 10 11:40:15 2014 +0200

cobalt/rtdm: introduce wait queue API

---

 include/cobalt/kernel/rtdm/driver.h |  517 ++++++++++++++++++++++++++++++++++-
 1 file changed, 510 insertions(+), 7 deletions(-)

diff --git a/include/cobalt/kernel/rtdm/driver.h 
b/include/cobalt/kernel/rtdm/driver.h
index 9c432c4..5234b22 100644
--- a/include/cobalt/kernel/rtdm/driver.h
+++ b/include/cobalt/kernel/rtdm/driver.h
@@ -605,6 +605,12 @@ static inline nanosecs_abs_t 
rtdm_clock_read_monotonic(void)
 }
 #endif /* !DOXYGEN_CPP */
 
+/* --- timeout sequences */
+
+typedef nanosecs_abs_t rtdm_toseq_t;
+
+void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
+
 /*!
  * @addtogroup rtdmsync
  * @{
@@ -620,7 +626,7 @@ int rtdm_select_bind(int fd, rtdm_selector_t *selector,
  */
 
 /**
- * @brief Execute code block atomically
+ * @brief Execute code block atomically (DEPRECATED)
  *
  * Generally, it is illegal to suspend the current task by calling
  * rtdm_task_sleep(), rtdm_event_wait(), etc. while holding a spinlock. In
@@ -650,6 +656,9 @@ int rtdm_select_bind(int fd, rtdm_selector_t *selector,
  * - User-space task (RT, non-RT)
  *
  * Rescheduling: possible, depends on functions called within @a code_block.
+ *
+ * @warning This construct is deprecated and will be phased out in
+ * Xenomai 3.0. Please use rtdm_waitqueue services instead.
  */
 #ifdef DOXYGEN_CPP /* Beautify doxygen output */
 #define RTDM_EXECUTE_ATOMICALLY(code_block)    \
@@ -659,10 +668,14 @@ int rtdm_select_bind(int fd, rtdm_selector_t *selector,
        <LEAVE_ATOMIC_SECTION>                  \
 }
 #else /* This is how it really works */
+static inline __attribute__((deprecated)) void
+rtdm_execute_atomically(void) { }
+
 #define RTDM_EXECUTE_ATOMICALLY(code_block)            \
 {                                                      \
        spl_t __rtdm_s;                                 \
                                                        \
+       rtdm_execute_atomically();                      \
        xnlock_get_irqsave(&nklock, __rtdm_s);          \
        __xnsched_lock();                               \
        code_block;                                     \
@@ -670,6 +683,7 @@ int rtdm_select_bind(int fd, rtdm_selector_t *selector,
        xnlock_put_irqrestore(&nklock, __rtdm_s);       \
 }
 #endif
+
 /** @} Global Lock across Scheduler Invocation */
 
 /*!
@@ -845,8 +859,503 @@ void rtdm_lock_put_irqrestore(rtdm_lock_t *lock, 
rtdm_lockctx_t s)
  */
 #define rtdm_lock_irqrestore(context)  \
        splexit(context)
+
+/**
+ * @brief Enter atomic section (dual kernel only)
+ *
+ * This call opens a fully atomic section, serializing execution with
+ * respect to all interrupt handlers (including for real-time IRQs)
+ * and Xenomai threads running on all CPUs.
+ *
+ * @param context name of local variable to store the context in. This
+ * variable, updated by the real-time core, will hold the information
+ * required to leave the atomic section properly.
+ *
+ * @note Atomic sections may be nested.
+ *
+ * @note Since the strongest lock is acquired by this service, it can
+ * be used to synchronize real-time and non-real-time contexts.
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases.
+ */
+#define cobalt_atomic_enter(context)                   \
+       do {                                            \
+               xnlock_get_irqsave(&nklock, (context)); \
+               __xnsched_lock();                       \
+       } while (0)
+
+/**
+ * @brief Leave atomic section (dual kernel only)
+ *
+ * This call closes an atomic section previously opened by a call to
+ * cobalt_atomic_enter(), restoring the preemption and interrupt state
+ * which prevailed prior to entering the exited section.
+ *
+ * @param context name of local variable which stored the context.
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases.
+ */
+#define cobalt_atomic_leave(context)                           \
+       do {                                                    \
+               __xnsched_unlock();                             \
+               xnlock_put_irqrestore(&nklock, (context));      \
+       } while (0)
+
 /** @} Spinlock with Preemption Deactivation */
 
+/*!
+ * @name Signal, test and wait for a condition atomically
+ * @{
+ */
+struct rtdm_waitqueue {
+       struct xnsynch wait;
+};
+typedef struct rtdm_waitqueue rtdm_waitqueue_t;
+
+#define RTDM_WAITQUEUE_INITIALIZER(__name) {            \
+           .wait = XNSYNCH_WAITQUEUE_INITIALIZER((__name).wait), \
+       }
+
+#define DEFINE_RTDM_WAITQUEUE(__name)                          \
+       struct rtdm_waitqueue __name = RTDM_WAITQUEUE_INITIALIZER(__name)
+
+#define DEFINE_RTDM_WAITQUEUE_ONSTACK(__name)  \
+       DEFINE_RTDM_WAITQUEUE(__name)
+
+/**
+ * @brief  Initialize an RTDM wait queue
+ *
+ * Sets up a wait queue structure for further use.
+ *
+ * @param wq waitqueue to initialize.
+ */
+static inline void rtdm_waitqueue_init(struct rtdm_waitqueue *wq)
+{
+       *wq = (struct rtdm_waitqueue)RTDM_WAITQUEUE_INITIALIZER(*wq);
+}
+
+/**
+ * @brief  Deletes an RTDM wait queue
+ *
+ * Dismantles a wait queue structure, releasing all resources attached
+ * to it.
+ *
+ * @param wq waitqueue to delete.
+ */
+static inline void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq)
+{
+       xnsynch_destroy(&wq->wait);
+}
+
+static inline int __rtdm_timedwait(struct rtdm_waitqueue *wq,
+                                  nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+{
+       if (toseq && timeout > 0)
+               return xnsynch_sleep_on(&wq->wait, *toseq, XN_ABSOLUTE);
+
+       return xnsynch_sleep_on(&wq->wait, timeout, XN_RELATIVE);
+}
+
+/**
+ * @brief  Timed sleep on a locked waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a __cond evaluates to true
+ * or a timeout occurs. The condition is checked each time the
+ * waitqueue @a __wq is signaled.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param __wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param __cond C expression for the event to wait for.
+ *
+ * @param __timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] __toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a __timeout makes no sense for
+ * such service, and might cause unexpected behavior.
+ */
+#define rtdm_timedwait_condition_locked(__wq, __cond, __timeout, __toseq) \
+       ({                                                              \
+               int __ret = 0;                                          \
+               while (__ret == 0 && !(__cond))                         \
+                       __ret = __rtdm_timedwait(__wq, __timeout, __toseq); \
+               __ret;                                                  \
+       })
+
+/**
+ * @brief  Sleep on a locked waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a __cond evaluates to
+ * true. The condition is checked each time the waitqueue @a __wq is
+ * signaled.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param __wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param __cond C expression for the event to wait for.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ */
+#define rtdm_wait_condition_locked(__wq, __cond)                       \
+       ({                                                              \
+               int __ret = 0;                                          \
+               while (__ret == 0 && !(__cond))                         \
+                       __ret = xnsynch_sleep_on(&(__wq)->wait,         \
+                                                XN_INFINITE, XN_RELATIVE); \
+               __ret;                                                  \
+       })
+
+/**
+ * @brief  Timed sleep on a waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a __cond evaluates to true
+ * or a timeout occurs. The condition is checked each time the
+ * waitqueue @a __wq is signaled.
+ *
+ * @param __wq waitqueue to wait on.
+ *
+ * @param __cond C expression for the event to wait for.
+ *
+ * @param __timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] __toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a __timeout makes no sense for
+ * such service, and might cause unexpected behavior.
+ */
+#define rtdm_timedwait_condition(__wq, __cond, __timeout, __toseq)     \
+       ({                                                              \
+               spl_t __s;                                              \
+               int __ret;                                              \
+               xnlock_get_irqsave(&nklock, __s);                       \
+               __ret = rtdm_timedwait_condition_locked(__wq, __cond,   \
+                                             __timeout, __toseq);      \
+               xnlock_put_irqrestore(&nklock, __s);                    \
+               __ret;                                                  \
+       })
+
+/**
+ * @brief Timed sleep on a waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs.
+ *
+ * @param __wq waitqueue to wait on.
+ *
+ * @param __timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] __toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a __timeout makes no sense for
+ * such service, and might cause unexpected behavior.
+ */
+#define rtdm_timedwait(__wq, __timeout, __toseq)                       \
+       __rtdm_timedwait(__wq, __timeout, __toseq)
+
+/**
+ * @brief Timed sleep on a locked waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param __wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param __timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] __toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a __timeout makes no sense for
+ * such service, and might cause unexpected behavior.
+ */
+#define rtdm_timedwait_locked(__wq, __timeout, __toseq)                        
\
+       rtdm_timedwait(__wq, __timeout, __toseq)
+
+/**
+ * @brief  Sleep on a waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a __cond evaluates to
+ * true. The condition is checked each time the waitqueue @a __wq is
+ * signaled.
+ *
+ * @param __wq waitqueue to wait on
+ *
+ * @param __cond C expression for the event to wait for.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ */
+#define rtdm_wait_condition(__wq, __cond)                              \
+       ({                                                              \
+               spl_t __s;                                              \
+               int __ret;                                              \
+               xnlock_get_irqsave(&nklock, __s);                       \
+               __ret = rtdm_wait_condition_locked(__wq, __cond);       \
+               xnlock_put_irqrestore(&nklock, __s);                    \
+               __ret;                                                  \
+       })
+
+/**
+ * @brief Sleep on a waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush().
+ *
+ * @param __wq waitqueue to wait on.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ */
+#define rtdm_wait(__wq)                                                        
\
+       xnsynch_sleep_on(&(__wq)->wait, XN_INFINITE, XN_RELATIVE)
+
+/**
+ * @brief Sleep on a locked waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush().
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param __wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ */
+#define rtdm_wait_locked(__wq)  rtdm_wait(__wq)
+
+/**
+ * @brief Lock a waitqueue
+ *
+ * Acquires the lock on the waitqueue @a __wq.
+ *
+ * @param __wq waitqueue to lock.
+ *
+ * @param __context name of local variable to store the context in.
+ *
+ * @note Recursive locking might lead to unexpected behavior,
+ * including lock up.
+ */
+#define rtdm_waitqueue_lock(__wq, __context)  cobalt_atomic_enter(__context)
+
+/**
+ * @brief Unlock a waitqueue
+ *
+ * Releases the lock on the waitqueue @a __wq.
+ *
+ * @param __wq waitqueue to unlock.
+ *
+ * @param __context name of local variable which stored the context.
+ */
+#define rtdm_waitqueue_unlock(__wq, __context)  cobalt_atomic_leave(__context)
+
+/**
+ * @brief Signal a waitqueue
+ *
+ * Signals the waitqueue @a __wq, waking up a single waiter (if
+ * any).
+ *
+ * @param __wq waitqueue to signal.
+ *
+ * @return non-zero if a task has been readied as a result of this
+ * call, zero otherwise.
+ */
+#define rtdm_waitqueue_signal(__wq)                                    \
+       ({                                                              \
+               struct xnthread *__waiter;                              \
+               __waiter = xnsynch_wakeup_one_sleeper(&(__wq)->wait);   \
+               xnsched_run();                                          \
+               __waiter != NULL;                                       \
+       })
+
+#define __rtdm_waitqueue_flush(__wq, __reason)                         \
+       ({                                                              \
+               int __ret;                                              \
+               __ret = xnsynch_flush(&(__wq)->wait, __reason);         \
+               xnsched_run();                                          \
+               __ret == XNSYNCH_RESCHED;                               \
+       })
+
+/**
+ * @brief Broadcast a waitqueue
+ *
+ * Broadcast the waitqueue @a __wq, waking up all waiters. Each
+ * readied task may assume to have received the wake up event.
+ *
+ * @param __wq waitqueue to broadcast.
+ *
+ * @return non-zero if at least one task has been readied as a result
+ * of this call, zero otherwise.
+ */
+#define rtdm_waitqueue_broadcast(__wq) \
+       __rtdm_waitqueue_flush(__wq, 0)
+
+/**
+ * @brief Flush a waitqueue
+ *
+ * Flushes the waitqueue @a __wq, unblocking all waiters with an error
+ * status (-EINTR).
+ *
+ * @param __wq waitqueue to flush.
+ *
+ * @return non-zero if at least one task has been readied as a result
+ * of this call, zero otherwise.
+ */
+#define rtdm_waitqueue_flush(__wq)     \
+       __rtdm_waitqueue_flush(__wq, XNBREAK)
+
+/**
+ * @brief Signal a particular waiter on a waitqueue
+ *
+ * Signals the waitqueue @a __wq, waking up waiter @a __waiter only,
+ * which must be currently sleeping on the waitqueue.
+ *
+ * @param __wq waitqueue to signal.
+ *
+ * @param __waiter RTDM task to wake up.
+ */
+#define rtdm_waitqueue_wakeup(__wq, __waiter)                          \
+       do {                                                            \
+               xnsynch_wakeup_this_sleeper(&(__wq)->wait, __waiter);   \
+               xnsched_run();                                          \
+       } while (0)
+
+/**
+ * @brief Simple iterator for waitqueues
+ *
+ * This construct traverses the wait list of a given waitqueue
+ * @a __wq, assigning each RTDM task pointer to the cursor variable
+ * @a __pos, which must be of type rtdm_task_t.
+ *
+ * @a __wq must have been locked by a call to rtdm_waitqueue_lock()
+ * prior to traversing its wait list.
+ *
+ * @param __pos cursor variable holding a pointer to the RTDM task
+ * being fetched.
+ *
+ * @param __wq waitqueue to scan.
+ *
+ * @note The waitqueue should not be signaled, broadcast or flushed
+ * during the traversal, unless the loop is aborted immediately
+ * after. Should multiple waiters be readied while iterating, the safe
+ * form rtdm_for_each_waiter_safe() must be used for traversal
+ * instead.
+ */
+#define rtdm_for_each_waiter(__pos, __wq)              \
+       xnsynch_for_each_sleeper(__pos, &(__wq)->wait)
+
+/**
+ * @brief Safe iterator for waitqueues
+ *
+ * This construct traverses the wait list of a given waitqueue
+ * @a __wq, assigning each RTDM task pointer to the cursor variable
+ * @a __pos, which must be of type rtdm_task_t.
+ *
+ * Unlike with rtdm_for_each_waiter(), the waitqueue may be signaled,
+ * broadcast or flushed during the traversal.
+ *
+ * @a __wq must have been locked by a call to rtdm_waitqueue_lock()
+ * prior to traversing its wait list.
+ *
+ * @param __pos cursor variable holding a pointer to the RTDM task
+ * being fetched.
+ *
+ * @param __tmp temporary cursor variable.
+ *
+ * @param __wq waitqueue to scan.
+ */
+#define rtdm_for_each_waiter_safe(__pos, __tmp, __wq)  \
+       xnsynch_for_each_sleeper_safe(__pos, __tmp, &(__wq)->wait)
+
+/** @} Signal, test and wait for a condition atomically */
+
 /** @} rtdmsync */
 
 /* --- Interrupt management services --- */
@@ -1158,12 +1667,6 @@ static inline int __deprecated 
rtdm_task_sleep_until(nanosecs_abs_t wakeup_time)
 }
 #endif /* !DOXYGEN_CPP */
 
-/* --- timeout sequences */
-
-typedef nanosecs_abs_t rtdm_toseq_t;
-
-void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
-
 /* --- event services --- */
 
 typedef struct rtdm_event {


_______________________________________________
Xenomai-git mailing list
Xenomai-git@xenomai.org
http://www.xenomai.org/mailman/listinfo/xenomai-git

Reply via email to