To make signal delivery not require the kernel lock I need a basic TAILQ
implementation that is SMR safe. This diff implements this TAILQ.
Without the write lock only SMR_TAILQ_FOREACH() (including SMR_TAILQ_FIRST
and SMR_TAILQ_NEXT) can be used. No other traversals are supported.
For the locked version the one function I need is
SMR_TAILQ_INSERT_TAIL_LOCKED(); the other functions are more or less
adapted copies of the SMR_LIST versions (with the usual TAILQ extra
checks).
Please review. I tried to ensure that smr_tqh_first and smr_tqe_next are
updated last, after the membar, to ensure that the list can always be
traversed.
--
:wq Claudio
Index: sys/smr.h
===================================================================
RCS file: /cvs/src/sys/sys/smr.h,v
retrieving revision 1.4
diff -u -p -r1.4 smr.h
--- sys/smr.h 3 Apr 2020 03:36:57 -0000 1.4
+++ sys/smr.h 6 Apr 2020 10:25:04 -0000
@@ -318,6 +318,115 @@ struct {
\
* any concurrent readers to proceed iteration. */ \
} while (0)
+/*
+ * Tail queue definitions.
+ *
+ * Writers serialize all updates with a lock; readers may traverse the
+ * queue forward concurrently under SMR.  smr_tqh_last is only ever
+ * dereferenced by writers.
+ */
+#define SMR_TAILQ_HEAD(name, type)
\
+struct name {								\
+	struct type *smr_tqh_first;	/* first element, SMR-protected */\
+	struct type **smr_tqh_last;	/* addr of last next elm, locked */\
+}
+
+/* Static initializer for an empty SMR_TAILQ_HEAD. */
+#define SMR_TAILQ_HEAD_INITIALIZER(head)
\
+	{ .smr_tqh_first = NULL, .smr_tqh_last = &(head).smr_tqh_first }
+
+/*
+ * Per-element queue linkage.  smr_tqe_next may be loaded by lock-free
+ * SMR readers; smr_tqe_prev is only ever used by writers.
+ */
+#define SMR_TAILQ_ENTRY(type)
\
+struct {								\
+	struct type *smr_tqe_next;	/* next element, SMR-protected */\
+	struct type **smr_tqe_prev;	/* address of previous next element */\
+}
+
+/*
+ * Tail queue access methods.
+ */
+/* List terminator; always NULL regardless of head. */
+#define SMR_TAILQ_END(head) NULL
+
+/*
+ * Lock-free accessors for readers inside an SMR critical section.
+ * SMR_PTR_GET() presumably issues a dependency-ordered load so a reader
+ * sees a fully initialized element -- confirm against its definition
+ * earlier in this header.  Forward traversal only.
+ */
+#define SMR_TAILQ_FIRST(head) \
+	SMR_PTR_GET(&(head)->smr_tqh_first)
+#define SMR_TAILQ_NEXT(elm, field) \
+	SMR_PTR_GET(&(elm)->field.smr_tqe_next)
+
+/* Writer-side accessors; caller must hold the lock serializing updates. */
+#define SMR_TAILQ_FIRST_LOCKED(head)		((head)->smr_tqh_first)
+#define SMR_TAILQ_NEXT_LOCKED(elm, field)
((elm)->field.smr_tqe_next)
+#define SMR_TAILQ_LAST_LOCKED(head, headname) \
+	(*(((struct headname *)((head)->smr_tqh_last))->smr_tqh_last))
+#define SMR_TAILQ_EMPTY_LOCKED(head)	\
+	(SMR_TAILQ_FIRST_LOCKED(head) == SMR_TAILQ_END(head))
+
+/*
+ * SMR-safe forward traversal; the only iteration permitted without the
+ * write lock.  The caller must be inside an SMR read-side section.
+ */
+#define SMR_TAILQ_FOREACH(var, head, field)				\
+	for ((var) = SMR_TAILQ_FIRST(head);				\
+	    (var) != SMR_TAILQ_END(head);				\
+	    (var) = SMR_TAILQ_NEXT(var, field))
+
+/* Writer-side traversal; caller must hold the lock serializing updates. */
+#define SMR_TAILQ_FOREACH_LOCKED(var, head, field)			\
+	for ((var) = SMR_TAILQ_FIRST_LOCKED(head);			\
+	    (var) != SMR_TAILQ_END(head);				\
+	    (var) = SMR_TAILQ_NEXT_LOCKED(var, field))
+
+/*
+ * Writer-side traversal that tolerates removal of (var) inside the
+ * loop body: (tvar) caches the successor before the body runs.
+ */
+#define SMR_TAILQ_FOREACH_SAFE_LOCKED(var, head, field, tvar)
\
+	for ((var) = SMR_TAILQ_FIRST_LOCKED(head);			\
+	    (var) && ((tvar) = SMR_TAILQ_NEXT_LOCKED(var, field), 1);	\
+	    (var) = (tvar))
+
+/*
+ * Tail queue functions.
+ */
+/*
+ * Initialize an empty queue.  Not safe against concurrent SMR readers;
+ * use only before the queue is published.
+ * Use SMR_TAILQ_END, not plain TAILQ_END: smr.h must not depend on
+ * <sys/queue.h> having been included.
+ */
+#define SMR_TAILQ_INIT(head) do {					\
+	(head)->smr_tqh_first = SMR_TAILQ_END(head);			\
+	(head)->smr_tqh_last = &(head)->smr_tqh_first;			\
+} while (0)
+
+/*
+ * Insert elm after listelm.  Writer lock required.  elm's own fields
+ * (and the head/successor back-links) are fully set up before the
+ * membar_producer(); the single store to listelm's smr_tqe_next
+ * afterwards publishes elm, so a concurrent SMR reader either misses
+ * elm entirely or sees it fully initialized.
+ */
+#define SMR_TAILQ_INSERT_AFTER_LOCKED(head, listelm, elm, field) do {
\
+	(elm)->field.smr_tqe_next = (listelm)->field.smr_tqe_next;	\
+	if ((listelm)->field.smr_tqe_next != NULL)			\
+		(listelm)->field.smr_tqe_next->field.smr_tqe_prev =	\
+		    &(elm)->field.smr_tqe_next;				\
+	else								\
+		(head)->smr_tqh_last = &(elm)->field.smr_tqe_next;	\
+	(elm)->field.smr_tqe_prev = &(listelm)->field.smr_tqe_next;	\
+	membar_producer();						\
+	(listelm)->field.smr_tqe_next = (elm);				\
+} while (0)
+
+/*
+ * Insert elm before listelm.  Writer lock required.  elm is published
+ * by the store through *smr_tqe_prev (either smr_tqh_first or the
+ * predecessor's smr_tqe_next) after the membar_producer(); a reader
+ * either still steps directly to listelm or sees elm fully set up.
+ * listelm cannot be the last element, so smr_tqh_last needs no update.
+ */
+#define SMR_TAILQ_INSERT_BEFORE_LOCKED(listelm, elm, field) do {
\
+	(elm)->field.smr_tqe_prev = (listelm)->field.smr_tqe_prev;	\
+	(elm)->field.smr_tqe_next = (listelm);				\
+	membar_producer();						\
+	*(listelm)->field.smr_tqe_prev = (elm);				\
+	(listelm)->field.smr_tqe_prev = &(elm)->field.smr_tqe_next;	\
+} while (0)
+
+/*
+ * Insert elm at the head.  Writer lock required.  All linkage is
+ * prepared before the membar_producer(); the final store to
+ * smr_tqh_first publishes elm to lock-free readers in one step.
+ */
+#define SMR_TAILQ_INSERT_HEAD_LOCKED(head, elm, field) do {
\
+	(elm)->field.smr_tqe_next = (head)->smr_tqh_first;		\
+	(elm)->field.smr_tqe_prev = &(head)->smr_tqh_first;		\
+	if ((head)->smr_tqh_first != NULL)				\
+		(head)->smr_tqh_first->field.smr_tqe_prev =		\
+		    &(elm)->field.smr_tqe_next;				\
+	else								\
+		(head)->smr_tqh_last = &(elm)->field.smr_tqe_next;	\
+	membar_producer();						\
+	(head)->smr_tqh_first = (elm);					\
+} while (0)
+
+/*
+ * Insert elm at the tail.  Writer lock required.  The store through
+ * *smr_tqh_last (the previous tail's smr_tqe_next, or smr_tqh_first
+ * when the queue is empty) after the membar_producer() is what makes
+ * elm visible to readers, with its fields already initialized.
+ */
+#define SMR_TAILQ_INSERT_TAIL_LOCKED(head, elm, field) do {
\
+	(elm)->field.smr_tqe_next = NULL;				\
+	(elm)->field.smr_tqe_prev = (head)->smr_tqh_last;		\
+	membar_producer();						\
+	*(head)->smr_tqh_last = (elm);					\
+	(head)->smr_tqh_last = &(elm)->field.smr_tqe_next;		\
+} while (0)
+
+/*
+ * Unlink elm.  Writer lock required.  Readers that already hold a
+ * pointer to elm continue through its intact smr_tqe_next, so the
+ * caller must defer freeing or reusing elm until after an SMR grace
+ * period (e.g. smr_call()/smr_barrier()) -- NOTE(review): confirm
+ * against the callers' reclamation scheme.
+ */
+#define SMR_TAILQ_REMOVE_LOCKED(head, elm, field) do {
\
+	if ((elm)->field.smr_tqe_next != NULL)				\
+		(elm)->field.smr_tqe_next->field.smr_tqe_prev =		\
+		    (elm)->field.smr_tqe_prev;				\
+	else								\
+		(head)->smr_tqh_last = (elm)->field.smr_tqe_prev;	\
+	*(elm)->field.smr_tqe_prev = (elm)->field.smr_tqe_next;	\
+	/* (elm)->field.smr_tqe_next must be left intact to allow	\
+	 * any concurrent readers to proceed iteration. */		\
+} while (0)
+
#endif /* _KERNEL */
#endif /* !_SYS_SMR_ */