This is a cleanup patch to follow-up the patch which introduced TSAN.
This patch makes separate start_switch_fiber_ functions for TSAN and ASAN.
This does two things:
1. Unrelated ASAN and TSAN code is kept separate, and each function
only has the arguments that are actually needed.
2. The co->tsan_caller_fiber and co->tsan_co_fiber fields are only
accessed from within #ifdef CONFIG_TSAN.
Signed-off-by: Robert Foley <robert.foley@linaro.org>
---
util/coroutine-ucontext.c | 52 +--
1 file changed, 34 insertions(+), 18 deletions(-)
diff --git a/util/coroutine-ucontext.c b/util/coroutine-ucontext.c
index 613f4c118e..bac164d1f1 100644
--- a/util/coroutine-ucontext.c
+++ b/util/coroutine-ucontext.c
@@ -47,8 +47,10 @@ typedef struct {
size_t stack_size;
sigjmp_buf env;
+#ifdef CONFIG_TSAN
void *tsan_co_fiber;
void *tsan_caller_fiber;
+#endif
#ifdef CONFIG_VALGRIND_H
unsigned int valgrind_stack_id;
@@ -72,7 +74,10 @@ union cc_arg {
int i[2];
};
-/* QEMU_ALWAYS_INLINE only does so if __OPTIMIZE__, so we cannot use it. */
+/*
+ * QEMU_ALWAYS_INLINE only does so if __OPTIMIZE__, so we cannot use it.
+ * always_inline is required to avoid TSan runtime fatal errors.
+ */
static inline __attribute__((always_inline))
void on_new_fiber(CoroutineUContext *co)
{
@@ -82,6 +87,7 @@ void on_new_fiber(CoroutineUContext *co)
#endif
}
+/* always_inline is required to avoid TSan runtime fatal errors. */
static inline __attribute__((always_inline))
void finish_switch_fiber(void *fake_stack_save)
{
@@ -104,18 +110,29 @@ void finish_switch_fiber(void *fake_stack_save)
#endif
}
-static inline __attribute__((always_inline)) void start_switch_fiber(
-CoroutineAction action, void **fake_stack_save,
-const void *bottom, size_t size, void *new_fiber)
+/* always_inline is required to avoid TSan runtime fatal errors. */
+static inline __attribute__((always_inline))
+void start_switch_fiber_asan(CoroutineAction action, void **fake_stack_save,
+ const void *bottom, size_t size)
{
#ifdef CONFIG_ASAN
__sanitizer_start_switch_fiber(
action == COROUTINE_TERMINATE ? NULL : fake_stack_save,
bottom, size);
#endif
+}
+
+/* always_inline is required to avoid TSan runtime fatal errors. */
+static inline __attribute__((always_inline))
+void start_switch_fiber_tsan(void **fake_stack_save,
+ CoroutineUContext *co,
+ bool caller)
+{
#ifdef CONFIG_TSAN
-void *curr_fiber =
-__tsan_get_current_fiber();
+void *new_fiber = caller ?
+ co->tsan_caller_fiber :
+ co->tsan_co_fiber;
+void *curr_fiber = __tsan_get_current_fiber();
__tsan_acquire(curr_fiber);
*fake_stack_save = curr_fiber;
@@ -139,12 +156,9 @@ static void coroutine_trampoline(int i0, int i1)
/* Initialize longjmp environment and switch back the caller */
if (!sigsetjmp(self->env, 0)) {
-start_switch_fiber(
-COROUTINE_YIELD,
-&fake_stack_save,
-leader.stack,
-leader.stack_size,
-self->tsan_caller_fiber);
+start_switch_fiber_asan(COROUTINE_YIELD, &fake_stack_save, leader.stack,
+leader.stack_size);
+start_switch_fiber_tsan(&fake_stack_save, self, true); /* true=caller */
siglongjmp(*(sigjmp_buf *)co->entry_arg, 1);
}
@@ -199,10 +213,10 @@ Coroutine *qemu_coroutine_new(void)
/* swapcontext() in, siglongjmp() back out */
if (!sigsetjmp(old_env, 0)) {
-start_switch_fiber(
-COROUTINE_YIELD,
-&fake_stack_save,
-co->stack, co->stack_size, co->tsan_co_fiber);
+start_switch_fiber_asan(COROUTINE_YIELD, &fake_stack_save, co->stack,
+co->stack_size);
+start_switch_fiber_tsan(&fake_stack_save,
+co, false); /* false=not caller */
swapcontext(&old_uc, &uc);
}
@@ -259,8 +273,10 @@ qemu_coroutine_switch(Coroutine *from_, Coroutine *to_,
ret = sigsetjmp(from->env, 0);
if (ret == 0) {
-start_switch_fiber(action, &fake_stack_save,
- to->stack, to->stack_size, to->tsan_co_fiber);
+start_switch_fiber_asan(action, &fake_stack_save, to->stack,
+to->stack_size);
+start_switch_fiber_tsan(&fake_stack_save,
+to, false); /* false=not caller */
siglongjmp(to->env, action);
}
--
2.17.1