Hi Willy,

On Wed, Jun 06, 2018 at 02:09:01PM +0200, Willy Tarreau wrote:
> On Wed, Jun 06, 2018 at 02:04:35PM +0200, Olivier Houchard wrote:
> > When building without threads enabled, instead of just using the global
> > runqueue, just use the local runqueue associated with the only thread, as
> > that's what is now expected for a single thread in process_runnable_tasks().
> 
> Just out of curiosity, shouldn't we #ifdef out the global runqueue
> definition when running without threads in order to catch such cases
> in the future?
> 

I think this is actually a good idea.
My only concern is that it adds quite a bit of #ifdef USE_THREAD; see the
attached patch.
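
For illustration, this is roughly the shape of the pattern the patch repeats
(a stripped-down, self-contained sketch, not the actual HAProxy code: the
queue type, the names and the MAX_THREADS value below are made up so that it
builds both with and without -DUSE_THREAD):

/* Sketch only: the global object exists solely when USE_THREAD is defined,
 * while the per-thread array always does; the "} else ... #endif ... {"
 * trick keeps a single unconditional block for the per-thread case.
 */
#include <stdio.h>

#define MAX_THREADS 4

struct queue { int size; };

#ifdef USE_THREAD
static struct queue global_q;                /* only defined with threads */
#endif
static struct queue local_q[MAX_THREADS];    /* always defined */

static void enqueue(struct queue *q)
{
#ifdef USE_THREAD
	if (q == &global_q) {
		/* global case: this is where a run queue lock would be taken */
		global_q.size++;
	} else
#endif
	{
		/* per-thread case: derive the index from the pointer */
		int nb = (int)(q - &local_q[0]);
		local_q[nb].size++;
	}
}

int main(void)
{
	enqueue(&local_q[0]);
#ifdef USE_THREAD
	enqueue(&global_q);
	printf("global: %d\n", global_q.size);
#endif
	printf("local[0]: %d\n", local_q[0].size);
	return 0;
}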

Regards,

Olivier
From baeb750ed13307010bfef39de92ec9bb8af54022 Mon Sep 17 00:00:00 2001
From: Olivier Houchard <ohouch...@haproxy.com>
Date: Wed, 6 Jun 2018 14:22:03 +0200
Subject: [PATCH] MINOR: tasks: Don't define rqueue if we're building without
 threads.

To make sure we don't inadvertently insert a task into the global runqueue
while only the local runqueue should be used without threads, make its
definition and usage conditional on USE_THREAD.
---
 include/proto/task.h |  2 ++
 src/task.c           | 28 +++++++++++++++++++++++++---
 2 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/include/proto/task.h b/include/proto/task.h
index dc8a54481..266246098 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -93,7 +93,9 @@ extern struct pool_head *pool_head_tasklet;
 extern struct pool_head *pool_head_notification;
 extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */
 extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */
+#ifdef USE_THREAD
 extern struct eb_root rqueue;      /* tree constituting the run queue */
+#endif
 extern struct eb_root rqueue_local[MAX_THREADS]; /* tree constituting the per-thread run queue */
 extern struct list task_list[MAX_THREADS]; /* List of tasks to be run, mixing tasks and tasklets */
 extern int task_list_size[MAX_THREADS]; /* Number of tasks in the task_list */
diff --git a/src/task.c b/src/task.c
index 16c723230..c961725a1 100644
--- a/src/task.c
+++ b/src/task.c
@@ -49,9 +49,11 @@ __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */
 __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */
 
 static struct eb_root timers;      /* sorted timers tree */
+#ifdef USE_THREAD
 struct eb_root rqueue;      /* tree constituting the run queue */
-struct eb_root rqueue_local[MAX_THREADS]; /* tree constituting the per-thread run queue */
 static int global_rqueue_size; /* Number of elements in the global runqueue */
+#endif
+struct eb_root rqueue_local[MAX_THREADS]; /* tree constituting the per-thread run queue */
 static int rqueue_size[MAX_THREADS]; /* Number of elements in the per-thread run queue */
 static unsigned int rqueue_ticks;  /* insertion count */
 
@@ -68,10 +70,13 @@ void __task_wakeup(struct task *t, struct eb_root *root)
        void *expected = NULL;
        int *rq_size;
 
+#ifdef USE_THREAD
        if (root == &rqueue) {
                rq_size = &global_rqueue_size;
                HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
-       } else {
+       } else
+#endif
+       {
                int nb = root - &rqueue_local[0];
                rq_size = &rqueue_size[nb];
        }
@@ -80,8 +85,10 @@ void __task_wakeup(struct task *t, struct eb_root *root)
         */
 redo:
        if (unlikely(!HA_ATOMIC_CAS(&t->rq.node.leaf_p, &expected, (void *)0x1))) {
+#ifdef USE_THREAD
                if (root == &rqueue)
                        HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+#endif
                return;
        }
        /* There's a small race condition, when running a task, the thread
@@ -104,8 +111,10 @@ redo:
                state = (volatile unsigned short)(t->state);
                if (unlikely(state != 0 && !(state & TASK_RUNNING)))
                        goto redo;
+#ifdef USE_THREAD
                if (root == &rqueue)
                        HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+#endif
                return;
        }
        HA_ATOMIC_ADD(&tasks_run_queue, 1);
@@ -124,10 +133,13 @@ redo:
        }
 
        eb32sc_insert(root, &t->rq, t->thread_mask);
+#ifdef USE_THREAD
        if (root == &rqueue) {
                global_rqueue_size++;
                HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
-       } else {
+       } else
+#endif
+       {
                int nb = root - &rqueue_local[0];
 
                rqueue_size[nb]++;
@@ -239,7 +251,9 @@ void process_runnable_tasks()
 {
        struct task *t;
        int max_processed;
+#ifdef USE_THREAD
        uint64_t average = 0;
+#endif
 
        tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
        nb_tasks_cur = nb_tasks;
@@ -253,6 +267,7 @@ void process_runnable_tasks()
                        return;
                }
 
+#ifdef USE_THREAD
                average = tasks_run_queue / global.nbthread;
 
                /* Get some elements from the global run queue and put it in the
@@ -284,6 +299,7 @@ void process_runnable_tasks()
                        __task_unlink_rq(t);
                        __task_wakeup(t, &rqueue_local[tid]);
                }
+#endif
 
                HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
        } else {
@@ -361,8 +377,12 @@ void process_runnable_tasks()
                if (t != NULL) {
                        state = HA_ATOMIC_AND(&t->state, ~TASK_RUNNING);
                        if (state)
+#ifdef USE_THREAD
                                __task_wakeup(t, (t->thread_mask == tid_bit) ?
                                    &rqueue_local[tid] : &rqueue);
+#else
+                               __task_wakeup(t, &rqueue_local[tid]);
+#endif
                        else
                                task_queue(t);
                }
@@ -382,7 +402,9 @@ int init_task()
        int i;
 
        memset(&timers, 0, sizeof(timers));
+#ifdef USE_THREAD
        memset(&rqueue, 0, sizeof(rqueue));
+#endif
        HA_SPIN_INIT(&wq_lock);
        HA_SPIN_INIT(&rq_lock);
        for (i = 0; i < MAX_THREADS; i++) {
-- 
2.14.3
