A bit orthogonal to this, I'd also like to sync 2.4.x's "event" with
trunk w.r.t. cosmetic changes, before (and to help with) further
backport proposals.

That's something which will possibly help *us* with later backports, but
not necessarily distros with a (security-)fixes-only policy.
Is that something we should care more about? I suppose distro
maintainers do care...

For instance, the three attached patches are how I would stage the latest
"event" changes in 2.4.x:
- patch 1: align with trunk whatever can/needs to be aligned (cosmetics);
- patch 2: optimizations and correctness fixes which don't seem to have
bitten us so far (not a proven fix, somehow);
- patch 3: a wakeup fix (corner case) that applies almost cleanly
thanks to 1/ and 2/.

Would this work, or should I go with 3/ directly and resolve the
backport conflicts there?
Or maybe go with 3/ then 2/ then 1/, for the same result, but so that
distros would only need to care about the first step (this time...)?
Merge r1605328, r1629576 from trunk:

event: minify local variables scope.

event: have_idle_worker must not be cleared in every listener_thread iteration.
Fixes a bug where workers were not stopped after a graceful restart (introduced
in r1605328).

Submitted by: takashi, jkaluza
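
For context, here is a minimal compilable sketch of the have_idle_worker
point (hypothetical helper and counter, not the actual listener code),
showing why the flag must keep function scope:

    #include <stdio.h>

    static int idlers = 3;               /* pretend idle worker count */

    static int reserve_idle_worker(void) /* hypothetical stand-in helper */
    {
        if (idlers > 0) {
            --idlers;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        int have_idle_worker = 0;  /* function scope, as kept by r1629576 */
        int i;

        for (i = 0; i < 5; ++i) {
            /* Had the flag been (re)declared here (as after r1605328), the
             * reservation made in a previous iteration would be forgotten
             * and another idler reserved on each pass, draining 'idlers'
             * without dispatching anything.
             */
            if (!have_idle_worker) {
                have_idle_worker = reserve_idle_worker();
            }
            printf("iteration %d: idlers=%d reserved=%d\n",
                   i, idlers, have_idle_worker);
            /* the flag is cleared only once the worker is actually used */
        }
        return 0;
    }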

diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index 54c9256074..1a5f70e55c 100644
--- a/server/mpm/event/event.c
+++ b/server/mpm/event/event.c
@@ -1545,22 +1545,14 @@ static void process_keepalive_queue(apr_time_t timeout_time)
 
 static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
 {
-    timer_event_t *te;
     apr_status_t rc;
     proc_info *ti = dummy;
     int process_slot = ti->pslot;
     struct process_score *ps = ap_get_scoreboard_process(process_slot);
     apr_pool_t *tpool = apr_thread_pool_get(thd);
-    void *csd = NULL;
-    apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
-    ap_listen_rec *lr;
-    int have_idle_worker = 0;
-    const apr_pollfd_t *out_pfd;
-    apr_int32_t num = 0;
-    apr_interval_time_t timeout_interval;
-    apr_time_t timeout_time = 0, now, last_log;
-    listener_poll_type *pt;
     int closed = 0, listeners_disabled = 0;
+    int have_idle_worker = 0;
+    apr_time_t last_log;
 
     last_log = apr_time_now();
     free(ti);
@@ -1581,6 +1573,11 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
     apr_signal(LISTENER_SIGNAL, dummy_signal_handler);
 
     for (;;) {
+        timer_event_t *te;
+        const apr_pollfd_t *out_pfd;
+        apr_int32_t num = 0;
+        apr_interval_time_t timeout_interval;
+        apr_time_t now, timeout_time;
         int workers_were_busy = 0;
 
         if (listener_may_exit) {
@@ -1693,7 +1690,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         }
 
         while (num) {
-            pt = (listener_poll_type *) out_pfd->client_data;
+            listener_poll_type *pt = (listener_poll_type *) out_pfd->client_data;
             if (pt->type == PT_CSD) {
                 /* one of the sockets is readable */
                 event_conn_state_t *cs = (event_conn_state_t *) pt->baton;
@@ -1785,7 +1782,9 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                     enable_listensocks(process_slot);
                 }
                 if (!listeners_disabled) {
-                    lr = (ap_listen_rec *) pt->baton;
+                    void *csd = NULL;
+                    ap_listen_rec *lr = (ap_listen_rec *) pt->baton;
+                    apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
                     ap_pop_pool(&ptrans, worker_queue_info);
 
                     if (ptrans == NULL) {
@@ -1968,12 +1967,8 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
     proc_info *ti = dummy;
     int process_slot = ti->pslot;
     int thread_slot = ti->tslot;
-    apr_socket_t *csd = NULL;
-    event_conn_state_t *cs;
-    apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
     apr_status_t rv;
     int is_idle = 0;
-    timer_event_t *te = NULL;
 
     free(ti);
 
@@ -1984,6 +1979,11 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
                                         SERVER_STARTING, NULL);
 
     while (!workers_may_exit) {
+        apr_socket_t *csd = NULL;
+        event_conn_state_t *cs;
+        timer_event_t *te = NULL;
+        apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
+
         if (!is_idle) {
             rv = ap_queue_info_set_idle(worker_queue_info, NULL);
             if (rv != APR_SUCCESS) {
@@ -2007,7 +2007,6 @@ static void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
             break;
         }
 
-        te = NULL;
         rv = ap_queue_pop_something(worker_queue, &csd, &cs, &ptrans, &te);
 
         if (rv != APR_SUCCESS) {
Merge r1643279, r1703241, r1802535, r1819847, r1819848, r1819852, r1819853 from trunk:

mpm_event(opt): avoid casts/comparisons from unsigned to signed (atomics).

mpm_event/worker: make ap_queue_term() atomic (acquire/release the mutex once).

mpm_event: ap_queue_info_try_get_idler() may atomically decrement and then
re-increment the number of idlers if it went under or to zero.  We can avoid
this by switching to a compare-and-swap scheme.

mpm_event: avoid unexpected compiler optimizations.

Make sure the compiler doesn't play games with our synchronization variables
by marking them volatile.

mpm_event: make sure wakeup_listener() does its minimal job.

Even if the listener thread is not created yet (i.e. about to be), we must
still tell it to leave, and terminate the worker queue in any case.

mpm_event: worker factor vs pollset.

Make sure the worker factor is at least one (w.r.t. WORKER_FACTOR_SCALE), and
use it to size the pollset appropriately (including K-A and lingering close
connections), in addition to the listening sockets.

mpm_event: remove atomics for timeout_queue's total counter.

It's always updated under the timeout_mutex lock, or read for logging and
scoreboard updates (not critical).

For the read cases a volatile access is enough, while removing the atomic ops
for the already protected write cases saves cycles and context switches.

Submitted by: ylavic
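
To make the compare-and-swap scheme mentioned above explicit, here is a
minimal sketch (assuming APR's apr_atomic_cas32() and an illustrative
zero_pt value; not the exact fdqueue.c code, which is in the hunk below):

    #include <apr_atomic.h>
    #include <apr_errno.h>

    /* Illustrative threshold standing in for fdqueue's zero_pt */
    #define ZERO_PT ((apr_uint32_t)1 << 31)

    /* Take one idler only if the count stays above ZERO_PT, so the counter
     * never has to be decremented and then re-incremented ("backed out").
     */
    static apr_status_t try_get_idler(volatile apr_uint32_t *idlers)
    {
        for (;;) {
            apr_uint32_t val = *idlers;
            if (val <= ZERO_PT) {
                return APR_EAGAIN;  /* no idle worker available */
            }
            /* cas32 stores (val - 1) only if *idlers still equals val and
             * returns the value it found; retry if another thread raced us.
             */
            if (apr_atomic_cas32(idlers, val - 1, val) == val) {
                return APR_SUCCESS;
            }
        }
    }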
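
And to illustrate the new pollset sizing with assumed settings
(ThreadsPerChild 25, AsyncRequestWorkerFactor 2, two listening sockets):

    async_factor = worker_factor / WORKER_FACTOR_SCALE        = 2
    pollset_size = num_listensocks
                   + threads_per_child * max(async_factor, 2) = 2 + 25 * 2 = 52

whereas the previous hard-coded threads_per_child * 2 (50 here) ignored the
listening sockets and did not grow with the factor.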

diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index 1a5f70e55c..6fc74c0d16 100644
--- a/server/mpm/event/event.c
+++ b/server/mpm/event/event.c
@@ -173,10 +173,10 @@ static int max_workers = 0;                 /* MaxRequestWorkers */
 static int server_limit = 0;                /* ServerLimit */
 static int thread_limit = 0;                /* ThreadLimit */
 static int had_healthy_child = 0;
-static int dying = 0;
-static int workers_may_exit = 0;
-static int start_thread_may_exit = 0;
-static int listener_may_exit = 0;
+static volatile int dying = 0;
+static volatile int workers_may_exit = 0;
+static volatile int start_thread_may_exit = 0;
+static volatile int listener_may_exit = 0;
 static int listener_is_wakeable = 0;        /* Pollset supports APR_POLLSET_WAKEABLE */
 static int num_listensocks = 0;
 static apr_int32_t conns_this_child;        /* MaxConnectionsPerChild, only access
@@ -285,7 +285,7 @@ static void TO_QUEUE_APPEND(struct timeout_queue *q, event_conn_state_t *el)
     apr_time_t next_expiry;
 
     APR_RING_INSERT_TAIL(&q->head, el, event_conn_state_t, timeout_list);
-    apr_atomic_inc32(q->total);
+    ++*q->total;
     ++q->count;
 
     /* Cheaply update the overall queues' next expiry according to the
@@ -307,7 +307,7 @@ static void TO_QUEUE_REMOVE(struct timeout_queue *q, event_conn_state_t *el)
 {
     APR_RING_REMOVE(el, timeout_list);
     APR_RING_ELEM_INIT(el, timeout_list);
-    apr_atomic_dec32(q->total);
+    --*q->total;
     --q->count;
 }
 
@@ -539,6 +539,17 @@ static void close_worker_sockets(void)
 static void wakeup_listener(void)
 {
     listener_may_exit = 1;
+
+    /* Unblock the listener if it's poll()ing */
+    if (event_pollset && listener_is_wakeable) {
+        apr_pollset_wakeup(event_pollset);
+    }
+
+    /* unblock the listener if it's waiting for a worker */
+    if (worker_queue_info) {
+        ap_queue_info_term(worker_queue_info);
+    }
+
     if (!listener_os_thread) {
         /* XXX there is an obscure path that this doesn't handle perfectly:
          *     right after listener thread is created but before
@@ -547,15 +558,6 @@ static void wakeup_listener(void)
          */
         return;
     }
-
-    /* Unblock the listener if it's poll()ing */
-    if (listener_is_wakeable) {
-        apr_pollset_wakeup(event_pollset);
-    }
-
-    /* unblock the listener if it's waiting for a worker */
-    ap_queue_info_term(worker_queue_info);
-
     /*
      * we should just be able to "kill(ap_my_pid, LISTENER_SIGNAL)" on all
      * platforms and wake up the listener thread since it is the only thread
@@ -576,7 +578,7 @@ static int terminate_mode = ST_INIT;
 
 static void signal_threads(int mode)
 {
-    if (terminate_mode == mode) {
+    if (terminate_mode >= mode) {
         return;
     }
     terminate_mode = mode;
@@ -1459,7 +1461,7 @@ static void process_timeout_queue(struct timeout_queue *q,
     struct timeout_queue *qp;
     apr_status_t rv;
 
-    if (!apr_atomic_read32(q->total)) {
+    if (!*q->total) {
         return;
     }
 
@@ -1509,8 +1511,8 @@ static void process_timeout_queue(struct timeout_queue *q,
         APR_RING_UNSPLICE(first, last, timeout_list);
         APR_RING_SPLICE_TAIL(&trash, first, last, event_conn_state_t,
                              timeout_list);
-        AP_DEBUG_ASSERT(apr_atomic_read32(q->total) >= count);
-        apr_atomic_sub32(q->total, count);
+        AP_DEBUG_ASSERT(*q->total >= count && qp->count >= count);
+        *q->total -= count;
         qp->count -= count;
         total += count;
     }
@@ -1536,8 +1538,7 @@ static void process_keepalive_queue(apr_time_t timeout_time)
     if (!timeout_time) {
         ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
                      "All workers are busy or dying, will close %u "
-                     "keep-alive connections",
-                     apr_atomic_read32(keepalive_q->total));
+                     "keep-alive connections", *keepalive_q->total);
     }
     process_timeout_queue(keepalive_q, timeout_time,
                           start_lingering_close_nonblocking);
@@ -1576,6 +1577,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         timer_event_t *te;
         const apr_pollfd_t *out_pfd;
         apr_int32_t num = 0;
+        apr_uint32_t c_count, l_count, i_count;
         apr_interval_time_t timeout_interval;
         apr_time_t now, timeout_time;
         int workers_were_busy = 0;
@@ -1601,8 +1603,8 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                              "keep-alive: %d lingering: %d suspended: %u)",
                              apr_atomic_read32(&connection_count),
                              apr_atomic_read32(&clogged_count),
-                             apr_atomic_read32(write_completion_q->total),
-                             apr_atomic_read32(keepalive_q->total),
+                             *(volatile apr_uint32_t*)write_completion_q->total,
+                             *(volatile apr_uint32_t*)keepalive_q->total,
                              apr_atomic_read32(&lingering_count),
                              apr_atomic_read32(&suspended_count));
                 if (dying) {
@@ -1760,11 +1762,12 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                                  "All workers busy, not accepting new conns "
                                  "in this process");
                 }
-                else if (  (int)apr_atomic_read32(&connection_count)
-                           - (int)apr_atomic_read32(&lingering_count)
-                         > threads_per_child
-                           + ap_queue_info_get_idlers(worker_queue_info) *
-                             worker_factor / WORKER_FACTOR_SCALE)
+                else if ((c_count = apr_atomic_read32(&connection_count))
+                             > (l_count = apr_atomic_read32(&lingering_count))
+                         && (c_count - l_count
+                                > ap_queue_info_get_idlers(worker_queue_info)
+                                  * worker_factor / WORKER_FACTOR_SCALE
+                                  + threads_per_child))
                 {
                     if (!listeners_disabled)
                         disable_listensocks(process_slot);
@@ -1873,14 +1876,14 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
 
             apr_thread_mutex_unlock(timeout_mutex);
 
-            ps->keep_alive = apr_atomic_read32(keepalive_q->total);
-            ps->write_completion = apr_atomic_read32(write_completion_q->total);
+            ps->keep_alive = *(volatile apr_uint32_t*)keepalive_q->total;
+            ps->write_completion = *(volatile apr_uint32_t*)write_completion_q->total;
             ps->connections = apr_atomic_read32(&connection_count);
             ps->suspended = apr_atomic_read32(&suspended_count);
             ps->lingering_close = apr_atomic_read32(&lingering_count);
         }
         else if ((workers_were_busy || dying)
-                 && apr_atomic_read32(keepalive_q->total)) {
+                 && *(volatile apr_uint32_t*)keepalive_q->total) {
             apr_thread_mutex_lock(timeout_mutex);
             process_keepalive_queue(0); /* kill'em all \m/ */
             apr_thread_mutex_unlock(timeout_mutex);
@@ -1906,10 +1909,12 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         }
 
         if (listeners_disabled && !workers_were_busy
-            && (int)apr_atomic_read32(&connection_count)
-               - (int)apr_atomic_read32(&lingering_count)
-               < ((int)ap_queue_info_get_idlers(worker_queue_info) - 1)
-                 * worker_factor / WORKER_FACTOR_SCALE + threads_per_child)
+            && ((c_count = apr_atomic_read32(&connection_count))
+                    >= (l_count = apr_atomic_read32(&lingering_count))
+                && (i_count = ap_queue_info_get_idlers(worker_queue_info)) > 0
+                && (c_count - l_count
+                        < (i_count - 1) * worker_factor / WORKER_FACTOR_SCALE
+                          + threads_per_child)))
         {
             listeners_disabled = 0;
             enable_listensocks(process_slot);
@@ -2141,9 +2146,14 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
     int loops;
     int prev_threads_created;
     int max_recycled_pools = -1;
-    int good_methods[] = {APR_POLLSET_KQUEUE, APR_POLLSET_PORT, APR_POLLSET_EPOLL};
-    /* XXX don't we need more to handle K-A or lingering close? */
-    const apr_uint32_t pollset_size = threads_per_child * 2;
+    const int good_methods[] = { APR_POLLSET_KQUEUE,
+                                 APR_POLLSET_PORT,
+                                 APR_POLLSET_EPOLL };
+    /* XXX: K-A or lingering close connection included in the async factor */
+    const apr_uint32_t async_factor = worker_factor / WORKER_FACTOR_SCALE;
+    const apr_uint32_t pollset_size = (apr_uint32_t)num_listensocks +
+                                      (apr_uint32_t)threads_per_child *
+                                      (async_factor > 2 ? async_factor : 2);
 
     /* We must create the fd queues before we start up the listener
      * and worker threads. */
@@ -3335,6 +3345,10 @@ static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
     had_healthy_child = 0;
     ap_extended_status = 0;
 
+    event_pollset = NULL;
+    worker_queue_info = NULL;
+    listener_os_thread = NULL;
+
     return OK;
 }
 
@@ -3748,8 +3762,9 @@ static const char *set_worker_factor(cmd_parms * cmd, void *dummy,
         return "AsyncRequestWorkerFactor argument must be a positive number";
 
     worker_factor = val * WORKER_FACTOR_SCALE;
-    if (worker_factor == 0)
-        worker_factor = 1;
+    if (worker_factor < WORKER_FACTOR_SCALE) {
+        worker_factor = WORKER_FACTOR_SCALE;
+    }
     return NULL;
 }
 
diff --git a/server/mpm/event/fdqueue.c b/server/mpm/event/fdqueue.c
index 64b318d0e0..175d86a720 100644
--- a/server/mpm/event/fdqueue.c
+++ b/server/mpm/event/fdqueue.c
@@ -27,18 +27,18 @@ struct recycled_pool
 
 struct fd_queue_info_t
 {
-    apr_uint32_t idlers;     /**
-                              * >= zero_pt: number of idle worker threads
-                              * < zero_pt:  number of threads blocked waiting
-                              *             for an idle worker
-                              */
+    apr_uint32_t volatile idlers; /**
+                                   * >= zero_pt: number of idle worker threads
+                                   * <  zero_pt: number of threads blocked,
+                                   *             waiting for an idle worker
+                                   */
     apr_thread_mutex_t *idlers_mutex;
     apr_thread_cond_t *wait_for_idler;
     int terminated;
     int max_idlers;
     int max_recycled_pools;
     apr_uint32_t recycled_pools_count;
-    struct recycled_pool *recycled_pools;
+    struct recycled_pool *volatile recycled_pools;
 };
 
 static apr_status_t queue_info_cleanup(void *data_)
@@ -97,15 +97,11 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
                                     apr_pool_t * pool_to_recycle)
 {
     apr_status_t rv;
-    apr_int32_t prev_idlers;
 
     ap_push_pool(queue_info, pool_to_recycle);
 
-    /* Atomically increment the count of idle workers */
-    prev_idlers = apr_atomic_inc32(&(queue_info->idlers)) - zero_pt;
-
     /* If other threads are waiting on a worker, wake one up */
-    if (prev_idlers < 0) {
+    if (apr_atomic_inc32(&queue_info->idlers) < zero_pt) {
         rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
         if (rv != APR_SUCCESS) {
             AP_DEBUG_ASSERT(0);
@@ -127,31 +123,32 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
 
 apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
 {
-    apr_int32_t new_idlers;
-    new_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
-    if (--new_idlers <= 0) {
-        apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
-        return APR_EAGAIN;
+    /* Don't block if there isn't any idle worker. */
+    for (;;) {
+        apr_uint32_t idlers = queue_info->idlers;
+        if (idlers <= zero_pt) {
+            return APR_EAGAIN;
+        }
+        if (apr_atomic_cas32(&queue_info->idlers, idlers - 1,
+                             idlers) == idlers) {
+            return APR_SUCCESS;
+        }
     }
-    return APR_SUCCESS;
 }
 
 apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
                                           int *had_to_block)
 {
     apr_status_t rv;
-    apr_int32_t prev_idlers;
-
-    /* Atomically decrement the idle worker count, saving the old value */
-    /* See TODO in ap_queue_info_set_idle() */
-    prev_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
 
-    /* Block if there weren't any idle workers */
-    if (prev_idlers <= 0) {
+    /* Block if there isn't any idle worker.
+     * apr_atomic_add32(x, -1) does the same as dec32(x), except
+     * that it returns the previous value (unlike dec32's bool).
+     */
+    if (apr_atomic_add32(&queue_info->idlers, -1) <= zero_pt) {
         rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
         if (rv != APR_SUCCESS) {
             AP_DEBUG_ASSERT(0);
-            /* See TODO in ap_queue_info_set_idle() */
             apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
             return rv;
         }
@@ -205,11 +202,11 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
 
 apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
 {
-    apr_int32_t val;
-    val = (apr_int32_t)apr_atomic_read32(&queue_info->idlers) - zero_pt;
-    if (val < 0)
+    apr_uint32_t val;
+    val = apr_atomic_read32(&queue_info->idlers);
+    if (val <= zero_pt)
         return 0;
-    return val;
+    return val - zero_pt;
 }
 
 void ap_push_pool(fd_queue_info_t * queue_info,
@@ -490,13 +487,20 @@ apr_status_t ap_queue_pop_something(fd_queue_t * queue, apr_socket_t ** sd,
     return rv;
 }
 
-static apr_status_t queue_interrupt(fd_queue_t * queue, int all)
+static apr_status_t queue_interrupt(fd_queue_t * queue, int all, int term)
 {
     apr_status_t rv;
 
     if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
         return rv;
     }
+    /* we must hold one_big_mutex when setting this... otherwise,
+     * we could end up setting it and waking everybody up just after a
+     * would-be popper checks it but right before they block
+     */
+    if (term) {
+        queue->terminated = 1;
+    }
     if (all)
         apr_thread_cond_broadcast(queue->not_empty);
     else
@@ -506,28 +510,15 @@ static apr_status_t queue_interrupt(fd_queue_t * queue, int all)
 
 apr_status_t ap_queue_interrupt_all(fd_queue_t * queue)
 {
-    return queue_interrupt(queue, 1);
+    return queue_interrupt(queue, 1, 0);
 }
 
 apr_status_t ap_queue_interrupt_one(fd_queue_t * queue)
 {
-    return queue_interrupt(queue, 0);
+    return queue_interrupt(queue, 0, 0);
 }
 
 apr_status_t ap_queue_term(fd_queue_t * queue)
 {
-    apr_status_t rv;
-
-    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
-        return rv;
-    }
-    /* we must hold one_big_mutex when setting this... otherwise,
-     * we could end up setting it and waking everybody up just after a
-     * would-be popper checks it but right before they block
-     */
-    queue->terminated = 1;
-    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
-        return rv;
-    }
-    return ap_queue_interrupt_all(queue);
+    return queue_interrupt(queue, 1, 1);
 }
diff --git a/server/mpm/worker/fdqueue.c b/server/mpm/worker/fdqueue.c
index fe5881b4ce..803267afbd 100644
--- a/server/mpm/worker/fdqueue.c
+++ b/server/mpm/worker/fdqueue.c
@@ -382,31 +382,30 @@ apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p)
     return rv;
 }
 
-apr_status_t ap_queue_interrupt_all(fd_queue_t *queue)
+static apr_status_t queue_interrupt_all(fd_queue_t *queue, int term)
 {
     apr_status_t rv;
 
     if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
         return rv;
     }
+    /* we must hold one_big_mutex when setting this... otherwise,
+     * we could end up setting it and waking everybody up just after a
+     * would-be popper checks it but right before they block
+     */
+    if (term) {
+        queue->terminated = 1;
+    }
     apr_thread_cond_broadcast(queue->not_empty);
     return apr_thread_mutex_unlock(queue->one_big_mutex);
 }
 
-apr_status_t ap_queue_term(fd_queue_t *queue)
+apr_status_t ap_queue_interrupt_all(fd_queue_t *queue)
 {
-    apr_status_t rv;
+    return queue_interrupt_all(queue, 0);
+}
 
-    if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
-        return rv;
-    }
-    /* we must hold one_big_mutex when setting this... otherwise,
-     * we could end up setting it and waking everybody up just after a
-     * would-be popper checks it but right before they block
-     */
-    queue->terminated = 1;
-    if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
-        return rv;
-    }
-    return ap_queue_interrupt_all(queue);
+apr_status_t ap_queue_term(fd_queue_t *queue)
+{
+    return queue_interrupt_all(queue, 1);
 }
Merge r1819855 from trunk:

mpm_event: wakeup the listener to re-enable listening sockets.

When listening sockets are disabled (too many connections) and the number of
workers / active connections comes back below the limit, we need to wake up
the listener to re-enable them.

Add a new connections_above_limit() helper to determine when this applies.

Submitted by: ylavic
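
For illustration, with assumed settings (ThreadsPerChild 25,
AsyncRequestWorkerFactor 2), 10 idle workers and the listeners currently
disabled, connections_above_limit() compares active minus lingering
connections against:

    threads_per_child + (i_count - 1) * (worker_factor / WORKER_FACTOR_SCALE)
        = 25 + 9 * 2 = 43

(the "- 1" applies only while listeners are disabled, to avoid flip-flopping
right at the boundary).  So once active minus lingering connections drops
below 43, decrement_connection_count() wakes up the pollset (when it is
wakeable) and the listener re-enables the listening sockets.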

diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
index 6fc74c0d16..db1de96580 100644
--- a/server/mpm/event/event.c
+++ b/server/mpm/event/event.c
@@ -177,6 +177,7 @@ static volatile int dying = 0;
 static volatile int workers_may_exit = 0;
 static volatile int start_thread_may_exit = 0;
 static volatile int listener_may_exit = 0;
+static volatile int listeners_disabled = 0;
 static int listener_is_wakeable = 0;        /* Pollset supports APR_POLLSET_WAKEABLE */
 static int num_listensocks = 0;
 static apr_int32_t conns_this_child;        /* MaxConnectionsPerChild, only access
@@ -454,6 +455,7 @@ static apr_socket_t **worker_sockets;
 static void disable_listensocks(int process_slot)
 {
     int i;
+    listeners_disabled = 1;
     for (i = 0; i < num_listensocks; i++) {
         apr_pollset_remove(event_pollset, &listener_pollfd[i]);
     }
@@ -466,6 +468,7 @@ static void enable_listensocks(int process_slot)
     if (listener_may_exit) {
         return;
     }
+    listeners_disabled = 0;
     ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00457)
                  "Accepting new connections again: "
                  "%u active conns (%u lingering/%u clogged/%u suspended), "
@@ -484,6 +487,23 @@ static void enable_listensocks(int process_slot)
     ap_scoreboard_image->parent[process_slot].not_accepting = 0;
 }
 
+static APR_INLINE int connections_above_limit(void)
+{
+    apr_uint32_t i_count = ap_queue_info_get_idlers(worker_queue_info);
+    if (i_count > 0) {
+        apr_uint32_t c_count = apr_atomic_read32(&connection_count);
+        apr_uint32_t l_count = apr_atomic_read32(&lingering_count);
+        if (c_count <= l_count
+                /* Off by 'listeners_disabled' to avoid flip flop */
+                || c_count - l_count < (apr_uint32_t)threads_per_child +
+                                       (i_count - (listeners_disabled != 0)) *
+                                       (worker_factor / WORKER_FACTOR_SCALE)) {
+            return 0;
+        }
+    }
+    return 1;
+}
+
 static void abort_socket_nonblocking(apr_socket_t *csd)
 {
     apr_status_t rv;
@@ -712,6 +732,7 @@ static int child_fatal;
 
 static apr_status_t decrement_connection_count(void *cs_)
 {
+    int is_last_connection;
     event_conn_state_t *cs = cs_;
     switch (cs->pub.state) {
         case CONN_STATE_LINGER_NORMAL:
@@ -724,9 +745,14 @@ static apr_status_t decrement_connection_count(void *cs_)
         default:
             break;
     }
-    /* Unblock the listener if it's waiting for connection_count = 0 */
-    if (!apr_atomic_dec32(&connection_count)
-             && listener_is_wakeable && listener_may_exit) {
+    /* Unblock the listener if it's waiting for connection_count = 0,
+     * or if the listening sockets were disabled due to limits and can
+     * now accept new connections.
+     */
+    is_last_connection = !apr_atomic_dec32(&connection_count);
+    if (listener_is_wakeable
+            && ((is_last_connection && listener_may_exit)
+                || (listeners_disabled && !connections_above_limit()))) {
         apr_pollset_wakeup(event_pollset);
     }
     return APR_SUCCESS;
@@ -1551,7 +1577,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
     int process_slot = ti->pslot;
     struct process_score *ps = ap_get_scoreboard_process(process_slot);
     apr_pool_t *tpool = apr_thread_pool_get(thd);
-    int closed = 0, listeners_disabled = 0;
+    int closed = 0;
     int have_idle_worker = 0;
     apr_time_t last_log;
 
@@ -1577,11 +1603,13 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         timer_event_t *te;
         const apr_pollfd_t *out_pfd;
         apr_int32_t num = 0;
-        apr_uint32_t c_count, l_count, i_count;
         apr_interval_time_t timeout_interval;
         apr_time_t now, timeout_time;
         int workers_were_busy = 0;
 
+        if (conns_this_child <= 0)
+            check_infinite_requests();
+
         if (listener_may_exit) {
             close_listeners(process_slot, &closed);
             if (terminate_mode == ST_UNGRACEFUL
@@ -1589,9 +1617,6 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                 break;
         }
 
-        if (conns_this_child <= 0)
-            check_infinite_requests();
-
         now = apr_time_now();
         if (APLOGtrace6(ap_server_conf)) {
             /* trace log status every second */
@@ -1666,11 +1691,13 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
         rc = apr_pollset_poll(event_pollset, timeout_interval, &num, &out_pfd);
         if (rc != APR_SUCCESS) {
             if (APR_STATUS_IS_EINTR(rc)) {
-                /* Woken up, if we are exiting we must fall through to kill
-                 * kept-alive connections, otherwise we only need to update
-                 * timeouts (logic is above, so restart the loop).
+                /* Woken up, if we are exiting or listeners are disabled we
+                 * must fall through to kill kept-alive connections or test
+                 * whether listeners should be re-enabled. Otherwise we only
+                 * need to update timeouts (logic is above, so simply restart
+                 * the loop).
                  */
-                if (!listener_may_exit) {
+                if (!listener_may_exit && !listeners_disabled) {
                     continue;
                 }
                 timeout_time = 0;
@@ -1752,25 +1779,16 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                     ap_assert(0);
                 }
             }
-            else if (pt->type == PT_ACCEPT) {
+            else if (pt->type == PT_ACCEPT && !listeners_disabled) {
                 /* A Listener Socket is ready for an accept() */
                 if (workers_were_busy) {
-                    if (!listeners_disabled)
-                        disable_listensocks(process_slot);
-                    listeners_disabled = 1;
+                    disable_listensocks(process_slot);
                     ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
                                  "All workers busy, not accepting new conns "
                                  "in this process");
                 }
-                else if ((c_count = apr_atomic_read32(&connection_count))
-                             > (l_count = apr_atomic_read32(&lingering_count))
-                         && (c_count - l_count
-                                > ap_queue_info_get_idlers(worker_queue_info)
-                                  * worker_factor / WORKER_FACTOR_SCALE
-                                  + threads_per_child))
-                {
-                    if (!listeners_disabled)
-                        disable_listensocks(process_slot);
+                else if (connections_above_limit()) {
+                    disable_listensocks(process_slot);
                     ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
                                  "Too many open connections (%u), "
                                  "not accepting new conns in this process",
@@ -1778,13 +1796,9 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
                     ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, ap_server_conf,
                                  "Idle workers: %u",
                                  ap_queue_info_get_idlers(worker_queue_info));
-                    listeners_disabled = 1;
-                }
-                else if (listeners_disabled) {
-                    listeners_disabled = 0;
-                    enable_listensocks(process_slot);
+                    workers_were_busy = 1;
                 }
-                if (!listeners_disabled) {
+                else if (!listener_may_exit) {
                     void *csd = NULL;
                     ap_listen_rec *lr = (ap_listen_rec *) pt->baton;
                     apr_pool_t *ptrans;         /* Pool for per-transaction stuff */
@@ -1908,22 +1922,12 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
             }
         }
 
-        if (listeners_disabled && !workers_were_busy
-            && ((c_count = apr_atomic_read32(&connection_count))
-                    >= (l_count = apr_atomic_read32(&lingering_count))
-                && (i_count = ap_queue_info_get_idlers(worker_queue_info)) > 0
-                && (c_count - l_count
-                        < (i_count - 1) * worker_factor / WORKER_FACTOR_SCALE
-                          + threads_per_child)))
-        {
-            listeners_disabled = 0;
+        if (listeners_disabled
+                && !workers_were_busy
+                && !connections_above_limit()) {
             enable_listensocks(process_slot);
         }
-        /*
-         * XXX: do we need to set some timeout that re-enables the listensocks
-         * XXX: in case no other event occurs?
-         */
-    }     /* listener main loop */
+    } /* listener main loop */
 
     close_listeners(process_slot, &closed);
     ap_queue_term(worker_queue);
