Hi,

there are several optimizations and correctness fixes in event/fdqueue.c
that haven't been ported to worker/fdqueue.c.

The patch would look like the attached.
It also includes some cosmetic changes in event (mainly s/type *
arg/type *arg/) à la worker, which better matches the style used
elsewhere in httpd.

Thoughts?

For now things that are event only are not aligned (e.g. timers), but
ultimately I'd like to have a single fdqueue.[ch] for both MPMs (not
too far once this patch is applied), that'd certainly help maintenance
and improvements for both.
If you agree on this, what would be the best practice/place for the common code?


Regards,
Yann.
Index: server/mpm/event/fdqueue.c
===================================================================
--- server/mpm/event/fdqueue.c	(revision 1820807)
+++ server/mpm/event/fdqueue.c	(working copy)
@@ -53,9 +53,8 @@ static apr_status_t queue_info_cleanup(void *data_
         if (first_pool == NULL) {
             break;
         }
-        if (apr_atomic_casptr
-            ((void*) &(qi->recycled_pools), first_pool->next,
-             first_pool) == first_pool) {
+        if (apr_atomic_casptr((void*)&qi->recycled_pools, first_pool->next,
+                              first_pool) == first_pool) {
             apr_pool_destroy(first_pool->pool);
         }
     }
@@ -63,8 +62,8 @@ static apr_status_t queue_info_cleanup(void *data_
     return APR_SUCCESS;
 }
 
-apr_status_t ap_queue_info_create(fd_queue_info_t ** queue_info,
-                                  apr_pool_t * pool, int max_idlers,
+apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
+                                  apr_pool_t *pool, int max_idlers,
                                   int max_recycled_pools)
 {
     apr_status_t rv;
@@ -93,8 +92,8 @@ static apr_status_t queue_info_cleanup(void *data_
     return APR_SUCCESS;
 }
 
-apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
-                                    apr_pool_t * pool_to_recycle)
+apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
+                                    apr_pool_t *pool_to_recycle)
 {
     apr_status_t rv;
 
@@ -121,7 +120,7 @@ static apr_status_t queue_info_cleanup(void *data_
     return APR_SUCCESS;
 }
 
-apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
+apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t *queue_info)
 {
     /* Don't block if there isn't any idle worker. */
     for (;;) {
@@ -136,7 +135,7 @@ static apr_status_t queue_info_cleanup(void *data_
     }
 }
 
-apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
+apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
                                           int *had_to_block)
 {
     apr_status_t rv;
@@ -200,7 +199,7 @@ static apr_status_t queue_info_cleanup(void *data_
     }
 }
 
-apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
+apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t *queue_info)
 {
     apr_uint32_t val;
     val = apr_atomic_read32(&queue_info->idlers);
@@ -209,8 +208,8 @@ static apr_status_t queue_info_cleanup(void *data_
     return val - zero_pt;
 }
 
-void ap_push_pool(fd_queue_info_t * queue_info,
-                                    apr_pool_t * pool_to_recycle)
+
+void ap_push_pool(fd_queue_info_t *queue_info, apr_pool_t *pool_to_recycle)
 {
     struct recycled_pool *new_recycle;
     /* If we have been given a pool to recycle, atomically link
@@ -246,7 +245,7 @@ static apr_status_t queue_info_cleanup(void *data_
     }
 }
 
-void ap_pop_pool(apr_pool_t ** recycled_pool, fd_queue_info_t * queue_info)
+void ap_pop_pool(apr_pool_t ** recycled_pool, fd_queue_info_t *queue_info)
 {
     /* Atomically pop a pool from the recycled list */
 
@@ -290,7 +289,7 @@ void ap_free_idle_pools(fd_queue_info_t *queue_inf
 }
 
 
-apr_status_t ap_queue_info_term(fd_queue_info_t * queue_info)
+apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info)
 {
     apr_status_t rv;
     rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
@@ -334,8 +333,8 @@ static apr_status_t ap_queue_destroy(void *data)
 /**
  * Initialize the fd_queue_t.
  */
-apr_status_t ap_queue_init(fd_queue_t * queue, int queue_capacity,
-                           apr_pool_t * a)
+apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity,
+                           apr_pool_t *a)
 {
     int i;
     apr_status_t rv;
@@ -373,8 +372,8 @@ static apr_status_t ap_queue_destroy(void *data)
  * precondition: ap_queue_info_wait_for_idler has already been called
  *               to reserve an idle worker thread
  */
-apr_status_t ap_queue_push(fd_queue_t * queue, apr_socket_t * sd,
-                           event_conn_state_t * ecs, apr_pool_t * p)
+apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd,
+                           event_conn_state_t *ecs, apr_pool_t *p)
 {
     fd_queue_elem_t *elem;
     apr_status_t rv;
@@ -404,7 +403,7 @@ static apr_status_t ap_queue_destroy(void *data)
     return APR_SUCCESS;
 }
 
-apr_status_t ap_queue_push_timer(fd_queue_t * queue, timer_event_t *te)
+apr_status_t ap_queue_push_timer(fd_queue_t *queue, timer_event_t *te)
 {
     apr_status_t rv;
 
@@ -431,9 +430,9 @@ static apr_status_t ap_queue_destroy(void *data)
  * Once retrieved, the socket is placed into the address specified by
  * 'sd'.
  */
-apr_status_t ap_queue_pop_something(fd_queue_t * queue, apr_socket_t ** sd,
-                                    event_conn_state_t ** ecs, apr_pool_t ** p,
-                                    timer_event_t ** te_out)
+apr_status_t ap_queue_pop_something(fd_queue_t *queue, apr_socket_t **sd,
+                                    event_conn_state_t **ecs, apr_pool_t **p,
+                                    timer_event_t **te_out)
 {
     fd_queue_elem_t *elem;
     apr_status_t rv;
@@ -508,17 +507,17 @@ static apr_status_t queue_interrupt(fd_queue_t *qu
     return apr_thread_mutex_unlock(queue->one_big_mutex);
 }
 
-apr_status_t ap_queue_interrupt_all(fd_queue_t * queue)
+apr_status_t ap_queue_interrupt_all(fd_queue_t *queue)
 {
     return queue_interrupt(queue, 1, 0);
 }
 
-apr_status_t ap_queue_interrupt_one(fd_queue_t * queue)
+apr_status_t ap_queue_interrupt_one(fd_queue_t *queue)
 {
     return queue_interrupt(queue, 0, 0);
 }
 
-apr_status_t ap_queue_term(fd_queue_t * queue)
+apr_status_t ap_queue_term(fd_queue_t *queue)
 {
     return queue_interrupt(queue, 1, 1);
 }
Index: server/mpm/event/fdqueue.h
===================================================================
--- server/mpm/event/fdqueue.h	(revision 1820807)
+++ server/mpm/event/fdqueue.h	(working copy)
@@ -42,17 +42,16 @@
 typedef struct fd_queue_info_t fd_queue_info_t;
 typedef struct event_conn_state_t event_conn_state_t;
 
-apr_status_t ap_queue_info_create(fd_queue_info_t ** queue_info,
-                                  apr_pool_t * pool, int max_idlers,
+apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
+                                  apr_pool_t *pool, int max_idlers,
                                   int max_recycled_pools);
-apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
-                                    apr_pool_t * pool_to_recycle);
-apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info);
-apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
+apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
+                                    apr_pool_t *pool_to_recycle);
+apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t *queue_info);
+apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
                                           int *had_to_block);
-apr_status_t ap_queue_info_term(fd_queue_info_t * queue_info);
-apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info);
-void ap_free_idle_pools(fd_queue_info_t *queue_info);
+apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info);
+apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t *queue_info);
 
 struct fd_queue_elem_t
 {
@@ -88,21 +87,21 @@ struct fd_queue_t
 };
 typedef struct fd_queue_t fd_queue_t;
 
-void ap_pop_pool(apr_pool_t ** recycled_pool, fd_queue_info_t * queue_info);
-void ap_push_pool(fd_queue_info_t * queue_info,
-                                    apr_pool_t * pool_to_recycle);
+void ap_pop_pool(apr_pool_t **recycled_pool, fd_queue_info_t *queue_info);
+void ap_push_pool(fd_queue_info_t *queue_info, apr_pool_t *pool_to_recycle);
+void ap_free_idle_pools(fd_queue_info_t *queue_info);
 
-apr_status_t ap_queue_init(fd_queue_t * queue, int queue_capacity,
-                           apr_pool_t * a);
-apr_status_t ap_queue_push(fd_queue_t * queue, apr_socket_t * sd,
-                           event_conn_state_t * ecs, apr_pool_t * p);
+apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity,
+                           apr_pool_t *a);
+apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd,
+                           event_conn_state_t *ecs, apr_pool_t *p);
 apr_status_t ap_queue_push_timer(fd_queue_t *queue, timer_event_t *te);
-apr_status_t ap_queue_pop_something(fd_queue_t * queue, apr_socket_t ** sd,
-                                    event_conn_state_t ** ecs, apr_pool_t ** p,
-                                    timer_event_t ** te);
-apr_status_t ap_queue_interrupt_all(fd_queue_t * queue);
-apr_status_t ap_queue_interrupt_one(fd_queue_t * queue);
-apr_status_t ap_queue_term(fd_queue_t * queue);
+apr_status_t ap_queue_pop_something(fd_queue_t *queue, apr_socket_t **sd,
+                                    event_conn_state_t **ecs, apr_pool_t **p,
+                                    timer_event_t **te);
+apr_status_t ap_queue_interrupt_all(fd_queue_t *queue);
+apr_status_t ap_queue_interrupt_one(fd_queue_t *queue);
+apr_status_t ap_queue_term(fd_queue_t *queue);
 
 #endif /* FDQUEUE_H */
 /** @} */
Index: server/mpm/worker/fdqueue.c
===================================================================
--- server/mpm/worker/fdqueue.c	(revision 1820807)
+++ server/mpm/worker/fdqueue.c	(working copy)
@@ -17,18 +17,28 @@
 #include "fdqueue.h"
 #include "apr_atomic.h"
 
-typedef struct recycled_pool {
+static const apr_uint32_t zero_pt = APR_UINT32_MAX/2;
+
+struct recycled_pool
+{
     apr_pool_t *pool;
     struct recycled_pool *next;
-} recycled_pool;
+};
 
-struct fd_queue_info_t {
-    volatile apr_uint32_t idlers;
+struct fd_queue_info_t
+{
+    apr_uint32_t volatile idlers; /**
+                                   * >= zero_pt: number of idle worker threads
+                                   * <  zero_pt: number of threads blocked,
+                                   *             waiting for an idle worker
+                                   */
     apr_thread_mutex_t *idlers_mutex;
     apr_thread_cond_t *wait_for_idler;
     int terminated;
     int max_idlers;
-    recycled_pool  *recycled_pools;
+    int max_recycled_pools;
+    apr_uint32_t recycled_pools_count;
+    struct recycled_pool *volatile recycled_pools;
 };
 
 static apr_status_t queue_info_cleanup(void *data_)
@@ -43,7 +53,7 @@ static apr_status_t queue_info_cleanup(void *data_
         if (first_pool == NULL) {
             break;
         }
-        if (apr_atomic_casptr((void*)&(qi->recycled_pools), first_pool->next,
+        if (apr_atomic_casptr((void*)&qi->recycled_pools, first_pool->next,
                               first_pool) == first_pool) {
             apr_pool_destroy(first_pool->pool);
         }
@@ -53,7 +63,8 @@ static apr_status_t queue_info_cleanup(void *data_
 }
 
 apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
-                                  apr_pool_t *pool, int max_idlers)
+                                  apr_pool_t *pool, int max_idlers,
+                                  int max_recycled_pools)
 {
     apr_status_t rv;
     fd_queue_info_t *qi;
@@ -70,7 +81,9 @@ apr_status_t ap_queue_info_create(fd_queue_info_t
         return rv;
     }
     qi->recycled_pools = NULL;
+    qi->max_recycled_pools = max_recycled_pools;
     qi->max_idlers = max_idlers;
+    qi->idlers = zero_pt;
     apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
                               apr_pool_cleanup_null);
 
@@ -84,33 +97,13 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_
 {
     apr_status_t rv;
 
-    /* If we have been given a pool to recycle, atomically link
-     * it into the queue_info's list of recycled pools
-     */
-    if (pool_to_recycle) {
-        struct recycled_pool *new_recycle;
-        new_recycle = (struct recycled_pool *)apr_palloc(pool_to_recycle,
-                                                         sizeof(*new_recycle));
-        new_recycle->pool = pool_to_recycle;
-        for (;;) {
-            /* Save queue_info->recycled_pool in local variable next because
-             * new_recycle->next can be changed after apr_atomic_casptr
-             * function call. For gory details see PR 44402.
-             */
-            struct recycled_pool *next = queue_info->recycled_pools;
-            new_recycle->next = next;
-            if (apr_atomic_casptr((void*)&(queue_info->recycled_pools),
-                                  new_recycle, next) == next) {
-                break;
-            }
-        }
-    }
+    ap_push_pool(queue_info, pool_to_recycle);
 
-    /* If this thread makes the idle worker count nonzero,
-     * wake up the listener. */
-    if (apr_atomic_inc32(&queue_info->idlers) == 0) {
+    /* If other threads are waiting on a worker, wake one up */
+    if (apr_atomic_inc32(&queue_info->idlers) < zero_pt) {
         rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
         if (rv != APR_SUCCESS) {
+            AP_DEBUG_ASSERT(0);
             return rv;
         }
         rv = apr_thread_cond_signal(queue_info->wait_for_idler);
@@ -132,29 +125,24 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
 {
     apr_status_t rv;
 
-    *recycled_pool = NULL;
-
-    /* Block if the count of idle workers is zero */
-    if (queue_info->idlers == 0) {
+    /* Block if there isn't any idle worker.
+     * apr_atomic_add32(x, -1) does the same as dec32(x), except
+     * that it returns the previous value (unlike dec32's bool).
+     */
+    if (apr_atomic_add32(&queue_info->idlers, -1) <= zero_pt) {
         rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
         if (rv != APR_SUCCESS) {
+            AP_DEBUG_ASSERT(0);
+            apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
             return rv;
         }
         /* Re-check the idle worker count to guard against a
          * race condition.  Now that we're in the mutex-protected
          * region, one of two things may have happened:
-         *   - If the idle worker count is still zero, the
+         *   - If the idle worker count is still negative, the
          *     workers are all still busy, so it's safe to
-         *     block on a condition variable, BUT
-         *     we need to check for idle worker count again
-         *     when we are signaled since it can happen that
-         *     we are signaled by a worker thread that went idle
-         *     but received a context switch before it could
-         *     tell us. If it does signal us later once it is on
-         *     CPU again there might be no idle worker left.
-         *     See
-         *     https://issues.apache.org/bugzilla/show_bug.cgi?id=45605#c4
-         *   - If the idle worker count is nonzero, then a
+         *     block on a condition variable.
+         *   - If the idle worker count is non-negative, then a
          *     worker has become idle since the first check
          *     of queue_info->idlers above.  It's possible
          *     that the worker has also signaled the condition
@@ -161,14 +149,19 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
          *     variable--and if so, the listener missed it
          *     because it wasn't yet blocked on the condition
          *     variable.  But if the idle worker count is
-         *     now nonzero, it's safe for this function to
+         *     now non-negative, it's safe for this function to
          *     return immediately.
+         *
+         *     A "negative value" (relative to zero_pt) in
+         *     queue_info->idlers tells how many
+         *     threads are waiting on an idle worker.
          */
-        while (queue_info->idlers == 0) {
+        if (queue_info->idlers < zero_pt) {
             rv = apr_thread_cond_wait(queue_info->wait_for_idler,
-                                  queue_info->idlers_mutex);
+                                      queue_info->idlers_mutex);
             if (rv != APR_SUCCESS) {
                 apr_status_t rv2;
+                AP_DEBUG_ASSERT(0);
                 rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
                 if (rv2 != APR_SUCCESS) {
                     return rv2;
@@ -182,9 +175,55 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
         }
     }
 
-    /* Atomically decrement the idle worker count */
-    apr_atomic_dec32(&(queue_info->idlers));
+    ap_pop_pool(recycled_pool, queue_info);
 
+    if (queue_info->terminated) {
+        return APR_EOF;
+    }
+    else {
+        return APR_SUCCESS;
+    }
+}
+
+
+void ap_push_pool(fd_queue_info_t *queue_info, apr_pool_t *pool_to_recycle)
+{
+    struct recycled_pool *new_recycle;
+    /* If we have been given a pool to recycle, atomically link
+     * it into the queue_info's list of recycled pools
+     */
+    if (!pool_to_recycle)
+        return;
+
+    if (queue_info->max_recycled_pools >= 0) {
+        apr_uint32_t cnt = apr_atomic_read32(&queue_info->recycled_pools_count);
+        if (cnt >= queue_info->max_recycled_pools) {
+            apr_pool_destroy(pool_to_recycle);
+            return;
+        }
+        apr_atomic_inc32(&queue_info->recycled_pools_count);
+    }
+
+    apr_pool_clear(pool_to_recycle);
+    new_recycle = (struct recycled_pool *) apr_palloc(pool_to_recycle,
+                                                      sizeof (*new_recycle));
+    new_recycle->pool = pool_to_recycle;
+    for (;;) {
+        /*
+         * Save queue_info->recycled_pool in local variable next because
+         * new_recycle->next can be changed after apr_atomic_casptr
+         * function call. For gory details see PR 44402.
+         */
+        struct recycled_pool *next = queue_info->recycled_pools;
+        new_recycle->next = next;
+        if (apr_atomic_casptr((void*) &(queue_info->recycled_pools),
+                              new_recycle, next) == next)
+            break;
+    }
+}
+
+void ap_pop_pool(apr_pool_t ** recycled_pool, fd_queue_info_t *queue_info)
+{
     /* Atomically pop a pool from the recycled list */
 
     /* This function is safe only as long as it is single threaded because
@@ -194,26 +233,39 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
      * happen concurrently with a single cas-based pop.
      */
 
+    *recycled_pool = NULL;
+
+
+    /* Atomically pop a pool from the recycled list */
     for (;;) {
         struct recycled_pool *first_pool = queue_info->recycled_pools;
         if (first_pool == NULL) {
             break;
         }
-        if (apr_atomic_casptr((void*)&(queue_info->recycled_pools), first_pool->next,
-                              first_pool) == first_pool) {
+        if (apr_atomic_casptr
+            ((void*) &(queue_info->recycled_pools),
+             first_pool->next, first_pool) == first_pool) {
             *recycled_pool = first_pool->pool;
+            if (queue_info->max_recycled_pools >= 0)
+                apr_atomic_dec32(&queue_info->recycled_pools_count);
             break;
         }
     }
+}
 
-    if (queue_info->terminated) {
-        return APR_EOF;
-    }
-    else {
-        return APR_SUCCESS;
-    }
+void ap_free_idle_pools(fd_queue_info_t *queue_info)
+{
+    apr_pool_t *p;
+
+    queue_info->max_recycled_pools = 0;
+    do {
+        ap_pop_pool(&p, queue_info);
+        if (p != NULL)
+            apr_pool_destroy(p);
+    } while (p != NULL);
 }
 
+
 apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info)
 {
     apr_status_t rv;
@@ -258,13 +310,15 @@ static apr_status_t ap_queue_destroy(void *data)
 /**
  * Initialize the fd_queue_t.
  */
-apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a)
+apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity,
+                           apr_pool_t *a)
 {
     int i;
     apr_status_t rv;
 
     if ((rv = apr_thread_mutex_create(&queue->one_big_mutex,
-                                      APR_THREAD_MUTEX_DEFAULT, a)) != APR_SUCCESS) {
+                                      APR_THREAD_MUTEX_DEFAULT,
+                                      a)) != APR_SUCCESS) {
         return rv;
     }
     if ((rv = apr_thread_cond_create(&queue->not_empty, a)) != APR_SUCCESS) {
@@ -281,7 +335,8 @@ static apr_status_t ap_queue_destroy(void *data)
     for (i = 0; i < queue_capacity; ++i)
         queue->data[i].sd = NULL;
 
-    apr_pool_cleanup_register(a, queue, ap_queue_destroy, apr_pool_cleanup_null);
+    apr_pool_cleanup_register(a, queue, ap_queue_destroy,
+                              apr_pool_cleanup_null);
 
     return APR_SUCCESS;
 }
Index: server/mpm/worker/fdqueue.h
===================================================================
--- server/mpm/worker/fdqueue.h	(revision 1820807)
+++ server/mpm/worker/fdqueue.h	(working copy)
@@ -40,7 +40,8 @@
 typedef struct fd_queue_info_t fd_queue_info_t;
 
 apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
-                                  apr_pool_t *pool, int max_idlers);
+                                  apr_pool_t *pool, int max_idlers,
+                                  int max_recycled_pools);
 apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
                                     apr_pool_t *pool_to_recycle);
 apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
@@ -47,27 +48,36 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
                                           apr_pool_t **recycled_pool);
 apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info);
 
-struct fd_queue_elem_t {
-    apr_socket_t      *sd;
-    apr_pool_t        *p;
+struct fd_queue_elem_t
+{
+    apr_socket_t *sd;
+    apr_pool_t *p;
 };
 typedef struct fd_queue_elem_t fd_queue_elem_t;
 
-struct fd_queue_t {
-    fd_queue_elem_t    *data;
-    unsigned int       nelts;
-    unsigned int       bounds;
-    unsigned int       in;
-    unsigned int       out;
+struct fd_queue_t
+{
+    fd_queue_elem_t *data;
+    unsigned int nelts;
+    unsigned int bounds;
+    unsigned int in;
+    unsigned int out;
     apr_thread_mutex_t *one_big_mutex;
-    apr_thread_cond_t  *not_empty;
-    int                 terminated;
+    apr_thread_cond_t *not_empty;
+    int terminated;
 };
 typedef struct fd_queue_t fd_queue_t;
 
-apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a);
-apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p);
-apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p);
+void ap_pop_pool(apr_pool_t **recycled_pool, fd_queue_info_t *queue_info);
+void ap_push_pool(fd_queue_info_t *queue_info, apr_pool_t *pool_to_recycle);
+void ap_free_idle_pools(fd_queue_info_t *queue_info);
+
+apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity,
+                           apr_pool_t *a);
+apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd,
+                           apr_pool_t *p);
+apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd,
+                          apr_pool_t **p);
 apr_status_t ap_queue_interrupt_all(fd_queue_t *queue);
 apr_status_t ap_queue_term(fd_queue_t *queue);
 
Index: server/mpm/worker/worker.c
===================================================================
--- server/mpm/worker/worker.c	(revision 1820807)
+++ server/mpm/worker/worker.c	(working copy)
@@ -918,7 +918,7 @@ static void * APR_THREAD_FUNC start_threads(apr_th
     }
 
     rv = ap_queue_info_create(&worker_queue_info, pchild,
-                              threads_per_child);
+                              threads_per_child, -1);
     if (rv != APR_SUCCESS) {
         ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03141)
                      "ap_queue_info_create() failed");

Reply via email to