I'm working on replacing some mutex locks in the worker MPM with atomic
compare-and-swap (CAS) based algorithms, in order to get better
concurrency and lower locking overhead.
Here's the first change: it takes the pool recycling code out of the
mutex-protected critical region in the "queue_info" code and maintains
the list of recycled pools with CAS instead.  Comments welcome...
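
For anyone skimming the patch, the core of it is just a CAS-maintained
singly linked list (a lock-free stack) of recycled pools: push and pop
each retry a compare-and-swap on the list head until it succeeds.  Here
is a stripped-down sketch of that pattern on its own; the names are
illustrative rather than the real fdqueue.c symbols, and it assumes the
same apr_atomic_casptr(mem, with, cmp) behavior the patch relies on
(store 'with' in *mem iff *mem == cmp, return the old value):

#include "apr_atomic.h"
#include "apr_pools.h"

/* Illustrative only -- the real version is in the patch below. */
struct pool_node {
    apr_pool_t *pool;
    struct pool_node *next;
};

static void push_pool(struct pool_node *volatile *head, apr_pool_t *p)
{
    /* The node is allocated out of the pool being recycled, so it
     * disappears along with the pool and never needs a separate free.
     */
    struct pool_node *node = apr_palloc(p, sizeof(*node));
    node->pool = p;
    for (;;) {
        node->next = *head;
        if (apr_atomic_casptr((volatile void **)head, node, node->next)
            == node->next) {
            return;   /* the head was still node->next, so we're linked in */
        }
        /* another thread changed the head; reload and retry */
    }
}

static apr_pool_t *pop_pool(struct pool_node *volatile *head)
{
    for (;;) {
        struct pool_node *first = *head;
        if (first == NULL) {
            return NULL;          /* nothing available to recycle */
        }
        if (apr_atomic_casptr((volatile void **)head, first->next, first)
            == first) {
            return first->pool;   /* we won the race for this node */
        }
    }
}

Because each list node lives in the pool it tracks, recycling or
destroying the pool takes the node's storage with it, which is why the
patch never needs an explicit free.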
Next on my list is the code that synchronizes the idle worker count.
I think I can eliminate the need to lock a mutex except in the
special case where all the workers are busy.
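
The rough shape of that change: keep the idle-worker count in an atomic
integer, decrement it with a CAS loop on the fast path, and only grab
the mutex and wait on the condition variable when the count is zero.
Here's a sketch of the listener side; the helper and its parameters are
made up for illustration, it assumes an apr_atomic_cas32()-style 32-bit
compare-and-swap that returns the old value, and it assumes the thread
that bumps the count up from zero still takes the mutex before it
signals:

#include "apr_atomic.h"
#include "apr_thread_mutex.h"
#include "apr_thread_cond.h"

/* Sketch only: claim an idle worker without locking unless the idle
 * count is zero.  The function and its parameters are hypothetical.
 */
static apr_status_t claim_idler(volatile apr_uint32_t *idlers,
                                apr_thread_mutex_t *mutex,
                                apr_thread_cond_t *wait_for_idler)
{
    for (;;) {
        apr_uint32_t current = *idlers;
        if (current == 0) {
            /* Slow path: every worker is busy, so take the mutex and
             * block on the condition variable until one goes idle.
             */
            apr_status_t rv = apr_thread_mutex_lock(mutex);
            if (rv != APR_SUCCESS) {
                return rv;
            }
            while (*idlers == 0) {
                rv = apr_thread_cond_wait(wait_for_idler, mutex);
                if (rv != APR_SUCCESS) {
                    apr_thread_mutex_unlock(mutex);
                    return rv;
                }
            }
            apr_thread_mutex_unlock(mutex);
            /* fall through and retry the lock-free decrement */
        }
        else if (apr_atomic_cas32(idlers, current - 1, current) == current) {
            /* Fast path: claimed an idler without touching the mutex */
            return APR_SUCCESS;
        }
    }
}

The mutex and condition variable only come into play when every worker
is busy, which is what keeps the common case cheap.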
Brian
Index: server/mpm/worker/fdqueue.c
===================================================================
RCS file: /home/cvs/httpd-2.0/server/mpm/worker/fdqueue.c,v
retrieving revision 1.23
diff -u -r1.23 fdqueue.c
--- server/mpm/worker/fdqueue.c 2 Aug 2002 17:37:52 -0000 1.23
+++ server/mpm/worker/fdqueue.c 1 Jan 2003 01:20:11 -0000
@@ -57,6 +57,12 @@
  */
 
 #include "fdqueue.h"
+#include "apr_atomic.h"
+
+typedef struct recycled_pool {
+    apr_pool_t *pool;
+    struct recycled_pool *next;
+} recycled_pool;
 
 struct fd_queue_info_t {
     int idlers;
@@ -64,19 +70,27 @@
     apr_thread_cond_t *wait_for_idler;
     int terminated;
     int max_idlers;
-    apr_pool_t **recycled_pools;
-    int num_recycled;
+    recycled_pool *recycled_pools;
 };
 
 static apr_status_t queue_info_cleanup(void *data_)
 {
     fd_queue_info_t *qi = data_;
-    int i;
     apr_thread_cond_destroy(qi->wait_for_idler);
     apr_thread_mutex_destroy(qi->idlers_mutex);
-    for (i = 0; i < qi->num_recycled; i++) {
-        apr_pool_destroy(qi->recycled_pools[i]);
+
+    /* Clean up any pools in the recycled list */
+    for (;;) {
+        struct recycled_pool *first_pool = qi->recycled_pools;
+        if (first_pool == NULL) {
+            break;
+        }
+        if (apr_atomic_casptr(&(qi->recycled_pools), first_pool->next,
+                              first_pool) == first_pool) {
+            apr_pool_destroy(first_pool->pool);
+        }
     }
+
     return APR_SUCCESS;
 }
@@ -98,9 +112,7 @@
     if (rv != APR_SUCCESS) {
         return rv;
     }
-    qi->recycled_pools = (apr_pool_t **)apr_palloc(pool, max_idlers *
-                                                   sizeof(apr_pool_t *));
-    qi->num_recycled = 0;
+    qi->recycled_pools = NULL;
     qi->max_idlers = max_idlers;
     apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
                               apr_pool_cleanup_null);
@@ -114,16 +126,30 @@
                                     apr_pool_t *pool_to_recycle)
 {
     apr_status_t rv;
+
+    /* If we have been given a pool to recycle, atomically link
+     * it into the queue_info's list of recycled pools
+     */
+    if (pool_to_recycle) {
+        struct recycled_pool *new_recycle;
+        new_recycle = (struct recycled_pool *)apr_palloc(pool_to_recycle,
+                                                         sizeof(*new_recycle));
+        new_recycle->pool = pool_to_recycle;
+        for (;;) {
+            new_recycle->next = queue_info->recycled_pools;
+            if (apr_atomic_casptr(&(queue_info->recycled_pools),
+                                  new_recycle, new_recycle->next) ==
+                new_recycle->next) {
+                break;
+            }
+        }
+    }
+
     rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
     if (rv != APR_SUCCESS) {
         return rv;
     }
     AP_DEBUG_ASSERT(queue_info->idlers >= 0);
-    AP_DEBUG_ASSERT(queue_info->num_recycled < queue_info->max_idlers);
-    if (pool_to_recycle) {
-        queue_info->recycled_pools[queue_info->num_recycled++] =
-            pool_to_recycle;
-    }
     if (queue_info->idlers++ == 0) {
         /* Only signal if we had no idlers before. */
         apr_thread_cond_signal(queue_info->wait_for_idler);
@@ -158,11 +184,21 @@
         }
     }
     queue_info->idlers--; /* Oh, and idler? Let's take 'em! */
-    if (queue_info->num_recycled) {
-        *recycled_pool =
-            queue_info->recycled_pools[--queue_info->num_recycled];
-    }
     rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
+
+    /* Atomically pop a pool from the recycled list */
+    for (;;) {
+        struct recycled_pool *first_pool = queue_info->recycled_pools;
+        if (first_pool == NULL) {
+            break;
+        }
+        if (apr_atomic_casptr(&(queue_info->recycled_pools), first_pool->next,
+                              first_pool) == first_pool) {
+            *recycled_pool = first_pool->pool;
+            break;
+        }
+    }
+
     if (rv != APR_SUCCESS) {
         return rv;
     }