On Mon, Nov 25, 2013 at 2:59 PM, <[email protected]> wrote:
> Author: jim
> Date: Mon Nov 25 13:59:06 2013
> New Revision: 1545286
>
> URL: http://svn.apache.org/r1545286
> Log:
> Use a normalized offset point for idlers... still need to worry
> that atomics work as "expected", in this case that a add32 of a -1
> is the "same" as dec32 (as far as effect on idlers)
>
> Modified:
> httpd/httpd/trunk/server/mpm/event/event.c
> httpd/httpd/trunk/server/mpm/event/fdqueue.c
>
>
> @@ -131,7 +128,7 @@ apr_status_t ap_queue_info_set_idle(fd_q
> apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
> {
> int prev_idlers;
> - prev_idlers = apr_atomic_dec32((apr_uint32_t *)&(queue_info->idlers));
> + prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers),
> -1) - zero_pt;
> if (prev_idlers <= 0) {
> apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)); /*
> back out dec */
> return APR_EAGAIN;
>
As Jeff said in the other thread, I think the test here should be :
if (prev_idlers <= 1)
that's because dec32 was returning the new value while add32 now returns
the old one (sub_and_fetch vs fetch_and_sub).
Also, couldn't
"
queue_info->idlers
"
and "
zero_pt
"
become apr_uint32_t now, like in the following patch?
(Note the "int prev_idlers" => "apr_int32_t prev_idlers" too in the patch,
should int be larger than int32_t, e.g. with a 64-bit int, prev_idlers would
not become negative in this case...).
Regards,
Yann.
Index: server/mpm/event/fdqueue.c
===================================================================
--- server/mpm/event/fdqueue.c (revision 1545301)
+++ server/mpm/event/fdqueue.c (working copy)
@@ -17,7 +17,7 @@
#include "fdqueue.h"
#include "apr_atomic.h"
-static zero_pt = (APR_INT32_MAX-1)>>2;
+static const apr_uint32_t zero_pt = ((apr_uint32_t)1 << 31);
struct recycled_pool
{
@@ -27,10 +27,12 @@ struct recycled_pool
struct fd_queue_info_t
{
- apr_int32_t idlers; /**
- * 0 or positive: number of idle worker
threads
- * negative: number of threads blocked waiting
- * for an idle worker
+ apr_uint32_t idlers; /**
+ * >= zero_pt: (idlers - zero_pt) is the number
+ * of idle worker threads
+ * < zero_pt: (zero_pt - idlers) is the number
+ * of threads blocked waiting for
+ * an idle worker
*/
apr_thread_mutex_t *idlers_mutex;
apr_thread_cond_t *wait_for_idler;
@@ -97,12 +99,12 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_
apr_pool_t * pool_to_recycle)
{
apr_status_t rv;
- int prev_idlers;
+ apr_int32_t prev_idlers;
ap_push_pool(queue_info, pool_to_recycle);
/* Atomically increment the count of idle workers */
- prev_idlers = apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers))
- zero_pt;
+ prev_idlers = apr_atomic_inc32(&queue_info->idlers) - zero_pt;
/* If other threads are waiting on a worker, wake one up */
if (prev_idlers < 0) {
@@ -127,10 +129,10 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_
apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
{
- int prev_idlers;
- prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers),
-1) - zero_pt;
- if (prev_idlers <= 0) {
- apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)); /*
back out dec */
+ apr_int32_t prev_idlers;
+ prev_idlers = apr_atomic_add32(&queue_info->idlers, -1) - zero_pt;
+ if (prev_idlers <= 1) {
+ apr_atomic_inc32(&queue_info->idlers); /* back out dec */
return APR_EAGAIN;
}
return APR_SUCCESS;
@@ -140,11 +142,11 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
int *had_to_block)
{
apr_status_t rv;
- int prev_idlers;
+ apr_int32_t prev_idlers;
/* Atomically decrement the idle worker count, saving the old value */
/* See TODO in ap_queue_info_set_idle() */
- prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers),
-1) - zero_pt;
+ prev_idlers = apr_atomic_add32(&queue_info->idlers, -1) - zero_pt;
/* Block if there weren't any idle workers */
if (prev_idlers <= 0) {
@@ -152,7 +154,7 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
if (rv != APR_SUCCESS) {
AP_DEBUG_ASSERT(0);
/* See TODO in ap_queue_info_set_idle() */
- apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)); /*
back out dec */
+ apr_atomic_inc32(&queue_info->idlers); /* back out dec */
return rv;
}
/* Re-check the idle worker count to guard against a
@@ -206,8 +208,8 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
{
apr_int32_t val;
- val = (apr_int32_t)apr_atomic_read32((apr_uint32_t
*)&queue_info->idlers) - zero_pt;
- if (val < 0)
+ val = apr_atomic_read32(&queue_info->idlers) - zero_pt;
+ if (val <= 0)
return 0;
return val;
}
Index: server/mpm/eventopt/fdqueue.c
===================================================================
--- server/mpm/eventopt/fdqueue.c (revision 1545301)
+++ server/mpm/eventopt/fdqueue.c (working copy)
@@ -17,6 +17,8 @@
#include "fdqueue.h"
#include "apr_atomic.h"
+static const apr_uint32_t zero_pt = ((apr_uint32_t)1 << 31);
+
typedef struct recycled_pool
{
apr_pool_t *pool;
@@ -25,10 +27,12 @@ typedef struct recycled_pool
struct fd_queue_info_t
{
- apr_int32_t idlers; /**
- * 0 or positive: number of idle worker
threads
- * negative: number of threads blocked waiting
- * for an idle worker
+ apr_uint32_t idlers; /**
+ * >= zero_pt: (idlers - zero_pt) is the number
+ * of idle worker threads
+ * < zero_pt: (zero_pt - idlers) is the number
+ * of threads blocked waiting for
+ * an idle worker
*/
apr_thread_mutex_t *idlers_mutex;
apr_thread_cond_t *wait_for_idler;
@@ -95,12 +99,12 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_
apr_pool_t * pool_to_recycle)
{
apr_status_t rv;
- int prev_idlers;
+ apr_int32_t prev_idlers;
ap_push_pool(queue_info, pool_to_recycle);
/* Atomically increment the count of idle workers */
- prev_idlers = apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers))
- zero_pt;
+ prev_idlers = apr_atomic_inc32(&queue_info->idlers) - zero_pt;
/* If other threads are waiting on a worker, wake one up */
if (prev_idlers < 0) {
@@ -125,10 +129,10 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_
apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
{
- int prev_idlers;
- prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers),
-1) - zero_pt;
- if (prev_idlers <= 0) {
- apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)); /*
back out dec */
+ apr_int32_t prev_idlers;
+ prev_idlers = apr_atomic_add32(&queue_info->idlers, -1) - zero_pt;
+ if (prev_idlers <= 1) {
+ apr_atomic_inc32(&queue_info->idlers); /* back out dec */
return APR_EAGAIN;
}
return APR_SUCCESS;
@@ -138,11 +142,11 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
int *had_to_block)
{
apr_status_t rv;
- int prev_idlers;
+ apr_int32_t prev_idlers;
/* Atomically decrement the idle worker count, saving the old value */
/* See TODO in ap_queue_info_set_idle() */
- prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers),
-1) - zero_pt;
+ prev_idlers = apr_atomic_add32(&queue_info->idlers, -1) - zero_pt;
/* Block if there weren't any idle workers */
if (prev_idlers <= 0) {
@@ -150,7 +154,7 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
if (rv != APR_SUCCESS) {
AP_DEBUG_ASSERT(0);
/* See TODO in ap_queue_info_set_idle() */
- apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)); /*
back out dec */
+ apr_atomic_inc32(&queue_info->idlers); /* back out dec */
return rv;
}
/* Re-check the idle worker count to guard against a
@@ -204,8 +208,8 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
{
apr_int32_t val;
- val = (apr_int32_t)apr_atomic_read32((apr_uint32_t
*)&queue_info->idlers) - zero_pt;
- if (val < 0)
+ val = apr_atomic_read32(&queue_info->idlers) - zero_pt;
+ if (val <= 0)
return 0;
return val;
}
[END]
Index: server/mpm/event/fdqueue.c
===================================================================
--- server/mpm/event/fdqueue.c (revision 1545301)
+++ server/mpm/event/fdqueue.c (working copy)
@@ -17,7 +17,7 @@
#include "fdqueue.h"
#include "apr_atomic.h"
-static zero_pt = (APR_INT32_MAX-1)>>2;
+static const apr_uint32_t zero_pt = ((apr_uint32_t)1 << 31);
struct recycled_pool
{
@@ -27,10 +27,12 @@ struct recycled_pool
struct fd_queue_info_t
{
- apr_int32_t idlers; /**
- * 0 or positive: number of idle worker threads
- * negative: number of threads blocked waiting
- * for an idle worker
+ apr_uint32_t idlers; /**
+ * >= zero_pt: (idlers - zero_pt) is the number
+ * of idle worker threads
+ * < zero_pt: (zero_pt - idlers) is the number
+ * of threads blocked waiting for
+ * an idle worker
*/
apr_thread_mutex_t *idlers_mutex;
apr_thread_cond_t *wait_for_idler;
@@ -97,12 +99,12 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_
apr_pool_t * pool_to_recycle)
{
apr_status_t rv;
- int prev_idlers;
+ apr_int32_t prev_idlers;
ap_push_pool(queue_info, pool_to_recycle);
/* Atomically increment the count of idle workers */
- prev_idlers = apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)) - zero_pt;
+ prev_idlers = apr_atomic_inc32(&queue_info->idlers) - zero_pt;
/* If other threads are waiting on a worker, wake one up */
if (prev_idlers < 0) {
@@ -127,10 +129,10 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_
apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
{
- int prev_idlers;
- prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers), -1) - zero_pt;
- if (prev_idlers <= 0) {
- apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)); /* back out dec */
+ apr_int32_t prev_idlers;
+ prev_idlers = apr_atomic_add32(&queue_info->idlers, -1) - zero_pt;
+ if (prev_idlers <= 1) {
+ apr_atomic_inc32(&queue_info->idlers); /* back out dec */
return APR_EAGAIN;
}
return APR_SUCCESS;
@@ -140,11 +142,11 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
int *had_to_block)
{
apr_status_t rv;
- int prev_idlers;
+ apr_int32_t prev_idlers;
/* Atomically decrement the idle worker count, saving the old value */
/* See TODO in ap_queue_info_set_idle() */
- prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers), -1) - zero_pt;
+ prev_idlers = apr_atomic_add32(&queue_info->idlers, -1) - zero_pt;
/* Block if there weren't any idle workers */
if (prev_idlers <= 0) {
@@ -152,7 +154,7 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
if (rv != APR_SUCCESS) {
AP_DEBUG_ASSERT(0);
/* See TODO in ap_queue_info_set_idle() */
- apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)); /* back out dec */
+ apr_atomic_inc32(&queue_info->idlers); /* back out dec */
return rv;
}
/* Re-check the idle worker count to guard against a
@@ -206,8 +208,8 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
{
apr_int32_t val;
- val = (apr_int32_t)apr_atomic_read32((apr_uint32_t *)&queue_info->idlers) - zero_pt;
- if (val < 0)
+ val = apr_atomic_read32(&queue_info->idlers) - zero_pt;
+ if (val <= 0)
return 0;
return val;
}
Index: server/mpm/eventopt/fdqueue.c
===================================================================
--- server/mpm/eventopt/fdqueue.c (revision 1545301)
+++ server/mpm/eventopt/fdqueue.c (working copy)
@@ -17,6 +17,8 @@
#include "fdqueue.h"
#include "apr_atomic.h"
+static const apr_uint32_t zero_pt = ((apr_uint32_t)1 << 31);
+
typedef struct recycled_pool
{
apr_pool_t *pool;
@@ -25,10 +27,12 @@ typedef struct recycled_pool
struct fd_queue_info_t
{
- apr_int32_t idlers; /**
- * 0 or positive: number of idle worker threads
- * negative: number of threads blocked waiting
- * for an idle worker
+ apr_uint32_t idlers; /**
+ * >= zero_pt: (idlers - zero_pt) is the number
+ * of idle worker threads
+ * < zero_pt: (zero_pt - idlers) is the number
+ * of threads blocked waiting for
+ * an idle worker
*/
apr_thread_mutex_t *idlers_mutex;
apr_thread_cond_t *wait_for_idler;
@@ -95,12 +99,12 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_
apr_pool_t * pool_to_recycle)
{
apr_status_t rv;
- int prev_idlers;
+ apr_int32_t prev_idlers;
ap_push_pool(queue_info, pool_to_recycle);
/* Atomically increment the count of idle workers */
- prev_idlers = apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)) - zero_pt;
+ prev_idlers = apr_atomic_inc32(&queue_info->idlers) - zero_pt;
/* If other threads are waiting on a worker, wake one up */
if (prev_idlers < 0) {
@@ -125,10 +129,10 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_
apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
{
- int prev_idlers;
- prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers), -1) - zero_pt;
- if (prev_idlers <= 0) {
- apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)); /* back out dec */
+ apr_int32_t prev_idlers;
+ prev_idlers = apr_atomic_add32(&queue_info->idlers, -1) - zero_pt;
+ if (prev_idlers <= 1) {
+ apr_atomic_inc32(&queue_info->idlers); /* back out dec */
return APR_EAGAIN;
}
return APR_SUCCESS;
@@ -138,11 +142,11 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
int *had_to_block)
{
apr_status_t rv;
- int prev_idlers;
+ apr_int32_t prev_idlers;
/* Atomically decrement the idle worker count, saving the old value */
/* See TODO in ap_queue_info_set_idle() */
- prev_idlers = apr_atomic_add32((apr_uint32_t *)&(queue_info->idlers), -1) - zero_pt;
+ prev_idlers = apr_atomic_add32(&queue_info->idlers, -1) - zero_pt;
/* Block if there weren't any idle workers */
if (prev_idlers <= 0) {
@@ -150,7 +154,7 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
if (rv != APR_SUCCESS) {
AP_DEBUG_ASSERT(0);
/* See TODO in ap_queue_info_set_idle() */
- apr_atomic_inc32((apr_uint32_t *)&(queue_info->idlers)); /* back out dec */
+ apr_atomic_inc32(&queue_info->idlers); /* back out dec */
return rv;
}
/* Re-check the idle worker count to guard against a
@@ -204,8 +208,8 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue
apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
{
apr_int32_t val;
- val = (apr_int32_t)apr_atomic_read32((apr_uint32_t *)&queue_info->idlers) - zero_pt;
- if (val < 0)
+ val = apr_atomic_read32(&queue_info->idlers) - zero_pt;
+ if (val <= 0)
return 0;
return val;
}