Too many changes in one patch. Break this up into multiple consumable, 15-minute patches and I'll review them.
* modules/experimental/mod_cache.h: Always use atomics.
* modules/experimental/mod_mem_cache.c: Always use atomics.
Index: modules/experimental/mod_cache.h
===================================================================
RCS file: /home/cvs/httpd-2.0/modules/experimental/mod_cache.h,v
retrieving revision 1.44
diff -u -r1.44 mod_cache.h
--- modules/experimental/mod_cache.h	9 Feb 2004 20:29:18 -0000	1.44
+++ modules/experimental/mod_cache.h	1 Aug 2004 08:24:52 -0000
@@ -64,11 +64,7 @@
 #include <arpa/inet.h>
 #endif
-/* USE_ATOMICS should be replaced with the appropriate APR feature macro */
-#define USE_ATOMICS
-#ifdef USE_ATOMICS
 #include "apr_atomic.h"
-#endif
#ifndef MAX
#define MAX(a,b) ((a) > (b) ? (a) : (b))
@@ -175,11 +171,7 @@
void *vobj; /* Opaque portion (specific to the cache implementation) of the cache object */
apr_size_t count; /* Number of body bytes written to the cache so far */
int complete;
-#ifdef USE_ATOMICS
apr_uint32_t refcount;
-#else
- apr_size_t refcount;
-#endif
apr_size_t cleanup;
};
Index: modules/experimental/mod_mem_cache.c
===================================================================
RCS file: /home/cvs/httpd-2.0/modules/experimental/mod_mem_cache.c,v
retrieving revision 1.109
diff -u -r1.109 mod_mem_cache.c
--- modules/experimental/mod_mem_cache.c 25 May 2004 18:22:58 -0000 1.109
+++ modules/experimental/mod_mem_cache.c 1 Aug 2004 08:24:52 -0000
@@ -57,13 +57,8 @@
apr_os_file_t fd;
apr_int32_t flags; /* File open flags */
long priority; /**< the priority of this entry */
- long total_refs; /**< total number of references this entry has had */
-
-#ifdef USE_ATOMICS
+ long total_refs; /**< total number of references this entry has had */
apr_uint32_t pos; /**< the position of this entry in the cache */
-#else
- apr_ssize_t pos;
-#endif
} mem_cache_object_t;
@@ -122,21 +120,14 @@
cache_object_t *obj = (cache_object_t *)a;
mem_cache_object_t *mobj = obj->vobj;
-#ifdef USE_ATOMICS
apr_atomic_set32(&mobj->pos, pos);
-#else
- mobj->pos = pos;
-#endif
}
+
static apr_ssize_t memcache_get_pos(void *a)
{
cache_object_t *obj = (cache_object_t *)a;
mem_cache_object_t *mobj = obj->vobj;
-#ifdef USE_ATOMICS
return apr_atomic_read32(&mobj->pos);
-#else
- return mobj->pos;
-#endif
}

static apr_size_t memcache_cache_get_size(void*a)
@@ -167,24 +160,15 @@
* now. Increment the refcount before setting cleanup to avoid a race
* condition. A similar pattern is used in remove_url()
*/
-#ifdef USE_ATOMICS
apr_atomic_inc32(&obj->refcount);
-#else
- obj->refcount++;
-#endif
 obj->cleanup = 1;
-#ifdef USE_ATOMICS
if (!apr_atomic_dec32(&obj->refcount)) {
cleanup_cache_object(obj);
}
-#else
- obj->refcount--;
- if (!obj->refcount) {
- cleanup_cache_object(obj);
- }
-#endif
}
+
/*
* functions return a 'negative' score since priority queues
* dequeue the object with the highest value first
@@ -310,29 +294,14 @@
}

 /* Cleanup the cache object */
-#ifdef USE_ATOMICS
if (!apr_atomic_dec32(&obj->refcount)) {
if (obj->cleanup) {
cleanup_cache_object(obj);
}
}
-#else
- if (sconf->lock) {
- apr_thread_mutex_lock(sconf->lock);
- }
- obj->refcount--;
- /* If the object is marked for cleanup and the refcount
- * has dropped to zero, cleanup the object
- */
- if ((obj->cleanup) && (!obj->refcount)) {
- cleanup_cache_object(obj);
- }
- if (sconf->lock) {
- apr_thread_mutex_unlock(sconf->lock);
- }
-#endif
return APR_SUCCESS;
}
+
static apr_status_t cleanup_cache_mem(void *sconfv)
{
cache_object_t *obj;
@@ -349,23 +318,18 @@
apr_thread_mutex_lock(sconf->lock);
}
obj = cache_pop(co->cache_cache);
- while (obj) {
- /* Iterate over the cache and clean up each entry */
- /* Free the object if the recount == 0 */
-#ifdef USE_ATOMICS
+ while (obj) {
+ /* Iterate over the cache and clean up each entry */
+ /* Free the object if the recount == 0 */
apr_atomic_inc32(&obj->refcount);
obj->cleanup = 1;
if (!apr_atomic_dec32(&obj->refcount)) {
-#else
- obj->cleanup = 1;
- if (!obj->refcount) {
-#endif
cleanup_cache_object(obj);
}
obj = cache_pop(co->cache_cache);
}
- /* Cache is empty, free the cache table */
+ /* Cache is empty, free the cache table */
cache_free(co->cache_cache);

 if (sconf->lock) {
@@ -468,11 +433,7 @@
}

 /* Finish initing the cache object */
-#ifdef USE_ATOMICS
apr_atomic_set32(&obj->refcount, 1);
-#else
- obj->refcount = 1;
-#endif
mobj->total_refs = 1;
obj->complete = 0;
obj->cleanup = 0;
@@ -543,11 +505,7 @@
if (obj) {
if (obj->complete) {
request_rec *rmain=r, *rtmp;
-#ifdef USE_ATOMICS
apr_atomic_inc32(&obj->refcount);
-#else
- obj->refcount++;
-#endif
/* cache is worried about overall counts, not 'open' ones */
cache_update(sconf->cache_cache, obj);
@@ -691,24 +652,17 @@
if (sconf->lock) {
apr_thread_mutex_lock(sconf->lock);
}
-
- obj = cache_find(sconf->cache_cache, key);
+
+ obj = cache_find(sconf->cache_cache, key);
if (obj) {
mem_cache_object_t *mobj;
cache_remove(sconf->cache_cache, obj);
mobj = (mem_cache_object_t *) obj->vobj;
-#ifdef USE_ATOMICS
- /* Refcount increment in this case MUST be made under
+ /* Refcount increment in this case MUST be made under
* protection of the lock
*/
apr_atomic_inc32(&obj->refcount);
-#else
- if (!obj->refcount) {
- cleanup_cache_object(obj);
- obj = NULL;
- }
-#endif
if (obj) {
obj->cleanup = 1;
}
@@ -716,21 +670,19 @@
if (sconf->lock) {
apr_thread_mutex_unlock(sconf->lock);
}
-#ifdef USE_ATOMICS
if (obj) {
if (!apr_atomic_dec32(&obj->refcount)) {
cleanup_cache_object(obj);
}
}
-#endif
return OK;
}

-static apr_status_t read_headers(cache_handle_t *h, request_rec *r)
+static apr_status_t load_headers(cache_handle_t *h, request_rec *r)
{
int rc;
mem_cache_object_t *mobj = (mem_cache_object_t*) h->cache_obj->vobj;
-
+
h->req_hdrs = apr_table_make(r->pool, mobj->num_req_hdrs);
r->headers_out = apr_table_make(r->pool, mobj->num_header_out);
r->err_headers_out = apr_table_make(r->pool, mobj->num_err_header_out);