At 06:53 PM 9/10/2004, Jean-Jacques Clar wrote:
I replaced the cleanup field with a bit set in refcount. This is done to prevent race conditions when refcount is accessed on two different threads/CPUs.

+#define OBJECT_CLEANUP_BIT 0x00F00000

0x00F00000 isn't a bit, it's 4 bits: 0x00100000 | 0x00200000 | 0x00400000 | 0x00800000


Why would you not use something farther up, such as 0x40000000, for the bit? (I imagine 0x80000000 would cause problems if apr_atomic_t isn't unsigned).

+
/* Forward declarations */
static int remove_entity(cache_handle_t *h);
static apr_status_t store_headers(cache_handle_t *h, request_rec *r, cache_info *i);
@@ -152,17 +154,15 @@
{
cache_object_t *obj = (cache_object_t *)a;


- /* Cleanup the cache object. Object should be removed from the cache
- * now. Increment the refcount before setting cleanup to avoid a race
- * condition. A similar pattern is used in remove_url()
- */
- apr_atomic_inc(&obj->refcount);
-
- obj->cleanup = 1;
-
- if (!apr_atomic_dec(&obj->refcount)) {
+ apr_atomic_t tmp_refcount = obj->refcount;
+ /* Cleanup the cache object. Object should be removed from the cache now. */
+ if (!apr_atomic_read(&obj->refcount)) {
cleanup_cache_object(obj);
}
+ else if(tmp_refcount != apr_atomic_cas(&obj->refcount, tmp_refcount | OBJECT_CLEANUP_BIT, tmp_refcount)) {
+ memcache_cache_free(obj);
+ }
+ return;
}
/*
* functions return a 'negative' score since priority queues
@@ -276,11 +276,11 @@
}
/* Remember, objects marked for cleanup are, by design, already
* removed from the cache. remove_url() could have already
- * removed the object from the cache (and set obj->cleanup)
+ * removed the object from the cache (and set the OBJECT_CLEANUP_BIT)
*/
- if (!obj->cleanup) {
+ if (!(apr_atomic_read(&obj->refcount) & OBJECT_CLEANUP_BIT)) {
cache_remove(sconf->cache_cache, obj);
- obj->cleanup = 1;
+ apr_atomic_set(&obj->refcount, obj->refcount | OBJECT_CLEANUP_BIT);
}
if (sconf->lock) {
apr_thread_mutex_unlock(sconf->lock);
@@ -288,10 +288,8 @@
}


     /* Cleanup the cache object */
-    if (!apr_atomic_dec(&obj->refcount)) {
-        if (obj->cleanup) {
-            cleanup_cache_object(obj);
-        }
+    if(apr_atomic_dec(&obj->refcount) == OBJECT_CLEANUP_BIT) {
+        cleanup_cache_object(obj);
     }
     return APR_SUCCESS;
 }
@@ -314,11 +312,7 @@
     while (obj) {
     /* Iterate over the cache and clean up each entry */
    /* Free the object if the refcount == 0 */
-        apr_atomic_inc(&obj->refcount);
-        obj->cleanup = 1;
-        if (!apr_atomic_dec(&obj->refcount)) {
-            cleanup_cache_object(obj);
-        }
+        memcache_cache_free(obj);
         obj = cache_pop(co->cache_cache);
     }

@@ -415,7 +409,6 @@
apr_atomic_set(&obj->refcount, 1);
mobj->total_refs = 1;
obj->complete = 0;
- obj->cleanup = 0;
obj->vobj = mobj;
/* Safe cast: We tested < sconf->max_cache_object_size above */
mobj->m_len = (apr_size_t)len;
@@ -536,9 +529,9 @@
* an object marked for cleanup is by design not in the
* hash table.
*/
- if (!obj->cleanup) {
+ if (!(obj->refcount & OBJECT_CLEANUP_BIT)) {
cache_remove(sconf->cache_cache, obj);
- obj->cleanup = 1;
+ apr_atomic_set(&obj->refcount, obj->refcount | OBJECT_CLEANUP_BIT);
ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, "gcing a cache entry");
}


@@ -629,20 +622,13 @@
mem_cache_object_t *mobj;
cache_remove(sconf->cache_cache, obj);
mobj = (mem_cache_object_t *) obj->vobj;
-
- /* Refcount increment in this case MUST be made under
- * protection of the lock
- */
- apr_atomic_inc(&obj->refcount);
- if (obj) {
- obj->cleanup = 1;
- }
+ apr_atomic_set(&obj->refcount, obj->refcount | OBJECT_CLEANUP_BIT);
}
if (sconf->lock) {
apr_thread_mutex_unlock(sconf->lock);
}
if (obj) {
- if (!apr_atomic_dec(&obj->refcount)) {
+ if(apr_atomic_read(&obj->refcount) == OBJECT_CLEANUP_BIT) {
cleanup_cache_object(obj);
}
}
@@ -908,8 +894,8 @@
if (sconf->lock) {
apr_thread_mutex_lock(sconf->lock);
}
- if (obj->cleanup) {
- /* If obj->cleanup is set, the object has been prematurly
+ if ((apr_atomic_read(&obj->refcount) & OBJECT_CLEANUP_BIT)) {
+ /* If OBJECT_CLEANUP_BIT is set, the object has been prematurely
* ejected from the cache by the garbage collector. Add the
* object back to the cache. If an object with the same key is
* found in the cache, eject it in favor of the completed obj.
@@ -918,12 +904,12 @@
(cache_object_t *) cache_find(sconf->cache_cache, obj->key);
if (tmp_obj) {
cache_remove(sconf->cache_cache, tmp_obj);
- tmp_obj->cleanup = 1;
- if (!tmp_obj->refcount) {
+ apr_atomic_set(&obj->refcount, obj->refcount | OBJECT_CLEANUP_BIT);
+ if (!apr_atomic_read(&obj->refcount) & OBJECT_CLEANUP_BIT) {
cleanup_cache_object(tmp_obj);
}
}
- obj->cleanup = 0;
+ apr_atomic_set(&obj->refcount, obj->refcount & ~OBJECT_CLEANUP_BIT);
}
else {
cache_remove(sconf->cache_cache, obj);



Reply via email to