On Tue, 2006-09-12 at 10:40 -0400, Garrett Rooney wrote:

> I would prefer to leave the global pool alone, and just create a
> separate subpool and mutex around that, since that keeps the global
> pool isolated to apr_pools.c, rather than making it accessed from
> multiple parts of the source tree.

It's very much incomplete and most likely dodgy in many places, but is
this what you had in mind?

-- 
Bojan
Index: memory/unix/apr_pools.c
===================================================================
--- memory/unix/apr_pools.c	(revision 447666)
+++ memory/unix/apr_pools.c	(working copy)
@@ -504,6 +504,10 @@
 static void pool_destroy_debug(apr_pool_t *pool, const char *file_line);
 #endif
 
+#if APR_HAS_DSO
+APR_DECLARE(apr_status_t) apr_dso_pool_initialize(apr_pool_t *pool);
+#endif
+
 #if !APR_POOL_DEBUG
 /*
  * Initialization
@@ -531,6 +535,12 @@
 
     apr_pool_tag(global_pool, "apr_global_pool");
 
+#if APR_HAS_DSO
+    if ((rv = apr_dso_pool_initialize(global_pool)) != APR_SUCCESS) {
+        return rv;
+    }
+#endif /* APR_HAS_DSO */
+
     /* This has to happen here because mutexes might be backed by
      * atomics.  It used to be snug and safe in apr_initialize().
      */
@@ -1227,6 +1237,12 @@
 
     apr_pool_tag(global_pool, "APR global pool");
 
+#if APR_HAS_DSO
+    if ((rv = apr_dso_pool_initialize(global_pool)) != APR_SUCCESS) {
+        return rv;
+    }
+#endif /* APR_HAS_DSO */
+
     apr_pools_initialized = 1;
 
     /* This has to happen here because mutexes might be backed by
Index: dso/unix/dso.c
===================================================================
--- dso/unix/dso.c	(revision 447666)
+++ dso/unix/dso.c	(working copy)
@@ -38,13 +38,47 @@
 #define DYLD_LIBRARY_HANDLE (void *)-1
 #endif
 
+static apr_pool_t         *dso_global_pool  = NULL;
+static apr_thread_mutex_t *dso_global_mutex = NULL;
+
+APR_DECLARE(apr_status_t) apr_dso_pool_initialize(apr_pool_t *pool)
+{
+    apr_status_t rv;
+
+    if ((rv = apr_pool_create(&dso_global_pool, pool)) != APR_SUCCESS) {
+        return rv;
+    }
+
+    apr_pool_tag(dso_global_pool, "apr_dso_global_pool");
+
+#if APR_HAS_THREADS
+    if ((rv = apr_thread_mutex_create(&dso_global_mutex,
+                                      APR_THREAD_MUTEX_DEFAULT,
+                                      dso_global_pool)) != APR_SUCCESS) {
+        return rv;
+    }
+#endif /* APR_HAS_THREADS */
+
+    return APR_SUCCESS;
+}
+
 APR_DECLARE(apr_status_t) apr_os_dso_handle_put(apr_dso_handle_t **aprdso,
                                                 apr_os_dso_handle_t osdso,
                                                 apr_pool_t *pool)
 {
-    *aprdso = apr_pcalloc(pool, sizeof **aprdso);
+#if APR_HAS_THREADS
+    if (!pool) {
+        apr_thread_mutex_lock(dso_global_mutex);
+    }
+#endif /* APR_HAS_THREADS */
+    *aprdso = apr_pcalloc(pool ? pool : dso_global_pool, sizeof **aprdso);
     (*aprdso)->handle = osdso;
-    (*aprdso)->pool = pool;
+    (*aprdso)->pool = pool ? pool : dso_global_pool;
+#if APR_HAS_THREADS
+    if (!pool) {
+        apr_thread_mutex_unlock(dso_global_mutex);
+    }
+#endif /* APR_HAS_THREADS */
     return APR_SUCCESS;
 }
 
@@ -80,6 +114,12 @@
 APR_DECLARE(apr_status_t) apr_dso_load(apr_dso_handle_t **res_handle, 
                                        const char *path, apr_pool_t *pool)
 {
+#if APR_HAS_THREADS
+    if (!pool) {
+        apr_thread_mutex_lock(dso_global_mutex);
+    }
+#endif /* APR_HAS_THREADS */
+
 #if defined(DSO_USE_SHL)
     shl_t os_handle = shl_load(path, BIND_IMMEDIATE, 0L);
 
@@ -140,27 +180,49 @@
 #endif    
 #endif /* DSO_USE_x */
 
-    *res_handle = apr_pcalloc(pool, sizeof(**res_handle));
+    *res_handle = apr_pcalloc(pool ? pool : dso_global_pool,
+                              sizeof(**res_handle));
 
     if(os_handle == NULL) {
 #if defined(DSO_USE_SHL)
         (*res_handle)->errormsg = strerror(errno);
+#if APR_HAS_THREADS
+        if (!pool) {
+            apr_thread_mutex_unlock(dso_global_mutex);
+        }
+#endif /* APR_HAS_THREADS */
         return APR_EDSOOPEN;
 #elif defined(DSO_USE_DYLD)
         (*res_handle)->errormsg = (err_msg) ? err_msg : "link failed";
+#if APR_HAS_THREADS
+        if (!pool) {
+            apr_thread_mutex_unlock(dso_global_mutex);
+        }
+#endif /* APR_HAS_THREADS */
         return APR_EDSOOPEN;
 #elif defined(DSO_USE_DLFCN)
         (*res_handle)->errormsg = dlerror();
+#if APR_HAS_THREADS
+        if (!pool) {
+            apr_thread_mutex_unlock(dso_global_mutex);
+        }
+#endif /* APR_HAS_THREADS */
         return APR_EDSOOPEN;
 #endif
     }
 
     (*res_handle)->handle = (void*)os_handle;
-    (*res_handle)->pool = pool;
+    (*res_handle)->pool = pool ? pool : dso_global_pool;
     (*res_handle)->errormsg = NULL;
 
-    apr_pool_cleanup_register(pool, *res_handle, dso_cleanup, apr_pool_cleanup_null);
+    apr_pool_cleanup_register(pool ? pool : dso_global_pool,
+                              *res_handle, dso_cleanup, apr_pool_cleanup_null);
 
+#if APR_HAS_THREADS
+    if (!pool) {
+        apr_thread_mutex_unlock(dso_global_mutex);
+    }
+#endif /* APR_HAS_THREADS */
     return APR_SUCCESS;
 }
     

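For anyone who wants to poke at the new path, a minimal usage sketch follows
(untested, and assuming the intent of the patch is that callers may pass a
NULL pool to have the handle allocated from the DSO-private subpool; the
library path and symbol name below are placeholders):

#include <apr_general.h>
#include <apr_dso.h>

int main(void)
{
    apr_dso_handle_t *handle;
    apr_dso_handle_sym_t sym;
    apr_status_t rv;

    apr_initialize();

    /* NULL pool: with the patch applied, the handle should be allocated
     * from dso_global_pool, serialized by dso_global_mutex, instead of a
     * caller-supplied pool. */
    rv = apr_dso_load(&handle, "/path/to/libexample.so", NULL);
    if (rv == APR_SUCCESS) {
        rv = apr_dso_sym(&sym, handle, "example_symbol");
        /* ... use the symbol ... */
        apr_dso_unload(handle);
    }

    apr_terminate();
    return 0;
}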