striker 02/01/01 15:49:23
Modified: include apr_pools.h
memory/unix apr_pools.c
Log:
Fill in code that is to be used when APR_POOL_DEBUG is defined.
Every apr_p[c]alloc is hereby effectively turned into a malloc.
apr_pool_clear free()s all memory in a pool. This will make
using third party tools like Electric Fence a lot more helpful.
Revision Changes Path
1.66 +149 -117 apr/include/apr_pools.h
Index: apr_pools.h
===================================================================
RCS file: /home/cvs/apr/include/apr_pools.h,v
retrieving revision 1.65
retrieving revision 1.66
diff -u -r1.65 -r1.66
--- apr_pools.h 18 Dec 2001 18:55:39 -0000 1.65
+++ apr_pools.h 1 Jan 2002 23:49:23 -0000 1.66
@@ -107,119 +107,19 @@
/** A function that is called when allocation fails. */
typedef int (*apr_abortfunc_t)(int retcode);
-/**
- * @defgroup PoolDebug Pool Debugging functions.
- *
- * pools have nested lifetimes -- sub_pools are destroyed when the
- * parent pool is cleared. We allow certain liberties with operations
- * on things such as tables (and on other structures in a more general
- * sense) where we allow the caller to insert values into a table which
- * were not allocated from the table's pool. The table's data will
- * remain valid as long as all the pools from which its values are
- * allocated remain valid.
- *
- * For example, if B is a sub pool of A, and you build a table T in
- * pool B, then it's safe to insert data allocated in A or B into T
- * (because B lives at most as long as A does, and T is destroyed when
- * B is cleared/destroyed). On the other hand, if S is a table in
- * pool A, it is safe to insert data allocated in A into S, but it
- * is *not safe* to insert data allocated from B into S... because
- * B can be cleared/destroyed before A is (which would leave dangling
- * pointers in T's data structures).
- *
- * In general we say that it is safe to insert data into a table T
- * if the data is allocated in any ancestor of T's pool. This is the
- * basis on which the APR_POOL_DEBUG code works -- it tests these ancestor
- * relationships for all data inserted into tables. APR_POOL_DEBUG also
- * provides tools (apr_find_pool, and apr_pool_is_ancestor) for other
- * folks to implement similar restrictions for their own data
- * structures.
- *
- * However, sometimes this ancestor requirement is inconvenient --
- * sometimes we're forced to create a sub pool (such as through
- * apr_sub_req_lookup_uri), and the sub pool is guaranteed to have
- * the same lifetime as the parent pool. This is a guarantee implemented
- * by the *caller*, not by the pool code. That is, the caller guarantees
- * they won't destroy the sub pool individually prior to destroying the
- * parent pool.
- *
- * In this case the caller must call apr_pool_join() to indicate this
- * guarantee to the APR_POOL_DEBUG code. There are a few examples spread
- * through the standard modules.
- *
- * These functions are only implemented when #APR_POOL_DEBUG is set.
- *
- * @{
- */
-#if defined(APR_POOL_DEBUG) || defined(DOXYGEN)
-/**
- * Guarantee that a subpool has the same lifetime as the parent.
- * @param p The parent pool
- * @param sub The subpool
- */
-APR_DECLARE(void) apr_pool_join(apr_pool_t *p, apr_pool_t *sub);
-
-/**
- * Find a pool from something allocated in it.
- * @param ts The thing allocated in the pool
- * @return The pool it is allocated in
- */
-APR_DECLARE(apr_pool_t *) apr_find_pool(const void *ts);
+/** Pool creation flags */
-/**
- * Report the number of bytes currently in the pool
- * @param p The pool to inspect
- * @param recurse Recurse/include the subpools' sizes
- * @return The number of bytes
- */
-APR_DECLARE(apr_size_t) apr_pool_num_bytes(apr_pool_t *p, int recurse);
-
-/**
- * Report the number of bytes currently in the list of free blocks
- * @return The number of bytes
- */
-APR_DECLARE(apr_size_t) apr_pool_free_blocks_num_bytes(void);
-
-/**
- * Lock a pool
- * @param pool The pool to lock
- * @param flag The flag
- */
-APR_DECLARE(void) apr_pool_lock(apr_pool_t *pool, int flag);
-
-/* @} */
-
-#else
-# ifdef apr_pool_join
-# undef apr_pool_join
-# endif
-# define apr_pool_join(a,b)
-
-# ifdef apr_pool_lock
-# undef apr_pool_lock
-# endif
-# define apr_pool_lock(pool, lock)
-#endif
+#define APR_POOL_FDEFAULT 0x0
+#define APR_POOL_FNEW_ALLOCATOR 0x1
+#define APR_POOL_FLOCK 0x2
-/**
- * Tag a pool (give it a name)
- * @param pool The pool to tag
- * @param tag The tag
- */
-APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag);
-/**
- * Determine if pool a is an ancestor of pool b
- * @param a The pool to search
- * @param b The pool to search for
- * @return True if a is an ancestor of b, NULL is considered an ancestor
- * of all pools.
+/*
+ * APR memory structure manipulators (pools, tables, and arrays).
*/
-APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b);
-
/*
- * APR memory structure manipulators (pools, tables, and arrays).
+ * Initialization
*/
/**
@@ -237,12 +137,11 @@
* @internal
*/
APR_DECLARE(void) apr_pool_terminate(void);
-
-/* pool functions */
-#define APR_POOL_FDEFAULT 0x0
-#define APR_POOL_FNEW_ALLOCATOR 0x1
-#define APR_POOL_FLOCK 0x2
+
+/*
+ * Pool creation/destruction
+ */
/**
* Create a new pool.
@@ -299,6 +198,19 @@
#endif
/**
+ * Destroy the pool. This takes similar action as apr_pool_clear() and then
+ * frees all the memory.
+ * @param p The pool to destroy
+ * @remark This will actually free the memory
+ */
+APR_DECLARE(void) apr_pool_destroy(apr_pool_t *p);
+
+
+/*
+ * Memory allocation
+ */
+
+/**
* Allocate a block of memory from a pool
* @param p The pool to allocate from
* @param reqsize The amount of memory to allocate
@@ -324,12 +236,10 @@
*/
APR_DECLARE(void) apr_pool_clear(apr_pool_t *p);
-/**
- * Destroy the pool. This runs apr_pool_clear() and then frees all the
memory.
- * @param p The pool to destroy
- * @remark This will actually free the memory
+
+/*
+ * Pool Properties
*/
-APR_DECLARE(void) apr_pool_destroy(apr_pool_t *p);
/**
* Set the function to be called when an allocation failure occurs.
@@ -360,6 +270,27 @@
APR_DECLARE(apr_pool_t *) apr_pool_get_parent(apr_pool_t *pool);
/**
+ * Determine if pool a is an ancestor of pool b
+ * @param a The pool to search
+ * @param b The pool to search for
+ * @return True if a is an ancestor of b, NULL is considered an ancestor
+ * of all pools.
+ */
+APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b);
+
+/**
+ * Tag a pool (give it a name)
+ * @param pool The pool to tag
+ * @param tag The tag
+ */
+APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag);
+
+
+/*
+ * User data management
+ */
+
+/**
* Set the data associated with the current pool
* @param data The user data associated with the pool.
* @param key The key to use for association
@@ -407,6 +338,11 @@
APR_DECLARE(apr_status_t) apr_pool_userdata_get(void **data, const char *key,
apr_pool_t *pool);
+
+/*
+ * Cleanup
+ */
+
/**
* Register a function to be called when a pool is cleared or destroyed
* @param p The pool register the cleanup with
@@ -466,6 +402,102 @@
* closed because we are about to exec a new program
*/
APR_DECLARE(void) apr_pool_cleanup_for_exec(void);
+
+
+/**
+ * @defgroup PoolDebug Pool Debugging functions.
+ *
+ * pools have nested lifetimes -- sub_pools are destroyed when the
+ * parent pool is cleared. We allow certain liberties with operations
+ * on things such as tables (and on other structures in a more general
+ * sense) where we allow the caller to insert values into a table which
+ * were not allocated from the table's pool. The table's data will
+ * remain valid as long as all the pools from which its values are
+ * allocated remain valid.
+ *
+ * For example, if B is a sub pool of A, and you build a table T in
+ * pool B, then it's safe to insert data allocated in A or B into T
+ * (because B lives at most as long as A does, and T is destroyed when
+ * B is cleared/destroyed). On the other hand, if S is a table in
+ * pool A, it is safe to insert data allocated in A into S, but it
+ * is *not safe* to insert data allocated from B into S... because
+ * B can be cleared/destroyed before A is (which would leave dangling
+ * pointers in T's data structures).
+ *
+ * In general we say that it is safe to insert data into a table T
+ * if the data is allocated in any ancestor of T's pool. This is the
+ * basis on which the APR_POOL_DEBUG code works -- it tests these ancestor
+ * relationships for all data inserted into tables. APR_POOL_DEBUG also
+ * provides tools (apr_find_pool, and apr_pool_is_ancestor) for other
+ * folks to implement similar restrictions for their own data
+ * structures.
+ *
+ * However, sometimes this ancestor requirement is inconvenient --
+ * sometimes we're forced to create a sub pool (such as through
+ * apr_sub_req_lookup_uri), and the sub pool is guaranteed to have
+ * the same lifetime as the parent pool. This is a guarantee implemented
+ * by the *caller*, not by the pool code. That is, the caller guarantees
+ * they won't destroy the sub pool individually prior to destroying the
+ * parent pool.
+ *
+ * In this case the caller must call apr_pool_join() to indicate this
+ * guarantee to the APR_POOL_DEBUG code. There are a few examples spread
+ * through the standard modules.
+ *
+ * These functions are only implemented when #APR_POOL_DEBUG is set.
+ *
+ * @{
+ */
+#if defined(APR_POOL_DEBUG) || defined(DOXYGEN)
+/**
+ * Guarantee that a subpool has the same lifetime as the parent.
+ * @param p The parent pool
+ * @param sub The subpool
+ */
+APR_DECLARE(void) apr_pool_join(apr_pool_t *p, apr_pool_t *sub);
+
+/**
+ * Find a pool from something allocated in it.
+ * @param mem The thing allocated in the pool
+ * @return The pool it is allocated in
+ */
+APR_DECLARE(apr_pool_t *) apr_find_pool(const void *mem);
+
+/**
+ * Report the number of bytes currently in the pool
+ * @param p The pool to inspect
+ * @param recurse Recurse/include the subpools' sizes
+ * @return The number of bytes
+ */
+APR_DECLARE(apr_size_t) apr_pool_num_bytes(apr_pool_t *p, int recurse);
+
+/**
+ * Report the number of bytes currently in the list of free blocks
+ * @return The number of bytes
+ */
+APR_DECLARE(apr_size_t) apr_pool_free_blocks_num_bytes(void);
+
+/**
+ * Lock a pool
+ * @param pool The pool to lock
+ * @param flag The flag
+ */
+APR_DECLARE(void) apr_pool_lock(apr_pool_t *pool, int flag);
+
+/* @} */
+
+#else
+# ifdef apr_pool_join
+# undef apr_pool_join
+# endif
+# define apr_pool_join(a,b)
+
+# ifdef apr_pool_lock
+# undef apr_pool_lock
+# endif
+# define apr_pool_lock(pool, lock)
+#endif
+
/*
* Pool accessor functions.
1.126 +657 -233 apr/memory/unix/apr_pools.c
Index: apr_pools.c
===================================================================
RCS file: /home/cvs/apr/memory/unix/apr_pools.c,v
retrieving revision 1.125
retrieving revision 1.126
diff -u -r1.125 -r1.126
--- apr_pools.c 1 Jan 2002 22:46:48 -0000 1.125
+++ apr_pools.c 1 Jan 2002 23:49:23 -0000 1.126
@@ -80,6 +80,7 @@
#define BOUNDARY_INDEX 12
#define BOUNDARY_SIZE (1 << BOUNDARY_INDEX)
+
/*
* Macros and defines
*/
@@ -90,11 +91,14 @@
#define APR_ALIGN_DEFAULT(size) APR_ALIGN(size, 8)
+
/*
* Structures
*/
typedef struct cleanup_t cleanup_t;
+
+#if !defined(APR_POOL_DEBUG)
typedef struct allocator_t allocator_t;
typedef struct node_t node_t;
@@ -114,6 +118,24 @@
node_t *free[MAX_INDEX];
};
+#define SIZEOF_NODE_T APR_ALIGN_DEFAULT(sizeof(node_t))
+#define SIZEOF_ALLOCATOR_T APR_ALIGN_DEFAULT(sizeof(allocator_t))
+
+#else /* !defined(APR_POOL_DEBUG) */
+
+typedef struct debug_node_t debug_node_t;
+
+struct debug_node_t {
+ debug_node_t *next;
+ apr_uint32_t index;
+ void *beginp[64];
+ void *endp[64];
+};
+
+#define SIZEOF_DEBUG_NODE_T APR_ALIGN_DEFAULT(sizeof(debug_node_t))
+
+#endif /* !defined(APR_POOL_DEBUG) */
+
/* The ref field in the apr_pool_t struct holds a
* pointer to the pointer referencing this pool.
* It is used for parent, child, sibling management.
@@ -121,10 +143,6 @@
* to see how it is used.
*/
struct apr_pool_t {
- allocator_t *allocator;
- node_t *active;
- node_t *self; /* The node containing the pool itself */
- char *self_first_avail;
apr_pool_t *parent;
apr_pool_t *child;
apr_pool_t *sibling;
@@ -134,18 +152,32 @@
apr_abortfunc_t abort_fn;
apr_hash_t *user_data;
const char *tag;
+
+#if !defined(APR_POOL_DEBUG)
+ allocator_t *allocator;
+ node_t *active;
+ node_t *self; /* The node containing the pool itself */
+ char *self_first_avail;
+
+#else /* !defined(APR_POOL_DEBUG) */
+ debug_node_t *nodes;
+#if APR_HAS_THREADS
+ apr_thread_mutex_t *mutex;
+#endif
+#endif /* !defined(APR_POOL_DEBUG) */
};
-#define SIZEOF_NODE_T APR_ALIGN_DEFAULT(sizeof(node_t))
-#define SIZEOF_ALLOCATOR_T APR_ALIGN_DEFAULT(sizeof(allocator_t))
#define SIZEOF_POOL_T APR_ALIGN_DEFAULT(sizeof(apr_pool_t))
+
/*
* Variables
*/
+static apr_byte_t apr_pools_initialized = 0;
static apr_pool_t *global_pool = NULL;
-static apr_byte_t global_allocator_initialized = 0;
+
+#if !defined(APR_POOL_DEBUG)
static allocator_t global_allocator = {
0, /* max_index */
#if APR_HAS_THREADS
@@ -154,6 +186,62 @@
NULL, /* owner */
{ NULL } /* free[0] */
};
+#endif /* !defined(APR_POOL_DEBUG) */
+
+
+/*
+ * Local functions
+ */
+
+static void run_cleanups(cleanup_t *c);
+static void run_child_cleanups(cleanup_t *c);
+static void free_proc_chain(struct process_chain *procs);
+
+
+#if !defined(APR_POOL_DEBUG)
+/*
+ * Initialization
+ */
+
+APR_DECLARE(apr_status_t) apr_pool_initialize(void)
+{
+ apr_status_t rv;
+
+ if (apr_pools_initialized++)
+ return APR_SUCCESS;
+
+ memset(&global_allocator, 0, SIZEOF_ALLOCATOR_T);
+
+ if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL,
APR_POOL_FDEFAULT)) != APR_SUCCESS) {
+ return rv;
+ }
+
+#if APR_HAS_THREADS
+ if ((rv = apr_thread_mutex_create(&global_allocator.mutex,
+ APR_THREAD_MUTEX_DEFAULT, global_pool)) != APR_SUCCESS) {
+ return rv;
+ }
+#endif
+
+ global_allocator.owner = global_pool;
+ apr_pools_initialized = 1;
+
+ return APR_SUCCESS;
+}
+
+APR_DECLARE(void) apr_pool_terminate(void)
+{
+ if (!apr_pools_initialized)
+ return;
+
+ apr_pools_initialized = 0;
+
+ apr_pool_destroy(global_pool); /* This will also destroy the mutex */
+ global_pool = NULL;
+
+ memset(&global_allocator, 0, SIZEOF_ALLOCATOR_T);
+}
+
/*
* Memory allocation
@@ -409,13 +497,11 @@
return mem;
}
+
/*
- * Pool management
+ * Pool creation/destruction
*/
-static void run_cleanups(cleanup_t *c);
-static void free_proc_chain(struct process_chain *procs);
-
APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
{
node_t *active;
@@ -544,8 +630,8 @@
if (!parent)
parent = global_pool;
- if (!abort_fn)
- abort_fn = parent ? parent->abort_fn : NULL;
+ if (!abort_fn && parent)
+ abort_fn = parent->abort_fn;
allocator = parent ? parent->allocator : &global_allocator;
if ((node = node_malloc(allocator, MIN_ALLOC - SIZEOF_NODE_T)) == NULL) {
@@ -624,220 +710,554 @@
return APR_SUCCESS;
}
-APR_DECLARE(void) apr_pool_set_abort(apr_abortfunc_t abort_fn,
- apr_pool_t *pool)
-{
- pool->abort_fn = abort_fn;
-}
-APR_DECLARE(apr_abortfunc_t) apr_pool_get_abort(apr_pool_t *pool)
-{
- return pool->abort_fn;
-}
+/*
+ * "Print" functions
+ */
-APR_DECLARE(apr_pool_t *) apr_pool_get_parent(apr_pool_t *pool)
+/*
+ * apr_psprintf is implemented by writing directly into the current
+ * block of the pool, starting right at first_avail. If there's
+ * insufficient room, then a new block is allocated and the earlier
+ * output is copied over. The new block isn't linked into the pool
+ * until all the output is done.
+ *
+ * Note that this is completely safe because nothing else can
+ * allocate in this apr_pool_t while apr_psprintf is running. alarms are
+ * blocked, and the only thing outside of apr_pools.c that's invoked
+ * is apr_vformatter -- which was purposefully written to be
+ * self-contained with no callouts.
+ */
+
+struct psprintf_data {
+ apr_vformatter_buff_t vbuff;
+ node_t *node;
+ allocator_t *allocator;
+ apr_byte_t got_a_new_node;
+ node_t *free;
+};
+
+static int psprintf_flush(apr_vformatter_buff_t *vbuff)
{
- return pool->parent;
+ struct psprintf_data *ps = (struct psprintf_data *)vbuff;
+ node_t *node, *active;
+ apr_size_t cur_len;
+ char *strp;
+ allocator_t *allocator;
+
+ allocator = ps->allocator;
+ node = ps->node;
+ strp = ps->vbuff.curpos;
+ cur_len = strp - node->first_avail;
+
+ if ((active = node_malloc(allocator, cur_len << 1)) == NULL)
+ return -1;
+
+ memcpy(active->first_avail, node->first_avail, cur_len);
+
+ /* Reset the previous active node */
+ node->first_avail = (char *)node + SIZEOF_NODE_T;
+
+ if (ps->got_a_new_node) {
+ node->next = ps->free;
+ ps->free = node;
+ }
+
+ ps->node = active;
+ ps->vbuff.curpos = active->first_avail + cur_len;
+ ps->vbuff.endpos = active->endp - 1; /* Save a byte for NUL terminator */
+ ps->got_a_new_node = 1;
+
+ return 0;
}
-/* return TRUE if a is an ancestor of b
- * NULL is considered an ancestor of all pools
- */
-APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b)
+APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list
ap)
{
- if (a == NULL)
- return 1;
+ struct psprintf_data ps;
+ char *strp;
+ apr_size_t size;
+ node_t *active;
- while (b) {
- if (a == b)
- return 1;
+ ps.node = active = pool->active;
+ ps.allocator = pool->allocator;
+ ps.vbuff.curpos = ps.node->first_avail;
- b = b->parent;
+ /* Save a byte for the NUL terminator */
+ ps.vbuff.endpos = ps.node->endp - 1;
+ ps.got_a_new_node = 0;
+ ps.free = NULL;
+
+ if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
+ if (pool->abort_fn)
+ pool->abort_fn(APR_ENOMEM);
+
+ return NULL;
}
- return 0;
+ strp = ps.vbuff.curpos;
+ *strp++ = '\0';
+
+ size = strp - ps.node->first_avail;
+ size = APR_ALIGN_DEFAULT(size);
+ strp = ps.node->first_avail;
+ ps.node->first_avail += size;
+
+ /*
+ * Link the node in if it's a new one
+ */
+ if (ps.got_a_new_node) {
+ active->next = pool->active = ps.node;
+ }
+
+ if (ps.free)
+ node_free(ps.allocator, ps.free);
+
+ return strp;
}
+
+#else /* !defined(APR_POOL_DEBUG) */
/*
- * Initialization
+ * Initialization (debug)
*/
APR_DECLARE(apr_status_t) apr_pool_initialize(void)
{
apr_status_t rv;
- if (global_allocator_initialized++)
+ if (apr_pools_initialized++)
return APR_SUCCESS;
- memset(&global_allocator, 0, SIZEOF_ALLOCATOR_T);
-
- if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL,
APR_POOL_FDEFAULT)) != APR_SUCCESS) {
+ if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL,
+ APR_POOL_FNEW_ALLOCATOR|APR_POOL_FLOCK)) != APR_SUCCESS) {
return rv;
}
-#if APR_HAS_THREADS
- if ((rv = apr_thread_mutex_create(&global_allocator.mutex,
- APR_THREAD_MUTEX_DEFAULT, global_pool)) != APR_SUCCESS) {
- return rv;
- }
-#endif
-
- global_allocator.owner = global_pool;
- global_allocator_initialized = 1;
+ apr_pools_initialized = 1;
return APR_SUCCESS;
}
APR_DECLARE(void) apr_pool_terminate(void)
{
- if (!global_allocator_initialized)
+ if (!apr_pools_initialized)
return;
- global_allocator_initialized = 0;
+ apr_pools_initialized = 0;
apr_pool_destroy(global_pool); /* This will also destroy the mutex */
global_pool = NULL;
-
- memset(&global_allocator, 0, SIZEOF_ALLOCATOR_T);
}
+
/*
- * Cleanup
+ * Memory allocation (debug)
*/
-struct cleanup_t {
- struct cleanup_t *next;
- const void *data;
- apr_status_t (*plain_cleanup_fn)(void *data);
- apr_status_t (*child_cleanup_fn)(void *data);
-};
-
-APR_DECLARE(void) apr_pool_cleanup_register(apr_pool_t *p, const void *data,
- apr_status_t (*plain_cleanup_fn)(void *data),
- apr_status_t (*child_cleanup_fn)(void *data))
+APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
{
- cleanup_t *c;
+ debug_node_t *node;
+ void *mem;
- if (p != NULL) {
- c = (cleanup_t *) apr_palloc(p, sizeof(cleanup_t));
- c->data = data;
- c->plain_cleanup_fn = plain_cleanup_fn;
- c->child_cleanup_fn = child_cleanup_fn;
- c->next = p->cleanups;
- p->cleanups = c;
- }
-}
+ if ((mem = malloc(size)) == NULL) {
+ if (pool->abort_fn)
+ pool->abort_fn(APR_ENOMEM);
-APR_DECLARE(void) apr_pool_cleanup_kill(apr_pool_t *p, const void *data,
- apr_status_t (*cleanup_fn)(void *))
-{
- cleanup_t *c, **lastp;
+ return NULL;
+ }
- if (p == NULL)
- return;
+ node = pool->nodes;
+ if (node == NULL || node->index == 64) {
+ if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL) {
+ if (pool->abort_fn)
+ pool->abort_fn(APR_ENOMEM);
- c = p->cleanups;
- lastp = &p->cleanups;
- while (c) {
- if (c->data == data && c->plain_cleanup_fn == cleanup_fn) {
- *lastp = c->next;
- break;
+ return NULL;
}
- lastp = &c->next;
- c = c->next;
+ node->next = pool->nodes;
+ pool->nodes = node;
+ node->index = 0;
}
+
+ node->beginp[node->index] = mem;
+ node->endp[node->index] = (char *)mem + size;
+ node->index++;
+
+ return mem;
}
-APR_DECLARE(void) apr_pool_child_cleanup_set(apr_pool_t *p, const void *data,
- apr_status_t (*plain_cleanup_fn)
(void *),
- apr_status_t (*child_cleanup_fn)
(void *))
+APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
{
- cleanup_t *c;
+ void *mem;
+
+ mem = apr_palloc(pool, size);
+ memset(mem, 0, size);
- if (p == NULL)
- return;
+ return mem;
+}
- c = p->cleanups;
- while (c) {
- if (c->data == data && c->plain_cleanup_fn == plain_cleanup_fn) {
- c->child_cleanup_fn = child_cleanup_fn;
- break;
+
+/*
+ * Pool creation/destruction (debug)
+ */
+
+APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
+{
+ debug_node_t *node;
+ apr_uint32_t index;
+
+ /* Destroy the subpools. The subpools will detach themselves from
+ * this pool thus this loop is safe and easy.
+ */
+ while (pool->child)
+ apr_pool_destroy(pool->child);
+
+ /* Run cleanups */
+ run_cleanups(pool->cleanups);
+ pool->cleanups = NULL;
+
+ /* Free subprocesses */
+ free_proc_chain(pool->subprocesses);
+ pool->subprocesses = NULL;
+
+ /* Clear the user data. */
+ pool->user_data = NULL;
+
+ /* Free the blocks */
+ while ((node = pool->nodes) != NULL) {
+ pool->nodes = node->next;
+
+ for (index = 0; index < node->index; index++)
+ free(node->beginp[index]);
+
+ free(node);
+ }
+}
+
+APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
+{
+ apr_pool_clear(pool);
+
+ /* Remove the pool from the parents child list */
+ if (pool->parent) {
+#if APR_HAS_THREADS
+ apr_thread_mutex_t *mutex;
+
+ if ((mutex = pool->parent->mutex) != NULL)
+ apr_thread_mutex_lock(mutex);
+#endif
+
+ if ((*pool->ref = pool->sibling) != NULL)
+ pool->sibling->ref = pool->ref;
+
+#if APR_HAS_THREADS
+ if (mutex)
+ apr_thread_mutex_unlock(mutex);
+#endif
+ }
+
+ /* Free the pool itself */
+ free(pool);
+}
+
+APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
+ apr_pool_t *parent,
+ apr_abortfunc_t abort_fn,
+ apr_uint32_t flags)
+{
+ apr_pool_t *pool;
+
+ *newpool = NULL;
+
+ if (!parent)
+ parent = global_pool;
+
+ if (!abort_fn && parent)
+ abort_fn = parent->abort_fn;
+
+ if ((pool = malloc(SIZEOF_POOL_T)) == NULL) {
+ if (abort_fn)
+ abort_fn(APR_ENOMEM);
+
+ return APR_ENOMEM;
+ }
+
+ pool->abort_fn = abort_fn;
+ pool->child = NULL;
+ pool->cleanups = NULL;
+ pool->subprocesses = NULL;
+ pool->user_data = NULL;
+ pool->tag = NULL;
+ pool->nodes = NULL;
+
+ if ((flags & APR_POOL_FNEW_ALLOCATOR) == APR_POOL_FNEW_ALLOCATOR) {
+#if APR_HAS_THREADS
+ if ((flags & APR_POOL_FLOCK) == APR_POOL_FLOCK) {
+ apr_status_t rv;
+
+ if ((rv = apr_thread_mutex_create(&pool->mutex,
+ APR_THREAD_MUTEX_DEFAULT, pool)) != APR_SUCCESS) {
+ free(pool);
+ return rv;
+ }
}
+#endif
+ }
+ else {
+ if (parent)
+ pool->mutex = parent->mutex;
+ }
- c = c->next;
+ if ((pool->parent = parent) != NULL) {
+#if APR_HAS_THREADS
+ if (parent->mutex)
+ apr_thread_mutex_lock(parent->mutex);
+#endif
+ if ((pool->sibling = parent->child) != NULL)
+ pool->sibling->ref = &pool->sibling;
+
+ parent->child = pool;
+ pool->ref = &parent->child;
+
+#if APR_HAS_THREADS
+ if (parent->mutex)
+ apr_thread_mutex_unlock(parent->mutex);
+#endif
+ }
+ else {
+ pool->sibling = NULL;
+ pool->ref = NULL;
}
+
+ *newpool = pool;
+
+ return APR_SUCCESS;
}
-APR_DECLARE(apr_status_t) apr_pool_cleanup_run(apr_pool_t *p, void *data,
- apr_status_t (*cleanup_fn) (void *))
+
+/*
+ * "Print" functions (debug)
+ */
+
+struct psprintf_data {
+ apr_vformatter_buff_t vbuff;
+ char *mem;
+ apr_size_t size;
+};
+
+static int psprintf_flush(apr_vformatter_buff_t *vbuff)
{
- apr_pool_cleanup_kill(p, data, cleanup_fn);
- return (*cleanup_fn)(data);
+ struct psprintf_data *ps = (struct psprintf_data *)vbuff;
+ apr_size_t size;
+
+ size = ps->vbuff.curpos - ps->mem;
+
+ ps->size <<= 1;
+ if ((ps->mem = realloc(ps->mem, ps->size)) == NULL)
+ return -1;
+
+ ps->vbuff.curpos = ps->mem + size;
+ ps->vbuff.endpos = ps->mem + ps->size - 1;
+
+ return 0;
}
-static void run_cleanups(cleanup_t *c)
+APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list
ap)
{
- while (c) {
- (*c->plain_cleanup_fn)((void *)c->data);
- c = c->next;
+ struct psprintf_data ps;
+ debug_node_t *node;
+
+ ps.size = 64;
+ ps.mem = malloc(ps.size);
+ ps.vbuff.curpos = ps.mem;
+
+ /* Save a byte for the NUL terminator */
+ ps.vbuff.endpos = ps.mem + ps.size - 1;
+
+ if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
+ if (pool->abort_fn)
+ pool->abort_fn(APR_ENOMEM);
+
+ return NULL;
}
+
+ *ps.vbuff.curpos++ = '\0';
+
+ /*
+ * Link the node in
+ */
+ node = pool->nodes;
+ if (node == NULL || node->index == 64) {
+ if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL) {
+ if (pool->abort_fn)
+ pool->abort_fn(APR_ENOMEM);
+
+ return NULL;
+ }
+
+ node->next = pool->nodes;
+ pool->nodes = node;
+ node->index = 0;
+ }
+
+ node->beginp[node->index] = ps.mem;
+ node->endp[node->index] = ps.mem + ps.size;
+ node->index++;
+
+ return ps.mem;
}
-static void run_child_cleanups(cleanup_t *c)
+
+/*
+ * Debug functions
+ */
+
+APR_DECLARE(void) apr_pool_join(apr_pool_t *p, apr_pool_t *sub)
{
- while (c) {
- (*c->child_cleanup_fn)((void *)c->data);
- c = c->next;
+}
+
+static apr_pool_t *find_pool(apr_pool_t *pool, const void *mem)
+{
+ apr_pool_t *found;
+ debug_node_t *node;
+ apr_uint32_t index;
+
+ while (pool) {
+ node = pool->nodes;
+
+ while (node) {
+ for (index = 0; index < node->index; index++) {
+ if (node->beginp[index] <= mem &&
+ node->endp[index] > mem)
+ return pool;
+ }
+
+ node = node->next;
+ }
+
+ if ((found = find_pool(pool->child, mem)) != NULL)
+ return found;
+
+ pool = pool->sibling;
}
+
+ return NULL;
}
-static void cleanup_pool_for_exec(apr_pool_t *p)
+APR_DECLARE(apr_pool_t *) apr_find_pool(const void *mem)
{
- run_child_cleanups(p->cleanups);
- p->cleanups = NULL;
+ return find_pool(global_pool, mem);
+}
- for (p = p->child; p; p = p->sibling)
- cleanup_pool_for_exec(p);
+static apr_size_t pool_num_bytes(apr_pool_t *pool)
+{
+ apr_size_t size = 0;
+ debug_node_t *node;
+ apr_uint32_t index;
+
+ node = pool->nodes;
+
+ while (node) {
+ for (index = 0; index < node->index; index++) {
+ size += node->endp[index] - node->beginp[index];
+ }
+
+ node = node->next;
+ }
+
+ return size;
}
-APR_DECLARE(void) apr_pool_cleanup_for_exec(void)
+APR_DECLARE(apr_size_t) apr_pool_num_bytes(apr_pool_t *pool, int recurse)
{
-#if !defined(WIN32) && !defined(OS2)
- /*
- * Don't need to do anything on NT or OS/2, because I
- * am actually going to spawn the new process - not
- * exec it. All handles that are not inheritable, will
- * be automajically closed. The only problem is with
- * file handles that are open, but there isn't much
- * I can do about that (except if the child decides
- * to go out and close them
- */
- cleanup_pool_for_exec(global_pool);
-#endif /* !defined(WIN32) && !defined(OS2) */
+ apr_size_t size;
+
+ size = pool_num_bytes(pool);
+
+ if (recurse) {
+ pool = pool->child;
+
+ while (pool) {
+ size += apr_pool_num_bytes(pool, 1);
+
+ pool = pool->sibling;
+ }
+ }
+
+ return size;
}
-APR_DECLARE_NONSTD(apr_status_t) apr_pool_cleanup_null(void *data)
+APR_DECLARE(apr_size_t) apr_pool_free_blocks_num_bytes(void)
+{
+ /* This really doesn't apply with our current debug code, so: */
+ return 0;
+}
+
+APR_DECLARE(void) apr_pool_lock(apr_pool_t *pool, int flag)
{
- /* do nothing cleanup routine */
- return APR_SUCCESS;
}
+#endif /* !defined(APR_POOL_DEBUG) */
+
/*
- * Debug functions
+ * "Print" functions (common)
*/
-APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag)
+APR_DECLARE_NONSTD(char *) apr_psprintf(apr_pool_t *p, const char *fmt, ...)
{
- pool->tag = tag;
+ va_list ap;
+ char *res;
+
+ va_start(ap, fmt);
+ res = apr_pvsprintf(p, fmt, ap);
+ va_end(ap);
+ return res;
+}
+
+/*
+ * Pool Properties
+ */
+
+APR_DECLARE(void) apr_pool_set_abort(apr_abortfunc_t abort_fn,
+ apr_pool_t *pool)
+{
+ pool->abort_fn = abort_fn;
+}
+
+APR_DECLARE(apr_abortfunc_t) apr_pool_get_abort(apr_pool_t *pool)
+{
+ return pool->abort_fn;
+}
+
+APR_DECLARE(apr_pool_t *) apr_pool_get_parent(apr_pool_t *pool)
+{
+ return pool->parent;
}
-#if defined(APR_POOL_DEBUG)
-APR_DECLARE(apr_size_t) apr_pool_num_bytes(apr_pool_t *p, int recurse)
+/* return TRUE if a is an ancestor of b
+ * NULL is considered an ancestor of all pools
+ */
+APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b)
{
+ if (a == NULL)
+ return 1;
+
+ while (b) {
+ if (a == b)
+ return 1;
+
+ b = b->parent;
+ }
+
return 0;
}
-#endif
+
+APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag)
+{
+ pool->tag = tag;
+}
+
/*
* User data management
@@ -891,130 +1311,134 @@
/*
- * "Print" functions
- */
-
-/*
- * apr_psprintf is implemented by writing directly into the current
- * block of the pool, starting right at first_avail. If there's
- * insufficient room, then a new block is allocated and the earlier
- * output is copied over. The new block isn't linked into the pool
- * until all the output is done.
- *
- * Note that this is completely safe because nothing else can
- * allocate in this apr_pool_t while apr_psprintf is running. alarms are
- * blocked, and the only thing outside of alloc.c that's invoked
- * is apr_vformatter -- which was purposefully written to be
- * self-contained with no callouts.
+ * Cleanup
*/
-struct psprintf_data {
- apr_vformatter_buff_t vbuff;
- node_t *node;
- allocator_t *allocator;
- apr_byte_t got_a_new_node;
- node_t *free;
+struct cleanup_t {
+ struct cleanup_t *next;
+ const void *data;
+ apr_status_t (*plain_cleanup_fn)(void *data);
+ apr_status_t (*child_cleanup_fn)(void *data);
};
-static int psprintf_flush(apr_vformatter_buff_t *vbuff)
+APR_DECLARE(void) apr_pool_cleanup_register(apr_pool_t *p, const void *data,
+ apr_status_t (*plain_cleanup_fn)(void *data),
+ apr_status_t (*child_cleanup_fn)(void *data))
{
- struct psprintf_data *ps = (struct psprintf_data *)vbuff;
- node_t *node, *active;
- apr_size_t cur_len;
- char *strp;
- allocator_t *allocator;
+ cleanup_t *c;
- allocator = ps->allocator;
- node = ps->node;
- strp = ps->vbuff.curpos;
- cur_len = strp - node->first_avail;
+ if (p != NULL) {
+ c = (cleanup_t *) apr_palloc(p, sizeof(cleanup_t));
+ c->data = data;
+ c->plain_cleanup_fn = plain_cleanup_fn;
+ c->child_cleanup_fn = child_cleanup_fn;
+ c->next = p->cleanups;
+ p->cleanups = c;
+ }
+}
- if ((active = node_malloc(allocator, cur_len << 1)) == NULL)
- return -1;
+APR_DECLARE(void) apr_pool_cleanup_kill(apr_pool_t *p, const void *data,
+ apr_status_t (*cleanup_fn)(void *))
+{
+ cleanup_t *c, **lastp;
- memcpy(active->first_avail, node->first_avail, cur_len);
+ if (p == NULL)
+ return;
- /* Reset the previous active node */
- node->first_avail = (char *)node + SIZEOF_NODE_T;
+ c = p->cleanups;
+ lastp = &p->cleanups;
+ while (c) {
+ if (c->data == data && c->plain_cleanup_fn == cleanup_fn) {
+ *lastp = c->next;
+ break;
+ }
- if (ps->got_a_new_node) {
- node->next = ps->free;
- ps->free = node;
+ lastp = &c->next;
+ c = c->next;
}
-
- ps->node = active;
- ps->vbuff.curpos = active->first_avail + cur_len;
- ps->vbuff.endpos = active->endp - 1; /* Save a byte for NUL terminator */
- ps->got_a_new_node = 1;
-
- return 0;
}
-APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list
ap)
+APR_DECLARE(void) apr_pool_child_cleanup_set(apr_pool_t *p, const void *data,
+ apr_status_t (*plain_cleanup_fn)
(void *),
+ apr_status_t (*child_cleanup_fn)
(void *))
{
- struct psprintf_data ps;
- char *strp;
- apr_size_t size;
- node_t *active;
+ cleanup_t *c;
- ps.node = active = pool->active;
- ps.allocator = pool->allocator;
- ps.vbuff.curpos = ps.node->first_avail;
- /* Save a byte for the NUL terminator */
- ps.vbuff.endpos = ps.node->endp - 1;
- ps.got_a_new_node = 0;
- ps.free = NULL;
+ if (p == NULL)
+ return;
- if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
- if (pool->abort_fn)
- pool->abort_fn(APR_ENOMEM);
+ c = p->cleanups;
+ while (c) {
+ if (c->data == data && c->plain_cleanup_fn == plain_cleanup_fn) {
+ c->child_cleanup_fn = child_cleanup_fn;
+ break;
+ }
- return NULL;
+ c = c->next;
}
+}
- strp = ps.vbuff.curpos;
- *strp++ = '\0';
+APR_DECLARE(apr_status_t) apr_pool_cleanup_run(apr_pool_t *p, void *data,
+ apr_status_t (*cleanup_fn) (void *))
+{
+ apr_pool_cleanup_kill(p, data, cleanup_fn);
+ return (*cleanup_fn)(data);
+}
- size = strp - ps.node->first_avail;
- size = APR_ALIGN_DEFAULT(size);
- strp = ps.node->first_avail;
- ps.node->first_avail += size;
+static void run_cleanups(cleanup_t *c)
+{
+ while (c) {
+ (*c->plain_cleanup_fn)((void *)c->data);
+ c = c->next;
+ }
+}
- /*
- * Link the node in if it's a new one
- */
- if (ps.got_a_new_node) {
- active->next = pool->active = ps.node;
+static void run_child_cleanups(cleanup_t *c)
+{
+ while (c) {
+ (*c->child_cleanup_fn)((void *)c->data);
+ c = c->next;
}
+}
- if (ps.free)
- node_free(ps.allocator, ps.free);
+static void cleanup_pool_for_exec(apr_pool_t *p)
+{
+ run_child_cleanups(p->cleanups);
+ p->cleanups = NULL;
- return strp;
+ for (p = p->child; p; p = p->sibling)
+ cleanup_pool_for_exec(p);
}
-APR_DECLARE_NONSTD(char *) apr_psprintf(apr_pool_t *p, const char *fmt, ...)
+APR_DECLARE(void) apr_pool_cleanup_for_exec(void)
{
- va_list ap;
- char *res;
+#if !defined(WIN32) && !defined(OS2)
+ /*
+ * Don't need to do anything on NT or OS/2, because I
+ * am actually going to spawn the new process - not
+ * exec it. All handles that are not inheritable, will
+ * be automajically closed. The only problem is with
+ * file handles that are open, but there isn't much
+ * I can do about that (except if the child decides
+ * to go out and close them
+ */
+ cleanup_pool_for_exec(global_pool);
+#endif /* !defined(WIN32) && !defined(OS2) */
+}
- va_start(ap, fmt);
- res = apr_pvsprintf(p, fmt, ap);
- va_end(ap);
- return res;
+APR_DECLARE_NONSTD(apr_status_t) apr_pool_cleanup_null(void *data)
+{
+ /* do nothing cleanup routine */
+ return APR_SUCCESS;
}
-/*****************************************************************
- *
- * More grotty system stuff... subprocesses. Frump. These don't use
- * the generic cleanup interface because I don't want multiple
- * subprocesses to result in multiple three-second pauses; the
- * subprocesses have to be "freed" all at once. If someone comes
- * along with another resource they want to allocate which has the
- * same property, we might want to fold support for that into the
- * generic interface, but for now, it's a special case
+/* Subprocesses don't use the generic cleanup interface because
+ * we don't want multiple subprocesses to result in multiple
+ * three-second pauses; the subprocesses have to be "freed" all
+ * at once. If other resources are introduced with the same property,
+ * we might want to fold support for that into the generic interface.
+ * For now, it's a special case.
*/
-
APR_DECLARE(void) apr_pool_note_subprocess(apr_pool_t *pool, apr_proc_t *pid,
enum kill_conditions how)
{
@@ -1092,6 +1516,7 @@
if (pc->kill_how != kill_never)
(void)apr_proc_wait(pc->pid, NULL, NULL, APR_WAIT);
}
+
#ifdef WIN32
/*
* XXX: Do we need an APR function to clean-up a proc_t?
@@ -1107,6 +1532,5 @@
}
}
}
-
#endif /* WIN32 */
}