striker 02/05/26 02:00:08
Modified: include apr_allocator.h
memory/unix apr_pools.c
Log:
The 'reuse' patch. This will make pools a bit more careful about
when they have to allocate a new block from the allocator. If a pool
has an old block which can hold the allocation, it allocates from
that block and makes it the active block.
See: <[EMAIL PROTECTED]> for why we need it.
And: <[EMAIL PROTECTED]> for a
benchmark showing it doesn't affect httpd performance.
Revision Changes Path
1.6 +2 -0 apr/include/apr_allocator.h
Index: apr_allocator.h
===================================================================
RCS file: /home/cvs/apr/include/apr_allocator.h,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -r1.5 -r1.6
--- apr_allocator.h 26 May 2002 08:52:01 -0000 1.5
+++ apr_allocator.h 26 May 2002 09:00:08 -0000 1.6
@@ -83,7 +83,9 @@
struct apr_memnode_t {
apr_memnode_t *next;
+ apr_memnode_t **ref;
apr_uint32_t index;
+ apr_uint32_t free_index;
char *first_avail;
char *endp;
};
1.169 +149 -30 apr/memory/unix/apr_pools.c
Index: apr_pools.c
===================================================================
RCS file: /home/cvs/apr/memory/unix/apr_pools.c,v
retrieving revision 1.168
retrieving revision 1.169
diff -u -r1.168 -r1.169
--- apr_pools.c 26 May 2002 08:52:02 -0000 1.168
+++ apr_pools.c 26 May 2002 09:00:08 -0000 1.169
@@ -596,6 +596,7 @@
apr_memnode_t *active, *node;
void *mem;
char *endp;
+ apr_uint32_t free_index;
size = APR_ALIGN_DEFAULT(size);
active = pool->active;
@@ -609,17 +610,54 @@
return mem;
}
- if ((node = apr_allocator_alloc(pool->allocator, size)) == NULL) {
- if (pool->abort_fn)
- pool->abort_fn(APR_ENOMEM);
+ node = active->next;
+ endp = node->first_avail + size;
+ if (endp < node->endp) {
+ *node->ref = node->next;
+ node->next->ref = node->ref;
+ }
+ else {
+ if ((node = apr_allocator_alloc(pool->allocator, size)) == NULL) {
+ if (pool->abort_fn)
+ pool->abort_fn(APR_ENOMEM);
- return NULL;
+ return NULL;
+ }
+ endp = node->first_avail + size;
}
- active->next = pool->active = node;
+ node->free_index = 0;
mem = node->first_avail;
- node->first_avail += size;
+ node->first_avail = endp;
+
+ node->ref = active->ref;
+ *node->ref = node;
+ node->next = active;
+ active->ref = &node->next;
+
+ pool->active = node;
+
+ free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
+ BOUNDARY_SIZE) - BOUNDARY_SIZE) >>
BOUNDARY_INDEX;
+
+ active->free_index = free_index;
+ node = active->next;
+ if (free_index >= node->free_index)
+ return mem;
+
+ do {
+ node = node->next;
+ }
+ while (free_index < node->free_index);
+
+ *active->ref = active->next;
+ active->next->ref = active->ref;
+
+ active->ref = node->ref;
+ *active->ref = active;
+ active->next = node;
+ node->ref = &active->next;
return mem;
}
@@ -677,11 +715,13 @@
active = pool->active = pool->self;
active->first_avail = pool->self_first_avail;
- if (active->next == NULL)
+ if (active->next == active)
return;
+ *active->ref = NULL;
apr_allocator_free(pool->allocator, active->next);
- active->next = NULL;
+ active->next = active;
+ active->ref = &active->next;
}
APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
@@ -724,6 +764,7 @@
*/
allocator = pool->allocator;
active = pool->self;
+ *active->ref = NULL;
#if APR_HAS_THREADS
if (apr_allocator_get_owner(allocator) == pool) {
@@ -776,6 +817,9 @@
return APR_ENOMEM;
}
+ node->next = node;
+ node->ref = &node->next;
+
pool = (apr_pool_t *)node->first_avail;
node->first_avail = pool->self_first_avail = (char *)pool +
SIZEOF_POOL_T;
@@ -843,7 +887,7 @@
struct psprintf_data {
apr_vformatter_buff_t vbuff;
apr_memnode_t *node;
- apr_allocator_t *allocator;
+ apr_pool_t *pool;
apr_byte_t got_a_new_node;
apr_memnode_t *free;
};
@@ -852,29 +896,70 @@
{
struct psprintf_data *ps = (struct psprintf_data *)vbuff;
apr_memnode_t *node, *active;
- apr_size_t cur_len;
+ apr_size_t cur_len, size;
char *strp;
- apr_allocator_t *allocator;
+ apr_pool_t *pool;
+ apr_uint32_t free_index;
- allocator = ps->allocator;
- node = ps->node;
+ pool = ps->pool;
+ active = ps->node;
strp = ps->vbuff.curpos;
- cur_len = strp - node->first_avail;
+ cur_len = strp - active->first_avail;
+ size = cur_len << 1;
- if ((active = apr_allocator_alloc(allocator, cur_len << 1)) == NULL)
- return -1;
+ node = active->next;
+ if (!ps->got_a_new_node && node->first_avail + size < node->endp) {
+ *node->ref = node->next;
+ node->next->ref = node->ref;
+
+ node->ref = active->ref;
+ *node->ref = node;
+ node->next = active;
+ active->ref = &node->next;
+
+ node->free_index = 0;
+
+ pool->active = node;
+
+ free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
+ BOUNDARY_SIZE) - BOUNDARY_SIZE) >>
BOUNDARY_INDEX;
+
+ active->free_index = free_index;
+ node = active->next;
+ if (free_index < node->free_index) {
+ do {
+ node = node->next;
+ }
+ while (free_index < node->free_index);
+
+ *active->ref = active->next;
+ active->next->ref = active->ref;
+
+ active->ref = node->ref;
+ *active->ref = active;
+ active->next = node;
+ node->ref = &active->next;
+ }
+
+ node = pool->active;
+ }
+ else {
+ if ((node = apr_allocator_alloc(pool->allocator, size)) == NULL)
+ return -1;
- memcpy(active->first_avail, node->first_avail, cur_len);
+ if (ps->got_a_new_node) {
+ active->next = ps->free;
+ ps->free = node;
+ }
- if (ps->got_a_new_node) {
- node->next = ps->free;
- ps->free = node;
+ ps->got_a_new_node = 1;
}
- ps->node = active;
- ps->vbuff.curpos = active->first_avail + cur_len;
- ps->vbuff.endpos = active->endp - 1; /* Save a byte for NUL terminator */
- ps->got_a_new_node = 1;
+ memcpy(node->first_avail, active->first_avail, cur_len);
+
+ ps->node = node;
+ ps->vbuff.curpos = node->first_avail + cur_len;
+ ps->vbuff.endpos = node->endp - 1; /* Save a byte for NUL terminator */
return 0;
}
@@ -884,10 +969,11 @@
struct psprintf_data ps;
char *strp;
apr_size_t size;
- apr_memnode_t *active;
+ apr_memnode_t *active, *node;
+ apr_uint32_t free_index;
ps.node = active = pool->active;
- ps.allocator = pool->allocator;
+ ps.pool = pool;
ps.vbuff.curpos = ps.node->first_avail;
/* Save a byte for the NUL terminator */
@@ -910,15 +996,48 @@
strp = ps.node->first_avail;
ps.node->first_avail += size;
+ if (ps.free)
+ apr_allocator_free(pool->allocator, ps.free);
+
/*
* Link the node in if it's a new one
*/
- if (ps.got_a_new_node) {
- active->next = pool->active = ps.node;
+ if (!ps.got_a_new_node)
+ return strp;
+
+ active = pool->active;
+ node = ps.node;
+
+ node->free_index = 0;
+
+ node->ref = active->ref;
+ *node->ref = node;
+ node->next = active;
+ active->ref = &node->next;
+
+ pool->active = node;
+
+ free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
+ BOUNDARY_SIZE) - BOUNDARY_SIZE) >>
BOUNDARY_INDEX;
+
+ active->free_index = free_index;
+ node = active->next;
+
+ if (free_index >= node->free_index)
+ return strp;
+
+ do {
+ node = node->next;
}
+ while (free_index < node->free_index);
+
+ *active->ref = active->next;
+ active->next->ref = active->ref;
- if (ps.free)
- apr_allocator_free(ps.allocator, ps.free);
+ active->ref = node->ref;
+ *active->ref = active;
+ active->next = node;
+ node->ref = &active->next;
return strp;
}