Author: adrian.chadd
Date: Sat Jan 31 23:26:03 2009
New Revision: 13762
Modified:
branches/LUSCA_HEAD/libmem/MemPool.c
branches/LUSCA_HEAD/libmem/MemPool.h
branches/LUSCA_HEAD/src/MemPool.c
branches/LUSCA_HEAD/src/stat.c
Log:
Remove the mempools -pools- code, replace with just direct calls to
malloc/free.
The main reason why mempools save CPU (and why this commit increases CPU
use!) is that just -way- too many calls to the memory allocator/free routines
happen.
There are two reasons to remove this code:
* removing it will show which areas of the code call the allocator too
often;
* removing it will make writing threaded code easier down the track - I
don't really think it's a great idea to implement a really high-performance,
scalable allocator given it is 2009 and others do a much better job of this!
So yes, in the short term, CPU time will increase. In the longer term, the
real
solution to this problem is to -reduce- the number of memory allocator calls
which are made, rather than trying to cache the calls which are going on.
Modified: branches/LUSCA_HEAD/libmem/MemPool.c
==============================================================================
--- branches/LUSCA_HEAD/libmem/MemPool.c (original)
+++ branches/LUSCA_HEAD/libmem/MemPool.c Sat Jan 31 23:26:03 2009
@@ -21,10 +21,6 @@
extern time_t squid_curtime;
-/* exported */
-unsigned int mem_pool_alloc_calls = 0;
-unsigned int mem_pool_free_calls = 0;
-
/* module globals */
/* huge constant to set mem_idle_limit to "unlimited" */
@@ -79,12 +75,6 @@
new_pool_limit = MemPoolConfig.limit;
else
new_pool_limit = mem_unlimited_size;
- /* shrink memory pools if needed */
- if (TheMeter.idle.level > new_pool_limit) {
- debug(63, 1) ("Shrinking idle mem pools to %.2f MB\n",
toMB(new_pool_limit));
- memShrink(new_pool_limit);
- }
- assert(TheMeter.idle.level <= new_pool_limit);
mem_idle_limit = new_pool_limit;
}
@@ -123,22 +113,7 @@
static void
memShrink(size_t new_limit)
{
- size_t start_limit = TheMeter.idle.level;
- int i;
- debug(63, 1) ("memShrink: started with %ld KB goal: %ld KB\n",
- (long int) toKB(TheMeter.idle.level), (long int) toKB(new_limit));
- /* first phase: cut proportionally to the pool idle size */
- for (i = 0; i < Pools.count && TheMeter.idle.level > new_limit; ++i) {
- MemPool *pool = Pools.items[i];
- const size_t target_pool_size = (size_t) ((double)
pool->meter.idle.level
* new_limit) / start_limit;
- memPoolShrink(pool, target_pool_size);
- }
- debug(63, 1) ("memShrink: 1st phase done with %ld KB left\n", (long
int) toKB(TheMeter.idle.level));
- /* second phase: cut to 0 */
- for (i = 0; i < Pools.count && TheMeter.idle.level > new_limit; ++i)
- memPoolShrink(Pools.items[i], 0);
- debug(63, 1) ("memShrink: 2nd phase done with %ld KB left\n", (long
int) toKB(TheMeter.idle.level));
- assert(TheMeter.idle.level <= new_limit); /* paranoid */
+ /* NULL operation */
}
/* MemPoolMeter */
@@ -168,7 +143,6 @@
pool->real_obj_size = (obj_size & 7) ? (obj_size | 7) + 1 : obj_size;
#endif
pool->flags.dozero = 1;
- stackInit(&pool->pstack);
/* other members are set to 0 */
stackPush(&Pools, pool);
return pool;
@@ -191,7 +165,6 @@
break;
}
}
- stackClean(&pool->pstack);
xfree(pool);
}
@@ -215,34 +188,10 @@
gb_inc(&TheMeter.total, pool->obj_size);
memMeterAdd(TheMeter.inuse, pool->obj_size);
gb_inc(&mem_traffic_volume, pool->obj_size);
- mem_pool_alloc_calls++;
- if (pool->pstack.count) {
- assert(pool->meter.idle.level);
- memMeterDec(pool->meter.idle);
- memMeterDel(TheMeter.idle, pool->obj_size);
- gb_inc(&pool->meter.saved, 1);
- gb_inc(&TheMeter.saved, pool->obj_size);
- obj = stackPop(&pool->pstack);
-#if DEBUG_MEMPOOL
- (void) VALGRIND_MAKE_MEM_DEFINED(obj, pool->real_obj_size +
sizeof(struct
mempool_cookie));
-#else
- (void) VALGRIND_MAKE_MEM_DEFINED(obj, pool->obj_size);
-#endif
-#if DEBUG_MEMPOOL
- {
- struct mempool_cookie *cookie = (void *) (((unsigned char *) obj) +
pool->real_obj_size);
- assert(cookie->cookie == MEMPOOL_COOKIE2(obj));
- assert(cookie->pool == pool);
- cookie->cookie = MEMPOOL_COOKIE(obj);
- (void) VALGRIND_MAKE_MEM_NOACCESS(cookie, sizeof(cookie));
- }
- if (Config.onoff.zero_buffers || pool->flags.dozero)
- memset(obj, 0, pool->obj_size);
-#endif
- } else {
- assert(!pool->meter.idle.level);
- memMeterInc(pool->meter.alloc);
- memMeterAdd(TheMeter.alloc, pool->obj_size);
+ MemPoolStats.alloc_calls++;
+
+ memMeterInc(pool->meter.alloc);
+ memMeterAdd(TheMeter.alloc, pool->obj_size);
#if DEBUG_MEMPOOL
{
struct mempool_cookie *cookie;
@@ -253,12 +202,11 @@
(void) VALGRIND_MAKE_MEM_NOACCESS(cookie, sizeof(cookie));
}
#else
- if (MemPoolConfig.do_zero || pool->flags.dozero)
- obj = xcalloc(1, pool->obj_size);
- else
- obj = xmalloc(pool->obj_size);
+ if (MemPoolConfig.do_zero || pool->flags.dozero)
+ obj = xcalloc(1, pool->obj_size);
+ else
+ obj = xmalloc(pool->obj_size);
#endif
- }
return obj;
}
@@ -268,7 +216,7 @@
assert(pool && obj);
memMeterDec(pool->meter.inuse);
memMeterDel(TheMeter.inuse, pool->obj_size);
- mem_pool_free_calls++;
+ MemPoolStats.free_calls++;
(void) VALGRIND_CHECK_MEM_IS_ADDRESSABLE(obj, pool->obj_size);
#if DEBUG_MEMPOOL
{
@@ -279,38 +227,23 @@
cookie->cookie = MEMPOOL_COOKIE2(obj);
}
#endif
- if (TheMeter.idle.level + pool->obj_size <= mem_idle_limit) {
- memMeterInc(pool->meter.idle);
- memMeterAdd(TheMeter.idle, pool->obj_size);
- if (MemPoolConfig.do_zero || pool->flags.dozero)
+ if (MemPoolConfig.do_zero || pool->flags.dozero)
#if DEBUG_MEMPOOL
- memset(obj, 0xf0, pool->obj_size);
- (void) VALGRIND_MAKE_MEM_NOACCESS(obj, pool->real_obj_size +
sizeof(struct mempool_cookie));
+ memset(obj, 0xf0, pool->obj_size);
+ (void) VALGRIND_MAKE_MEM_NOACCESS(obj, pool->real_obj_size +
sizeof(struct mempool_cookie));
#else
- memset(obj, 0, pool->obj_size);
- (void) VALGRIND_MAKE_MEM_NOACCESS(obj, pool->obj_size);
+ memset(obj, 0, pool->obj_size);
+ (void) VALGRIND_MAKE_MEM_NOACCESS(obj, pool->obj_size);
#endif
- stackPush(&pool->pstack, obj);
- } else {
- memMeterDec(pool->meter.alloc);
- memMeterDel(TheMeter.alloc, pool->obj_size);
- xfree(obj);
- }
- assert(pool->meter.idle.level <= pool->meter.alloc.level);
+ memMeterDec(pool->meter.alloc);
+ memMeterDel(TheMeter.alloc, pool->obj_size);
+ xfree(obj);
}
static void
memPoolShrink(MemPool * pool, size_t new_limit)
{
- assert(pool);
- while (pool->meter.idle.level > new_limit && pool->pstack.count > 0) {
- memMeterDec(pool->meter.alloc);
- memMeterDec(pool->meter.idle);
- memMeterDel(TheMeter.idle, pool->obj_size);
- memMeterDel(TheMeter.alloc, pool->obj_size);
- xfree(stackPop(&pool->pstack));
- }
- assert(pool->meter.idle.level <= new_limit); /* paranoid */
+ /* NULL operation now */
}
int
Modified: branches/LUSCA_HEAD/libmem/MemPool.h
==============================================================================
--- branches/LUSCA_HEAD/libmem/MemPool.h (original)
+++ branches/LUSCA_HEAD/libmem/MemPool.h Sat Jan 31 23:26:03 2009
@@ -13,7 +13,6 @@
struct _MemPoolMeter {
MemMeter alloc;
MemMeter inuse;
- MemMeter idle;
gb_t saved;
gb_t total;
};
@@ -38,7 +37,6 @@
struct {
int dozero:1;
} flags;
- Stack pstack; /* stack for free pointers */
MemPoolMeter meter;
#if DEBUG_MEMPOOL
MemPoolMeter diff_meter;
Modified: branches/LUSCA_HEAD/src/MemPool.c
==============================================================================
--- branches/LUSCA_HEAD/src/MemPool.c (original)
+++ branches/LUSCA_HEAD/src/MemPool.c Sat Jan 31 23:26:03 2009
@@ -48,10 +48,10 @@
static void
memPoolMeterReport(const MemPoolMeter * pm, size_t obj_size,
- int alloc_count, int inuse_count, int idle_count, StoreEntry * e)
+ int alloc_count, int inuse_count, StoreEntry * e)
{
assert(pm);
-
storeAppendPrintf(e, "%d\t %ld\t %ld\t %.2f\t %d\t %d\t %ld\t %ld\t %d\t %d\t
%ld\t %ld\t %ld\t %.2f\t %.2f\t %.2f\t %ld\n",
+
storeAppendPrintf(e, "%d\t %ld\t %ld\t %.2f\t %d\t %d\t %ld\t %ld\t %d\t %ld\n",
/* alloc */
alloc_count,
(long int) toKB(obj_size * pm->alloc.level),
@@ -63,16 +63,7 @@
(long int) toKB(obj_size * pm->inuse.level),
(long int) toKB(obj_size * pm->inuse.hwater_level),
xpercentInt(pm->inuse.level, pm->alloc.level),
- /* idle */
- idle_count,
- (long int) toKB(obj_size * pm->idle.level),
- (long int) toKB(obj_size * pm->idle.hwater_level),
- /* (int)rint(xpercent(pm->idle.level, pm->alloc.level)), */
- /* saved */
- (long int) pm->saved.count,
- xpercent(pm->saved.count, mem_traffic_volume.count),
- xpercent(obj_size * gb_to_double(&pm->saved),
gb_to_double(&mem_traffic_volume)),
- xpercent(pm->saved.count, pm->total.count),
+ /* total */
(long int) pm->total.count);
}
@@ -93,8 +84,7 @@
return;
storeAppendPrintf(e, " \t \t ");
memPoolMeterReport(&diff, pool->obj_size,
- diff.alloc.level, pool->meter.inuse.level, pool->meter.idle.level,
- e);
+ diff.alloc.level, pool->meter.inuse.level, e);
}
#endif
@@ -102,11 +92,10 @@
memPoolReport(MemPool * pool, StoreEntry * e, int diff)
{
assert(pool);
- storeAppendPrintf(e, "%-20s\t %4d\t ",
- pool->label, (int) pool->obj_size);
+ storeAppendPrintf(e, "%-20s %s \t %4d\t ",
+ pool->label, pool->flags.dozero ? "" : "(no-zero)", (int)
pool->obj_size);
memPoolMeterReport(&pool->meter, pool->obj_size,
- pool->meter.alloc.level, pool->meter.inuse.level,
pool->meter.idle.level,
- e);
+ pool->meter.alloc.level, pool->meter.inuse.level, e);
#if DEBUG_MEMPOOL
if (diff)
memPoolDiffReport(pool, e);
@@ -121,7 +110,6 @@
size_t overhd_size = 0;
int alloc_count = 0;
int inuse_count = 0;
- int idle_count = 0;
int i;
int diff = 0;
#if DEBUG_MEMPOOL
@@ -140,12 +128,10 @@
storeAppendPrintf(e, "Current memory usage:\n");
/* heading */
storeAppendPrintf(e, "Pool\t Obj Size\t"
- "Allocated\t\t\t\t\t In Use\t\t\t\t Idle\t\t\t Allocations Saved\t\t\t
Hit Rate\t\n"
+ "Allocated\t\t\t\t\t In Use\t\t\t\t Hit Rate\t\n"
" \t (bytes)\t"
"(#)\t (KB)\t high (KB)\t high (hrs)\t impact (%%total)\t"
- "(#)\t (KB)\t high (KB)\t portion (%%alloc)\t"
"(#)\t (KB)\t high (KB)\t"
- "(number)\t (%%num)\t (%%vol)\t"
"(%%num)\t"
"(number)"
"\n");
@@ -156,16 +142,14 @@
memPoolReport(pool, e, diff);
alloc_count += pool->meter.alloc.level;
inuse_count += pool->meter.inuse.level;
- idle_count += pool->meter.idle.level;
}
overhd_size += sizeof(MemPool) + sizeof(MemPool *) +
- strlen(pool->label) + 1 +
- pool->pstack.capacity * sizeof(void *);
+ strlen(pool->label) + 1;
}
overhd_size += sizeof(Pools) + Pools.capacity * sizeof(MemPool *);
/* totals */
- storeAppendPrintf(e, "%-20s\t %-4s\t ", "Total", "-");
- memPoolMeterReport(&TheMeter, 1, alloc_count, inuse_count, idle_count,
e);
+ storeAppendPrintf(e, "%-20s\t\t ", "Total", "-");
+ memPoolMeterReport(&TheMeter, 1, alloc_count, inuse_count, e);
storeAppendPrintf(e, "Cumulative allocated volume: %s\n",
gb_to_str(&mem_traffic_volume));
/* overhead */
storeAppendPrintf(e, "Current overhead: %ld bytes (%.3f%%)\n",
Modified: branches/LUSCA_HEAD/src/stat.c
==============================================================================
--- branches/LUSCA_HEAD/src/stat.c (original)
+++ branches/LUSCA_HEAD/src/stat.c Sat Jan 31 23:26:03 2009
@@ -86,9 +86,6 @@
static int NCountHourHist = 0;
CBDATA_TYPE(StatObjectsState);
-extern unsigned int mem_pool_alloc_calls;
-extern unsigned int mem_pool_free_calls;
-
static void
statUtilization(StoreEntry * e)
{
@@ -657,9 +654,9 @@
storeAppendPrintf(sentry, "\tTotal accounted: %6d KB\n",
(int) (statMemoryAccounted() >> 10));
storeAppendPrintf(sentry, "\tmemPoolAlloc calls: %u\n",
- mem_pool_alloc_calls);
+ MemPoolStats.alloc_calls);
storeAppendPrintf(sentry, "\tmemPoolFree calls: %u\n",
- mem_pool_free_calls);
+ MemPoolStats.free_calls);
storeAppendPrintf(sentry, "File descriptor usage for %s:\n", appname);
storeAppendPrintf(sentry, "\tMaximum number of file
descriptors: %4d\n",
--~--~---------~--~----~------------~-------~--~----~
You received this message because you are subscribed to the Google Groups
"lusca-commit" group.
To post to this group, send email to [email protected]
To unsubscribe from this group, send email to
[email protected]
For more options, visit this group at
http://groups.google.com/group/lusca-commit?hl=en
-~----------~----~----~----~------~----~------~--~---