This change splits MemPool.cc into a general framework and specific allocators: MemPool.cc — general framework; MemPoolChunked.cc — chunked allocator; MemPoolMalloc.cc — malloc allocator, with no freelist (yet).
This may also fix some statistics bugs: there was some oddness there caused by confusion between allocator-local counters and MemMeter counters. The next step is adding back the simple freelist to MemPoolMalloc.cc, making it a full-blown pooled allocator, configurable via squid.conf. Regards, Henrik
# Bazaar merge directive format 2 (Bazaar 0.90) # revision_id: [email protected]\ # wyillk99j8xh6gsz # target_branch: http://www.squid-cache.org/bzr/squid3/trunk/ # testament_sha1: 2620126df44429193c1456740f28dc63960ed438 # timestamp: 2010-05-28 23:58:31 +0200 # base_revision_id: [email protected]\ # agkq7l8rn7lertwg # # Begin patch === modified file 'include/MemPool.h' --- include/MemPool.h 2010-03-21 03:08:26 +0000 +++ include/MemPool.h 2010-05-28 21:53:06 +0000 @@ -1,5 +1,5 @@ -#ifndef _MEM_POOLS_H_ -#define _MEM_POOLS_H_ +#ifndef _MEM_POOL_H_ +#define _MEM_POOL_H_ /** \defgroup MemPoolsAPI Memory Management (Memory Pool Allocator) @@ -63,7 +63,6 @@ #define MEM_MAX_FREE 65535 /* ushort is max number of items per chunk */ class MemImplementingAllocator; -class MemChunk; class MemPoolStats; /// \ingroup MemPoolsAPI @@ -130,13 +129,6 @@ MemImplementingAllocator * create(const char *label, size_t obj_size); /** - \param label Name for the pool. Displayed in stats. - \param obj_size Size of elements in MemPool. - \param chunked ?? - */ - MemImplementingAllocator * create(const char *label, size_t obj_size, bool const chunked); - - /** * Sets upper limit in bytes to amount of free ram kept in pools. This is * not strict upper limit, but a hint. When MemPools are over this limit, * totally free chunks are immediately considered for release. Otherwise @@ -197,7 +189,7 @@ \param stats Object to be filled with statistical data about pool. \retval Number of objects in use, ie. allocated. 
*/ - virtual int getStats(MemPoolStats * stats) = 0; + virtual int getStats(MemPoolStats * stats, int accumulate = 0) = 0; virtual MemPoolMeter const &getMeter() const = 0; @@ -342,15 +334,13 @@ virtual bool idleTrigger(int shift) const = 0; virtual void clean(time_t maxage) = 0; - /** Hint to the allocator - may be ignored */ - virtual void setChunkSize(size_t chunksize) {} virtual size_t objectSize() const; virtual int getInUseCount() = 0; protected: virtual void *allocate() = 0; virtual void deallocate(void *) = 0; -private: MemPoolMeter meter; + int memPID; public: MemImplementingAllocator *next; public: @@ -360,93 +350,6 @@ }; /// \ingroup MemPoolsAPI -class MemPool : public MemImplementingAllocator -{ -public: - friend class MemChunk; - MemPool(const char *label, size_t obj_size); - ~MemPool(); - void convertFreeCacheToChunkFreeCache(); - virtual void clean(time_t maxage); - - /** - \param stats Object to be filled with statistical data about pool. - \retval Number of objects in use, ie. allocated. - */ - virtual int getStats(MemPoolStats * stats); - - void createChunk(); - void *get(); - void push(void *obj); - virtual int getInUseCount(); -protected: - virtual void *allocate(); - virtual void deallocate(void *); -public: - /** - * Allows you tune chunk size of pooling. Objects are allocated in chunks - * instead of individually. This conserves memory, reduces fragmentation. - * Because of that memory can be freed also only in chunks. Therefore - * there is tradeoff between memory conservation due to chunking and free - * memory fragmentation. - * - \note As a general guideline, increase chunk size only for pools that keep - * very many items for relatively long time. 
- */ - virtual void setChunkSize(size_t chunksize); - - virtual bool idleTrigger(int shift) const; - - size_t chunk_size; - int chunk_capacity; - int memPID; - int chunkCount; - size_t inuse; - size_t idle; - void *freeCache; - MemChunk *nextFreeChunk; - MemChunk *Chunks; - Splay<MemChunk *> allChunks; -}; - -/// \ingroup MemPoolsAPI -class MemMalloc : public MemImplementingAllocator -{ -public: - MemMalloc(char const *label, size_t aSize); - virtual bool idleTrigger(int shift) const; - virtual void clean(time_t maxage); - - /** - \param stats Object to be filled with statistical data about pool. - \retval Number of objects in use, ie. allocated. - */ - virtual int getStats(MemPoolStats * stats); - - virtual int getInUseCount(); -protected: - virtual void *allocate(); - virtual void deallocate(void *); -private: - int inuse; -}; - -/// \ingroup MemPoolsAPI -class MemChunk -{ -public: - MemChunk(MemPool *pool); - ~MemChunk(); - void *freeList; - void *objCache; - int inuse_count; - MemChunk *nextFreeChunk; - MemChunk *next; - time_t lastref; - MemPool *pool; -}; - -/// \ingroup MemPoolsAPI class MemPoolStats { public: @@ -536,4 +439,4 @@ } -#endif /* _MEM_POOLS_H_ */ +#endif /* _MEM_POOL_H_ */ === added file 'include/MemPoolChunked.h' --- include/MemPoolChunked.h 1970-01-01 00:00:00 +0000 +++ include/MemPoolChunked.h 2010-05-28 21:53:06 +0000 @@ -0,0 +1,82 @@ +#ifndef _MEM_POOL_CHUNKED_H_ +#define _MEM_POOL_CHUNKED_H_ + +#include "MemPool.h" + +/// \ingroup MemPoolsAPI +#define MEM_PAGE_SIZE 4096 +/// \ingroup MemPoolsAPI +#define MEM_CHUNK_SIZE 4096 * 4 +/// \ingroup MemPoolsAPI +#define MEM_CHUNK_MAX_SIZE 256 * 1024 /* 2MB */ +/// \ingroup MemPoolsAPI +#define MEM_MIN_FREE 32 +/// \ingroup MemPoolsAPI +#define MEM_MAX_FREE 65535 /* ushort is max number of items per chunk */ + +class MemChunk; + +/// \ingroup MemPoolsAPI +class MemPoolChunked : public MemImplementingAllocator +{ +public: + friend class MemChunk; + MemPoolChunked(const char *label, size_t obj_size); 
+ ~MemPoolChunked(); + void convertFreeCacheToChunkFreeCache(); + virtual void clean(time_t maxage); + + /** + \param stats Object to be filled with statistical data about pool. + \retval Number of objects in use, ie. allocated. + */ + virtual int getStats(MemPoolStats * stats, int accumulate); + + void createChunk(); + void *get(); + void push(void *obj); + virtual int getInUseCount(); +protected: + virtual void *allocate(); + virtual void deallocate(void *); +public: + /** + * Allows you tune chunk size of pooling. Objects are allocated in chunks + * instead of individually. This conserves memory, reduces fragmentation. + * Because of that memory can be freed also only in chunks. Therefore + * there is tradeoff between memory conservation due to chunking and free + * memory fragmentation. + * + \note As a general guideline, increase chunk size only for pools that keep + * very many items for relatively long time. + */ + virtual void setChunkSize(size_t chunksize); + + virtual bool idleTrigger(int shift) const; + + size_t chunk_size; + int chunk_capacity; + int memPID; + int chunkCount; + void *freeCache; + MemChunk *nextFreeChunk; + MemChunk *Chunks; + Splay<MemChunk *> allChunks; +}; + +/// \ingroup MemPoolsAPI +class MemChunk +{ +public: + MemChunk(MemPoolChunked *pool); + ~MemChunk(); + void *freeList; + void *objCache; + int inuse_count; + MemChunk *nextFreeChunk; + MemChunk *next; + time_t lastref; + MemPoolChunked *pool; +}; + +#endif /* _MEM_POOL_CHUNKED_H_ */ === added file 'include/MemPoolMalloc.h' --- include/MemPoolMalloc.h 1970-01-01 00:00:00 +0000 +++ include/MemPoolMalloc.h 2010-05-28 21:53:06 +0000 @@ -0,0 +1,47 @@ +#ifndef _MEM_POOL_MALLOC_H_ +#define _MEM_POOL_MALLOC_H_ + +/** + \defgroup MemPoolsAPI Memory Management (Memory Pool Allocator) + \ingroup Components + * + *\par + * MemPools are a pooled memory allocator running on top of malloc(). 
It's + * purpose is to reduce memory fragmentation and provide detailed statistics + * on memory consumption. + * + \par + * Preferably all memory allocations in Squid should be done using MemPools + * or one of the types built on top of it (i.e. cbdata). + * + \note Usually it is better to use cbdata types as these gives you additional + * safeguards in references and typechecking. However, for high usage pools where + * the cbdata functionality of cbdata is not required directly using a MemPool + * might be the way to go. + */ + +#include "MemPool.h" + +/// \ingroup MemPoolsAPI +class MemPoolMalloc : public MemImplementingAllocator +{ +public: + MemPoolMalloc(char const *label, size_t aSize); + virtual bool idleTrigger(int shift) const; + virtual void clean(time_t maxage); + + /** + \param stats Object to be filled with statistical data about pool. + \retval Number of objects in use, ie. allocated. + */ + virtual int getStats(MemPoolStats * stats, int accumulate); + + virtual int getInUseCount(); +protected: + virtual void *allocate(); + virtual void deallocate(void *); +private: +}; + + +#endif /* _MEM_POOL_MALLOC_H_ */ === modified file 'lib/Makefile.am' --- lib/Makefile.am 2009-12-19 11:56:02 +0000 +++ lib/Makefile.am 2010-05-28 21:53:06 +0000 @@ -49,6 +49,8 @@ win32lib.c libmiscutil_a_SOURCES = \ MemPool.cc \ + MemPoolChunked.cc \ + MemPoolMalloc.cc \ base64.c \ charset.c \ getfullhostname.c \ === modified file 'lib/MemPool.cc' --- lib/MemPool.cc 2010-04-14 21:04:28 +0000 +++ lib/MemPool.cc 2010-05-28 21:53:06 +0000 @@ -87,6 +87,8 @@ #endif #include "MemPool.h" +#include "MemPoolChunked.h" +#include "MemPoolMalloc.h" #define FLUSH_LIMIT 1000 /* Flush memPool counters to memMeters after flush limit calls */ #define MEM_MAX_MMAP_CHUNKS 2048 @@ -107,10 +109,6 @@ static int Pool_id_counter = 0; -/* local prototypes */ -static int memCompChunks(MemChunk * const &, MemChunk * const &); -static int memCompObjChunks(void * const &, MemChunk * const &); - MemPools & 
MemPools::GetInstance() { @@ -165,195 +163,6 @@ return mem_idle_limit; } -/* Compare chunks */ -static int -memCompChunks(MemChunk * const &chunkA, MemChunk * const &chunkB) -{ - if (chunkA->objCache > chunkB->objCache) - return 1; - else if (chunkA->objCache < chunkB->objCache) - return -1; - else - return 0; -} - -/* Compare object to chunk */ -static int -memCompObjChunks(void *const &obj, MemChunk * const &chunk) -{ - /* object is lower in memory than the chunks arena */ - if (obj < chunk->objCache) - return -1; - /* object is within the pool */ - if (obj < (void *) ((char *) chunk->objCache + chunk->pool->chunk_size)) - return 0; - /* object is above the pool */ - return 1; -} - -MemChunk::MemChunk(MemPool *aPool) -{ - /* should have a pool for this too - - * note that this requres: - * allocate one chunk for the pool of chunks's first chunk - * allocate a chunk from that pool - * move the contents of one chunk into the other - * free the first chunk. - */ - inuse_count = 0; - next = NULL; - pool = aPool; - - objCache = xcalloc(1, pool->chunk_size); - freeList = objCache; - void **Free = (void **)freeList; - - for (int i = 1; i < pool->chunk_capacity; i++) { - *Free = (void *) ((char *) Free + pool->obj_size); - void **nextFree = (void **)*Free; - (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->obj_size); - Free = nextFree; - } - nextFreeChunk = pool->nextFreeChunk; - pool->nextFreeChunk = this; - - memMeterAdd(pool->getMeter().alloc, pool->chunk_capacity); - memMeterAdd(pool->getMeter().idle, pool->chunk_capacity); - pool->idle += pool->chunk_capacity; - pool->chunkCount++; - lastref = squid_curtime; - pool->allChunks.insert(this, memCompChunks); -} - -MemPool::MemPool(const char *aLabel, size_t aSize) : MemImplementingAllocator(aLabel, aSize) -{ - chunk_size = 0; - chunk_capacity = 0; - memPID = 0; - chunkCount = 0; - inuse = 0; - idle = 0; - freeCache = 0; - nextFreeChunk = 0; - Chunks = 0; - next = 0; - MemImplementingAllocator *last_pool; - - assert(aLabel 
!= NULL && aSize); - - setChunkSize(MEM_CHUNK_SIZE); - - /* Append as Last */ - for (last_pool = MemPools::GetInstance().pools; last_pool && last_pool->next;) - last_pool = last_pool->next; - if (last_pool) - last_pool->next = this; - else - MemPools::GetInstance().pools = this; - - memPID = ++Pool_id_counter; -} - -MemChunk::~MemChunk() -{ - memMeterDel(pool->getMeter().alloc, pool->chunk_capacity); - memMeterDel(pool->getMeter().idle, pool->chunk_capacity); - pool->idle -= pool->chunk_capacity; - pool->chunkCount--; - pool->allChunks.remove(this, memCompChunks); - xfree(objCache); -} - -void -MemPool::push(void *obj) -{ - void **Free; - /* XXX We should figure out a sane way of avoiding having to clear - * all buffers. For example data buffers such as used by MemBuf do - * not really need to be cleared.. There was a condition based on - * the object size here, but such condition is not safe. - */ - if (doZeroOnPush) - memset(obj, 0, obj_size); - Free = (void **)obj; - *Free = freeCache; - freeCache = obj; - (void) VALGRIND_MAKE_MEM_NOACCESS(obj, obj_size); -} - -/* - * Find a chunk with a free item. - * Create new chunk on demand if no chunk with frees found. - * Insert new chunk in front of lowest ram chunk, making it preferred in future, - * and resulting slow compaction towards lowest ram area. 
- */ -void * -MemPool::get() -{ - void **Free; - - /* first, try cache */ - if (freeCache) { - Free = (void **)freeCache; - (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size); - freeCache = *Free; - *Free = NULL; - return Free; - } - /* then try perchunk freelist chain */ - if (nextFreeChunk == NULL) { - /* no chunk with frees, so create new one */ - createChunk(); - } - /* now we have some in perchunk freelist chain */ - MemChunk *chunk = nextFreeChunk; - - Free = (void **)chunk->freeList; - chunk->freeList = *Free; - *Free = NULL; - chunk->inuse_count++; - chunk->lastref = squid_curtime; - - if (chunk->freeList == NULL) { - /* last free in this chunk, so remove us from perchunk freelist chain */ - nextFreeChunk = chunk->nextFreeChunk; - } - (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size); - return Free; -} - -/* just create a new chunk and place it into a good spot in the chunk chain */ -void -MemPool::createChunk() -{ - MemChunk *chunk, *newChunk; - - newChunk = new MemChunk(this); - - chunk = Chunks; - if (chunk == NULL) { /* first chunk in pool */ - Chunks = newChunk; - return; - } - if (newChunk->objCache < chunk->objCache) { - /* we are lowest ram chunk, insert as first chunk */ - newChunk->next = chunk; - Chunks = newChunk; - return; - } - while (chunk->next) { - if (newChunk->objCache < chunk->next->objCache) { - /* new chunk is in lower ram, insert here */ - newChunk->next = chunk->next; - chunk->next = newChunk; - return; - } - chunk = chunk->next; - } - /* we are the worst chunk in chain, add as last */ - chunk->next = newChunk; -} - /* Change the default calue of defaultIsChunked to override * all pools - including those used before main() starts where * MemPools::GetInstance().setDefaultPoolChunking() can be called. 
@@ -369,49 +178,14 @@ #endif } -void -MemPool::setChunkSize(size_t chunksize) -{ - int cap; - size_t csize = chunksize; - - if (Chunks) /* unsafe to tamper */ - return; - - csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */ - cap = csize / obj_size; - - if (cap < MEM_MIN_FREE) - cap = MEM_MIN_FREE; - if (cap * obj_size > MEM_CHUNK_MAX_SIZE) - cap = MEM_CHUNK_MAX_SIZE / obj_size; - if (cap > MEM_MAX_FREE) - cap = MEM_MAX_FREE; - if (cap < 1) - cap = 1; - - csize = cap * obj_size; - csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */ - cap = csize / obj_size; - - chunk_capacity = cap; - chunk_size = csize; -} - MemImplementingAllocator * MemPools::create(const char *label, size_t obj_size) { - return create (label, obj_size, defaultIsChunked); -} - -MemImplementingAllocator * -MemPools::create(const char *label, size_t obj_size, bool const chunked) -{ ++poolCount; - if (chunked) - return new MemPool (label, obj_size); + if (defaultIsChunked) + return new MemPoolChunked (label, obj_size); else - return new MemMalloc (label, obj_size); + return new MemPoolMalloc (label, obj_size); } void @@ -420,40 +194,6 @@ defaultIsChunked = aBool; } -/* - * warning: we do not clean this entry from Pools assuming destruction - * is used at the end of the program only - */ -MemPool::~MemPool() -{ - MemChunk *chunk, *fchunk; - MemImplementingAllocator *find_pool, *prev_pool; - - flushMetersFull(); - clean(0); - assert(inuse == 0 && "While trying to destroy pool"); - - chunk = Chunks; - while ( (fchunk = chunk) != NULL) { - chunk = chunk->next; - delete fchunk; - } - /* TODO we should be doing something about the original Chunks pointer here. 
*/ - - assert(MemPools::GetInstance().pools != NULL && "Called MemPool::~MemPool, but no pool exists!"); - - /* Pool clean, remove it from List and free */ - for (find_pool = MemPools::GetInstance().pools, prev_pool = NULL; (find_pool && this != find_pool); find_pool = find_pool->next) - prev_pool = find_pool; - assert(find_pool != NULL && "pool to destroy not found"); - - if (prev_pool) - prev_pool->next = next; - else - MemPools::GetInstance().pools = next; - --MemPools::GetInstance().poolCount; -} - char const * MemAllocator::objectType() const { @@ -473,16 +213,12 @@ calls = free_calls; if (calls) { - getMeter().gb_freed.count += calls; - memMeterDel(getMeter().inuse, calls); - memMeterAdd(getMeter().idle, calls); + meter.gb_freed.count += calls; free_calls = 0; } calls = alloc_calls; if (calls) { meter.gb_saved.count += calls; - memMeterAdd(meter.inuse, calls); - memMeterDel(meter.idle, calls); alloc_calls = 0; } } @@ -532,20 +268,6 @@ } void * -MemMalloc::allocate() -{ - inuse++; - return xcalloc(1, obj_size); -} - -void -MemMalloc::deallocate(void *obj) -{ - inuse--; - xfree(obj); -} - -void * MemImplementingAllocator::alloc() { if (++alloc_calls == FLUSH_LIMIT) @@ -563,123 +285,6 @@ ++free_calls; } -int -MemPool::getInUseCount() -{ - return inuse; -} - -void * -MemPool::allocate() -{ - void *p = get(); - assert(idle); - --idle; - ++inuse; - return p; -} - -void -MemPool::deallocate(void *obj) -{ - push(obj); - assert(inuse); - --inuse; - ++idle; -} - -void -MemPool::convertFreeCacheToChunkFreeCache() -{ - void *Free; - /* - * OK, so we have to go through all the global freeCache and find the Chunk - * any given Free belongs to, and stuff it into that Chunk's freelist - */ - - while ((Free = freeCache) != NULL) { - MemChunk *chunk = NULL; - chunk = const_cast<MemChunk *>(*allChunks.find(Free, memCompObjChunks)); - assert(splayLastResult == 0); - assert(chunk->inuse_count > 0); - chunk->inuse_count--; - (void) VALGRIND_MAKE_MEM_DEFINED(Free, sizeof(void *)); 
- freeCache = *(void **)Free; /* remove from global cache */ - *(void **)Free = chunk->freeList; /* stuff into chunks freelist */ - (void) VALGRIND_MAKE_MEM_NOACCESS(Free, sizeof(void *)); - chunk->freeList = Free; - chunk->lastref = squid_curtime; - } - -} - -/* removes empty Chunks from pool */ -void -MemPool::clean(time_t maxage) -{ - MemChunk *chunk, *freechunk, *listTail; - time_t age; - - if (!this) - return; - if (!Chunks) - return; - - flushMetersFull(); - convertFreeCacheToChunkFreeCache(); - /* Now we have all chunks in this pool cleared up, all free items returned to their home */ - /* We start now checking all chunks to see if we can release any */ - /* We start from Chunks->next, so first chunk is not released */ - /* Recreate nextFreeChunk list from scratch */ - - chunk = Chunks; - while ((freechunk = chunk->next) != NULL) { - age = squid_curtime - freechunk->lastref; - freechunk->nextFreeChunk = NULL; - if (freechunk->inuse_count == 0) - if (age >= maxage) { - chunk->next = freechunk->next; - delete freechunk; - freechunk = NULL; - } - if (chunk->next == NULL) - break; - chunk = chunk->next; - } - - /* Recreate nextFreeChunk list from scratch */ - /* Populate nextFreeChunk list in order of "most filled chunk first" */ - /* in case of equal fill, put chunk in lower ram first */ - /* First (create time) chunk is always on top, no matter how full */ - - chunk = Chunks; - nextFreeChunk = chunk; - chunk->nextFreeChunk = NULL; - - while (chunk->next) { - chunk->next->nextFreeChunk = NULL; - if (chunk->next->inuse_count < chunk_capacity) { - listTail = nextFreeChunk; - while (listTail->nextFreeChunk) { - if (chunk->next->inuse_count > listTail->nextFreeChunk->inuse_count) - break; - if ((chunk->next->inuse_count == listTail->nextFreeChunk->inuse_count) && - (chunk->next->objCache < listTail->nextFreeChunk->objCache)) - break; - listTail = listTail->nextFreeChunk; - } - chunk->next->nextFreeChunk = listTail->nextFreeChunk; - listTail->nextFreeChunk = 
chunk->next; - } - chunk = chunk->next; - } - /* We started from 2nd chunk. If first chunk is full, remove it */ - if (nextFreeChunk->inuse_count == chunk_capacity) - nextFreeChunk = nextFreeChunk->nextFreeChunk; - - return; -} - /* * Returns all cached frees to their home chunks * If chunks unreferenced age is over, destroys Idle chunk @@ -707,96 +312,10 @@ memPoolIterateDone(&iter); } -bool -MemPool::idleTrigger(int shift) const -{ - return getMeter().idle.level > (chunk_capacity << shift); -} - /* Persistent Pool stats. for GlobalStats accumulation */ static MemPoolStats pp_stats; /* - * Update MemPoolStats struct for single pool - */ -int -MemPool::getStats(MemPoolStats * stats) -{ - MemChunk *chunk; - int chunks_free = 0; - int chunks_partial = 0; - - if (stats != &pp_stats) /* need skip memset for GlobalStats accumulation */ - /* XXX Fixme */ - memset(stats, 0, sizeof(MemPoolStats)); - - clean((time_t) 555555); /* don't want to get chunks released before reporting */ - - stats->pool = this; - stats->label = objectType(); - stats->meter = &getMeter(); - stats->obj_size = obj_size; - stats->chunk_capacity = chunk_capacity; - - /* gather stats for each Chunk */ - chunk = Chunks; - while (chunk) { - if (chunk->inuse_count == 0) - chunks_free++; - else if (chunk->inuse_count < chunk_capacity) - chunks_partial++; - chunk = chunk->next; - } - - stats->chunks_alloc += chunkCount; - stats->chunks_inuse += chunkCount - chunks_free; - stats->chunks_partial += chunks_partial; - stats->chunks_free += chunks_free; - - stats->items_alloc += getMeter().alloc.level; - stats->items_inuse += getMeter().inuse.level; - stats->items_idle += getMeter().idle.level; - - stats->overhead += sizeof(MemPool) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1; - - return getMeter().inuse.level; -} - -/* TODO extract common logic to MemAllocate */ -int -MemMalloc::getStats(MemPoolStats * stats) -{ - if (stats != &pp_stats) /* need skip memset for GlobalStats accumulation */ - /* 
XXX Fixme */ - memset(stats, 0, sizeof(MemPoolStats)); - - stats->pool = this; - stats->label = objectType(); - stats->meter = &getMeter(); - stats->obj_size = obj_size; - stats->chunk_capacity = 0; - - stats->chunks_alloc += 0; - stats->chunks_inuse += 0; - stats->chunks_partial += 0; - stats->chunks_free += 0; - - stats->items_alloc += getMeter().alloc.level; - stats->items_inuse += getMeter().inuse.level; - stats->items_idle += getMeter().idle.level; - - stats->overhead += sizeof(MemMalloc) + strlen(objectType()) + 1; - - return getMeter().inuse.level; -} - -int -MemMalloc::getInUseCount() -{ - return inuse; -} - -/* * Totals statistics is returned */ int @@ -814,7 +333,7 @@ /* gather all stats for Totals */ iter = memPoolIterate(); while ((pool = memPoolIterateNext(iter))) { - if (pool->getStats(&pp_stats) > 0) + if (pool->getStats(&pp_stats, 1) > 0) pools_inuse++; } memPoolIterateDone(&iter); @@ -833,7 +352,7 @@ stats->tot_items_inuse = pp_stats.items_inuse; stats->tot_items_idle = pp_stats.items_idle; - stats->tot_overhead += pp_stats.overhead + MemPools::GetInstance().poolCount * sizeof(MemPool *); + stats->tot_overhead += pp_stats.overhead + MemPools::GetInstance().poolCount * sizeof(MemAllocator *); stats->mem_idle_limit = MemPools::GetInstance().mem_idle_limit; return pools_inuse; @@ -848,19 +367,6 @@ return ((s + sizeof(void*) - 1) / sizeof(void*)) * sizeof(void*); } -MemMalloc::MemMalloc(char const *aLabel, size_t aSize) : MemImplementingAllocator(aLabel, aSize) { inuse = 0; } - -bool -MemMalloc::idleTrigger(int shift) const -{ - return false; -} - -void -MemMalloc::clean(time_t maxage) -{ -} - int memPoolInUseCount(MemAllocator * pool) { @@ -937,6 +443,7 @@ free_calls(0), obj_size(RoundedSize(aSize)) { + memPID = ++Pool_id_counter; } void === added file 'lib/MemPoolChunked.cc' --- lib/MemPoolChunked.cc 1970-01-01 00:00:00 +0000 +++ lib/MemPoolChunked.cc 2010-05-28 21:53:06 +0000 @@ -0,0 +1,519 @@ + +/* + * $Id$ + * + * DEBUG: section 63 Low Level 
Memory Pool Management + * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins + * + * SQUID Internet Object Cache http://squid.nlanr.net/Squid/ + * ---------------------------------------------------------- + * + * Squid is the result of efforts by numerous individuals from the + * Internet community. Development is led by Duane Wessels of the + * National Laboratory for Applied Network Research and funded by the + * National Science Foundation. Squid is Copyrighted (C) 1998 by + * the Regents of the University of California. Please see the + * COPYRIGHT file for full details. Squid incorporates software + * developed and/or copyrighted by other sources. Please see the + * CREDITS file for full details. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA. + * + */ + +/* + * Old way: + * xmalloc each item separately, upon free stack into idle pool array. + * each item is individually malloc()ed from system, imposing libmalloc + * overhead, and additionally we add our overhead of pointer size per item + * as we keep a list of pointer to free items. + * + * Chunking: + * xmalloc Chunk that fits at least MEM_MIN_FREE (32) items in an array, but + * limit Chunk size to MEM_CHUNK_MAX_SIZE (256K). Chunk size is rounded up to + * MEM_PAGE_SIZE (4K), trying to have chunks in multiples of VM_PAGE size. 
+ * Minimum Chunk size is MEM_CHUNK_SIZE (16K). + * A number of items fits into a single chunk, depending on item size. + * Maximum number of items per chunk is limited to MEM_MAX_FREE (65535). + * + * We populate Chunk with a linkedlist, each node at first word of item, + * and pointing at next free item. Chunk->FreeList is pointing at first + * free node. Thus we stuff free housekeeping into the Chunk itself, and + * omit pointer overhead per item. + * + * Chunks are created on demand, and new chunks are inserted into linklist + * of chunks so that Chunks with smaller pointer value are placed closer + * to the linklist head. Head is a hotspot, servicing most of requests, so + * slow sorting occurs and Chunks in highest memory tend to become idle + * and freeable. + * + * event is registered that runs every 15 secs and checks reference time + * of each idle chunk. If a chunk is not referenced for 15 secs, it is + * released. + * + * [If mem_idle_limit is exceeded with pools, every chunk that becomes + * idle is immediately considered for release, unless this is the only + * chunk with free items in it.] (not implemented) + * + * In cachemgr output, there are new columns for chunking. Special item, + * Frag, is shown to estimate approximately fragmentation of chunked + * pools. Fragmentation is calculated by taking amount of items in use, + * calculating needed amount of chunks to fit all, and then comparing to + * actual amount of chunks in use. Frag number, in percent, is showing + * how many percent of chunks are in use excessively. 100% meaning that + * twice the needed amount of chunks are in use. + * "part" item shows number of chunks partially filled. This shows how + * badly fragmentation is spread across all chunks. + * + * Andres Kroonmaa. 
+ * Copyright (c) 2003, Robert Collins <[email protected]> + */ + +#include "config.h" +#if HAVE_ASSERT_H +#include <assert.h> +#endif + +#include "MemPoolChunked.h" + +#define FLUSH_LIMIT 1000 /* Flush memPool counters to memMeters after flush limit calls */ +#define MEM_MAX_MMAP_CHUNKS 2048 + +#if HAVE_STRING_H +#include <string.h> +#endif + +/* + * XXX This is a boundary violation between lib and src.. would be good + * if it could be solved otherwise, but left for now. + */ +extern time_t squid_curtime; + +/* local prototypes */ +static int memCompChunks(MemChunk * const &, MemChunk * const &); +static int memCompObjChunks(void * const &, MemChunk * const &); + +/* Compare chunks */ +static int +memCompChunks(MemChunk * const &chunkA, MemChunk * const &chunkB) +{ + if (chunkA->objCache > chunkB->objCache) + return 1; + else if (chunkA->objCache < chunkB->objCache) + return -1; + else + return 0; +} + +/* Compare object to chunk */ +static int +memCompObjChunks(void *const &obj, MemChunk * const &chunk) +{ + /* object is lower in memory than the chunks arena */ + if (obj < chunk->objCache) + return -1; + /* object is within the pool */ + if (obj < (void *) ((char *) chunk->objCache + chunk->pool->chunk_size)) + return 0; + /* object is above the pool */ + return 1; +} + +MemChunk::MemChunk(MemPoolChunked *aPool) +{ + /* should have a pool for this too - + * note that this requres: + * allocate one chunk for the pool of chunks's first chunk + * allocate a chunk from that pool + * move the contents of one chunk into the other + * free the first chunk. 
+ */ + inuse_count = 0; + next = NULL; + pool = aPool; + + objCache = xcalloc(1, pool->chunk_size); + freeList = objCache; + void **Free = (void **)freeList; + + for (int i = 1; i < pool->chunk_capacity; i++) { + *Free = (void *) ((char *) Free + pool->obj_size); + void **nextFree = (void **)*Free; + (void) VALGRIND_MAKE_MEM_NOACCESS(Free, pool->obj_size); + Free = nextFree; + } + nextFreeChunk = pool->nextFreeChunk; + pool->nextFreeChunk = this; + + memMeterAdd(pool->getMeter().alloc, pool->chunk_capacity); + memMeterAdd(pool->getMeter().idle, pool->chunk_capacity); + pool->chunkCount++; + lastref = squid_curtime; + pool->allChunks.insert(this, memCompChunks); +} + +MemPoolChunked::MemPoolChunked(const char *aLabel, size_t aSize) : MemImplementingAllocator(aLabel, aSize) +{ + chunk_size = 0; + chunk_capacity = 0; + chunkCount = 0; + freeCache = 0; + nextFreeChunk = 0; + Chunks = 0; + next = 0; + MemImplementingAllocator *last_pool; + + assert(aLabel != NULL && aSize); + + setChunkSize(MEM_CHUNK_SIZE); + + /* Append as Last */ + for (last_pool = MemPools::GetInstance().pools; last_pool && last_pool->next;) + last_pool = last_pool->next; + if (last_pool) + last_pool->next = this; + else + MemPools::GetInstance().pools = this; +} + +MemChunk::~MemChunk() +{ + memMeterDel(pool->getMeter().alloc, pool->chunk_capacity); + memMeterDel(pool->getMeter().idle, pool->chunk_capacity); + pool->chunkCount--; + pool->allChunks.remove(this, memCompChunks); + xfree(objCache); +} + +void +MemPoolChunked::push(void *obj) +{ + void **Free; + /* XXX We should figure out a sane way of avoiding having to clear + * all buffers. For example data buffers such as used by MemBuf do + * not really need to be cleared.. There was a condition based on + * the object size here, but such condition is not safe. 
+ */ + if (doZeroOnPush) + memset(obj, 0, obj_size); + Free = (void **)obj; + *Free = freeCache; + freeCache = obj; + (void) VALGRIND_MAKE_MEM_NOACCESS(obj, obj_size); +} + +/* + * Find a chunk with a free item. + * Create new chunk on demand if no chunk with frees found. + * Insert new chunk in front of lowest ram chunk, making it preferred in future, + * and resulting slow compaction towards lowest ram area. + */ +void * +MemPoolChunked::get() +{ + void **Free; + + /* first, try cache */ + if (freeCache) { + Free = (void **)freeCache; + (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size); + freeCache = *Free; + *Free = NULL; + return Free; + } + /* then try perchunk freelist chain */ + if (nextFreeChunk == NULL) { + /* no chunk with frees, so create new one */ + createChunk(); + } + /* now we have some in perchunk freelist chain */ + MemChunk *chunk = nextFreeChunk; + + Free = (void **)chunk->freeList; + chunk->freeList = *Free; + *Free = NULL; + chunk->inuse_count++; + chunk->lastref = squid_curtime; + + if (chunk->freeList == NULL) { + /* last free in this chunk, so remove us from perchunk freelist chain */ + nextFreeChunk = chunk->nextFreeChunk; + } + (void) VALGRIND_MAKE_MEM_DEFINED(Free, obj_size); + return Free; +} + +/* just create a new chunk and place it into a good spot in the chunk chain */ +void +MemPoolChunked::createChunk() +{ + MemChunk *chunk, *newChunk; + + newChunk = new MemChunk(this); + + chunk = Chunks; + if (chunk == NULL) { /* first chunk in pool */ + Chunks = newChunk; + return; + } + if (newChunk->objCache < chunk->objCache) { + /* we are lowest ram chunk, insert as first chunk */ + newChunk->next = chunk; + Chunks = newChunk; + return; + } + while (chunk->next) { + if (newChunk->objCache < chunk->next->objCache) { + /* new chunk is in lower ram, insert here */ + newChunk->next = chunk->next; + chunk->next = newChunk; + return; + } + chunk = chunk->next; + } + /* we are the worst chunk in chain, add as last */ + chunk->next = newChunk; +} + 
+void +MemPoolChunked::setChunkSize(size_t chunksize) +{ + int cap; + size_t csize = chunksize; + + if (Chunks) /* unsafe to tamper */ + return; + + csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */ + cap = csize / obj_size; + + if (cap < MEM_MIN_FREE) + cap = MEM_MIN_FREE; + if (cap * obj_size > MEM_CHUNK_MAX_SIZE) + cap = MEM_CHUNK_MAX_SIZE / obj_size; + if (cap > MEM_MAX_FREE) + cap = MEM_MAX_FREE; + if (cap < 1) + cap = 1; + + csize = cap * obj_size; + csize = ((csize + MEM_PAGE_SIZE - 1) / MEM_PAGE_SIZE) * MEM_PAGE_SIZE; /* round up to page size */ + cap = csize / obj_size; + + chunk_capacity = cap; + chunk_size = csize; +} + +/* + * warning: we do not clean this entry from Pools assuming destruction + * is used at the end of the program only + */ +MemPoolChunked::~MemPoolChunked() +{ + MemChunk *chunk, *fchunk; + MemImplementingAllocator *find_pool, *prev_pool; + + flushMetersFull(); + clean(0); + assert(getMeter().inuse.level == 0 && "While trying to destroy pool"); + + chunk = Chunks; + while ( (fchunk = chunk) != NULL) { + chunk = chunk->next; + delete fchunk; + } + /* TODO we should be doing something about the original Chunks pointer here. 
*/ + + assert(MemPools::GetInstance().pools != NULL && "Called MemPoolChunked::~MemPoolChunked, but no pool exists!"); + + /* Pool clean, remove it from List and free */ + for (find_pool = MemPools::GetInstance().pools, prev_pool = NULL; (find_pool && this != find_pool); find_pool = find_pool->next) + prev_pool = find_pool; + assert(find_pool != NULL && "pool to destroy not found"); + + if (prev_pool) + prev_pool->next = next; + else + MemPools::GetInstance().pools = next; + --MemPools::GetInstance().poolCount; +} + +int +MemPoolChunked::getInUseCount() +{ + return getMeter().inuse.level; +} + +void * +MemPoolChunked::allocate() +{ + void *p = get(); + assert(meter.idle.level > 0); + memMeterDec(meter.idle); + memMeterInc(meter.inuse); + return p; +} + +void +MemPoolChunked::deallocate(void *obj) +{ + push(obj); + assert(meter.inuse.level > 0); + memMeterDec(meter.inuse); + memMeterInc(meter.idle); +} + +void +MemPoolChunked::convertFreeCacheToChunkFreeCache() +{ + void *Free; + /* + * OK, so we have to go through all the global freeCache and find the Chunk + * any given Free belongs to, and stuff it into that Chunk's freelist + */ + + while ((Free = freeCache) != NULL) { + MemChunk *chunk = NULL; + chunk = const_cast<MemChunk *>(*allChunks.find(Free, memCompObjChunks)); + assert(splayLastResult == 0); + assert(chunk->inuse_count > 0); + chunk->inuse_count--; + (void) VALGRIND_MAKE_MEM_DEFINED(Free, sizeof(void *)); + freeCache = *(void **)Free; /* remove from global cache */ + *(void **)Free = chunk->freeList; /* stuff into chunks freelist */ + (void) VALGRIND_MAKE_MEM_NOACCESS(Free, sizeof(void *)); + chunk->freeList = Free; + chunk->lastref = squid_curtime; + } + +} + +/* removes empty Chunks from pool */ +void +MemPoolChunked::clean(time_t maxage) +{ + MemChunk *chunk, *freechunk, *listTail; + time_t age; + + if (!this) + return; + if (!Chunks) + return; + + flushMetersFull(); + convertFreeCacheToChunkFreeCache(); + /* Now we have all chunks in this pool 
cleared up, all free items returned to their home */ + /* We start now checking all chunks to see if we can release any */ + /* We start from Chunks->next, so first chunk is not released */ + /* Recreate nextFreeChunk list from scratch */ + + chunk = Chunks; + while ((freechunk = chunk->next) != NULL) { + age = squid_curtime - freechunk->lastref; + freechunk->nextFreeChunk = NULL; + if (freechunk->inuse_count == 0) + if (age >= maxage) { + chunk->next = freechunk->next; + delete freechunk; + freechunk = NULL; + } + if (chunk->next == NULL) + break; + chunk = chunk->next; + } + + /* Recreate nextFreeChunk list from scratch */ + /* Populate nextFreeChunk list in order of "most filled chunk first" */ + /* in case of equal fill, put chunk in lower ram first */ + /* First (create time) chunk is always on top, no matter how full */ + + chunk = Chunks; + nextFreeChunk = chunk; + chunk->nextFreeChunk = NULL; + + while (chunk->next) { + chunk->next->nextFreeChunk = NULL; + if (chunk->next->inuse_count < chunk_capacity) { + listTail = nextFreeChunk; + while (listTail->nextFreeChunk) { + if (chunk->next->inuse_count > listTail->nextFreeChunk->inuse_count) + break; + if ((chunk->next->inuse_count == listTail->nextFreeChunk->inuse_count) && + (chunk->next->objCache < listTail->nextFreeChunk->objCache)) + break; + listTail = listTail->nextFreeChunk; + } + chunk->next->nextFreeChunk = listTail->nextFreeChunk; + listTail->nextFreeChunk = chunk->next; + } + chunk = chunk->next; + } + /* We started from 2nd chunk. 
If first chunk is full, remove it */ + if (nextFreeChunk->inuse_count == chunk_capacity) + nextFreeChunk = nextFreeChunk->nextFreeChunk; + + return; +} + +bool +MemPoolChunked::idleTrigger(int shift) const +{ + return getMeter().idle.level > (chunk_capacity << shift); +} + +/* + * Update MemPoolStats struct for single pool + */ +int +MemPoolChunked::getStats(MemPoolStats * stats, int accumulate) +{ + MemChunk *chunk; + int chunks_free = 0; + int chunks_partial = 0; + + if (!accumulate) /* need skip memset for GlobalStats accumulation */ + memset(stats, 0, sizeof(MemPoolStats)); + + clean((time_t) 555555); /* don't want to get chunks released before reporting */ + + stats->pool = this; + stats->label = objectType(); + stats->meter = &getMeter(); + stats->obj_size = obj_size; + stats->chunk_capacity = chunk_capacity; + + /* gather stats for each Chunk */ + chunk = Chunks; + while (chunk) { + if (chunk->inuse_count == 0) + chunks_free++; + else if (chunk->inuse_count < chunk_capacity) + chunks_partial++; + chunk = chunk->next; + } + + stats->chunks_alloc += chunkCount; + stats->chunks_inuse += chunkCount - chunks_free; + stats->chunks_partial += chunks_partial; + stats->chunks_free += chunks_free; + + stats->items_alloc += getMeter().alloc.level; + stats->items_inuse += getMeter().inuse.level; + stats->items_idle += getMeter().idle.level; + + stats->overhead += sizeof(MemPoolChunked) + chunkCount * sizeof(MemChunk) + strlen(objectType()) + 1; + + return getMeter().inuse.level; +} === added file 'lib/MemPoolMalloc.cc' --- lib/MemPoolMalloc.cc 1970-01-01 00:00:00 +0000 +++ lib/MemPoolMalloc.cc 2010-05-28 21:53:06 +0000 @@ -0,0 +1,117 @@ + +/* + * $Id$ + * + * DEBUG: section 63 Low Level Memory Pool Management + * AUTHOR: Alex Rousskov, Andres Kroonmaa, Robert Collins + * + * SQUID Internet Object Cache http://squid.nlanr.net/Squid/ + * ---------------------------------------------------------- + * + * Squid is the result of efforts by numerous individuals from the + * 
Internet community. Development is led by Duane Wessels of the + * National Laboratory for Applied Network Research and funded by the + * National Science Foundation. Squid is Copyrighted (C) 1998 by + * the Regents of the University of California. Please see the + * COPYRIGHT file for full details. Squid incorporates software + * developed and/or copyrighted by other sources. Please see the + * CREDITS file for full details. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA. + * + */ + + +#include "config.h" +#if HAVE_ASSERT_H +#include <assert.h> +#endif + +#include "MemPoolMalloc.h" + +#if HAVE_STRING_H +#include <string.h> +#endif + +/* + * XXX This is a boundary violation between lib and src.. would be good + * if it could be solved otherwise, but left for now. 
+ */ +extern time_t squid_curtime; + +void * +MemPoolMalloc::allocate() +{ + memMeterInc(meter.alloc); + memMeterInc(meter.inuse); + return xcalloc(1, obj_size); +} + +void +MemPoolMalloc::deallocate(void *obj) +{ + memMeterDec(meter.inuse); + memMeterDec(meter.alloc); + xfree(obj); +} + +/* TODO extract common logic to MemAllocate */ +int +MemPoolMalloc::getStats(MemPoolStats * stats, int accumulate) +{ + if (!accumulate) /* need skip memset for GlobalStats accumulation */ + memset(stats, 0, sizeof(MemPoolStats)); + + stats->pool = this; + stats->label = objectType(); + stats->meter = &getMeter(); + stats->obj_size = obj_size; + stats->chunk_capacity = 0; + + stats->chunks_alloc += 0; + stats->chunks_inuse += 0; + stats->chunks_partial += 0; + stats->chunks_free += 0; + + stats->items_alloc += meter.alloc.level; + stats->items_inuse += meter.inuse.level; + stats->items_idle += meter.idle.level; + + stats->overhead += sizeof(MemPoolMalloc) + strlen(objectType()) + 1; + + return meter.inuse.level; +} + +int +MemPoolMalloc::getInUseCount() +{ + return meter.inuse.level; +} + +MemPoolMalloc::MemPoolMalloc(char const *aLabel, size_t aSize) : MemImplementingAllocator(aLabel, aSize) +{ +} + +bool +MemPoolMalloc::idleTrigger(int shift) const +{ + return false; +} + +void +MemPoolMalloc::clean(time_t maxage) +{ +} + # Begin bundle IyBCYXphYXIgcmV2aXNpb24gYnVuZGxlIHY0CiMKQlpoOTFBWSZTWXJQMOIAFSj/gHR0RAB///// f+///r////tgKg6hVvh89mut7rvbl0w0AFADPc9z3OLjJvdqd5kFJaATrNe16rJqilXs111XB13O 56zti8vdCu7061I2Pe90LqF3NbDRc201rSw1Ixs2ObHKslvHaLrOLcxk1VbbPcJQgIJpkTaCYI0a qfpop7amKnqfqm8qeapjRA2oeptI2JGagSgICaQAjSJqn6p4Uw001A8pk9IAAAAGgAcDTTTQaGho ZGgGQBoaA00ZAAAwmIDQSaSIpqeiRpkGNVP1TR+qeFP1T09CIeoepoeozRqGGiBkwQCJKaBIaZGg CY1AEZT2kyptqaP1EZ6p5Uep5TxQ9QZPUxNBIiEAQJoTGoaE9J6aaQ1GapmU9NNTan6hGmTahoA0 0UoahUEQA2KAiGT3G3Xv/SAwWTwjHwG3fbzS5vmwsKiqSFYVi+Z5+LP89/mHfq/m6adj0YvM3CyI k1iiYreoooQscYnVLR/u677j5jA7qXjyPtz4kDtRh4+62jlHMMzPKtPl2aU3sLNILoQ0UGxZdWsR 
BMWzTmkoo3rs1xQ3vUOjp0zSqyobtJmrTEaMYiREghulVujkwK7iC7RR4oB8v6vWy/L8aVbX7+TL 2/F/5VIVIpgS4uZUZmVYisaQUuHMDuwuUvj3NlSk4dutUyG7eLMzSFYrjrmMqaBUT0FwwqlxWxgF 9LYnDDPf8vzb5ylj+NTF/z/nWNvWnsMVEPwjMC4AY0KaLooQeC426psV2Z1SU9cXycHJzs8361mz 2LAKeV94z0nthQ5Z+sGku7zVyoGAqvvYYjFQaZ4xdsoYOaQtnMD5zhJpWslStBd+EGucRGsnu8Io 5ZTgWkbN3GGMFsMt2WQZ3zXHOjwUDi1pVzwbpdqdwetYZXNplIMmwXEg/J1lGeI4QaiG2HaICgWE DYRRF5AgAMBC6HwaPDcFHUUVFFF920MBSJKiIgiIhoaMGMbGDD0dRoo9lhNnqtNxb9vFmBzJfeeB GWQO8ipOxHnKWehEIqwYw7Zg+7j0yUjz4eya4QP5QN77InMA5MFE2kA1kXqLQKfmNJzom+xoMbQp 6+j/n5qLwWeVb/bw9vjBddDcxl9LikOhK7vx/j3B3otazCctK98QkiL9cMMsOOQkAN1MiXDZfGPk jBw5iBssvo85anqNTEkr1rgIzyPUQQkUkNtzCTfdHxLBUBn6Cbev0a9w3Lzb85Xye/JTRX1UnW/r 7F3bMUIs05dr3rpL/4mUawIzeWOk/318M/V9kU6fXrN+V/iYzjtYqsK+cdIPiKVUhU5WGHca5mtz jnOrxk7bLpJWs1rUoUjBnVmnd/zzmRlNotWljvZyGgq/3P5bZ99prDviMsbIpKvJrSFDki6Yamkc zNl2JvM0vJoXeGriUy/FLrRvdwHqqjUKM+Ssv4F+41n7SbaiDXDM4YKCwp6l5d8PEM97dPGqX+dB kqc7dh0YucTb5yU1QgGZezh8TZWCT/MS50CwR1MeRyN05i/cdhNjrgb6uP4rQQ57i/zOweUh3lw8 wmZqmrwBaxaaTrthDSzWdtdtxqQJ2jlIGGB2m8zOiPnPQaF4cyr5HOYoKmJxX9B5AiRCphNkiJ0E E+JEGIEiK8om35GInRJJCo5NJ+w2AKQoizMzNo4x78zlN7bVbTFLIacQ48F2m2HQUzyrj/yJpwm0 V9/CvzWLehQ2M4X2Xb21KreNraKe97af/skxkuZr7hpEUPg2N7WWhOtNvDNPpN/ECjMF8EM970LV OyLHDAoWPrFSBUOlLxXPpTwacir6XvCd5Y+bTfeXnFpXnMKP3Z6y7ybxwubz3N0nrYGcHdndrYr1 2ks6Dg4Zbd2NDdFXxoXMhuz2+1mUXTKTZbPexRGbn56815nsicRjeNtrlDY0u80sQsjwaPNT2mgo p4K8DpJxXGor8dnPuL2V9cC0Vu1df7bPv+I+odEpljrTh0SfdsdOfPFPfMPTlnYggsHNRyIl77r+ UoRRgXtU1PhfXG/i535T0M+zcipyTJTY40oORRxs+3UI4yTu/E1gWVFGTbbstBSvHDwwx5sNowrF l0/NPFVnnGdjwScjew+e+XK0kr1fmzQMu62raYihDqqKrEt3UYi+B0NkGLUq97zzdvAsGbDPK9Oo yj0VnGUU6y5Oh7WIN6adFFFrx10KGOXU4vzxORsezuinLfL8X69/lpgWgpo5cuU9fGt0f4a+RTjt vhhb8u6yedU5GFP7lsKsscb+PWNGt3fb5zSOeod6v5ROY4uJrQ7ZrTZSgUahNa13V223Z22heRc9 hTx9vLdgal+oYtd5LoMm4xotY0ppTnlI9m7sxjp3WkKK7F4iVVy4p+qKTFm9y7PZqea75bovdzqf SZ50OJujVSZax5RNJStqi5eJ4aMPMSlqtOPL43D3L3X6Mld97+7k1630EKenL8Y5Z6VTAtjmcptM 
NRld7eaFWtvWrmG6okiiqUHxU49T7DgbbBGtWDrLrt+A0lttUL+QdZ/JsGDsMGx7dfEBuqmkJ9tF ibm4o5ULjaK7Ieper0e9fPVangRG3g5klvS2IRGHLEVTmQIop6ldl8uFH3zSJnjagwzk4t1Wvy3G XoZz1Qxk51OgfI8b/QuBVeOS21tSnSk4FITpi0FwXr0erebW8zop2kGjr0wJsQVXjugpyRMzkoiy 5393k2z0qUmhClyl428hg8pjMXHeTmLM6ABnonrDKBDl/dgs53d/0cNeEoKZ96C1pybkid6KuZlW lEVrR9fhH7JXf48ejmVMMdB4DlvCEIQhJGEBY/OCGJRX2JQAcRrqjrw5VRLSi+hwsmHN60iAJ2kE jVS1YZb8l8GAGV31nwHj8uo8xkIZng9CAMTqWGkMfsZ2jgzTK1OAScBiDZO/rmBrCi1VkPIpYzRC qyJYRG6RQe4rP6e0Fe8ycnWm62jz2umdC46j2VzWco5ea+rJWqp9S/U4VUVRkpit5iNjafPU/xGt c4ziwnkl6B9/r9ikipGyv2V1OJ3S6f69/dBVIP4cf6sDCOdzDXczK+OMdLJ2OhEHRBCoDrCVMY6q l2I3BhUeM78cJWFlcw+v+EHpEpGmIgyoGIQhjLW0QRjbQRFiIWVZLK0bbKII0QGRqpU9Nk0xJgUA 0hBYKxITWvTW4PdYcqES0I8nx1Ms61BiB/OTQfUEbdq+k/ocx5g4kfHUdtZuKyf7/Lzn0Q2THriv x9LzIzQc9CHcz1IV0nE4x2scgoyyjYt1bBQRIj6udjXChxxon0gxtwZfVxdD3ue/7A9gVpwKyJIy HdnLEkDwYETvKktDiyheMiBYWDwxVDAGIwwVlZKgLjawqoz7WN+YcdmxCvHZPoPm+1PeP7TyzyxL jUPCaCDCL27DRhqGtLc2uEJEEMhBz252Nk583rIwFKTrFKSiFNMHrTqsIsdMJxkDzWOUx4+LT+or TwV9/GIhQQWj26XTWw9SSZZBBcUKmCbdzCpJITMDAgakAsZHFZxxWepVLyI+M+YBno+gD7LRrCsM mFmfccPz5oM0lVJgJsAwPvLhG86+rYlEckv7DYDAxOvu7/Xy8prA3wU/kZDEA+ELqYH9CHxDEnQu umTkY2MBDwGgRQwIwdLFxCV2p7wwl5mMKCfkSxkokggMj5i2iEPmKzJQlZGRqtDGBELKwQOUmSMB 1FZYUkR5veVBWTL/IXFKWeZCLIHVTophQjeOccbIMDnY/UGSAg4ICC2KzB6sl0+2tRyr5n3vck01 LlUtjiLwfpaJPQoWNvx8EoWTl6azegvUUrBBnLZCvSFgwK4OtDe4L4CMeHtkxGY9+6dCkN8yTGQV lSNbEG4yD/0hpVt1G101140qcGXdiL4W8KLwIGUEQ6MXDoKbGEu1x0SEBNhUSts3g1cpZEuidTcu bIjDciaqL1qXM+SdkW5wMhVURAsTvdEycjoVwCVU2FOXfmaMnA9jJsMaNvROppyyXfg65i0EIlTY UGQQjqW4crmS5wO1YbtiETYk2BhTmIJTGSNYJMx2G4tLqbS2hONmVbHxiKwdjI5UoIwzWMy1ktok FeMCuKOgOaNoJoURheYrmiKJgnAMe4uOLkm9e1qHA699gMK2eo8YJ2IOFxcUPBLkhHZOyvHOHuDf ZxSGRtOhM21wxN5wMjFScL6XFQ3mhuSQeNbNKyEQJJ1JYyer2qTbmcDjvNsFK79zDColDiU9T1IU oKMOELSJAmV0lw4xFIYTQHp9AbUTVZo0GPOXL42ijadoHPFOVpkdBDlfra6YRFO3UVJhYTIyV9jz 5xoWTVw9D1nFF96lFyeShCRjR3NjkIVr7FxRjoaGPB6J6oCex3DFfQa66huPqSnZdOSdxilOsnod 
hfT9DJseChSlsaO52MDBHJynhEHJKjmE2FuWNiw5J8E+ZGc+J60AjTc1yyt1efArvpYORDEJYiHD cUALnj4Ijl6wllc6HA8BvWlOtrHihGPYkU6PY5F09TwXEnsghoOQp2OZucj6vgn64Y3lrHDplpf0 ngc0SLFiRTJGJhbFA9jMD8Q4iOrRI7AYSszZ4og8pOYIm1Oxi8kXG3bQHGUEioM6CDwxnV6NUP5m QPgQ5zMmJUKEnt9e0RRYqMqeIJsSd4w81JTz5A8sg6CvQMyCRcnZi7Tz/5ePvYs0sPXoxvadkQjj nKYz61qtCA/NlpatP3pML96v0KuWha7K9yJaPvtfVUpr7qWPNDFb5KRPHmPSvF8WbntgCEtqn1SW VN4K8F2V8DocBd0WatFNRKZyXMvhrheibyPXq5FYc4ylB5srpCsqQiVyzZ47Q97M+lvZoK4sWz09 ph/iZm6nYp7dG97gyIX6MaLle8iUgfxZE8iQYwn1TAZSGMkTLRhiSxxN40IRgm7vzhIFSZhCiPOU tQ8JvrFC4R4HwO7wCDcbyEVMCQnAvS+xjoRysNCBxIKcSAX28/s/mZOD8D7j7X9EMB/oMfzPmMze Qp6j+ZefiWLwsg6yT5OobCEFNq4HpbTbE4RPgelwcv4Fn7ngNKmAfmcTR2puhCBGSSIiFO96yKEw D76GMSOCINZgo4NailFR+mfBPkgeZhDn8H1Fh6jtPH67hbF8lJUdKwa5XfCBV3A8jCjY3Gwj/Qek bw/tYhJIn0yiPkIUfOdWT0n3MAs9iCZF55l3FwsgIMVmjYAxLjy7OyYZ9XKKWRIqUCbUUcrcgHt6 z/g8aHQYj6B2FD+1Yag4ig+UOJKRRB0V9Y/YguHoM7D8DSBfzTOSPEk8xUzV/kbp/95xrmDyBTSw JPr7zz8z0DAL3dUNhYeToLlC5s68kPgmT74J6QkTylG4hyRDvuHy3NIRgEQhA0zzinoiB2epAwTG A23+gy8GB/8euK29xASMMAdCYsWmk/vsFyHFxxxwwDBbkwhH6KzLgah6epHsR7i3Ch8hDqU4nqD0 jgoUXhSGo8xtdroL+xD5UPSek/MSM/KrGQz4bUEGxBwUoPcUMysCqwfraSKj++F0AIB3esxMSGR5 DWawPrOUdhzGAXGYuto+GhcNXGhBG9JWL0YEkh9K4ChHNsZE9Z5AMIijjbWaTYMOMopAtGCA8mIQ QfUtBYf8D+5zlyhuKbTI8PIcZc+AItBAfR/MabkMzoDMsa1fDSP5z6JQflvodZqc/aJu6tjHoeg7 +a+HEOMZFCvR6QyFqgkMWhDpLMlVRUD9oeJXn6Be9bD9/Ot63rtvPRAuSasOaj9laNG1lvM169/1 IodT6YsWQkKXtNCB9xnw3kJFkZFlwk9Lf+JZfwPwP5FggcRcUDDyBIwe0qIkj7yQ4gHcm9LUTKlR /A/LE6+ghCENPfcZp9pBvC9PGfeQBjAJjAM8aHNFEFBzXewynuNqSkeN76T4IRcEQTYn0tD7D1Ca l7EvNb8nSXzzGjxsdwYbXj5CqtSX2hKChJ0lqbgPP9hFcjT7QcBgQveDJGmQQlY/UYG4JRcZalNI qM7Y3PckZtjLIHlQN66YzDjmBsVkrywja0hsNTpu8x/hcWLmPvWYJTvL+w1F8sKTTJK4oxNkDoOU kSoNSWxdpNGlUxaXW9iPuO1g/iXU7rRfMIWze1YYNUX0mB3SeQ7OTL02hrKgUnnyXKnfvnYn7+AS my+FgMYP6gwhVhBcSkhX9FNANaCaMhMWgX1YRLz+57hj9YanHA4FxSHmOBnM+Y85/UyOP1hPGajA 5whu3o/V57FzipOhcwoN0pF/exQhAhEkL/ML21z/aZFxCBun5/1LCYAknQnJAEwD2S0lWUnr9wUS 
/ChomHoJSPKeUuPzQEnAxCyXdob06FIPmGLVs9oeJEnvSTdgFrIfQC6EFMjkHAp5NZ2tKhE7x4nj sUA6kyxNzofMdTFCowWL8UOYopCIgRm4YcEe+na+cU3WDJMEfyc0LOQvurP4wWddrn7/RrfLDjVw DjbQOr0tzwcDgcfOU1cHIgjRKesEIUHolBqHkOz6x8XigCU9hhCaDylLsV6qwIh4iA8ENRExypMi F/nSUef4FZMjDCed7NlixQzTJK9UITZGEBHE9JDwk3vwnUCxhBVCJyDpiltg/xXsIyGKbKg5lGMS xsMFNpczvbgcZk2povKVURVRnLPQfDr6jRU6DOwmFJlNRMApCQ6Ly/tGfMQewu2niM7QgvIMRClE JDECoAmCI4DecBi9MAdzhMJWPLC0+0UzAl+Bkao0nKWmBFVNP5/oIbOiukwLroHPeCbsqpcyDYWm 1RkHYMKJBS+pq1VYoQWC16ysfFF+H1/aDigQMfS5oCiJT9stJta0kSXFtV7NnBwHQMOSYazMSqMt P57WprweY6mhBxIyOWBR4QU/LT7GadTMezQau9YW6NgtGOzPF2LSTXNUSCGCCIs8uUTdzPCgzjth JwtHvgXeqGMUhqOoS71suqTCksuHFXh/k2xLFw2mhk3LNCTU7m0iUXdrZedcqfJBwzFzjhYI+3Nt PcZJlHDt7hOcJqezdLXE9qzMtKaEPaN2YURpa0ayMMpqEYqMjdE47TFyzU7n4aJCiqKJWjImI7TH hsuwm45jnKCmOL+FD0pTpKDpOgN44k7yTYdiEdeR1bGbJY1xlscwTMwQ1L5+olegwQxDn3DKJoIU q0xXMIhoeELhYlcCeKwYNDayuXnReE/HGUSuQPAC95DUtAxHqPiFj7KG/QBmK6z+8yOxC1IL5X4U HRQ37Hym25oqaKjXSR/k2VY/J4h4VJq2TJEw/rimhFjVFzGYTagXtFRiULjQliFZiGH1FCbhEeqK j1EE3mQ00nmZmxNLHmIU0dZzzVMRTp3mw6SnLpxNC163C6yAM1IjAWZhOJMLwse59U5S0kU+oAOZ YMAJIKhMzoeEDXeF4m8PeULwZq0mjHfaKzMukj1Rhh86VP2C1MvQZCAOgDVGsGiUiUiz5aYlkl6Q OGZzxFL2s67MBjXlgv6RU5qkXKpCgaSzDW6QCukJLr2JZIwStWKrowS0vsB/LeXS22WE2SoiMjNk OCKDJklLRxToxHR2tgaIKeEL+8y0B6zG/ibHHsfXnuOw5IcBCmmhAjQcyxQxXSaDkRgBFZxZgsfs KS8O8gmJtN91QwVNIpoxLBcCByAHvLrzjD2sBDlaUGz+u97A91DMq7LZMyWTcYSoHXyn1nQegJHk OBUcCZ5iJwGKCJc8hUHp8ITNR2EywxMxoIyRBu5ltMT6TmQULg5T1Gz8EmFV1JQj79jRsheJFe0z c6bhSxEsaiiiAdywX3kL8hcUMcY+H6SrIdKfZaXJcy9pX5QkszNHR0ASUF9owqbUBZoXdHcSqDCG KvCoiw0BVqheeARg9sEG8bGbmvvJigzQPIwNjxoZLYQQJo9K9dyVUG90DMvuw9x5TzrvOEBDGxKG 7jUsEGx4g8AciJ9LzGAYmMEInRIhO0BCWSIAoSMkOZM9zJs3U/8PrvPxKEi0VCij+6AXjuZ0d1Ep 1biWL2fPsOFEQYpDznl4nKkdDKHvQhnQmj2PrAVGEYKiAxYorICCiKqxgIqgKAqMEQIoiKCAqIiR WESMIJIjESRFEGEIEgSQSEEghn5QSz2Y/QiYGZ8Q6iCPUvXp60RwKAtrwwvBjH4+kCXhUR5gBN4j GxMD2CQxVfRTmqNeYDykE3BBnqG80zB7w9ge1/Fiw8VSJCW2huq/SFBQgwlYnSfal5zG6FxPVEug 
IG0Sy3KSIFc5SYhIund8rEjINRdVLBKSMWBHZsBfIqL8dDoBMBwKIVcttmswTRxj4uR8b4kMbbGy 6cxRhV4sJCUnxQ+nHCJwm2HCEXpSJ46ohQb63IVtDAH2GEEv9xkUb/AanUQYQGEYRihANJPZs29H TBMhdHRBIQkWMEiyRkSRiHScRZUj5UiyECCdm67geHM68Ny4uiwgtDsJlIOItEjaNYheVseVHAyh LmGvQZ5GzHViTdC4U/iEjJBkATR+xT9gQBPmgBb7OJE7vCHDcbyo/rNZUjwwCQBhBUPoXSFAGyLC GCAQFDI2WdDY8QOdrpVHu2EGJigCT7A51UQXMSReYjYZMwYSmToaKIORKWBdFFZZSha2lElttsjT CwzvLhpEQsIbUFGE8Elz3PyncXtiZ0GMC5VyCANiB1noP3Gf4E0mxHMrfQ3Bdf4OXvEQNZGuRAaF BH93buSZY425IXGoeiygye4FCyPikOvjBDu8iPh5hSZAdZPQp57zxEv2kXnvB7QVwcxJydtYWauK eLChDN/5fpOVEdmmhkHUuJIQDTHEQiAeloKFDXBYkgQsQbBzHeYihogDCptR5OooKt42FixAZCYF GClFDFV+4iwnSSb58nx/MmGvMb69M9zLlT42i8zqzsBQzIkJUEIcJ/QMhSgD9CUkiSAvX28t/DX2 Em8GRRtTiKXfWtMhDbE0oRC/tnujJ5gX/mxLdubbbbbxJO9arzHlFAGwtefUansPSZZQoaGrt1jH /HKVV7xwibijLCV2yYCVDUFkn6kKuJkd4wvsLWdDxkVpDKxoMCiEOBQocUwzsVOzAptm3CxsXBpb cFLazYqpoSuH9Ow3oms2aEKTVsDRBCevkKQ43Lwk6QNk3NyImyB4KVx+S0BQMa6mWoUIWQ9MTQHY aD4pYSsNQVZMwZYoi4CQ9yAIZQR1eHHieFQ+2cHvAYhEdMBuYHFtt41LUquxlcjMBNgxgJ5nyE/J sptX4jQkAV9bhMYAG4eCTyFkbUAQatL6xozOAIDFX9WMUA0e8mghZLcKR0SWAWINDMDjANJGSa1D Ng00RgJFCGzmTAgcTCdvu+r1Hq4pDyDMYsCuspjWKT0tb8fg/vnu9n3nyh/k24zpZIds4fj0J1xb YX3EGSRG4gZqrPjsdh0G7egPMh3HIQhnev1ILw/qXuDwO8CgcpB72CBz7hoQHxXXayq1UmKyElZI THk1G5w09GHIYkFxQKfaXUNwVDVXURDwodPROz6H5xALFVKMKkkRGIKJJESESCxGDIxgcBueI6mA J1MMMzfYWMjaxjRH2k98SiBdG0Yrkbp21/Yc6ALuI+5fahFrlRLQjfl01IwQaA0MI3ueDP3Gqo0N lmqmaOiMq/qX+Q6UNTYQEmpwOIp5yzY2ltCAhMY0yRGBpvJMflKQYqPcUg5S/x3kUfRP6zJmcLoc l7hPugDSTfuEPIvUIO3GocCxNyZA9ZgKDFiQNp7XoiPep2BY4cic56jwUt3dCYfPZDgNocqJPX4b GvTjmhYDF1ol/lKmGF6R5MSCmJZLyUg1TBr3tdpKNzk7GVCUVfvRRFJRcEq3pJCgwhQm3BHlN2sU 2n5bfWtnZWh+fY9PuolyOQMkk14mpJQGSPhyg831FUvYP457Z5pPagjJTSbKMEbapRIjRCoL56FI yPkPS5t0iCCihBEFEHcMxPCADM8SwLzwwolsDJGJEFPA5GJx0DD2DoeE2WbKIQLQUFKMrMccxkLn vWWB4sCDcXZO8oGZqxibSGkmA2yDEjEiAQJATfhHsYfNe72VKbZncwUoPc6qetOeRYQifuq75DQn YdRZewi3LpWjQhgbQe7efnDvRbyggOF57zWqkaMTggGP8EwICS6NLXJMzEmgB/xWtAOxrx3O0TQW 
Mz3m8adIYFRTrS0WgtLpRFZIRDRMhokyTidCk0TGoFrTgYUPf8Y011+n/8ryg1aVZHJIS7fOgo30 sWPml2zW1LoO70dhkGXC5z+CKNQoUtwW5Hqt7Srt+ir4Giw7JlQouVC7TguMe2HFqBcZlV1BqjU3 s6V4VmI4wqml1aYZ40bGhhSCIZCq0baEzZgirUcDt0U6upt1knDlRGLZhoGWB/dwbLhiYxjRbbzf fptCViqlA2QRDgzoca1YNGoyxMCK/kCBK4vCstshELLEnHxxVAGxqmqPVWfiYASg5INhVFGIvgxk 3lRjQhOuHkJ2IhZPHVkKNSYyZO35RsDonJvqUPDFcJdPvfIargOFM9i0JNZWvXOASjDMEKhTJNcb 3DhyzZinCuxIWgc1JShdwMWhuhG5IUV68AvyNAWSKB6jE5JAXEOjheRkBxywpyChveVgIEP0RjJC hqCFG/iYZkRVY9Gdw9SlDukvownRLILEqUzZ6SBVQYrZhjniCpSajWST5ytp22w5lBXaXWB1oow0 y73TK5GCAeaNLTIaFKDWrUaEuFHYMLy6rcqlx2svqPHQ89xCZKUNj0lvLUMHDl7td2sNHZJJuTre gnE1u0ra0C+yVvlt4MGxiwVpS+I3wYJvhMl0UBgLktYLrokFwWJ7FkQgIaMDKbpGo5GhApdBpLEa uobwm93ERdjBmNSlexBnXI2RyabuKISiSAV1jQqFGyxQS2GCRCBHaxJbFWhJI4tCD+PYoRQddgrg zUA6Qxh3nIYKGdpxe3CezoR8CCl+AoLDEMt2hnMOBIpFBEiMWeAQ1AoRJF9Oddxw3z24IV7SQSQ1 gQ0sc9SZJopswBsEoCtISMqb6HEvuEr1hncCXw5wIVlWDNOCxvFytF+9CDkHvNb+kB3uwPyOn2jp As3mZ0gE4fdivga0QjKY8xzHhoMXQMvEf2lUfAbqeiYPKH0kbuCaEMdAMJymvDM6UAHigqBWB2BI aMIRwv3sQt2aV1jCNAZsgIQxSBbeMXjos2ObiawhTCSqCteko7+9ek8CbjKebar00xv4uhsJLwRK a3CdUrwoUVD/uvTbaHTk98k5kh8kh8BJk+h2ZYglERaOHw39DmyDGRPIcHIWEzvwApeD8QpS5srV Scxf10kruIEocloRK9fpChsE6jMD6wurriEO85MkhDu853hfL4fCTRVmkiqLETVGJdF67MDq+Aw5 54yicc4PMmZlwph3YUTxTV7UEJzRVFkPtEmDi0T1BIMKE+eahb3W6wBL9h4Tc7BSKweUipBOw5y5 /XaSf/i7kinChIOSgYcQ
