diff -urN python.orig/dist/src/Objects/obmalloc.c python/dist/src/Objects/obmalloc.c
--- python.orig/dist/src/Objects/obmalloc.c	2004-06-08 11:00:54.000000000 -0400
+++ python/dist/src/Objects/obmalloc.c	2005-01-23 17:07:34.000000000 -0500
@@ -246,6 +246,18 @@
 
 typedef struct pool_header *poolp;
 
+/* Record keeping for arenas. */
+struct arena_object {
+	uptr address; /* The address of the 256 KB arena block, as returned by malloc. */
+	block* base_address; /* Pool-aligned pointer to the next pool to be carved off. */
+	uint nfreepools; /* Number of available pools in the arena:  free pools + never-allocated pools. */
+	uint ntotalpools; /* Total number of pools in the arena, available or not. */
+	struct pool_header* freepools; /* Singly-linked list of available pools. */
+
+	struct arena_object* nextarena; /* Doubly-linked list of arena objects with pools available. */
+	struct arena_object* prevarena;
+};
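+/* Note:  an arena_object is associated with arena memory only while its
+ * address field is non-zero.  When the last pool in an arena is freed, the
+ * arena memory is returned to the system, address is reset to 0, and the
+ * arena_object (linked through nextarena) goes onto the singly-linked
+ * available_arenas list for reuse by a later new_arena() call.
+ */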
+
 #undef  ROUNDUP
 #define ROUNDUP(x)		(((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
 #define POOL_OVERHEAD		ROUNDUP(sizeof(struct pool_header))
@@ -392,129 +404,98 @@
 #endif /* NB_SMALL_SIZE_CLASSES >  8 */
 };
 
-/*
- * Free (cached) pools
- */
-static poolp freepools = NULL;		/* free list for cached pools */
-
 /*==========================================================================*/
 /* Arena management. */
 
-/* arenas is a vector of arena base addresses, in order of allocation time.
- * arenas currently contains narenas entries, and has space allocated
- * for at most maxarenas entries.
+/* arenas is a vector of arena objects. It currently contains maxarenas
+ * entries, some of which may not currently be used.
  *
- * CAUTION:  See the long comment block about thread safety in new_arena():
- * the code currently relies in deep ways on that this vector only grows,
- * and only grows by appending at the end.  For now we never return an arena
- * to the OS.
+ * CAUTION:  a PyMem "free" may legally be called without holding the GIL
+ * and still index into this vector (see Py_ADDRESS_IN_RANGE), so be careful
+ * whenever it is resized:  realloc() can move it.  Completely free arenas
+ * are now returned to the OS, but arena_object slots are never freed and
+ * the vector itself only grows.
  */
-static uptr *volatile arenas = NULL;	/* the pointer itself is volatile */
-static volatile uint narenas = 0;
-static uint maxarenas = 0;
+static struct arena_object* arenas = NULL; /* Array of arena objects, used to track big chunks of memory (arenas). */
+static uint maxarenas = 0; /* Size of the arenas array. */
+static struct arena_object* available_arenas = NULL; /* Singly-linked list of arena_objects not currently associated with arena memory. */
+static struct arena_object* partially_allocated_arenas = NULL; /* Head of the doubly-linked list of arenas with pools available. */
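+/* partially_allocated_arenas is kept sorted by nfreepools, smallest first:
+ * pools are always taken from the arena at the head, i.e. the most heavily
+ * used one, so lightly used arenas get a chance to drain completely and be
+ * returned to the system.
+ */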
 
-/* Number of pools still available to be allocated in the current arena. */
-static uint nfreepools = 0;
+/* How many arena_objects do we allocate initially?  16 = enough for 4 MB
+ * (16 arenas * 256 KB) before the array has to grow. */
+#define INITIAL_ARENA_OBJECTS 16
 
-/* Free space start address in current arena.  This is pool-aligned. */
-static block *arenabase = NULL;
-
-/* Allocate a new arena and return its base address.  If we run out of
- * memory, return NULL.
- */
-static block *
+/* Allocate a new arena and the arena_object that describes it.  Return the
+ * arena_object, or NULL if we run out of memory. */
+static struct arena_object*
 new_arena(void)
 {
+	struct arena_object* arenaobj;
+	uint excess;	/* number of bytes above pool alignment */
+
+	if (available_arenas == NULL) {
+		uint i;
+		uint numarenas;
+
+		/* Double the number of arena objects on each allocation. */
+		numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
+		if (numarenas <= maxarenas)	/* overflow */
+			return NULL;
+
+		/* Grow the arenas vector. */
+		arenaobj = (struct arena_object *)realloc(arenas,
+					numarenas * sizeof(*arenas));
+		if (arenaobj == NULL)
+			return NULL;
+		arenas = arenaobj;
+
+		/* If the realloc moved the array we might need to fix up
+		 * pointers into it.  However, new_arena is only called when
+		 * there are no partially allocated arenas and no available
+		 * arena_objects, so nothing that will ever be dereferenced
+		 * still points into the old array.  Assert that, to be sure: */
+		assert(partially_allocated_arenas == NULL);
+		assert(available_arenas == NULL);
+
+		/* Zero fill the new section of the array. */
+		memset(&arenas[maxarenas], 0,
+		       sizeof(*arenas) * (numarenas - maxarenas));
+
+		/* Put the new arena_objects on the available_arenas list,
+		 * in order. */
+		assert(arenas[numarenas - 1].nextarena == NULL);	/* from memset */
+		for (i = maxarenas; i < numarenas - 1; ++i)
+			arenas[i].nextarena = &arenas[i + 1];
+		available_arenas = &arenas[maxarenas];
+
+		maxarenas = numarenas;
+	}
+
+	/* Take the next available arena object off the head of the list. */
+	assert(available_arenas != NULL);
+	arenaobj = available_arenas;
+	available_arenas = available_arenas->nextarena;
+
+	assert(arenaobj->address == 0);
+
+	/* Reset the arena object; it may have been used before. */
+	memset(arenaobj, 0, sizeof(*arenaobj));
+
-	uint excess;	/* number of bytes above pool alignment */
-	block *bp = (block *)malloc(ARENA_SIZE);
-	if (bp == NULL)
+	arenaobj->address = (uptr)malloc(ARENA_SIZE);
+	if (arenaobj->address == 0) {
+		/* The allocation failed:  return NULL. */
 		return NULL;
+	}
 
 #ifdef PYMALLOC_DEBUG
 	if (Py_GETENV("PYTHONMALLOCSTATS"))
 		_PyObject_DebugMallocStats();
 #endif
 
-	/* arenabase <- first pool-aligned address in the arena
+	/* base_address <- first pool-aligned address in the arena
 	   nfreepools <- number of whole pools that fit after alignment */
-	arenabase = bp;
-	nfreepools = ARENA_SIZE / POOL_SIZE;
-	assert(POOL_SIZE * nfreepools == ARENA_SIZE);
-	excess = (uint) ((Py_uintptr_t)bp & POOL_SIZE_MASK);
+	arenaobj->base_address = (block*) arenaobj->address;
+	arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
+	assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
+	excess = (uint) ((Py_uintptr_t)arenaobj->address & POOL_SIZE_MASK);
 	if (excess != 0) {
-		--nfreepools;
-		arenabase += POOL_SIZE - excess;
+		--arenaobj->nfreepools;
+		arenaobj->base_address += POOL_SIZE - excess;
 	}
+	
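+	/* ntotalpools never changes while this arena_object holds an arena;
+	 * the free path compares nfreepools against it to detect a completely
+	 * free arena. */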
+	arenaobj->ntotalpools = arenaobj->nfreepools;
 
-	/* Make room for a new entry in the arenas vector. */
-	if (arenas == NULL) {
-		assert(narenas == 0 && maxarenas == 0);
-		arenas = (uptr *)malloc(16 * sizeof(*arenas));
-		if (arenas == NULL)
-			goto error;
-		maxarenas = 16;
-	}
-	else if (narenas == maxarenas) {
-		/* Grow arenas.
-		 *
-		 * Exceedingly subtle:  Someone may be calling the pymalloc
-		 * free via PyMem_{DEL, Del, FREE, Free} without holding the
-		 *.GIL.  Someone else may simultaneously be calling the
-		 * pymalloc malloc while holding the GIL via, e.g.,
-		 * PyObject_New.  Now the pymalloc free may index into arenas
-		 * for an address check, while the pymalloc malloc calls
-		 * new_arena and we end up here to grow a new arena *and*
-		 * grow the arenas vector.  If the value for arenas pymalloc
-		 * free picks up "vanishes" during this resize, anything may
-		 * happen, and it would be an incredibly rare bug.  Therefore
-		 * the code here takes great pains to make sure that, at every
-		 * moment, arenas always points to an intact vector of
-		 * addresses.  It doesn't matter whether arenas points to a
-		 * wholly up-to-date vector when pymalloc free checks it in
-		 * this case, because the only legal (and that even this is
-		 * legal is debatable) way to call PyMem_{Del, etc} while not
-		 * holding the GIL is if the memory being released is not
-		 * object memory, i.e. if the address check in pymalloc free
-		 * is supposed to fail.  Having an incomplete vector can't
-		 * make a supposed-to-fail case succeed by mistake (it could
-		 * only make a supposed-to-succeed case fail by mistake).
-		 *
-		 * In addition, without a lock we can't know for sure when
-		 * an old vector is no longer referenced, so we simply let
-		 * old vectors leak.
-		 *
-		 * And on top of that, since narenas and arenas can't be
-		 * changed as-a-pair atomically without a lock, we're also
-		 * careful to declare them volatile and ensure that we change
-		 * arenas first.  This prevents another thread from picking
-		 * up an narenas value too large for the arenas value it
-		 * reads up (arenas never shrinks).
-		 *
-		 * Read the above 50 times before changing anything in this
-		 * block.
-		 */
-		uptr *p;
-		uint newmax = maxarenas << 1;
-		if (newmax <= maxarenas)	/* overflow */
-			goto error;
-		p = (uptr *)malloc(newmax * sizeof(*arenas));
-		if (p == NULL)
-			goto error;
-		memcpy(p, arenas, narenas * sizeof(*arenas));
-		arenas = p;	/* old arenas deliberately leaked */
-		maxarenas = newmax;
-	}
-
-	/* Append the new arena address to arenas. */
-	assert(narenas < maxarenas);
-	arenas[narenas] = (uptr)bp;
-	++narenas;	/* can't overflow, since narenas < maxarenas before */
-	return bp;
-
-error:
-	free(bp);
-	nfreepools = 0;
-	return NULL;
+	return arenaobj;
 }
 
 /* Return true if and only if P is an address that was allocated by
@@ -531,12 +512,13 @@
  * Obscure:  A PyMem "free memory" function can call the pymalloc free or
  * realloc before the first arena has been allocated.  arenas is still
- * NULL in that case.  We're relying on that narenas is also 0 in that case,
+ * NULL in that case.  We're relying on that maxarenas is also 0 in that case,
- * so the (I) < narenas must be false, saving us from trying to index into
+ * so the (I) < maxarenas must be false, saving us from trying to index into
  * a NULL arenas.
  */
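+/* The arenas[...].address != 0 clause below is needed because an
+ * arena_object whose arena has been returned to the system keeps address 0:
+ * without the test, a stale or garbage arenaindex naming such a slot,
+ * combined with a very small pointer value, could pass the range check by
+ * accident.
+ */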
 #define Py_ADDRESS_IN_RANGE(P, POOL)	\
-	((POOL)->arenaindex < narenas &&		\
-	 (uptr)(P) - arenas[(POOL)->arenaindex] < (uptr)ARENA_SIZE)
+	((POOL)->arenaindex < maxarenas &&		\
+	 (uptr)(P) - arenas[(POOL)->arenaindex].address < (uptr)ARENA_SIZE &&	\
+	 arenas[(POOL)->arenaindex].address != 0)
+
 
 /* This is only useful when running memory debuggers such as
  * Purify or Valgrind.  Uncomment to use.
@@ -630,15 +612,55 @@
 			UNLOCK();
 			return (void *)bp;
 		}
+		if (partially_allocated_arenas == NULL) {
+			/*
+			 * Allocate new arena
+			 */
+#ifdef WITH_MEMORY_LIMITS
+			/* XXX The old limit check was (narenas < MAX_ARENAS);
+			 * narenas no longer exists, so the limit has to be
+			 * re-expressed in terms of the new bookkeeping. */
+#endif
+			partially_allocated_arenas = new_arena();
+			if (partially_allocated_arenas == NULL) {
+				UNLOCK();
+				goto redirect;
+			}
+			assert(partially_allocated_arenas->address != 0);
+			/* This arena starts a fresh list of one element;
+			 * new_arena() left the links zeroed. */
+			assert(partially_allocated_arenas->nextarena == NULL && partially_allocated_arenas->prevarena == NULL);
+		}
+
 		/*
 		 * Try to get a cached free pool
 		 */
-		pool = freepools;
+		pool = partially_allocated_arenas->freepools;
 		if (pool != NULL) {
 			/*
 			 * Unlink from cached pools
 			 */
-			freepools = pool->nextpool;
+			partially_allocated_arenas->freepools = pool->nextpool;
+			/* Decrementing nfreepools moves the arena *towards* the
+			 * head of the sorted list, but it is already the head,
+			 * so no relinking is needed. */
+			--partially_allocated_arenas->nfreepools;
+			if (partially_allocated_arenas->nfreepools == 0) {
+				assert(partially_allocated_arenas->freepools == NULL);
+				assert(partially_allocated_arenas->nextarena == NULL || partially_allocated_arenas->nextarena->prevarena == partially_allocated_arenas);
+
+				/* The arena is completely allocated:  unlink it from
+				 * the head of the list. */
+				partially_allocated_arenas = partially_allocated_arenas->nextarena;
+				if (partially_allocated_arenas != NULL)
+					partially_allocated_arenas->prevarena = NULL;
+				assert(partially_allocated_arenas == NULL || partially_allocated_arenas->address != 0);
+			}
+			else {
+				/* There is either another cached free pool, or room
+				 * left to carve a fresh pool from the arena. */
+				assert(partially_allocated_arenas->freepools != NULL
+					|| partially_allocated_arenas->base_address <= ((block*)partially_allocated_arenas->address) + ARENA_SIZE - POOL_SIZE);
+			}
 		init_pool:
 			/*
 			 * Frontlink to used pools
@@ -678,29 +700,29 @@
 		/*
 		 * Allocate new pool
 		 */
-		if (nfreepools) {
-		commit_pool:
-			--nfreepools;
-			pool = (poolp)arenabase;
-			arenabase += POOL_SIZE;
-			pool->arenaindex = narenas - 1;
+		assert(partially_allocated_arenas->nfreepools > 0);
+		if (partially_allocated_arenas->nfreepools) {
+			/* Verify that base_address still lies within the arena. */
+			assert(partially_allocated_arenas->base_address <= ((block*)partially_allocated_arenas->address) + ARENA_SIZE - POOL_SIZE);
+			pool = (poolp)partially_allocated_arenas->base_address;
+			pool->arenaindex = partially_allocated_arenas - arenas;
+			assert(&arenas[pool->arenaindex] == partially_allocated_arenas);
 			pool->szidx = DUMMY_SIZE_IDX;
+
+			--partially_allocated_arenas->nfreepools;
+			partially_allocated_arenas->base_address += POOL_SIZE;
+
+			if (partially_allocated_arenas->nfreepools == 0) {
+				assert(partially_allocated_arenas->nextarena == NULL || partially_allocated_arenas->nextarena->prevarena == partially_allocated_arenas);
+
+				/* The arena is completely allocated:  unlink it from
+				 * the head of the list. */
+				partially_allocated_arenas = partially_allocated_arenas->nextarena;
+				if (partially_allocated_arenas != NULL)
+					partially_allocated_arenas->prevarena = NULL;
+				assert(partially_allocated_arenas == NULL || partially_allocated_arenas->address != 0);
+			}
+
 			goto init_pool;
 		}
-		/*
-		 * Allocate new arena
-		 */
-#ifdef WITH_MEMORY_LIMITS
-		if (!(narenas < MAX_ARENAS)) {
-			UNLOCK();
-			goto redirect;
-		}
-#endif
-		bp = new_arena();
-		if (bp != NULL)
-			goto commit_pool;
-		UNLOCK();
-		goto redirect;
 	}
 
         /* The small block allocator ends here. */
@@ -765,11 +787,96 @@
 			prev = pool->prevpool;
 			next->prevpool = prev;
 			prev->nextpool = next;
-			/* Link to freepools.  This is a singly-linked list,
+			/* Link the pool to freepools.  This is a singly-linked list,
 			 * and pool->prevpool isn't used there.
 			 */
-			pool->nextpool = freepools;
-			freepools = pool;
+			pool->nextpool = arenas[pool->arenaindex].freepools;
+			arenas[pool->arenaindex].freepools = pool;
+			++arenas[pool->arenaindex].nfreepools;
+
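+			/* Three cases from here:  (1) the arena is now completely
+			 * free, so release its memory;  (2) it used to be completely
+			 * allocated, so put it back at the head of the partially
+			 * allocated list;  (3) otherwise nfreepools just grew, so
+			 * the arena may have to move towards the tail to keep the
+			 * list sorted. */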
+			if (arenas[pool->arenaindex].nfreepools == arenas[pool->arenaindex].ntotalpools) {
+				/* This arena is completely free.  Unlink it from the
+				 * partially allocated arenas and release its memory. */
+
+				assert(arenas[pool->arenaindex].prevarena == NULL || arenas[pool->arenaindex].prevarena->address != 0);
+				assert(arenas[pool->arenaindex].nextarena == NULL || arenas[pool->arenaindex].nextarena->address != 0);
+
+				/* Fix the pointer in the prevarena, or the partially_allocated_arenas pointer, if necessary. */
+				if (arenas[pool->arenaindex].prevarena == NULL) {
+					partially_allocated_arenas = arenas[pool->arenaindex].nextarena;
+					assert(partially_allocated_arenas == NULL || partially_allocated_arenas->address != 0);
+				}
+				else {
+					assert(arenas[pool->arenaindex].prevarena->nextarena == &arenas[pool->arenaindex]);
+					arenas[pool->arenaindex].prevarena->nextarena = arenas[pool->arenaindex].nextarena;
+				}
+
+				/* Fix the pointer in the nextarena. */
+				if (arenas[pool->arenaindex].nextarena != NULL) {
+					assert(arenas[pool->arenaindex].nextarena->prevarena == &arenas[pool->arenaindex]);
+					arenas[pool->arenaindex].nextarena->prevarena = arenas[pool->arenaindex].prevarena;
+				}
+
+				/* Record that this arena_object slot is available to be reused. */
+				arenas[pool->arenaindex].nextarena = available_arenas;
+				available_arenas = &arenas[pool->arenaindex];
+
+				/* Free the entire arena. */
+				free((void *)arenas[pool->arenaindex].address);
+				arenas[pool->arenaindex].address = 0;
+
+			} else if (arenas[pool->arenaindex].nfreepools == 1) {
+				/* The arena was completely allocated:  link it back at
+				 * the head of the partially allocated list.  One free
+				 * pool is the minimum, so inserting at the head keeps
+				 * the list sorted by nfreepools. */
+				arenas[pool->arenaindex].nextarena = partially_allocated_arenas;
+				arenas[pool->arenaindex].prevarena = NULL;
+				partially_allocated_arenas = &arenas[pool->arenaindex];
+
+				/* Fix the pointer in the nextarena. */
+				if (arenas[pool->arenaindex].nextarena != NULL)
+					arenas[pool->arenaindex].nextarena->prevarena = &arenas[pool->arenaindex];
+
+				assert(partially_allocated_arenas->address != 0);
+
+				/* Verify that the insert kept the list sorted. */
+				assert(arenas[pool->arenaindex].nextarena == NULL || arenas[pool->arenaindex].nfreepools <= arenas[pool->arenaindex].nextarena->nfreepools);
+			}
+			/* The arena may now be out of order:  it has to move
+			 * towards the tail to keep the list sorted by nfreepools. */
+			else if (arenas[pool->arenaindex].nextarena != NULL && arenas[pool->arenaindex].nfreepools > arenas[pool->arenaindex].nextarena->nfreepools) {
+				struct arena_object** lastPointer;
+				if (arenas[pool->arenaindex].prevarena != NULL)
+					lastPointer = &(arenas[pool->arenaindex].prevarena->nextarena);
+				else
+					lastPointer = &partially_allocated_arenas;
+				assert(*lastPointer == &arenas[pool->arenaindex]);
+
+				/* Step one:  unlink the arena from the list. */
+				*lastPointer = arenas[pool->arenaindex].nextarena;
+				arenas[pool->arenaindex].nextarena->prevarena = arenas[pool->arenaindex].prevarena;
+
+				/* Step two:  locate the new insertion point by iterating
+				 * over the list, using our nextarena pointer. */
+				while (arenas[pool->arenaindex].nextarena != NULL && arenas[pool->arenaindex].nfreepools > arenas[pool->arenaindex].nextarena->nfreepools) {
+					arenas[pool->arenaindex].prevarena = arenas[pool->arenaindex].nextarena;
+					arenas[pool->arenaindex].nextarena = arenas[pool->arenaindex].nextarena->nextarena;
+				}
+
+				/* Step three:  insert the arena at this point. */
+				assert(arenas[pool->arenaindex].nextarena == NULL || arenas[pool->arenaindex].prevarena == arenas[pool->arenaindex].nextarena->prevarena);
+				assert(arenas[pool->arenaindex].prevarena->nextarena == arenas[pool->arenaindex].nextarena);
+
+				arenas[pool->arenaindex].prevarena->nextarena = &arenas[pool->arenaindex];
+				if (arenas[pool->arenaindex].nextarena != NULL)
+					arenas[pool->arenaindex].nextarena->prevarena = &arenas[pool->arenaindex];
+
+				/* Verify that the relinking worked. */
+				assert(arenas[pool->arenaindex].nextarena == NULL || arenas[pool->arenaindex].nfreepools <= arenas[pool->arenaindex].nextarena->nfreepools);
+				assert(arenas[pool->arenaindex].prevarena == NULL || arenas[pool->arenaindex].nfreepools > arenas[pool->arenaindex].prevarena->nfreepools);
+
+				assert(arenas[pool->arenaindex].nextarena == NULL || arenas[pool->arenaindex].nextarena->prevarena == &arenas[pool->arenaindex]);
+				assert((partially_allocated_arenas == &arenas[pool->arenaindex] && arenas[pool->arenaindex].prevarena == NULL) || arenas[pool->arenaindex].prevarena->nextarena == &arenas[pool->arenaindex]);
+			}
+
 			UNLOCK();
 			return;
 		}
@@ -1310,7 +1417,7 @@
 	 * to march over all the arenas.  If we're lucky, most of the memory
 	 * will be living in full pools -- would be a shame to miss them.
 	 */
-	for (i = 0; i < narenas; ++i) {
+	for (i = 0; i < maxarenas; ++i) {
 		uint poolsinarena;
 		uint j;
-		uptr base = arenas[i];
+		uptr base = arenas[i].address;
+
+		/* Skip arena_objects that are not currently associated with an
+		 * allocated arena. */
+		if (base == 0)
+			continue;
