Index: src/java/org/apache/fop/fo/properties/PropertyCache.java
===================================================================
--- src/java/org/apache/fop/fo/properties/PropertyCache.java	(revision 595394)
+++ src/java/org/apache/fop/fo/properties/PropertyCache.java	(working copy)
@@ -41,6 +41,9 @@
     /** the table of hash-buckets */
     private CacheEntry[] table = new CacheEntry[8];
     
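+    /** single cleaner instance, reused for successive cleanup runs */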
+    private CacheCleaner cleaner = new CacheCleaner();
+    
     /* same hash function as used by java.util.HashMap */
     private static int hash(Object x) {
         int h = x.hashCode();
@@ -77,6 +80,10 @@
             this.hash = old.hash;
         }
         
+        boolean isCleared() {
+            return (ref == null || ref.get() == null);
+        }
+        
     }
     
     /* Wrapper objects to synchronize on */
@@ -85,7 +92,7 @@
     }
     
     /*
-     * Class modeling a cleanup thread.
+     * Class modeling the cleanup thread.
      * 
      * Once run() is called, the segment is locked and the hash-bucket
      * will be traversed, removing any obsolete entries.
@@ -93,33 +100,31 @@
      */
     private final class CacheCleaner implements Runnable {
         
-        private int hash;
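+        /* one rehash vote per segment: a vote is cast when a cleanup
+         * run on that segment fails to free any entries */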
+        boolean[] votesForRehash = new boolean[SEGMENT_MASK + 1];
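+        /* set while a cleanup run is pending or in progress; checked in
+         * put() so that at most one cleaner thread is active at a time */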
+        volatile boolean isRunning;
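+        /* hash of the entry that triggered the cleanup; determines the
+         * segment and hash-bucket that run() scans */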
+        int hash;
         
-        CacheCleaner(int hash) {
+        CacheCleaner() {
+        }
+        
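+        /* selects the segment/bucket that the next run() will clean */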
+        public void init(int hash) {
             this.hash = hash;
         }
         
         public void run() {
+            isRunning = true;
+            int segmentIndex = this.hash & SEGMENT_MASK;
             //System.out.println("Cleaning segment " + this.segment);
-            CacheSegment segment = segments[this.hash & SEGMENT_MASK];
+            CacheSegment segment = segments[segmentIndex];
             int oldCount;
             int newCount;
             synchronized (segment) {
                 oldCount = segment.count;
-                /* check first to see if another cleaner thread already
-                 * pushed the number of entries back below the threshold
-                 * if so, return immediately
-                 */
-                if (segment.count < (2 * table.length)) {
-                    return;
-                }
                 
                 int index = this.hash & (table.length - 1);
                 CacheEntry first = table[index];
-                WeakReference ref;
                 for (CacheEntry e = first; e != null; e = e.next) {
-                    ref = e.ref;
-                    if (ref != null && ref.get() == null) {
+                    if (e.isCleared()) {
                         /* remove obsolete entry
                         /* 1. clear value, cause interference for non-blocking get() */
                         e.ref = null;
@@ -127,7 +132,12 @@
                         /* 2. clone the segment, without the obsolete entry */
                         CacheEntry head = e.next;
                         for (CacheEntry c = first; c != e; c = c.next) {
-                            head = new CacheEntry(c, head);
+                            if (!c.isCleared()) {
+                                head = new CacheEntry(c, head);
+                            } else {
+                                /* this reference was also cleared by the GC
+                                 * in the meantime: drop the entry and adjust the count */
+                                segment.count--;
+                            }
                         }
                         table[index] = head;
                         segment.count--;
@@ -136,9 +146,31 @@
                 newCount = segment.count;
             }
             if (oldCount == newCount) {
-                /* cleanup had no effect, try rehashing */
-                rehash(SEGMENT_MASK);
+                /* the cleanup freed nothing: vote for a rehash of the whole table */
+                if (!votesForRehash[segmentIndex]) {
+                    /* this segment had not voted yet */
+                    votesForRehash[segmentIndex] = true;
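+                    /* count how many segments currently vote for a rehash */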
+                    int voteCount = 0;
+                    for (int i = SEGMENT_MASK + 1; --i >= 0; ) {
+                        if (votesForRehash[i]) {
+                            voteCount++;
+                        }
+                    }
+                    if (voteCount > SEGMENT_MASK / 4) {
+                        rehash(SEGMENT_MASK);
+                        /* rehash done: reset all the votes */
+                        for (int i = SEGMENT_MASK + 1; --i >= 0;) {
+                            votesForRehash[i] = false;
+                        }
+                    }
+                }
+            } else {
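+                /* the cleanup freed at least one entry: withdraw this
+                 * segment's rehash vote */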
+                if (votesForRehash[segmentIndex]) {
+                    votesForRehash[segmentIndex] = false;
+                }
             }
+            isRunning = false;
         }
     }
     
@@ -173,12 +205,14 @@
                 }
             }
             
-            if (segment.count > (2 * table.length)) {
-                /* launch cleanup in a separate thread, 
-                 * so it acquires its own lock, and put()
-                 * can return immediately */
-                Thread cleaner = new Thread(new CacheCleaner(hash));
-                cleaner.start();
+            if (segment.count > (2 * table.length)
+                    && !cleaner.isRunning) {
+                synchronized (cleaner) {
+                    /* start the cleanup thread, which acquires its own lock,
+                     * so put() can return immediately; isRunning is re-checked
+                     * and set under the lock, so at most one cleaner is started */
+                    if (!cleaner.isRunning) {
+                        cleaner.isRunning = true;
+                        cleaner.init(hash);
+                        new Thread(cleaner).start();
+                    }
+                }
             }
         }
     }
@@ -224,16 +258,8 @@
     
     /*
      * Recursively acquires locks on all 32 segments,
-     * then performs a check on the segments first to see `
-     * how many precisely exceed the threshold ( 2 x table.length ). 
-     * If this number exceeds half the amount of buckets, 
      * extends the cache and redistributes the entries.
      * 
-     * Example:
-     * For a cache with default size of 8 buckets, each bucket is
-     * a segment, and as such, rehash() will only have effect
-     * if more than 4 buckets exceed the size of 16 entries.
-     * 
      */
     private final void rehash(int index) {
         
@@ -246,40 +272,22 @@
                 /* double the amount of buckets */
                 int newLength = table.length << 1;
                 if (newLength > 0) { //no overflow?
-                    /* Check segmentcounts first */
-                    int countSegments = 0;
-                    int threshold = table.length * 2;
+                    /* reset the segment counts; they are recomputed during redistribution below */
                     for (int i = segments.length; --i >= 0;) {
-                        if (segments[i].count > threshold) {
-                            countSegments++;
-                        }
+                        segments[i].count = 0;
                     }
                     
-                    if (countSegments <= (table.length / 2)) {
-                        return;
-                    } else {
-                        /* reset segmentcounts */
-                        for (int i = segments.length; --i >= 0;) {
-                            segments[i].count = 0;
-                        }
-                    }
-                    
                     CacheEntry[] newTable = new CacheEntry[newLength];
                     
                     int hash, idx;
-                    WeakReference ref;
-                    Object o;
                     newLength--;
                     for (int i = table.length; --i >= 0;) {
                         for (CacheEntry c = table[i]; c != null; c = c.next) {
-                            ref = c.ref;
-                            if (ref != null) {
-                                if ((o = ref.get()) != null) {
-                                    hash = hash(o);
-                                    idx = hash & newLength;
-                                    newTable[idx] = new CacheEntry(c, newTable[idx]);
-                                    segments[hash & SEGMENT_MASK].count++;
-                                }
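+                            /* re-bucket live entries using the stored hash;
+                             * cleared entries are simply dropped here */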
+                            if (!c.isCleared()) {
+                                hash = c.hash;
+                                idx = hash & newLength;
+                                newTable[idx] = new CacheEntry(c, newTable[idx]);
+                                segments[hash & SEGMENT_MASK].count++;
                             }
                         }
                     }
