hbase git commit: HBASE-16460 Can't rebuild the BucketAllocator's data structures when BucketCache uses FileIOEngine (Guanghao Zhang)

2016-09-05 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 a30f7ddc9 -> d91a28a45


HBASE-16460 Can't rebuild the BucketAllocator's data structures when 
BucketCache uses FileIOEngine (Guanghao Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d91a28a4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d91a28a4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d91a28a4

Branch: refs/heads/branch-1.1
Commit: d91a28a450fc0f697bf78aab07543cd48f7dedfc
Parents: a30f7dd
Author: tedyu 
Authored: Mon Sep 5 17:29:44 2016 -0700
Committer: tedyu 
Committed: Mon Sep 5 17:29:44 2016 -0700

--
 .../hbase/io/hfile/bucket/BucketAllocator.java  | 34 ++
 .../hbase/io/hfile/bucket/BucketCache.java  |  7 +--
 .../hadoop/hbase/io/hfile/CacheTestUtils.java   | 15 +-
 .../hbase/io/hfile/bucket/TestBucketCache.java  | 48 
 4 files changed, 90 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d91a28a4/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index fb95007..aece6a6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -341,25 +342,31 @@ public final class BucketAllocator {
 // we've found. we can only reconfigure each bucket once; if more than 
once,
 // we know there's a bug, so we just log the info, throw, and start 
again...
 boolean[] reconfigured = new boolean[buckets.length];
-for (Map.Entry<BlockCacheKey, BucketEntry> entry : map.entrySet()) {
+int sizeNotMatchedCount = 0;
+int insufficientCapacityCount = 0;
+Iterator<Map.Entry<BlockCacheKey, BucketEntry>> iterator = map.entrySet().iterator();
+while (iterator.hasNext()) {
+  Map.Entry<BlockCacheKey, BucketEntry> entry = iterator.next();
   long foundOffset = entry.getValue().offset();
   int foundLen = entry.getValue().getLength();
   int bucketSizeIndex = -1;
-  for (int i = 0; i < bucketSizes.length; ++i) {
-if (foundLen <= bucketSizes[i]) {
+  for (int i = 0; i < this.bucketSizes.length; ++i) {
+if (foundLen <= this.bucketSizes[i]) {
   bucketSizeIndex = i;
   break;
 }
   }
   if (bucketSizeIndex == -1) {
-throw new BucketAllocatorException(
-"Can't match bucket size for the block with size " + foundLen);
+sizeNotMatchedCount++;
+iterator.remove();
+continue;
   }
   int bucketNo = (int) (foundOffset / bucketCapacity);
-  if (bucketNo < 0 || bucketNo >= buckets.length)
-throw new BucketAllocatorException("Can't find bucket " + bucketNo
-+ ", total buckets=" + buckets.length
-+ "; did you shrink the cache?");
+  if (bucketNo < 0 || bucketNo >= buckets.length) {
+insufficientCapacityCount++;
+iterator.remove();
+continue;
+  }
   Bucket b = buckets[bucketNo];
   if (reconfigured[bucketNo]) {
 if (b.sizeIndex() != bucketSizeIndex)
@@ -382,6 +389,15 @@ public final class BucketAllocator {
   usedSize += buckets[bucketNo].getItemAllocationSize();
   bucketSizeInfos[bucketSizeIndex].blockAllocated(b);
 }
+
+if (sizeNotMatchedCount > 0) {
+  LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be 
rebuilt because "
+  + "there is no matching bucket size for these blocks");
+}
+if (insufficientCapacityCount > 0) {
+  LOG.warn("There are " + insufficientCapacityCount + " blocks which can't 
be rebuilt - "
+  + "did you shrink the cache?");
+}
   }
 
   public String toString() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d91a28a4/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 54b9398..cbd38f4 100644
--- 

hbase git commit: HBASE-16460 Can't rebuild the BucketAllocator's data structures when BucketCache uses FileIOEngine (Guanghao Zhang)

2016-09-05 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 00d33291a -> 0bdcfd90e


HBASE-16460 Can't rebuild the BucketAllocator's data structures when 
BucketCache uses FileIOEngine (Guanghao Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0bdcfd90
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0bdcfd90
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0bdcfd90

Branch: refs/heads/branch-1.2
Commit: 0bdcfd90e30790b2891b84de0403487cba1af44b
Parents: 00d3329
Author: tedyu 
Authored: Mon Sep 5 17:28:41 2016 -0700
Committer: tedyu 
Committed: Mon Sep 5 17:28:41 2016 -0700

--
 .../hbase/io/hfile/bucket/BucketAllocator.java  | 34 ++
 .../hbase/io/hfile/bucket/BucketCache.java  |  7 +--
 .../hadoop/hbase/io/hfile/CacheTestUtils.java   | 15 +-
 .../hbase/io/hfile/bucket/TestBucketCache.java  | 48 
 4 files changed, 90 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0bdcfd90/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index 8c0d0b5..938ab99 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -341,25 +342,31 @@ public final class BucketAllocator {
 // we've found. we can only reconfigure each bucket once; if more than 
once,
 // we know there's a bug, so we just log the info, throw, and start 
again...
 boolean[] reconfigured = new boolean[buckets.length];
-for (Map.Entry<BlockCacheKey, BucketEntry> entry : map.entrySet()) {
+int sizeNotMatchedCount = 0;
+int insufficientCapacityCount = 0;
+Iterator<Map.Entry<BlockCacheKey, BucketEntry>> iterator = map.entrySet().iterator();
+while (iterator.hasNext()) {
+  Map.Entry<BlockCacheKey, BucketEntry> entry = iterator.next();
   long foundOffset = entry.getValue().offset();
   int foundLen = entry.getValue().getLength();
   int bucketSizeIndex = -1;
-  for (int i = 0; i < bucketSizes.length; ++i) {
-if (foundLen <= bucketSizes[i]) {
+  for (int i = 0; i < this.bucketSizes.length; ++i) {
+if (foundLen <= this.bucketSizes[i]) {
   bucketSizeIndex = i;
   break;
 }
   }
   if (bucketSizeIndex == -1) {
-throw new BucketAllocatorException(
-"Can't match bucket size for the block with size " + foundLen);
+sizeNotMatchedCount++;
+iterator.remove();
+continue;
   }
   int bucketNo = (int) (foundOffset / bucketCapacity);
-  if (bucketNo < 0 || bucketNo >= buckets.length)
-throw new BucketAllocatorException("Can't find bucket " + bucketNo
-+ ", total buckets=" + buckets.length
-+ "; did you shrink the cache?");
+  if (bucketNo < 0 || bucketNo >= buckets.length) {
+insufficientCapacityCount++;
+iterator.remove();
+continue;
+  }
   Bucket b = buckets[bucketNo];
   if (reconfigured[bucketNo]) {
 if (b.sizeIndex() != bucketSizeIndex)
@@ -382,6 +389,15 @@ public final class BucketAllocator {
   usedSize += buckets[bucketNo].getItemAllocationSize();
   bucketSizeInfos[bucketSizeIndex].blockAllocated(b);
 }
+
+if (sizeNotMatchedCount > 0) {
+  LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be 
rebuilt because "
+  + "there is no matching bucket size for these blocks");
+}
+if (insufficientCapacityCount > 0) {
+  LOG.warn("There are " + insufficientCapacityCount + " blocks which can't 
be rebuilt - "
+  + "did you shrink the cache?");
+}
   }
 
   public String toString() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0bdcfd90/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 75d7ae3..0ca173c 100644
--- 

hbase git commit: HBASE-16460 Can't rebuild the BucketAllocator's data structures when BucketCache uses FileIOEngine (Guanghao Zhang)

2016-09-05 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 161f61787 -> 2530258c1


HBASE-16460 Can't rebuild the BucketAllocator's data structures when 
BucketCache uses FileIOEngine (Guanghao Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2530258c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2530258c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2530258c

Branch: refs/heads/branch-1.3
Commit: 2530258c19b84269bb8ed21f6723a2beeafb
Parents: 161f617
Author: tedyu 
Authored: Mon Sep 5 06:55:30 2016 -0700
Committer: tedyu 
Committed: Mon Sep 5 06:55:30 2016 -0700

--
 .../hbase/io/hfile/bucket/BucketAllocator.java  | 34 ++
 .../hbase/io/hfile/bucket/BucketCache.java  |  7 +--
 .../hadoop/hbase/io/hfile/CacheTestUtils.java   | 15 +-
 .../hbase/io/hfile/bucket/TestBucketCache.java  | 48 
 4 files changed, 90 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2530258c/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index fedfd20..4777607 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -350,25 +351,31 @@ public final class BucketAllocator {
 // we've found. we can only reconfigure each bucket once; if more than 
once,
 // we know there's a bug, so we just log the info, throw, and start 
again...
 boolean[] reconfigured = new boolean[buckets.length];
-for (Map.Entry<BlockCacheKey, BucketEntry> entry : map.entrySet()) {
+int sizeNotMatchedCount = 0;
+int insufficientCapacityCount = 0;
+Iterator<Map.Entry<BlockCacheKey, BucketEntry>> iterator = map.entrySet().iterator();
+while (iterator.hasNext()) {
+  Map.Entry<BlockCacheKey, BucketEntry> entry = iterator.next();
   long foundOffset = entry.getValue().offset();
   int foundLen = entry.getValue().getLength();
   int bucketSizeIndex = -1;
-  for (int i = 0; i < bucketSizes.length; ++i) {
-if (foundLen <= bucketSizes[i]) {
+  for (int i = 0; i < this.bucketSizes.length; ++i) {
+if (foundLen <= this.bucketSizes[i]) {
   bucketSizeIndex = i;
   break;
 }
   }
   if (bucketSizeIndex == -1) {
-throw new BucketAllocatorException(
-"Can't match bucket size for the block with size " + foundLen);
+sizeNotMatchedCount++;
+iterator.remove();
+continue;
   }
   int bucketNo = (int) (foundOffset / bucketCapacity);
-  if (bucketNo < 0 || bucketNo >= buckets.length)
-throw new BucketAllocatorException("Can't find bucket " + bucketNo
-+ ", total buckets=" + buckets.length
-+ "; did you shrink the cache?");
+  if (bucketNo < 0 || bucketNo >= buckets.length) {
+insufficientCapacityCount++;
+iterator.remove();
+continue;
+  }
   Bucket b = buckets[bucketNo];
   if (reconfigured[bucketNo]) {
 if (b.sizeIndex() != bucketSizeIndex)
@@ -391,6 +398,15 @@ public final class BucketAllocator {
   usedSize += buckets[bucketNo].getItemAllocationSize();
   bucketSizeInfos[bucketSizeIndex].blockAllocated(b);
 }
+
+if (sizeNotMatchedCount > 0) {
+  LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be 
rebuilt because "
+  + "there is no matching bucket size for these blocks");
+}
+if (insufficientCapacityCount > 0) {
+  LOG.warn("There are " + insufficientCapacityCount + " blocks which can't 
be rebuilt - "
+  + "did you shrink the cache?");
+}
   }
 
   public String toString() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2530258c/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index c0a9e17..a321556 100644
--- 

hbase git commit: HBASE-16460 Can't rebuild the BucketAllocator's data structures when BucketCache uses FileIOEngine (Guanghao Zhang)

2016-09-05 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 e1aab356b -> b694b63ed


HBASE-16460 Can't rebuild the BucketAllocator's data structures when 
BucketCache uses FileIOEngine (Guanghao Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b694b63e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b694b63e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b694b63e

Branch: refs/heads/branch-1
Commit: b694b63ed7ec9275a5ada77739e836e36853de8b
Parents: e1aab35
Author: tedyu 
Authored: Mon Sep 5 06:52:03 2016 -0700
Committer: tedyu 
Committed: Mon Sep 5 06:52:03 2016 -0700

--
 .../hbase/io/hfile/bucket/BucketAllocator.java  | 34 ++
 .../hbase/io/hfile/bucket/BucketCache.java  |  7 +--
 .../hadoop/hbase/io/hfile/CacheTestUtils.java   | 15 +-
 .../hbase/io/hfile/bucket/TestBucketCache.java  | 48 
 4 files changed, 90 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b694b63e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index fedfd20..4777607 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.io.hfile.bucket;
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -350,25 +351,31 @@ public final class BucketAllocator {
 // we've found. we can only reconfigure each bucket once; if more than 
once,
 // we know there's a bug, so we just log the info, throw, and start 
again...
 boolean[] reconfigured = new boolean[buckets.length];
-for (Map.Entry<BlockCacheKey, BucketEntry> entry : map.entrySet()) {
+int sizeNotMatchedCount = 0;
+int insufficientCapacityCount = 0;
+Iterator<Map.Entry<BlockCacheKey, BucketEntry>> iterator = map.entrySet().iterator();
+while (iterator.hasNext()) {
+  Map.Entry<BlockCacheKey, BucketEntry> entry = iterator.next();
   long foundOffset = entry.getValue().offset();
   int foundLen = entry.getValue().getLength();
   int bucketSizeIndex = -1;
-  for (int i = 0; i < bucketSizes.length; ++i) {
-if (foundLen <= bucketSizes[i]) {
+  for (int i = 0; i < this.bucketSizes.length; ++i) {
+if (foundLen <= this.bucketSizes[i]) {
   bucketSizeIndex = i;
   break;
 }
   }
   if (bucketSizeIndex == -1) {
-throw new BucketAllocatorException(
-"Can't match bucket size for the block with size " + foundLen);
+sizeNotMatchedCount++;
+iterator.remove();
+continue;
   }
   int bucketNo = (int) (foundOffset / bucketCapacity);
-  if (bucketNo < 0 || bucketNo >= buckets.length)
-throw new BucketAllocatorException("Can't find bucket " + bucketNo
-+ ", total buckets=" + buckets.length
-+ "; did you shrink the cache?");
+  if (bucketNo < 0 || bucketNo >= buckets.length) {
+insufficientCapacityCount++;
+iterator.remove();
+continue;
+  }
   Bucket b = buckets[bucketNo];
   if (reconfigured[bucketNo]) {
 if (b.sizeIndex() != bucketSizeIndex)
@@ -391,6 +398,15 @@ public final class BucketAllocator {
   usedSize += buckets[bucketNo].getItemAllocationSize();
   bucketSizeInfos[bucketSizeIndex].blockAllocated(b);
 }
+
+if (sizeNotMatchedCount > 0) {
+  LOG.warn("There are " + sizeNotMatchedCount + " blocks which can't be 
rebuilt because "
+  + "there is no matching bucket size for these blocks");
+}
+if (insufficientCapacityCount > 0) {
+  LOG.warn("There are " + insufficientCapacityCount + " blocks which can't 
be rebuilt - "
+  + "did you shrink the cache?");
+}
   }
 
   public String toString() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b694b63e/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index c0a9e17..a321556 100644
--- 

hbase git commit: HBASE-16460 Can't rebuild the BucketAllocator's data structures when BucketCache uses FileIOEngine (Guanghao Zhang)

2016-09-05 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master ab07f0087 -> b6ba13c37


HBASE-16460 Can't rebuild the BucketAllocator's data structures when 
BucketCache uses FileIOEngine (Guanghao Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b6ba13c3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b6ba13c3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b6ba13c3

Branch: refs/heads/master
Commit: b6ba13c37715422710a142f6f82ba4817129c3d6
Parents: ab07f00
Author: tedyu 
Authored: Mon Sep 5 06:50:50 2016 -0700
Committer: tedyu 
Committed: Mon Sep 5 06:50:50 2016 -0700

--
 .../hbase/io/hfile/bucket/BucketAllocator.java  | 49 ++--
 .../hbase/io/hfile/bucket/BucketCache.java  |  7 +--
 .../hadoop/hbase/io/hfile/CacheTestUtils.java   | 17 +--
 .../hbase/io/hfile/bucket/TestBucketCache.java  | 48 +++
 4 files changed, 100 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b6ba13c3/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index b5cd0c3..67a4f1f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -20,7 +20,10 @@
 
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -349,34 +352,41 @@ public final class BucketAllocator {
 // we've found. we can only reconfigure each bucket once; if more than 
once,
 // we know there's a bug, so we just log the info, throw, and start 
again...
 boolean[] reconfigured = new boolean[buckets.length];
-for (Map.Entry<BlockCacheKey, BucketEntry> entry : map.entrySet()) {
+int sizeNotMatchedCount = 0;
+int insufficientCapacityCount = 0;
+Iterator<Map.Entry<BlockCacheKey, BucketEntry>> iterator = map.entrySet().iterator();
+while (iterator.hasNext()) {
+  Map.Entry<BlockCacheKey, BucketEntry> entry = iterator.next();
   long foundOffset = entry.getValue().offset();
   int foundLen = entry.getValue().getLength();
   int bucketSizeIndex = -1;
-  for (int i = 0; i < bucketSizes.length; ++i) {
-if (foundLen <= bucketSizes[i]) {
+  for (int i = 0; i < this.bucketSizes.length; ++i) {
+if (foundLen <= this.bucketSizes[i]) {
   bucketSizeIndex = i;
   break;
 }
   }
   if (bucketSizeIndex == -1) {
-throw new BucketAllocatorException(
-"Can't match bucket size for the block with size " + foundLen);
+sizeNotMatchedCount++;
+iterator.remove();
+continue;
   }
   int bucketNo = (int) (foundOffset / bucketCapacity);
-  if (bucketNo < 0 || bucketNo >= buckets.length)
-throw new BucketAllocatorException("Can't find bucket " + bucketNo
-+ ", total buckets=" + buckets.length
-+ "; did you shrink the cache?");
+  if (bucketNo < 0 || bucketNo >= buckets.length) {
+insufficientCapacityCount++;
+iterator.remove();
+continue;
+  }
   Bucket b = buckets[bucketNo];
   if (reconfigured[bucketNo]) {
-if (b.sizeIndex() != bucketSizeIndex)
-  throw new BucketAllocatorException(
-  "Inconsistent allocation in bucket map;");
+if (b.sizeIndex() != bucketSizeIndex) {
+  throw new BucketAllocatorException("Inconsistent allocation in 
bucket map;");
+}
   } else {
-if (!b.isCompletelyFree())
-  throw new BucketAllocatorException("Reconfiguring bucket "
-  + bucketNo + " but it's already allocated; corrupt data");
+if (!b.isCompletelyFree()) {
+  throw new BucketAllocatorException(
+  "Reconfiguring bucket " + bucketNo + " but it's already 
allocated; corrupt data");
+}
 // Need to remove the bucket from whichever list it's currently in at
 // the moment...
 BucketSizeInfo bsi = bucketSizeInfos[bucketSizeIndex];
@@ -390,6 +400,15 @@ public final class BucketAllocator {
   usedSize += buckets[bucketNo].getItemAllocationSize();
   bucketSizeInfos[bucketSizeIndex].blockAllocated(b);
 }
+
+if (sizeNotMatchedCount > 0) {
+  LOG.warn("There are " + sizeNotMatchedCount +