Author: stack
Date: Sat Apr 17 19:47:50 2010
New Revision: 935238

URL: http://svn.apache.org/viewvc?rev=935238&view=rev
Log:
HBASE-2457 RS gets stuck compacting region ad infinitum

Modified:
    hadoop/hbase/branches/0.20_pre_durability/CHANGES.txt
    hadoop/hbase/branches/0.20_pre_durability/src/java/org/apache/hadoop/hbase/regionserver/Store.java

Modified: hadoop/hbase/branches/0.20_pre_durability/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_pre_durability/CHANGES.txt?rev=935238&r1=935237&r2=935238&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_pre_durability/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.20_pre_durability/CHANGES.txt Sat Apr 17 19:47:50 2010
@@ -53,6 +53,7 @@ Release 0.20.4 - Thu Apr 15 16:29:44 PDT
    HBASE-2456  Client stuck in TreeMap,remove (Todd Lipcon via Stack)
    HBASE-2460  add_table.rb deletes any tables for which the target table
                name is a prefix (Todd Lipcon via Stack)
+   HBASE-2457  RS gets stuck compacting region ad infinitum
 
   IMPROVEMENTS
    HBASE-2180  Bad read performance from synchronizing hfile.fddatainputstream

Modified: hadoop/hbase/branches/0.20_pre_durability/src/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_pre_durability/src/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=935238&r1=935237&r2=935238&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_pre_durability/src/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/branches/0.20_pre_durability/src/java/org/apache/hadoop/hbase/regionserver/Store.java Sat Apr 17 19:47:50 2010
@@ -713,7 +713,7 @@ public class Store implements HConstants
         Reader r = file.getReader();
         if (r == null) {
           LOG.warn("StoreFile " + file + " has a null Reader");
-          continue;
+          return null;
         }
         long len = file.getReader().length();
         fileSizes[i] = len;
@@ -725,9 +725,16 @@ public class Store implements HConstants
         // The rule is: if the largest(oldest) one is more than twice the 
         // size of the second, skip the largest, and continue to next...,
         // until we meet the compactionThreshold limit.
-        for (point = 0; point < countOfFiles - 1; point++) {
-          if ((fileSizes[point] < fileSizes[point + 1] * 2) && 
-               (countOfFiles - point) <= maxFilesToCompact) {
+
+        // A problem with the above heuristic is that we could go through all of
+        // filesToCompact and the above condition could hold for all files and
+        // we'd end up with nothing to compact.  To protect against this, we'll
+        // compact the tail -- up to the last 3 files -- of filesToCompact
+        // regardless.
+        int tail = Math.min(countOfFiles, 3);
+        for (point = 0; point < (countOfFiles - tail); point++) {
+          if (((fileSizes[point] < fileSizes[point + 1] * 2) &&
+               (countOfFiles - point) <= maxFilesToCompact)) {
             break;
           }
           skipped += fileSizes[point];
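
For reference, below is a minimal, self-contained sketch of the selection
heuristic as it reads after this patch.  The class and method names
(CompactionSelector, selectStartIndex) are made up for illustration, and the
skipped-size bookkeeping from Store.compact() is omitted; only the loop itself
mirrors the patched code above.

public final class CompactionSelector {

  /**
   * Given store file sizes ordered oldest (index 0) to newest, return the
   * index of the first file that should be included in the compaction.
   * Files before the returned index are skipped.
   */
  static int selectStartIndex(long[] fileSizes, int maxFilesToCompact) {
    int countOfFiles = fileSizes.length;
    // Always compact the tail -- up to the last 3 files -- so the loop can
    // never skip its way past every candidate.
    int tail = Math.min(countOfFiles, 3);
    int point;
    for (point = 0; point < (countOfFiles - tail); point++) {
      // Stop skipping once the current (older) file is less than twice the
      // size of the next one and the remainder fits under maxFilesToCompact.
      if ((fileSizes[point] < fileSizes[point + 1] * 2)
          && (countOfFiles - point) <= maxFilesToCompact) {
        break;
      }
    }
    return point;
  }

  public static void main(String[] args) {
    // Each older file is more than twice the size of its successor, so the
    // "skip the largest" rule never breaks out early; with the tail guard
    // the last three files (indices 2..4) are still selected.
    long[] sizes = {1000L, 400L, 150L, 60L, 20L};
    int start = selectStartIndex(sizes, 10);
    System.out.println("compact files from index " + start
        + " to " + (sizes.length - 1));
  }
}

Running main prints "compact files from index 2 to 4"; with the pre-patch loop
bound of (countOfFiles - 1), the same sizes could leave only a single file
selected for compaction.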

