Author: stack
Date: Fri Sep 17 03:56:16 2010
New Revision: 997968

URL: http://svn.apache.org/viewvc?rev=997968&view=rev
Log:
HBASE-3006 Reading compressed HFile blocks causes way too many DFS RPC calls 
severely impacting performance

Modified:
    hbase/trunk/CHANGES.txt
    hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java

Modified: hbase/trunk/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hbase/trunk/CHANGES.txt?rev=997968&r1=997967&r2=997968&view=diff
==============================================================================
--- hbase/trunk/CHANGES.txt (original)
+++ hbase/trunk/CHANGES.txt Fri Sep 17 03:56:16 2010
@@ -523,6 +523,9 @@ Release 0.21.0 - Unreleased
    HBASE-2986  multi writable can npe causing client hang
    HBASE-2979  Fix failing TestMultParrallel in hudson build
    HBASE-2899  hfile.min.blocksize.size ignored/documentation wrong
+   HBASE-3006  Reading compressed HFile blocks causes way too many DFS RPC
+               calls severely impacting performance
+               (Kannan Muthukkaruppan via Stack)
 
   IMPROVEMENTS
    HBASE-1760  Cleanup TODOs in HTable

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: 
http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=997968&r1=997967&r2=997968&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java 
(original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Fri 
Sep 17 03:56:16 2010
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import java.io.BufferedInputStream;
 import java.io.Closeable;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
@@ -1051,10 +1052,15 @@ public class HFile {
         // decompressor reading into next block -- IIRC, it just grabs a
         // bunch of data w/o regard to whether decompressor is coming to end 
of a
         // decompression.
+
+        // We use a buffer of DEFAULT_BLOCKSIZE size.  This might be extreme.
+        // Could maybe do with less. Study and figure it: TODO
         InputStream is = this.compressAlgo.createDecompressionStream(
-          new BoundedRangeFileInputStream(this.istream, offset, compressedSize,
-            pread),
-          decompressor, 0);
+            new BufferedInputStream(
+                new BoundedRangeFileInputStream(this.istream, offset, 
compressedSize,
+                                                pread),
+                Math.min(DEFAUT_BLOCKSIZE, compressedSize)),
+            decompressor, 0);
         buf = ByteBuffer.allocate(decompressedSize);
         IOUtils.readFully(is, buf.array(), 0, buf.capacity());
         is.close();


Reply via email to