Author: stack
Date: Fri Sep 17 04:21:28 2010
New Revision: 997975
URL: http://svn.apache.org/viewvc?rev=997975&view=rev
Log:
HBASE-3006 Reading compressed HFile blocks causes way too many DFS RPC calls
severely impacting performance--Now add the fix I intended; the previous
commit was just a spelling fix in HFile
Modified:
hbase/trunk/CHANGES.txt
hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
Modified: hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/trunk/CHANGES.txt?rev=997975&r1=997974&r2=997975&view=diff
==============================================================================
--- hbase/trunk/CHANGES.txt (original)
+++ hbase/trunk/CHANGES.txt Fri Sep 17 04:21:28 2010
@@ -523,6 +523,9 @@ Release 0.21.0 - Unreleased
HBASE-2986 multi writable can npe causing client hang
HBASE-2979 Fix failing TestMultParrallel in hudson build
HBASE-2899 hfile.min.blocksize.size ignored/documentation wrong
+ HBASE-3006 Reading compressed HFile blocks causes way too many DFS RPC
+ calls severely impacting performance
+ (Kannan Muthukkaruppan via Stack)
IMPROVEMENTS
HBASE-1760 Cleanup TODOs in HTable
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=997975&r1=997974&r2=997975&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Fri Sep 17 04:21:28 2010
@@ -19,6 +19,7 @@
*/
package org.apache.hadoop.hbase.io.hfile;
+import java.io.BufferedInputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
@@ -1051,10 +1052,15 @@ public class HFile {
// decompressor reading into next block -- IIRC, it just grabs a
// bunch of data w/o regard to whether decompressor is coming to end of a
// decompression.
+
+ // We use a buffer of DEFAULT_BLOCKSIZE size. This might be extreme.
+ // Could maybe do with less. Study and figure it: TODO
InputStream is = this.compressAlgo.createDecompressionStream(
- new BoundedRangeFileInputStream(this.istream, offset, compressedSize,
- pread),
- decompressor, 0);
+ new BufferedInputStream(
+ new BoundedRangeFileInputStream(this.istream, offset, compressedSize,
+ pread),
+ Math.min(DEFAULT_BLOCKSIZE, compressedSize)),
+ decompressor, 0);
buf = ByteBuffer.allocate(decompressedSize);
IOUtils.readFully(is, buf.array(), 0, buf.capacity());
is.close();
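
The point of the patch is that the decompression codec issues many small
read() calls, and before this change each of those went straight through the
BoundedRangeFileInputStream as a separate positioned read against DFS. Wrapping
the range stream in a java.io.BufferedInputStream sized at
Math.min(DEFAULT_BLOCKSIZE, compressedSize) means the block is pulled from DFS
in a few large reads and the decompressor's small reads are served from memory.
The sketch below illustrates that pattern in isolation; it is not HBase code.
CountingStream is a hypothetical stand-in for the DFS-backed range stream (each
read() modeling one RPC), and DEFAULT_BLOCKSIZE is assumed to be HFile's 64KB
default of the era.

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class BufferedBlockReadSketch {

  // Stand-in for HFile's DEFAULT_BLOCKSIZE (64KB at the time of this commit).
  static final int DEFAULT_BLOCKSIZE = 64 * 1024;

  // Stands in for the BoundedRangeFileInputStream over DFS: every read()
  // here models one positioned-read RPC against a datanode.
  static class CountingStream extends InputStream {
    private final InputStream delegate;
    long reads = 0;
    CountingStream(InputStream delegate) { this.delegate = delegate; }
    @Override public int read() throws IOException {
      reads++;
      return delegate.read();
    }
    @Override public int read(byte[] b, int off, int len) throws IOException {
      reads++;
      return delegate.read(b, off, len);
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] compressedBlock = new byte[100 * 1024];  // a pretend compressed block
    int compressedSize = compressedBlock.length;

    CountingStream raw = new CountingStream(new ByteArrayInputStream(compressedBlock));

    // The pattern from the patch: interpose a buffer no larger than the
    // block, so the decompressor's many small reads hit memory, not DFS.
    InputStream buffered =
        new BufferedInputStream(raw, Math.min(DEFAULT_BLOCKSIZE, compressedSize));

    byte[] chunk = new byte[512];  // decompressors tend to read in small chunks
    while (buffered.read(chunk) != -1) {
      // drain, as the decompression stream would
    }
    buffered.close();

    // Buffered: a handful of underlying reads. Unbuffered, each of the ~200
    // small reads above would have gone to the wrapped stream directly.
    System.out.println("underlying reads: " + raw.reads);
  }
}

Capping the buffer at compressedSize (rather than always using
DEFAULT_BLOCKSIZE) avoids allocating more buffer than the block can fill, which
is also what the TODO comment in the patch is weighing: the buffer may still be
larger than necessary, but it cannot exceed the block itself.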