Author: stack
Date: Fri Jul 3 19:11:12 2009
New Revision: 790989
URL: http://svn.apache.org/viewvc?rev=790989&view=rev
Log:
HBASE-1597 Prevent unnecessary caching of blocks during compactions
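The shape of the change, as a simplified, self-contained sketch (hypothetical SimpleReader/SimpleCompactionReader classes, not the HFile code itself; the real pieces are HFile.Reader#cacheBlock, HFile.CompactionReader, and StoreFile.getCompactionReader in the hunks below): block reads funnel through a single cacheBlock hook, and the compaction-time reader overrides that hook as a no-op so a compaction's one-pass scan does not push hot blocks out of the cache.

    import java.nio.ByteBuffer;
    import java.util.Map;

    // Simplified stand-in for HFile.Reader: every block read funnels through cacheBlock().
    class SimpleReader {
      protected final Map<String, ByteBuffer> blockCache;   // may be null if caching is off

      SimpleReader(Map<String, ByteBuffer> blockCache) {
        this.blockCache = blockCache;
      }

      // Mirrors Reader#cacheBlock in the patch: cache a duplicate so the caller's
      // buffer position is not disturbed.
      void cacheBlock(String blockName, ByteBuffer buf) {
        if (blockCache != null) {
          blockCache.put(blockName, buf.duplicate());
        }
      }

      ByteBuffer readBlock(String blockName) {
        ByteBuffer buf = ByteBuffer.allocate(64);   // stand-in for a block read off disk
        cacheBlock(blockName, buf);
        return buf;
      }
    }

    // Compaction-time reader: same reads, but the caching hook is a no-op, so a
    // compaction's single pass over every block never evicts hot data from the cache.
    class SimpleCompactionReader extends SimpleReader {
      SimpleCompactionReader(SimpleReader r) {
        super(r.blockCache);
      }

      @Override
      void cacheBlock(String blockName, ByteBuffer buf) {
        // intentionally do nothing
      }
    }

Store.compact() then asks each StoreFile for getCompactionReader() instead of getReader(), as the Store.java hunk below shows.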
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/KeyValue.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=790989&r1=790988&r2=790989&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri Jul 3 19:11:12 2009
@@ -433,6 +433,8 @@
(Lars George via Stack)
HBASE-1596 Remove WatcherWrapper and have all users of Zookeeper provide a
Watcher
+ HBASE-1597 Prevent unnecessary caching of blocks during compactions
+ (Jon Gray via Stack)
OPTIMIZATIONS
HBASE-1412 Change values for delete column and column family in KeyValue
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/KeyValue.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/KeyValue.java?rev=790989&r1=790988&r2=790989&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/KeyValue.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/KeyValue.java Fri Jul 3 19:11:12 2009
@@ -639,9 +639,9 @@
int familylength = b[columnoffset - 1];
int columnlength = l - ((columnoffset - o) + TIMESTAMP_TYPE_SIZE);
String family = familylength == 0? "":
- Bytes.toString(b, columnoffset, familylength);
+ Bytes.toStringBinary(b, columnoffset, familylength);
String qualifier = columnlength == 0? "":
- Bytes.toString(b, columnoffset + familylength,
+ Bytes.toStringBinary(b, columnoffset + familylength,
columnlength - familylength);
long timestamp = Bytes.toLong(b, o + (l - TIMESTAMP_TYPE_SIZE));
byte type = b[o + l - 1];
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=790989&r1=790988&r2=790989&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Fri Jul 3 19:11:12 2009
@@ -513,8 +513,7 @@
}
}
- private void checkValue(final byte [] value,
- @SuppressWarnings("unused") final int offset,
+ private void checkValue(final byte [] value, final int offset,
final int length) throws IOException {
if (value == null) {
throw new IOException("Value cannot be null");
@@ -699,8 +698,7 @@
* @throws IOException
*/
public Reader(final FSDataInputStream fsdis, final long size,
- final BlockCache cache)
- throws IOException {
+ final BlockCache cache) {
this.cache = cache;
this.fileSize = size;
this.istream = fsdis;
@@ -722,11 +720,11 @@
}
protected String toStringFirstKey() {
- return Bytes.toStringBinary(getFirstKey());
+ return KeyValue.keyToString(getFirstKey());
}
protected String toStringLastKey() {
- return Bytes.toStringBinary(getFirstKey());
+ return KeyValue.keyToString(getFirstKey());
}
public long length() {
@@ -918,15 +916,26 @@
buf.limit(buf.limit() - DATABLOCKMAGIC.length);
buf.rewind();
- // Cache a copy, not the one we are sending back, so the position doesnt
- // get messed.
- if (cache != null) {
- cache.cacheBlock(name + block, buf.duplicate());
- }
+ // Cache the block
+ cacheBlock(name + block, buf.duplicate());
return buf;
}
}
+
+ /**
+ * Cache this block if there is a block cache available.<p>
+ *
+ * Makes a copy of the ByteBuffer, not the one we are sending back, so the
+ * position does not get messed up.
+ * @param blockName
+ * @param buf
+ */
+ void cacheBlock(String blockName, ByteBuffer buf) {
+ if (cache != null) {
+ cache.cacheBlock(blockName, buf.duplicate());
+ }
+ }
/*
* Decompress <code>compressedSize</code> bytes off the backing
@@ -1241,6 +1250,36 @@
return trailer.toString();
}
}
+
+
+ /**
+ * HFile Reader that does not cache blocks that were not already cached.<p>
+ *
+ * Used for compactions.
+ */
+ public static class CompactionReader extends Reader {
+ public CompactionReader(Reader reader) {
+ super(reader.istream, reader.fileSize, reader.cache);
+ super.blockIndex = reader.blockIndex;
+ super.trailer = reader.trailer;
+ super.lastkey = reader.lastkey;
+ super.avgKeyLen = reader.avgKeyLen;
+ super.avgValueLen = reader.avgValueLen;
+ super.comparator = reader.comparator;
+ super.metaIndex = reader.metaIndex;
+ super.fileInfoLoaded = reader.fileInfoLoaded;
+ super.compressAlgo = reader.compressAlgo;
+ }
+
+ /**
+ * Do not cache this block when doing a compaction.
+ */
+ @Override
+ void cacheBlock(String blockName, ByteBuffer buf) {
+ return;
+ }
+ }
+
/*
* The RFile has a fixed trailer which contains offsets to other variable
* parts of the file. Also includes basic metadata on this file.
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java?rev=790989&r1=790988&r2=790989&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java Fri Jul 3 19:11:12 2009
@@ -539,19 +539,15 @@
LruBlockCache.LOG.debug("Cache Stats: Sizes: " +
"Total=" + sizeMB + "MB (" + totalSize + "), " +
"Free=" + freeMB + "MB (" + freeSize + "), " +
- "Max=" + maxMB + "MB (" + maxSize +")");
-
- // Log hit/miss and eviction counts
- LruBlockCache.LOG.debug("Cache Stats: Counts: " +
+ "Max=" + maxMB + "MB (" + maxSize +")" +
+ ", Counts: " +
"Blocks=" + size() +", " +
"Access=" + stats.getRequestCount() + ", " +
"Hit=" + stats.getHitCount() + ", " +
"Miss=" + stats.getMissCount() + ", " +
"Evictions=" + stats.getEvictionCount() + ", " +
- "Evicted=" + stats.getEvictedCount());
-
- // Log hit/miss and eviction ratios
- LruBlockCache.LOG.debug("Cache Stats: Ratios: " +
+ "Evicted=" + stats.getEvictedCount() +
+ ", Ratios: " +
"Hit Ratio=" + stats.getHitRatio()*100 + "%, " +
"Miss Ratio=" + stats.getMissRatio()*100 + "%, " +
"Evicted/Run=" + stats.evictedPerEviction());
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=790989&r1=790988&r2=790989&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java Fri Jul 3 19:11:12 2009
@@ -53,6 +53,7 @@
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.io.hfile.HFile.CompactionReader;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -674,12 +675,12 @@
LOG.warn("Path is null for " + file);
return null;
}
- Reader r = file.getReader();
+ CompactionReader r = file.getCompactionReader();
if (r == null) {
LOG.warn("StoreFile " + file + " has a null Reader");
continue;
}
- long len = file.getReader().length();
+ long len = file.getCompactionReader().length();
fileSizes[i] = len;
totalSize += len;
}
@@ -838,7 +839,7 @@
// init:
for (int i = 0; i < filesToCompact.size(); ++i) {
// TODO open a new HFile.Reader w/o block cache.
- Reader r = filesToCompact.get(i).getReader();
+ CompactionReader r = filesToCompact.get(i).getCompactionReader();
if (r == null) {
LOG.warn("StoreFile " + filesToCompact.get(i) + " has a null Reader");
continue;
@@ -953,7 +954,7 @@
// 4. Compute new store size
this.storeSize = 0L;
for (StoreFile hsf : this.storefiles.values()) {
- Reader r = hsf.getReader();
+ Reader r = hsf.getCompactionReader();
if (r == null) {
LOG.warn("StoreFile " + hsf + " has a null Reader");
continue;
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=790989&r1=790988&r2=790989&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Fri Jul 3 19:11:12 2009
@@ -42,6 +42,7 @@
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
+import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.StringUtils;
@@ -262,7 +263,7 @@
this.reader = new HalfHFileReader(this.fs, this.referencePath,
getBlockCache(), this.reference);
} else {
- this.reader = new StoreFileReader(this.fs, this.path, getBlockCache());
+ this.reader = new Reader(this.fs, this.path, getBlockCache());
}
// Load up indices and fileinfo.
Map<byte [], byte []> map = this.reader.loadFileInfo();
@@ -298,71 +299,18 @@
}
/**
- * Override to add some customization on HFile.Reader
- */
- static class StoreFileReader extends HFile.Reader {
- /**
- *
- * @param fs
- * @param path
- * @param cache
- * @throws IOException
- */
- public StoreFileReader(FileSystem fs, Path path, BlockCache cache)
- throws IOException {
- super(fs, path, cache);
- }
-
- @Override
- protected String toStringFirstKey() {
- return KeyValue.keyToString(getFirstKey());
- }
-
- @Override
- protected String toStringLastKey() {
- return KeyValue.keyToString(getLastKey());
- }
- }
-
- /**
- * Override to add some customization on HalfHFileReader.
+ * @return Current reader. Must call open first else returns null.
*/
- static class HalfStoreFileReader extends HalfHFileReader {
- /**
- *
- * @param fs
- * @param p
- * @param c
- * @param r
- * @throws IOException
- */
- public HalfStoreFileReader(FileSystem fs, Path p, BlockCache c, Reference r)
- throws IOException {
- super(fs, p, c, r);
- }
-
- @Override
- public String toString() {
- return super.toString() + (isTop()? ", half=top": ", half=bottom") +
- " splitKey: " + KeyValue.keyToString(splitkey);
- }
-
- @Override
- protected String toStringFirstKey() {
- return KeyValue.keyToString(getFirstKey());
- }
-
- @Override
- protected String toStringLastKey() {
- return KeyValue.keyToString(getLastKey());
- }
+ public HFile.Reader getReader() {
+ return this.reader;
}
/**
+ * Gets a special Reader for use during compactions. Will not cache blocks.
* @return Current reader. Must call open first else returns null.
*/
- public HFile.Reader getReader() {
- return this.reader;
+ public HFile.CompactionReader getCompactionReader() {
+ return new HFile.CompactionReader(this.reader);
}
/**
Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=790989&r1=790988&r2=790989&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java Fri Jul 3 19:11:12 2009
@@ -169,7 +169,7 @@
boolean containsStartRow = false;
for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles().
values()) {
- HFileScanner scanner = f.getReader().getScanner();
+ HFileScanner scanner = f.getCompactionReader().getScanner();
scanner.seekTo();
do {
byte [] row = scanner.getKeyValue().getRow();