HBASE-15640 L1 cache doesn't give fair warning that it is showing partial stats only when it hits limit


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6dd938c2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6dd938c2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6dd938c2

Branch: refs/heads/HBASE-14850
Commit: 6dd938c20bf4634b5efa1ceca4ded7e54f8d9c0e
Parents: fa215a6
Author: stack <[email protected]>
Authored: Wed Apr 20 14:14:37 2016 -0700
Committer: stack <[email protected]>
Committed: Wed Apr 20 14:14:37 2016 -0700

----------------------------------------------------------------------
 .../hbase/tmpl/regionserver/BlockCacheTmpl.jamon     |  6 +++++-
 .../apache/hadoop/hbase/io/hfile/BlockCacheUtil.java | 14 ++++++++++----
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java       | 15 +++++++++++----
 3 files changed, 26 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/6dd938c2/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
index 1277acc..49a1e1b 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon
@@ -305,7 +305,11 @@ are combined counts. Request count is sum of hits and misses.</p>
   }
 </%java>
 <%if cbsbf.isFull() %>
-<p><b>Statistics below is based on sampling first <% cbsbfSnapshot.getMax() %> blocks only</b> (hbase.ui.blockcache.by.file.max)</p>
+<p>
+<div class="alert alert-danger">
+<strong>The stats below are incomplete!</strong> We ran into our accounting limit of <% cbsbf.getCount() %> blocks. Up the configuration <i>hbase.ui.blockcache.by.file.max</i>.
+</div>
+</p> 
 </%if>
 <table id="blocks_summary" class="table table-striped">
     <tr>
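
To act on the new red warning, an operator would raise the limit it names. A minimal sketch, assuming a standard client-side HBaseConfiguration (in a real deployment the key would normally be set in hbase-site.xml on the RegionServer; the value 2000000 below is only illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class RaiseBlockCacheByFileMax {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // The key and its new default (DEFAULT_MAX = 1000000, see the
      // BlockCacheUtil hunk below) come from this commit; 2000000 is
      // just an example of a higher accounting cap.
      conf.setInt("hbase.ui.blockcache.by.file.max", 2000000);
      System.out.println(conf.getInt("hbase.ui.blockcache.by.file.max", -1));
    }
  }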

http://git-wip-us.apache.org/repos/asf/hbase/blob/6dd938c2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
index 977284b..e31c340 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java
@@ -180,16 +180,22 @@ public class BlockCacheUtil {
     private long size;
     private long dataSize;
     private final long now = System.nanoTime();
+    /**
+     * How many blocks to look at before we give up.
+     * There could be many millions of blocks. We don't want the
+     * ui to freeze while we run through 1B blocks... users will
+     * think hbase dead. UI displays warning in red when stats
+     * are incomplete.
+     */
     private final int max;
-    public static final int DEFAULT_MAX = 100000;
+    public static final int DEFAULT_MAX = 1000000;
 
     CachedBlocksByFile() {
       this(null);
     }
 
     CachedBlocksByFile(final Configuration c) {
-      this.max = c == null? DEFAULT_MAX:
-        c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX);
+      this.max = c == null? DEFAULT_MAX: c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX);
     }
 
     /**
@@ -231,7 +237,7 @@ public class BlockCacheUtil {
     public boolean isFull() {
       return this.count >= this.max;
     }
- 
+
     public NavigableMap<String, NavigableSet<CachedBlock>> getCachedBlockStatsByFile() {
       return this.cachedBlockByFile;
     }
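
The isFull() accessor above is what the Jamon template keys its warning off. A minimal standalone sketch (illustrative names, not the actual CachedBlocksByFile class) of the capped-accounting pattern this hunk documents:

  // Counts entries up to a configured cap so a scan over millions of cached
  // blocks can bail out early; isFull() tells callers the stats are partial.
  public class CappedCounter {
    private final int max;  // plays the role of hbase.ui.blockcache.by.file.max
    private int count;

    public CappedCounter(final int max) { this.max = max; }

    /** @return false once the cap is reached, signalling the caller to stop. */
    public boolean update() { return ++this.count < this.max; }

    public boolean isFull() { return this.count >= this.max; }
  }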

http://git-wip-us.apache.org/repos/asf/hbase/blob/6dd938c2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 9a63d2d..1677448 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -253,7 +253,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
           try {
             end = getTrailer().getLoadOnOpenDataOffset();
             if (LOG.isTraceEnabled()) {
-              LOG.trace("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end);
+              LOG.trace("Prefetch start " + getPathOffsetEndStr(path, offset, end));
             }
            // TODO: Could we use block iterator in here? Would that get stuff into the cache?
             HFileBlock prevBlock = null;
@@ -267,7 +267,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
              // cached block. This 'optimization' triggers extremely rarely I'd say.
              long onDiskSize = prevBlock != null? prevBlock.getNextBlockOnDiskSize(): -1;
              HFileBlock block = readBlock(offset, onDiskSize, true, false, false, false,
-                null, null);
+                  null, null);
              // Need not update the current block. Ideally here the readBlock won't find the
              // block in cache. We call this readBlock so that block data is read from FS and
              // cached in BC. So there is no reference count increment that happens here.
@@ -279,11 +279,14 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
           } catch (IOException e) {
            // IOExceptions are probably due to region closes (relocation, etc.)
             if (LOG.isTraceEnabled()) {
-              LOG.trace("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end, e);
+              LOG.trace("Prefetch " + getPathOffsetEndStr(path, offset, end), e);
             }
+          } catch (NullPointerException e) {
+            LOG.warn("Stream moved/closed or prefetch cancelled?" +
+                getPathOffsetEndStr(path, offset, end), e);
           } catch (Exception e) {
             // Other exceptions are interesting
-            LOG.warn("Prefetch=" + path.toString() + ", offset=" + offset + ", end=" + end, e);
+            LOG.warn("Prefetch " + getPathOffsetEndStr(path, offset, end), e);
           } finally {
             PrefetchExecutor.complete(path);
           }
@@ -302,6 +305,10 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
     }
   }
 
+  private static String getPathOffsetEndStr(final Path path, final long offset, final long end) {
+    return "path=" + path.toString() + ", offset=" + offset + ", end=" + end;
+  }
+
   /**
   * File version check is a little sloppy. We read v3 files but can also read v2 files if their
    * content has been pb'd; files written with 0.98.
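
The three prefetch log sites above now share one formatter instead of three hand-built strings. A standalone sketch of the same pattern (the demo path and offsets are made up):

  import org.apache.hadoop.fs.Path;

  public class PathOffsetEndDemo {
    // Same shape as the helper added above: one place builds the common
    // "path=..., offset=..., end=..." suffix for the prefetch log messages.
    private static String getPathOffsetEndStr(final Path path, final long offset, final long end) {
      return "path=" + path.toString() + ", offset=" + offset + ", end=" + end;
    }

    public static void main(String[] args) {
      System.out.println("Prefetch start "
          + getPathOffsetEndStr(new Path("/hbase/data/default/t1"), 0L, 65536L));
    }
  }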
