hbase git commit: HBASE-20231 Not able to delete column family from a row using RemoteHTable

2018-04-03 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 98c6f8a3f -> 0ccdffe95


HBASE-20231 Not able to delete column family from a row using RemoteHTable

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0ccdffe9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0ccdffe9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0ccdffe9

Branch: refs/heads/branch-1.4
Commit: 0ccdffe95236617678bf09f5bf670524cb2ae666
Parents: 98c6f8a
Author: Pankaj Kumar 
Authored: Wed Apr 4 10:16:58 2018 +0530
Committer: Ashish Singhi 
Committed: Wed Apr 4 10:16:58 2018 +0530

--
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  7 +--
 .../hbase/rest/client/TestRemoteTable.java  | 22 
 2 files changed, 27 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0ccdffe9/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
--
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index 463b232..fc6a90f 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -112,13 +112,16 @@ public class RemoteHTable implements Table {
   Iterator ii = quals.iterator();
   while (ii.hasNext()) {
 sb.append(toURLEncodedBytes((byte[])e.getKey()));
-sb.append(':');
 Object o = ii.next();
 // Puts use byte[] but Deletes use KeyValue
 if (o instanceof byte[]) {
+  sb.append(':');
   sb.append(toURLEncodedBytes((byte[])o));
 } else if (o instanceof KeyValue) {
-  sb.append(toURLEncodedBytes(((KeyValue)o).getQualifier()));
+  if (((KeyValue) o).getQualifierLength() != 0) {
+sb.append(':');
+sb.append(toURLEncodedBytes(((KeyValue) o).getQualifier()));
+  }
 } else {
   throw new RuntimeException("object type not handled");
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0ccdffe9/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
--
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 342fc4e..28f3798 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -349,18 +349,27 @@ public class TestRemoteTable {
 Put put = new Put(ROW_3);
 put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
 put.add(COLUMN_2, QUALIFIER_2, VALUE_2);
+put.add(COLUMN_3, QUALIFIER_1, VALUE_1);
+put.add(COLUMN_3, QUALIFIER_2, VALUE_2);
 remoteTable.put(put);
 
 Get get = new Get(ROW_3);
 get.addFamily(COLUMN_1);
 get.addFamily(COLUMN_2);
+get.addFamily(COLUMN_3);
 Result result = remoteTable.get(get);
 byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
 byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+byte[] value3 = result.getValue(COLUMN_3, QUALIFIER_1);
+byte[] value4 = result.getValue(COLUMN_3, QUALIFIER_2);
 assertNotNull(value1);
 assertTrue(Bytes.equals(VALUE_1, value1));
 assertNotNull(value2);
 assertTrue(Bytes.equals(VALUE_2, value2));
+assertNotNull(value3);
+assertTrue(Bytes.equals(VALUE_1, value3));
+assertNotNull(value4);
+assertTrue(Bytes.equals(VALUE_2, value4));
 
 Delete delete = new Delete(ROW_3);
 delete.addColumn(COLUMN_2, QUALIFIER_2);
@@ -390,6 +399,19 @@ public class TestRemoteTable {
 assertTrue(Bytes.equals(VALUE_1, value1));
 assertNull(value2);
 
+// Delete column family from row
+delete = new Delete(ROW_3);
+delete.addFamily(COLUMN_3);
+remoteTable.delete(delete);
+
+get = new Get(ROW_3);
+get.addFamily(COLUMN_3);
+result = remoteTable.get(get);
+value3 = result.getValue(COLUMN_3, QUALIFIER_1);
+value4 = result.getValue(COLUMN_3, QUALIFIER_2);
+assertNull(value3);
+assertNull(value4);
+
 delete = new Delete(ROW_3);
 remoteTable.delete(delete);
 



hbase git commit: HBASE-20231 Not able to delete column family from a row using RemoteHTable

2018-04-03 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-1 9ced0c936 -> 2eae8104d


HBASE-20231 Not able to delete column family from a row using RemoteHTable

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2eae8104
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2eae8104
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2eae8104

Branch: refs/heads/branch-1
Commit: 2eae8104d19cc8be1b69f4969623b9a9f15e2593
Parents: 9ced0c9
Author: Pankaj Kumar 
Authored: Wed Apr 4 10:16:11 2018 +0530
Committer: Ashish Singhi 
Committed: Wed Apr 4 10:16:11 2018 +0530

--
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  7 +--
 .../hbase/rest/client/TestRemoteTable.java  | 22 
 2 files changed, 27 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2eae8104/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
--
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index 463b232..fc6a90f 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -112,13 +112,16 @@ public class RemoteHTable implements Table {
   Iterator ii = quals.iterator();
   while (ii.hasNext()) {
 sb.append(toURLEncodedBytes((byte[])e.getKey()));
-sb.append(':');
 Object o = ii.next();
 // Puts use byte[] but Deletes use KeyValue
 if (o instanceof byte[]) {
+  sb.append(':');
   sb.append(toURLEncodedBytes((byte[])o));
 } else if (o instanceof KeyValue) {
-  sb.append(toURLEncodedBytes(((KeyValue)o).getQualifier()));
+  if (((KeyValue) o).getQualifierLength() != 0) {
+sb.append(':');
+sb.append(toURLEncodedBytes(((KeyValue) o).getQualifier()));
+  }
 } else {
   throw new RuntimeException("object type not handled");
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2eae8104/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
--
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 342fc4e..28f3798 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -349,18 +349,27 @@ public class TestRemoteTable {
 Put put = new Put(ROW_3);
 put.add(COLUMN_1, QUALIFIER_1, VALUE_1);
 put.add(COLUMN_2, QUALIFIER_2, VALUE_2);
+put.add(COLUMN_3, QUALIFIER_1, VALUE_1);
+put.add(COLUMN_3, QUALIFIER_2, VALUE_2);
 remoteTable.put(put);
 
 Get get = new Get(ROW_3);
 get.addFamily(COLUMN_1);
 get.addFamily(COLUMN_2);
+get.addFamily(COLUMN_3);
 Result result = remoteTable.get(get);
 byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
 byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+byte[] value3 = result.getValue(COLUMN_3, QUALIFIER_1);
+byte[] value4 = result.getValue(COLUMN_3, QUALIFIER_2);
 assertNotNull(value1);
 assertTrue(Bytes.equals(VALUE_1, value1));
 assertNotNull(value2);
 assertTrue(Bytes.equals(VALUE_2, value2));
+assertNotNull(value3);
+assertTrue(Bytes.equals(VALUE_1, value3));
+assertNotNull(value4);
+assertTrue(Bytes.equals(VALUE_2, value4));
 
 Delete delete = new Delete(ROW_3);
 delete.addColumn(COLUMN_2, QUALIFIER_2);
@@ -390,6 +399,19 @@ public class TestRemoteTable {
 assertTrue(Bytes.equals(VALUE_1, value1));
 assertNull(value2);
 
+// Delete column family from row
+delete = new Delete(ROW_3);
+delete.addFamily(COLUMN_3);
+remoteTable.delete(delete);
+
+get = new Get(ROW_3);
+get.addFamily(COLUMN_3);
+result = remoteTable.get(get);
+value3 = result.getValue(COLUMN_3, QUALIFIER_1);
+value4 = result.getValue(COLUMN_3, QUALIFIER_2);
+assertNull(value3);
+assertNull(value4);
+
 delete = new Delete(ROW_3);
 remoteTable.delete(delete);
 



hbase git commit: HBASE-20231 Not able to delete column family from a row using RemoteHTable

2018-04-03 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 79bb54ddf -> d7cb0bd41


HBASE-20231 Not able to delete column family from a row using RemoteHTable

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d7cb0bd4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d7cb0bd4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d7cb0bd4

Branch: refs/heads/branch-2.0
Commit: d7cb0bd4179951d973d60eff6ad68b4a5822f507
Parents: 79bb54d
Author: Pankaj Kumar 
Authored: Wed Apr 4 10:14:46 2018 +0530
Committer: Ashish Singhi 
Committed: Wed Apr 4 10:14:46 2018 +0530

--
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  9 +---
 .../hbase/rest/client/TestRemoteTable.java  | 22 
 2 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d7cb0bd4/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
--
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index cc3efdd..29b48e1 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -115,13 +115,16 @@ public class RemoteHTable implements Table {
   Iterator ii = quals.iterator();
   while (ii.hasNext()) {
 sb.append(toURLEncodedBytes((byte[])e.getKey()));
-sb.append(':');
 Object o = ii.next();
 // Puts use byte[] but Deletes use KeyValue
 if (o instanceof byte[]) {
-  sb.append(toURLEncodedBytes((byte[])o));
+  sb.append(':');
+  sb.append(toURLEncodedBytes((byte[]) o));
 } else if (o instanceof KeyValue) {
-  sb.append(toURLEncodedBytes(CellUtil.cloneQualifier((KeyValue)o)));
+  if (((KeyValue) o).getQualifierLength() != 0) {
+sb.append(':');
+sb.append(toURLEncodedBytes(CellUtil.cloneQualifier((KeyValue) o)));
+  }
 } else {
   throw new RuntimeException("object type not handled");
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d7cb0bd4/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
--
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 5053d91..c6f5195 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -353,18 +353,27 @@ public class TestRemoteTable {
 Put put = new Put(ROW_3);
 put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
 put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
+put.addColumn(COLUMN_3, QUALIFIER_1, VALUE_1);
+put.addColumn(COLUMN_3, QUALIFIER_2, VALUE_2);
 remoteTable.put(put);
 
 Get get = new Get(ROW_3);
 get.addFamily(COLUMN_1);
 get.addFamily(COLUMN_2);
+get.addFamily(COLUMN_3);
 Result result = remoteTable.get(get);
 byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
 byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+byte[] value3 = result.getValue(COLUMN_3, QUALIFIER_1);
+byte[] value4 = result.getValue(COLUMN_3, QUALIFIER_2);
 assertNotNull(value1);
 assertTrue(Bytes.equals(VALUE_1, value1));
 assertNotNull(value2);
 assertTrue(Bytes.equals(VALUE_2, value2));
+assertNotNull(value3);
+assertTrue(Bytes.equals(VALUE_1, value3));
+assertNotNull(value4);
+assertTrue(Bytes.equals(VALUE_2, value4));
 
 Delete delete = new Delete(ROW_3);
 delete.addColumn(COLUMN_2, QUALIFIER_2);
@@ -394,6 +403,19 @@ public class TestRemoteTable {
 assertTrue(Bytes.equals(VALUE_1, value1));
 assertNull(value2);
 
+// Delete column family from row
+delete = new Delete(ROW_3);
+delete.addFamily(COLUMN_3);
+remoteTable.delete(delete);
+
+get = new Get(ROW_3);
+get.addFamily(COLUMN_3);
+result = remoteTable.get(get);
+value3 = result.getValue(COLUMN_3, QUALIFIER_1);
+value4 = result.getValue(COLUMN_3, QUALIFIER_2);
+assertNull(value3);
+assertNull(value4);
+
 delete = new Delete(ROW_3);
 remoteTable.delete(delete);
 



hbase git commit: HBASE-20231 Not able to delete column family from a row using RemoteHTable

2018-04-03 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b8a13ba10 -> a761f175a


HBASE-20231 Not able to delete column family from a row using RemoteHTable

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a761f175
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a761f175
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a761f175

Branch: refs/heads/branch-2
Commit: a761f175ab1ab48be3462b4a2161a1663a719620
Parents: b8a13ba
Author: Pankaj Kumar 
Authored: Wed Apr 4 10:13:34 2018 +0530
Committer: Ashish Singhi 
Committed: Wed Apr 4 10:13:34 2018 +0530

--
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  9 +---
 .../hbase/rest/client/TestRemoteTable.java  | 22 
 2 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a761f175/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
--
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index cc3efdd..29b48e1 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -115,13 +115,16 @@ public class RemoteHTable implements Table {
   Iterator ii = quals.iterator();
   while (ii.hasNext()) {
 sb.append(toURLEncodedBytes((byte[])e.getKey()));
-sb.append(':');
 Object o = ii.next();
 // Puts use byte[] but Deletes use KeyValue
 if (o instanceof byte[]) {
-  sb.append(toURLEncodedBytes((byte[])o));
+  sb.append(':');
+  sb.append(toURLEncodedBytes((byte[]) o));
 } else if (o instanceof KeyValue) {
-  sb.append(toURLEncodedBytes(CellUtil.cloneQualifier((KeyValue)o)));
+  if (((KeyValue) o).getQualifierLength() != 0) {
+sb.append(':');
+sb.append(toURLEncodedBytes(CellUtil.cloneQualifier((KeyValue) o)));
+  }
 } else {
   throw new RuntimeException("object type not handled");
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a761f175/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
--
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 5053d91..c6f5195 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -353,18 +353,27 @@ public class TestRemoteTable {
 Put put = new Put(ROW_3);
 put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
 put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
+put.addColumn(COLUMN_3, QUALIFIER_1, VALUE_1);
+put.addColumn(COLUMN_3, QUALIFIER_2, VALUE_2);
 remoteTable.put(put);
 
 Get get = new Get(ROW_3);
 get.addFamily(COLUMN_1);
 get.addFamily(COLUMN_2);
+get.addFamily(COLUMN_3);
 Result result = remoteTable.get(get);
 byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
 byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+byte[] value3 = result.getValue(COLUMN_3, QUALIFIER_1);
+byte[] value4 = result.getValue(COLUMN_3, QUALIFIER_2);
 assertNotNull(value1);
 assertTrue(Bytes.equals(VALUE_1, value1));
 assertNotNull(value2);
 assertTrue(Bytes.equals(VALUE_2, value2));
+assertNotNull(value3);
+assertTrue(Bytes.equals(VALUE_1, value3));
+assertNotNull(value4);
+assertTrue(Bytes.equals(VALUE_2, value4));
 
 Delete delete = new Delete(ROW_3);
 delete.addColumn(COLUMN_2, QUALIFIER_2);
@@ -394,6 +403,19 @@ public class TestRemoteTable {
 assertTrue(Bytes.equals(VALUE_1, value1));
 assertNull(value2);
 
+// Delete column family from row
+delete = new Delete(ROW_3);
+delete.addFamily(COLUMN_3);
+remoteTable.delete(delete);
+
+get = new Get(ROW_3);
+get.addFamily(COLUMN_3);
+result = remoteTable.get(get);
+value3 = result.getValue(COLUMN_3, QUALIFIER_1);
+value4 = result.getValue(COLUMN_3, QUALIFIER_2);
+assertNull(value3);
+assertNull(value4);
+
 delete = new Delete(ROW_3);
 remoteTable.delete(delete);
 



hbase git commit: HBASE-20231 Not able to delete column family from a row using RemoteHTable

2018-04-03 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/master 5937202fd -> 7abaf22a1


HBASE-20231 Not able to delete column family from a row using RemoteHTable

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7abaf22a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7abaf22a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7abaf22a

Branch: refs/heads/master
Commit: 7abaf22a12cc9e2655ff57ad46f66e2189fd52e2
Parents: 5937202
Author: Pankaj Kumar 
Authored: Wed Apr 4 10:11:09 2018 +0530
Committer: Ashish Singhi 
Committed: Wed Apr 4 10:11:09 2018 +0530

--
 .../hadoop/hbase/rest/client/RemoteHTable.java  |  9 +---
 .../hbase/rest/client/TestRemoteTable.java  | 22 
 2 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7abaf22a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
--
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index cc3efdd..29b48e1 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -115,13 +115,16 @@ public class RemoteHTable implements Table {
   Iterator ii = quals.iterator();
   while (ii.hasNext()) {
 sb.append(toURLEncodedBytes((byte[])e.getKey()));
-sb.append(':');
 Object o = ii.next();
 // Puts use byte[] but Deletes use KeyValue
 if (o instanceof byte[]) {
-  sb.append(toURLEncodedBytes((byte[])o));
+  sb.append(':');
+  sb.append(toURLEncodedBytes((byte[]) o));
 } else if (o instanceof KeyValue) {
-  sb.append(toURLEncodedBytes(CellUtil.cloneQualifier((KeyValue)o)));
+  if (((KeyValue) o).getQualifierLength() != 0) {
+sb.append(':');
+sb.append(toURLEncodedBytes(CellUtil.cloneQualifier((KeyValue) o)));
+  }
 } else {
   throw new RuntimeException("object type not handled");
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7abaf22a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
--
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 5053d91..c6f5195 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -353,18 +353,27 @@ public class TestRemoteTable {
 Put put = new Put(ROW_3);
 put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
 put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
+put.addColumn(COLUMN_3, QUALIFIER_1, VALUE_1);
+put.addColumn(COLUMN_3, QUALIFIER_2, VALUE_2);
 remoteTable.put(put);
 
 Get get = new Get(ROW_3);
 get.addFamily(COLUMN_1);
 get.addFamily(COLUMN_2);
+get.addFamily(COLUMN_3);
 Result result = remoteTable.get(get);
 byte[] value1 = result.getValue(COLUMN_1, QUALIFIER_1);
 byte[] value2 = result.getValue(COLUMN_2, QUALIFIER_2);
+byte[] value3 = result.getValue(COLUMN_3, QUALIFIER_1);
+byte[] value4 = result.getValue(COLUMN_3, QUALIFIER_2);
 assertNotNull(value1);
 assertTrue(Bytes.equals(VALUE_1, value1));
 assertNotNull(value2);
 assertTrue(Bytes.equals(VALUE_2, value2));
+assertNotNull(value3);
+assertTrue(Bytes.equals(VALUE_1, value3));
+assertNotNull(value4);
+assertTrue(Bytes.equals(VALUE_2, value4));
 
 Delete delete = new Delete(ROW_3);
 delete.addColumn(COLUMN_2, QUALIFIER_2);
@@ -394,6 +403,19 @@ public class TestRemoteTable {
 assertTrue(Bytes.equals(VALUE_1, value1));
 assertNull(value2);
 
+// Delete column family from row
+delete = new Delete(ROW_3);
+delete.addFamily(COLUMN_3);
+remoteTable.delete(delete);
+
+get = new Get(ROW_3);
+get.addFamily(COLUMN_3);
+result = remoteTable.get(get);
+value3 = result.getValue(COLUMN_3, QUALIFIER_1);
+value4 = result.getValue(COLUMN_3, QUALIFIER_2);
+assertNull(value3);
+assertNull(value4);
+
 delete = new Delete(ROW_3);
 remoteTable.delete(delete);
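
As a rough illustration of the row specs the patched buildRowSpec produces for the two delete
shapes (a sketch with illustrative table/row/family/qualifier names, not part of the commit):

// Hedged sketch of the row specs RemoteHTable builds after this change.
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.util.Bytes;

public class RowSpecShapes {
  public static void main(String[] args) {
    // Column delete: the spec still carries the qualifier -> /t1/row3/cf3:q1
    Delete columnDelete = new Delete(Bytes.toBytes("row3"))
        .addColumn(Bytes.toBytes("cf3"), Bytes.toBytes("q1"));

    // Family delete: the marker KeyValue has an empty qualifier, so the patched
    // code emits /t1/row3/cf3 instead of the old /t1/row3/cf3: (the dangling ':'
    // kept the REST server from treating it as a whole-family delete).
    Delete familyDelete = new Delete(Bytes.toBytes("row3"))
        .addFamily(Bytes.toBytes("cf3"));

    System.out.println(columnDelete + " / " + familyDelete);
  }
}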
 



hbase git commit: HBASE-20159 Reintroduce misspelled HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT

2018-04-03 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2 9a3488072 -> b8a13ba10


HBASE-20159 Reintroduce misspelled HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b8a13ba1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b8a13ba1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b8a13ba1

Branch: refs/heads/branch-2
Commit: b8a13ba10f37cced0e0d891e65dded651f939700
Parents: 9a34880
Author: Mike Drob 
Authored: Tue Apr 3 15:30:42 2018 -0700
Committer: Mike Drob 
Committed: Tue Apr 3 21:36:22 2018 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/HConstants.java | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b8a13ba1/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 7ee31a5..ac56ce5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -217,6 +217,13 @@ public final class HConstants {
   public static final String ZOOKEEPER_CLIENT_PORT =
   ZK_CFG_PROPERTY_PREFIX + CLIENT_PORT_STR;
 
+  /**
+   * Will be removed in hbase 3.0
+   * @deprecated use {@link #DEFAULT_ZOOKEEPER_CLIENT_PORT} instead
+   */
+  @Deprecated
+  public static final int DEFAULT_ZOOKEPER_CLIENT_PORT = 2181;
+
   /** Default client port that the zookeeper listens on */
   public static final int DEFAULT_ZOOKEEPER_CLIENT_PORT = 2181;
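
A minimal sketch of the source-compatibility case this restores; the downstream class is
hypothetical, but the restored constant and its @Deprecated marker come from the patch above.

// Hedged sketch: downstream code that still references the old, misspelled constant
// keeps compiling against 2.x because the constant is reintroduced as @Deprecated.
import org.apache.hadoop.hbase.HConstants;

public class LegacyZkPortUser {
  @SuppressWarnings("deprecation")
  public static int zkClientPort() {
    // Misspelled name kept only for compatibility; prefer DEFAULT_ZOOKEEPER_CLIENT_PORT.
    return HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT;
  }
}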
 



hbase git commit: HBASE-16499 slow replication for small HBase clusters

2018-04-03 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 fb2a0eb66 -> 79bb54ddf


HBASE-16499 slow replication for small HBase clusters

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/79bb54dd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/79bb54dd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/79bb54dd

Branch: refs/heads/branch-2.0
Commit: 79bb54ddf4306796ab508a0612ff17c2e4ab863c
Parents: fb2a0eb
Author: Ashish Singhi 
Authored: Wed Apr 4 10:00:44 2018 +0530
Committer: Ashish Singhi 
Committed: Wed Apr 4 10:00:44 2018 +0530

--
 .../regionserver/ReplicationSinkManager.java|  2 +-
 .../TestReplicationSinkManager.java | 36 +++-
 2 files changed, 21 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/79bb54dd/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
index af6888c..3cd7884 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
@@ -58,7 +58,7 @@ public class ReplicationSinkManager {
* Default ratio of the total number of peer cluster region servers to 
consider
* replicating to.
*/
-  static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.1f;
+  static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f;
 
 
   private final Connection conn;

http://git-wip-us.apache.org/repos/asf/hbase/blob/79bb54dd/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
index 3be3bfb..39dabb4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -49,13 +48,11 @@ public class TestReplicationSinkManager {
 
   private static final String PEER_CLUSTER_ID = "PEER_CLUSTER_ID";
 
-  private ReplicationPeers replicationPeers;
   private HBaseReplicationEndpoint replicationEndpoint;
   private ReplicationSinkManager sinkManager;
 
   @Before
   public void setUp() {
-replicationPeers = mock(ReplicationPeers.class);
 replicationEndpoint = mock(HBaseReplicationEndpoint.class);
 sinkManager = new ReplicationSinkManager(mock(ClusterConnection.class),
   PEER_CLUSTER_ID, replicationEndpoint, new 
Configuration());
@@ -64,7 +61,8 @@ public class TestReplicationSinkManager {
   @Test
   public void testChooseSinks() {
 List serverNames = Lists.newArrayList();
-for (int i = 0; i < 20; i++) {
+int totalServers = 20;
+for (int i = 0; i < totalServers; i++) {
   serverNames.add(mock(ServerName.class));
 }
 
@@ -73,7 +71,8 @@ public class TestReplicationSinkManager {
 
 sinkManager.chooseSinks();
 
-assertEquals(2, sinkManager.getNumSinks());
+int expected = (int) (totalServers * 
ReplicationSinkManager.DEFAULT_REPLICATION_SOURCE_RATIO);
+assertEquals(expected, sinkManager.getNumSinks());
 
   }
 
@@ -117,7 +116,8 @@ public class TestReplicationSinkManager {
   @Test
   public void testReportBadSink_PastThreshold() {
 List serverNames = Lists.newArrayList();
-for (int i = 0; i < 30; i++) {
+int totalServers = 30;
+for (int i = 0; i < totalServers; i++) {
   serverNames.add(mock(ServerName.class));
 }
 when(replicationEndpoint.getRegionServers())
@@ -126,7 +126,8 @@ public class TestReplicationSinkManager {
 
 sinkManager.chooseSinks();
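
With the default ratio raised from 0.1f to 0.5f, a 20-server peer cluster yields
(int) (20 * 0.5) = 10 candidate sinks instead of 2, which is what the updated
testChooseSinks now asserts. A standalone sketch of that arithmetic follows; the
class and method are illustrative, only the constant value is from the patch above.

// Hedged sketch of the sink-count arithmetic the updated test asserts.
public class SinkCountExample {
  static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f; // was 0.1f

  static int numSinks(int peerRegionServers) {
    return (int) (peerRegionServers * DEFAULT_REPLICATION_SOURCE_RATIO);
  }

  public static void main(String[] args) {
    System.out.println(numSinks(20)); // 10 with the new default, 2 with the old 0.1f
    System.out.println(numSinks(3));  // 1 in this sketch, 0 with the old ratio
  }
}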

hbase git commit: HBASE-16499 slow replication for small HBase clusters

2018-04-03 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-2 ed21f2617 -> 9a3488072


HBASE-16499 slow replication for small HBase clusters

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a348807
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a348807
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a348807

Branch: refs/heads/branch-2
Commit: 9a3488072456809dbd139c343f849410df4cc0ee
Parents: ed21f26
Author: Ashish Singhi 
Authored: Wed Apr 4 09:59:50 2018 +0530
Committer: Ashish Singhi 
Committed: Wed Apr 4 09:59:50 2018 +0530

--
 .../regionserver/ReplicationSinkManager.java|  2 +-
 .../TestReplicationSinkManager.java | 36 +++-
 2 files changed, 21 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a348807/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
index af6888c..3cd7884 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
@@ -58,7 +58,7 @@ public class ReplicationSinkManager {
* Default ratio of the total number of peer cluster region servers to 
consider
* replicating to.
*/
-  static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.1f;
+  static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f;
 
 
   private final Connection conn;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9a348807/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
index 3be3bfb..39dabb4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -49,13 +48,11 @@ public class TestReplicationSinkManager {
 
   private static final String PEER_CLUSTER_ID = "PEER_CLUSTER_ID";
 
-  private ReplicationPeers replicationPeers;
   private HBaseReplicationEndpoint replicationEndpoint;
   private ReplicationSinkManager sinkManager;
 
   @Before
   public void setUp() {
-replicationPeers = mock(ReplicationPeers.class);
 replicationEndpoint = mock(HBaseReplicationEndpoint.class);
 sinkManager = new ReplicationSinkManager(mock(ClusterConnection.class),
   PEER_CLUSTER_ID, replicationEndpoint, new 
Configuration());
@@ -64,7 +61,8 @@ public class TestReplicationSinkManager {
   @Test
   public void testChooseSinks() {
 List serverNames = Lists.newArrayList();
-for (int i = 0; i < 20; i++) {
+int totalServers = 20;
+for (int i = 0; i < totalServers; i++) {
   serverNames.add(mock(ServerName.class));
 }
 
@@ -73,7 +71,8 @@ public class TestReplicationSinkManager {
 
 sinkManager.chooseSinks();
 
-assertEquals(2, sinkManager.getNumSinks());
+int expected = (int) (totalServers * 
ReplicationSinkManager.DEFAULT_REPLICATION_SOURCE_RATIO);
+assertEquals(expected, sinkManager.getNumSinks());
 
   }
 
@@ -117,7 +116,8 @@ public class TestReplicationSinkManager {
   @Test
   public void testReportBadSink_PastThreshold() {
 List serverNames = Lists.newArrayList();
-for (int i = 0; i < 30; i++) {
+int totalServers = 30;
+for (int i = 0; i < totalServers; i++) {
   serverNames.add(mock(ServerName.class));
 }
 when(replicationEndpoint.getRegionServers())
@@ -126,7 +126,8 @@ public class TestReplicationSinkManager {
 
 sinkManager.chooseSinks();

hbase git commit: HBASE-16499 slow replication for small HBase clusters

2018-04-03 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/master b1b0db319 -> 5937202fd


HBASE-16499 slow replication for small HBase clusters

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5937202f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5937202f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5937202f

Branch: refs/heads/master
Commit: 5937202fd5d6c5fba74bae21846f62da4ee35583
Parents: b1b0db3
Author: Ashish Singhi 
Authored: Wed Apr 4 09:54:41 2018 +0530
Committer: Ashish Singhi 
Committed: Wed Apr 4 09:54:41 2018 +0530

--
 .../regionserver/ReplicationSinkManager.java|  2 +-
 .../TestReplicationSinkManager.java | 36 +++-
 2 files changed, 21 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5937202f/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
index af6888c..3cd7884 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
@@ -58,7 +58,7 @@ public class ReplicationSinkManager {
* Default ratio of the total number of peer cluster region servers to 
consider
* replicating to.
*/
-  static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.1f;
+  static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.5f;
 
 
   private final Connection conn;

http://git-wip-us.apache.org/repos/asf/hbase/blob/5937202f/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
index 3be3bfb..39dabb4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import 
org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.SinkPeer;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -49,13 +48,11 @@ public class TestReplicationSinkManager {
 
   private static final String PEER_CLUSTER_ID = "PEER_CLUSTER_ID";
 
-  private ReplicationPeers replicationPeers;
   private HBaseReplicationEndpoint replicationEndpoint;
   private ReplicationSinkManager sinkManager;
 
   @Before
   public void setUp() {
-replicationPeers = mock(ReplicationPeers.class);
 replicationEndpoint = mock(HBaseReplicationEndpoint.class);
 sinkManager = new ReplicationSinkManager(mock(ClusterConnection.class),
   PEER_CLUSTER_ID, replicationEndpoint, new 
Configuration());
@@ -64,7 +61,8 @@ public class TestReplicationSinkManager {
   @Test
   public void testChooseSinks() {
 List serverNames = Lists.newArrayList();
-for (int i = 0; i < 20; i++) {
+int totalServers = 20;
+for (int i = 0; i < totalServers; i++) {
   serverNames.add(mock(ServerName.class));
 }
 
@@ -73,7 +71,8 @@ public class TestReplicationSinkManager {
 
 sinkManager.chooseSinks();
 
-assertEquals(2, sinkManager.getNumSinks());
+int expected = (int) (totalServers * 
ReplicationSinkManager.DEFAULT_REPLICATION_SOURCE_RATIO);
+assertEquals(expected, sinkManager.getNumSinks());
 
   }
 
@@ -117,7 +116,8 @@ public class TestReplicationSinkManager {
   @Test
   public void testReportBadSink_PastThreshold() {
 List serverNames = Lists.newArrayList();
-for (int i = 0; i < 30; i++) {
+int totalServers = 30;
+for (int i = 0; i < totalServers; i++) {
   serverNames.add(mock(ServerName.class));
 }
 when(replicationEndpoint.getRegionServers())
@@ -126,7 +126,8 @@ public class TestReplicationSinkManager {
 
 sinkManager.chooseSinks();
 // 

hbase git commit: HBASE-20298 Doc change in read/write/total accounting metrics

2018-04-03 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 e6f567e31 -> fb2a0eb66


HBASE-20298 Doc change in read/write/total accounting metrics


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fb2a0eb6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fb2a0eb6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fb2a0eb6

Branch: refs/heads/branch-2.0
Commit: fb2a0eb663537b7436e9a93a28297e4821881611
Parents: e6f567e
Author: Michael Stack 
Authored: Tue Mar 27 14:50:00 2018 -0700
Committer: Michael Stack 
Committed: Tue Apr 3 17:03:08 2018 -0700

--
 .../regionserver/MetricsRegionServerSource.java | 32 +++-
 .../ipc/FastPathBalancedQueueRpcExecutor.java   |  3 +-
 .../hbase/regionserver/HRegionServer.java   | 14 ++---
 3 files changed, 22 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fb2a0eb6/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 70c6da8..1a1535d 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -238,26 +238,30 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
   String MIN_STORE_FILE_AGE = "minStoreFileAge";
   String AVG_STORE_FILE_AGE = "avgStoreFileAge";
   String NUM_REFERENCE_FILES = "numReferenceFiles";
-  String MAX_STORE_FILE_AGE_DESC = "Max age of store files hosted on this 
region server";
-  String MIN_STORE_FILE_AGE_DESC = "Min age of store files hosted on this 
region server";
-  String AVG_STORE_FILE_AGE_DESC = "Average age of store files hosted on this 
region server";
-  String NUM_REFERENCE_FILES_DESC = "Number of reference file on this region 
server";
+  String MAX_STORE_FILE_AGE_DESC = "Max age of store files hosted on this 
RegionServer";
+  String MIN_STORE_FILE_AGE_DESC = "Min age of store files hosted on this 
RegionServer";
+  String AVG_STORE_FILE_AGE_DESC = "Average age of store files hosted on this 
RegionServer";
+  String NUM_REFERENCE_FILES_DESC = "Number of reference file on this 
RegionServer";
   String STOREFILE_SIZE_DESC = "Size of storefiles being served.";
   String TOTAL_REQUEST_COUNT = "totalRequestCount";
   String TOTAL_REQUEST_COUNT_DESC =
-  "Total number of requests this RegionServer has answered.";
+  "Total number of requests this RegionServer has answered; increments the 
count once for " +
+  "EVERY access whether an admin operation, a Scan, a Put or Put of 1M 
rows, or a Get " +
+  "of a non-existent row";
   String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount";
   String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC =
-  "Total number of region requests this RegionServer has answered, count 
by row-level action";
+  "Total number of region requests this RegionServer has answered; counts 
by row-level " +
+  "action at the RPC Server (Sums 'readRequestsCount' and 
'writeRequestsCount'); counts" +
+  "once per access whether a Put of 1M rows or a Get that returns 1M 
Results";
   String READ_REQUEST_COUNT = "readRequestCount";
   String READ_REQUEST_COUNT_DESC =
-  "Number of read requests this region server has answered.";
+  "Number of read requests with non-empty Results that this RegionServer 
has answered.";
   String FILTERED_READ_REQUEST_COUNT = "filteredReadRequestCount";
   String FILTERED_READ_REQUEST_COUNT_DESC =
-"Number of filtered read requests this region server has answered.";
+"Number of filtered read requests this RegionServer has answered.";
   String WRITE_REQUEST_COUNT = "writeRequestCount";
   String WRITE_REQUEST_COUNT_DESC =
-  "Number of mutation requests this region server has answered.";
+  "Number of mutation requests this RegionServer has answered.";
   String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount";
   String CHECK_MUTATE_FAILED_COUNT_DESC =
   "Number of Check and Mutate calls that failed the checks.";
@@ -527,17 +531,17 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
 = "Total number of bytes that is output from compaction, major only";
 
   String RPC_GET_REQUEST_COUNT = "rpcGetRequestCount";
-  String RPC_GET_REQUEST_COUNT_DESC = "Number of rpc get requests this region 
server has answered.";
+  String RPC_GET_REQUEST_COUNT_DESC = "Number of rpc ge
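
For readers who consume these counters programmatically, a hedged sketch of reading the request
counts whose descriptions this patch clarifies, via the HBase 2.x admin API; the connection
details are assumptions and not part of this doc-only change.

// Hedged sketch: report per-server and per-region request counters.
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCountReport {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      for (ServerName server : metrics.getLiveServerMetrics().keySet()) {
        ServerMetrics sm = metrics.getLiveServerMetrics().get(server);
        // totalRequestCount: every RPC, including Gets of non-existent rows
        System.out.println(server + " requestCount=" + sm.getRequestCount());
        // read/write request counts are the row-level counters summed into
        // totalRowActionRequestCount per the descriptions above
        sm.getRegionMetrics().values().forEach(rm ->
            System.out.println("  " + rm.getNameAsString()
                + " read=" + rm.getReadRequestCount()
                + " write=" + rm.getWriteRequestCount()));
      }
    }
  }
}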

hbase git commit: HBASE-20298 Doc change in read/write/total accounting metrics

2018-04-03 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 40fbecd97 -> ed21f2617


HBASE-20298 Doc change in read/write/total accounting metrics


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ed21f261
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ed21f261
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ed21f261

Branch: refs/heads/branch-2
Commit: ed21f26171a0224a2d082ea413f76aa5ca8410e5
Parents: 40fbecd
Author: Michael Stack 
Authored: Tue Mar 27 14:50:00 2018 -0700
Committer: Michael Stack 
Committed: Tue Apr 3 17:02:27 2018 -0700

--
 .../regionserver/MetricsRegionServerSource.java | 32 +++-
 .../ipc/FastPathBalancedQueueRpcExecutor.java   |  3 +-
 .../hbase/regionserver/HRegionServer.java   | 14 ++---
 3 files changed, 22 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ed21f261/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 70c6da8..1a1535d 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -238,26 +238,30 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
   String MIN_STORE_FILE_AGE = "minStoreFileAge";
   String AVG_STORE_FILE_AGE = "avgStoreFileAge";
   String NUM_REFERENCE_FILES = "numReferenceFiles";
-  String MAX_STORE_FILE_AGE_DESC = "Max age of store files hosted on this 
region server";
-  String MIN_STORE_FILE_AGE_DESC = "Min age of store files hosted on this 
region server";
-  String AVG_STORE_FILE_AGE_DESC = "Average age of store files hosted on this 
region server";
-  String NUM_REFERENCE_FILES_DESC = "Number of reference file on this region 
server";
+  String MAX_STORE_FILE_AGE_DESC = "Max age of store files hosted on this 
RegionServer";
+  String MIN_STORE_FILE_AGE_DESC = "Min age of store files hosted on this 
RegionServer";
+  String AVG_STORE_FILE_AGE_DESC = "Average age of store files hosted on this 
RegionServer";
+  String NUM_REFERENCE_FILES_DESC = "Number of reference file on this 
RegionServer";
   String STOREFILE_SIZE_DESC = "Size of storefiles being served.";
   String TOTAL_REQUEST_COUNT = "totalRequestCount";
   String TOTAL_REQUEST_COUNT_DESC =
-  "Total number of requests this RegionServer has answered.";
+  "Total number of requests this RegionServer has answered; increments the 
count once for " +
+  "EVERY access whether an admin operation, a Scan, a Put or Put of 1M 
rows, or a Get " +
+  "of a non-existent row";
   String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount";
   String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC =
-  "Total number of region requests this RegionServer has answered, count 
by row-level action";
+  "Total number of region requests this RegionServer has answered; counts 
by row-level " +
+  "action at the RPC Server (Sums 'readRequestsCount' and 
'writeRequestsCount'); counts" +
+  "once per access whether a Put of 1M rows or a Get that returns 1M 
Results";
   String READ_REQUEST_COUNT = "readRequestCount";
   String READ_REQUEST_COUNT_DESC =
-  "Number of read requests this region server has answered.";
+  "Number of read requests with non-empty Results that this RegionServer 
has answered.";
   String FILTERED_READ_REQUEST_COUNT = "filteredReadRequestCount";
   String FILTERED_READ_REQUEST_COUNT_DESC =
-"Number of filtered read requests this region server has answered.";
+"Number of filtered read requests this RegionServer has answered.";
   String WRITE_REQUEST_COUNT = "writeRequestCount";
   String WRITE_REQUEST_COUNT_DESC =
-  "Number of mutation requests this region server has answered.";
+  "Number of mutation requests this RegionServer has answered.";
   String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount";
   String CHECK_MUTATE_FAILED_COUNT_DESC =
   "Number of Check and Mutate calls that failed the checks.";
@@ -527,17 +531,17 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
 = "Total number of bytes that is output from compaction, major only";
 
   String RPC_GET_REQUEST_COUNT = "rpcGetRequestCount";
-  String RPC_GET_REQUEST_COUNT_DESC = "Number of rpc get requests this region 
server has answered.";
+  String RPC_GET_REQUEST_COUNT_DESC = "Number of rpc get re

hbase git commit: HBASE-20298 Doc change in read/write/total accounting metrics

2018-04-03 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 6318e3bf5 -> b1b0db319


HBASE-20298 Doc change in read/write/total accounting metrics


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b1b0db31
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b1b0db31
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b1b0db31

Branch: refs/heads/master
Commit: b1b0db3195e08cd3742767996914e2ffa99f40cb
Parents: 6318e3b
Author: Michael Stack 
Authored: Tue Mar 27 14:50:00 2018 -0700
Committer: Michael Stack 
Committed: Tue Apr 3 17:00:43 2018 -0700

--
 .../regionserver/MetricsRegionServerSource.java | 32 +++-
 .../ipc/FastPathBalancedQueueRpcExecutor.java   |  3 +-
 .../hbase/regionserver/HRegionServer.java   | 14 ++---
 src/main/asciidoc/_chapters/upgrading.adoc  |  6 
 4 files changed, 28 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b1b0db31/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 9a826e7..4636fe0 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -238,26 +238,30 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
   String MIN_STORE_FILE_AGE = "minStoreFileAge";
   String AVG_STORE_FILE_AGE = "avgStoreFileAge";
   String NUM_REFERENCE_FILES = "numReferenceFiles";
-  String MAX_STORE_FILE_AGE_DESC = "Max age of store files hosted on this 
region server";
-  String MIN_STORE_FILE_AGE_DESC = "Min age of store files hosted on this 
region server";
-  String AVG_STORE_FILE_AGE_DESC = "Average age of store files hosted on this 
region server";
-  String NUM_REFERENCE_FILES_DESC = "Number of reference file on this region 
server";
+  String MAX_STORE_FILE_AGE_DESC = "Max age of store files hosted on this 
RegionServer";
+  String MIN_STORE_FILE_AGE_DESC = "Min age of store files hosted on this 
RegionServer";
+  String AVG_STORE_FILE_AGE_DESC = "Average age of store files hosted on this 
RegionServer";
+  String NUM_REFERENCE_FILES_DESC = "Number of reference file on this 
RegionServer";
   String STOREFILE_SIZE_DESC = "Size of storefiles being served.";
   String TOTAL_REQUEST_COUNT = "totalRequestCount";
   String TOTAL_REQUEST_COUNT_DESC =
-  "Total number of requests this RegionServer has answered.";
+  "Total number of requests this RegionServer has answered; increments the 
count once for " +
+  "EVERY access whether an admin operation, a Scan, a Put or Put of 1M 
rows, or a Get " +
+  "of a non-existent row";
   String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount";
   String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC =
-  "Total number of region requests this RegionServer has answered, count 
by row-level action";
+  "Total number of region requests this RegionServer has answered; counts 
by row-level " +
+  "action at the RPC Server (Sums 'readRequestsCount' and 
'writeRequestsCount'); counts" +
+  "once per access whether a Put of 1M rows or a Get that returns 1M 
Results";
   String READ_REQUEST_COUNT = "readRequestCount";
   String READ_REQUEST_COUNT_DESC =
-  "Number of read requests this region server has answered.";
+  "Number of read requests with non-empty Results that this RegionServer 
has answered.";
   String FILTERED_READ_REQUEST_COUNT = "filteredReadRequestCount";
   String FILTERED_READ_REQUEST_COUNT_DESC =
-"Number of filtered read requests this region server has answered.";
+"Number of filtered read requests this RegionServer has answered.";
   String WRITE_REQUEST_COUNT = "writeRequestCount";
   String WRITE_REQUEST_COUNT_DESC =
-  "Number of mutation requests this region server has answered.";
+  "Number of mutation requests this RegionServer has answered.";
   String CHECK_MUTATE_FAILED_COUNT = "checkMutateFailedCount";
   String CHECK_MUTATE_FAILED_COUNT_DESC =
   "Number of Check and Mutate calls that failed the checks.";
@@ -527,17 +531,17 @@ public interface MetricsRegionServerSource extends 
BaseSource, JvmPauseMonitorSo
 = "Total number of bytes that is output from compaction, major only";
 
   String RPC_GET_REQUEST_COUNT = "rpcGetRequestCount";
-  String RPC_GET_REQUEST_COUNT_DESC = "Number of rpc get requests this region 
server has answered.";
+  St

hbase git commit: HBASE-17730 Migration to 2.0 for coprocessors. Upload AsciiDoc for coprocessor design improvements made in HBASE-17732.

2018-04-03 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/branch-2 40924bb4a -> 40fbecd97


HBASE-17730 Migration to 2.0 for coprocessors. Upload AsciiDoc for coprocessor 
design improvements made in HBASE-17732.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/40fbecd9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/40fbecd9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/40fbecd9

Branch: refs/heads/branch-2
Commit: 40fbecd97c6e07b4d9ececb7c8d96feb9292c473
Parents: 40924bb
Author: Apekshit Sharma 
Authored: Tue Apr 3 15:45:53 2018 -0700
Committer: Apekshit Sharma 
Committed: Tue Apr 3 15:50:09 2018 -0700

--
 ...ad_of_inheritance-HBASE-17732-2017_09_27.pdf | Bin 161724 -> 0 bytes
 ...tion_instead_of_inheritance-HBASE-17732.adoc | 236 +++
 2 files changed, 236 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/40fbecd9/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
--
diff --git a/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
deleted file mode 100644
index 30a6d54..000
Binary files a/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hbase/blob/40fbecd9/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc
--
diff --git a/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc
new file mode 100644
index 000..a61b37b
--- /dev/null
+++ b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc
@@ -0,0 +1,236 @@
+= Coprocessor Design Improvements 
(link:https://issues.apache.org/jira/browse/HBASE-17732[HBASE-17732])
+
+Author: Apekshit Sharma
+
+== Introduction
+
+This doc explains current design of Coprocessor feature in brief, few issues I 
noticed, and
+suggestions on how to fix them & further improve overall design.
+
+*TL;DR* +
+We are moving from
+
+* Observer *is* Coprocessor
+* FooService *is* CoprocessorService
+
+To
+
+* Coprocessor *has* Observer
+* Coprocessor *has* Service
+
+See code example in <>.
+
+== Terminology
+
+hooks = functions in observers. Named because third-party use these functions 
to “hook up” custom
+logic to internal code paths.
+
+[[background]]
+== Background
+
+Coprocessors are well link:http://hbase.apache.org/book.html#cp[documented in 
the refguide].
+
+Here we give a little background information on involved classes, their 
responsibilities, and
+relationship to each other.
+
+* Main classes
+** Coprocessor (interface)
+*** All *Observer* interfaces derive from Coprocessor interface.
+ Coprocessor Interface is a _Marker _Interface. It just has start/stop 
methods and enums for
+stages in the Coprocessor Lifecycle.
+** http://hbase.apache.org/book.html#_observer_coprocessors[Observers] 
(interface)
+*** Contain hooks which third-party programs can override to inject 
functionality in various
+internal code paths. For e.g preCreateTable(...) will be called just before 
any table is created.
+*** Current set of observers: _MasterObserver, RegionObserver, 
RegionServerObserver, WALObserver,
+EndpointObserver, BulkLoadObserver._
+** CoprocessorEnvironment (interface)
+*** Encapsulates a coprocessor instance and other information like versions, 
priority, etc.
+*** Coprocessor implementations use it to get access to tables.
+*** Four main implementations: _MasterEnvironment, RegionEnvironment, 
RegionServerEnvironment,
+WALEnvironment._
+** CoprocessorHost (abstract class)
+*** Responsible for loading coprocessors
+*** Four concrete sub-classes: MasterCoprocessorHost, RegionCoprocessorHost,
+RegionServerCoprocessorHost, WALCoprocessorHost
+*** Each host is tied to corresponding environment type using template 
argument ‘E’.
+
+== Problems
+
+* CoprocessorEnvironment has `Coprocessor getInstance()`. Since Observer types 
which can be
+handled by an environment are not statically tied to it, coprocessor hosts 
(which are statically
+tied to Environment) don’t know which kind of coprocessors are relevant to 
them, i.e.
+MasterCoprocessorHost is tied to MasterEnvironment, but it doesn’t know t
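
The "Coprocessor has Observer" composition the document argues for corresponds roughly to the
sketch below against the HBase 2.0 coprocessor interfaces; this is an illustrative reading of
the design, not text from the committed doc.

// Hedged sketch: a coprocessor *has* an observer instead of *being* one.
import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class ExampleRegionCoprocessor implements RegionCoprocessor {

  // The observer carrying the hooks is held by composition.
  private final RegionObserver observer = new RegionObserver() {
    @Override
    public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx,
        Get get, List<Cell> results) throws IOException {
      // custom logic hooked into the read path
    }
  };

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(observer);
  }
}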

hbase git commit: HBASE-17730 Migration to 2.0 for coprocessors. Upload AsciiDoc for coprocessor design improvements made in HBASE-17732.

2018-04-03 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 89e977dbc -> e6f567e31


HBASE-17730 Migration to 2.0 for coprocessors. Upload AsciiDoc for coprocessor 
design improvements made in HBASE-17732.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e6f567e3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e6f567e3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e6f567e3

Branch: refs/heads/branch-2.0
Commit: e6f567e317af44f76c5d7ded3fcf5fca3b21fd29
Parents: 89e977d
Author: Apekshit Sharma 
Authored: Tue Apr 3 15:45:53 2018 -0700
Committer: Apekshit Sharma 
Committed: Tue Apr 3 15:50:26 2018 -0700

--
 ...ad_of_inheritance-HBASE-17732-2017_09_27.pdf | Bin 161724 -> 0 bytes
 ...tion_instead_of_inheritance-HBASE-17732.adoc | 236 +++
 2 files changed, 236 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e6f567e3/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
--
diff --git 
a/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
 
b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
deleted file mode 100644
index 30a6d54..000
Binary files 
a/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6f567e3/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc
--
diff --git 
a/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc
 
b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc
new file mode 100644
index 000..a61b37b
--- /dev/null
+++ 
b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc
@@ -0,0 +1,236 @@
+= Coprocessor Design Improvements 
(link:https://issues.apache.org/jira/browse/HBASE-17732[HBASE-17732])
+
+Author: Apekshit Sharma
+
+== Introduction
+
+This doc explains the current design of the Coprocessor feature in brief, a few issues I noticed, and
+suggestions on how to fix them & further improve the overall design.
+
+*TL;DR* +
+We are moving from
+
+* Observer *is* Coprocessor
+* FooService *is* CoprocessorService
+
+To
+
+* Coprocessor *has* Observer
+* Coprocessor *has* Service
+
+See code example in <>.
+
+== Terminology
+
+hooks = functions in observers. Named because third parties use these functions to “hook up” custom
+logic to internal code paths.
+
+[[background]]
+== Background
+
+Coprocessors are well link:http://hbase.apache.org/book.html#cp[documented in 
the refguide].
+
+Here we give a little background information on involved classes, their 
responsibilities, and
+relationship to each other.
+
+* Main classes
+** Coprocessor (interface)
+*** All *Observer* interfaces derive from Coprocessor interface.
+ Coprocessor Interface is a _Marker Interface_. It just has start/stop methods and enums for
+stages in the Coprocessor Lifecycle.
+** http://hbase.apache.org/book.html#_observer_coprocessors[Observers] 
(interface)
+*** Contain hooks which third-party programs can override to inject 
functionality in various
+internal code paths. For example, preCreateTable(...) will be called just before any table is created.
+*** Current set of observers: _MasterObserver, RegionObserver, 
RegionServerObserver, WALObserver,
+EndpointObserver, BulkLoadObserver._
+** CoprocessorEnvironment (interface)
+*** Encapsulates a coprocessor instance and other information like versions, 
priority, etc.
+*** Coprocessor implementations use it to get access to tables.
+*** Four main implementations: _MasterEnvironment, RegionEnvironment, 
RegionServerEnvironment,
+WALEnvironment._
+** CoprocessorHost (abstract class)
+*** Responsible for loading coprocessors
+*** Four concrete sub-classes: MasterCoprocessorHost, RegionCoprocessorHost,
+RegionServerCoprocessorHost, WALCoprocessorHost
+*** Each host is tied to its corresponding environment type using the template argument ‘E’ (see the sketch just below).
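A minimal illustration of that static tie between host and environment (simplified names, not the actual HBase classes):

// Simplified sketch: the generic parameter fixes which environment type a host can manage.
import java.util.ArrayList;
import java.util.List;

interface SketchEnvironment {
  int getPriority();
}

abstract class SketchCoprocessorHost<E extends SketchEnvironment> {
  protected final List<E> environments = new ArrayList<>();

  void register(E env) {
    environments.add(env);
  }
}

class SketchMasterEnvironment implements SketchEnvironment {
  @Override
  public int getPriority() {
    return 0;
  }
}

// Statically tied: this host can only ever hold SketchMasterEnvironment instances.
class SketchMasterCoprocessorHost extends SketchCoprocessorHost<SketchMasterEnvironment> {
}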
+
+== Problems
+
+* CoprocessorEnvironment has `Coprocessor getInstance()`. Since Observer types 
which can be
+handled by an environment are not statically tied to it, coprocessor hosts 
(which are statically
+tied to Environment) don’t know which kind of coprocessors are relevant to 
them, i.e.
+MasterCoprocessorHost is tied to MasterEnvironment, but it doesn’t kn

hbase git commit: HBASE-17730 Migration to 2.0 for coprocessors. Upload AsciiDoc for coprocessor design improvements made in HBASE-17732.

2018-04-03 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master d9e64aa6b -> 6318e3bf5


HBASE-17730 Migration to 2.0 for coprocessors. Upload AsciiDoc for coprocessor 
design improvements made in HBASE-17732.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6318e3bf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6318e3bf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6318e3bf

Branch: refs/heads/master
Commit: 6318e3bf5ff3108388d6d9506f40031e7c3684f5
Parents: d9e64aa
Author: Apekshit Sharma 
Authored: Tue Apr 3 15:45:53 2018 -0700
Committer: Apekshit Sharma 
Committed: Tue Apr 3 15:47:48 2018 -0700

--
 ...ad_of_inheritance-HBASE-17732-2017_09_27.pdf | Bin 161724 -> 0 bytes
 ...tion_instead_of_inheritance-HBASE-17732.adoc | 236 +++
 2 files changed, 236 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6318e3bf/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
--
diff --git 
a/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
 
b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
deleted file mode 100644
index 30a6d54..000
Binary files 
a/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732-2017_09_27.pdf
 and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hbase/blob/6318e3bf/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc
--
diff --git 
a/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc
 
b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc
new file mode 100644
index 000..a61b37b
--- /dev/null
+++ 
b/dev-support/design-docs/Coprocessor_Design_Improvements-Use_composition_instead_of_inheritance-HBASE-17732.adoc
@@ -0,0 +1,236 @@
+= Coprocessor Design Improvements 
(link:https://issues.apache.org/jira/browse/HBASE-17732[HBASE-17732])
+
+Author: Apekshit Sharma
+
+== Introduction
+
+This doc explains the current design of the Coprocessor feature in brief, a few issues I noticed, and
+suggestions on how to fix them & further improve the overall design.
+
+*TL;DR* +
+We are moving from
+
+* Observer *is* Coprocessor
+* FooService *is* CoprocessorService
+
+To
+
+* Coprocessor *has* Observer
+* Coprocessor *has* Service
+
+See code example in <>.
+
+== Terminology
+
+hooks = functions in observers. Named because third parties use these functions to “hook up” custom
+logic to internal code paths.
+
+[[background]]
+== Background
+
+Coprocessors are well link:http://hbase.apache.org/book.html#cp[documented in 
the refguide].
+
+Here we give a little background information on involved classes, their 
responsibilities, and
+relationship to each other.
+
+* Main classes
+** Coprocessor (interface)
+*** All *Observer* interfaces derive from Coprocessor interface.
+ Coprocessor Interface is a _Marker Interface_. It just has start/stop methods and enums for
+stages in the Coprocessor Lifecycle.
+** http://hbase.apache.org/book.html#_observer_coprocessors[Observers] 
(interface)
+*** Contain hooks which third-party programs can override to inject 
functionality in various
+internal code paths. For example, preCreateTable(...) will be called just before any table is created.
+*** Current set of observers: _MasterObserver, RegionObserver, 
RegionServerObserver, WALObserver,
+EndpointObserver, BulkLoadObserver._
+** CoprocessorEnvironment (interface)
+*** Encapsulates a coprocessor instance and other information like versions, 
priority, etc.
+*** Coprocessor implementations use it to get access to tables.
+*** Four main implementations: _MasterEnvironment, RegionEnvironment, 
RegionServerEnvironment,
+WALEnvironment._
+** CoprocessorHost (abstract class)
+*** Responsible for loading coprocessors
+*** Four concrete sub-classes: MasterCoprocessorHost, RegionCoprocessorHost,
+RegionServerCoprocessorHost, WALCoprocessorHost
+*** Each host is tied to its corresponding environment type using the template argument ‘E’.
+
+== Problems
+
+* CoprocessorEnvironment has `Coprocessor getInstance()`. Since Observer types 
which can be
+handled by an environment are not statically tied to it, coprocessor hosts 
(which are statically
+tied to Environment) don’t know which kind of coprocessors are relevant to 
them, i.e.
+MasterCoprocessorHost is tied to MasterEnvironment, but it doesn’t know that 

[1/3] hbase git commit: Amend HBASE-20322 CME in StoreScanner causes region server crash

2018-04-03 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 875a12de0 -> 9ced0c936
  refs/heads/branch-1.3 9eaafe1ee -> 0db4bd3aa
  refs/heads/branch-1.4 7e2d7edbc -> 98c6f8a3f


Amend HBASE-20322 CME in StoreScanner causes region server crash

memStoreScanners is immutable so create a new list for closing.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0db4bd3a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0db4bd3a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0db4bd3a

Branch: refs/heads/branch-1.3
Commit: 0db4bd3aa71ecd7770430c428cf01d238ec06fa7
Parents: 9eaafe1
Author: Thiruvel Thirumoolan 
Authored: Fri Mar 30 13:12:53 2018 -0700
Committer: Andrew Purtell 
Committed: Tue Apr 3 13:20:47 2018 -0700

--
 .../java/org/apache/hadoop/hbase/regionserver/StoreScanner.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0db4bd3a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 1b2361d..da2f5ae 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -823,7 +823,8 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
 try {
   if (this.closing) {
 // Lets close scanners created by caller, since close() won't notice 
this.
-clearAndClose(memStoreScanners);
+// memStoreScanners is immutable, so lets create a new list.
+clearAndClose(new ArrayList<>(memStoreScanners));
 return;
   }
   flushed = true;
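
As an aside on the pattern in this fix, here is a self-contained sketch; the class and method names are illustrative, not the actual HBase code. Both close() and updateReaders() can run during a region flush, so both take the same lock, and the caller-supplied (possibly immutable) list is copied before a helper clears it.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

class ScannerSetSketch {
  private final ReentrantLock flushLock = new ReentrantLock();
  private volatile boolean closing = false;

  void close() {
    flushLock.lock();
    try {
      closing = true;
      // ... release scanners owned by this object ...
    } finally {
      flushLock.unlock();
    }
  }

  void updateReaders(List<AutoCloseable> memStoreScanners) throws Exception {
    flushLock.lock();
    try {
      if (closing) {
        // The caller's list may be immutable, so copy it before handing it to a
        // helper that clears the list after closing its entries.
        clearAndClose(new ArrayList<>(memStoreScanners));
        return;
      }
      // ... swap in scanners for the newly flushed files ...
    } finally {
      flushLock.unlock();
    }
  }

  private static void clearAndClose(List<AutoCloseable> scanners) throws Exception {
    for (AutoCloseable s : scanners) {
      s.close();
    }
    scanners.clear(); // would throw UnsupportedOperationException on an immutable list
  }

  public static void main(String[] args) throws Exception {
    ScannerSetSketch scanners = new ScannerSetSketch();
    scanners.close();
    // List.of(...) is immutable; without the defensive copy above, clear() would fail.
    scanners.updateReaders(List.of(() -> System.out.println("scanner closed")));
  }
}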



[2/3] hbase git commit: Amend HBASE-20322 CME in StoreScanner causes region server crash

2018-04-03 Thread apurtell
Amend HBASE-20322 CME in StoreScanner causes region server crash

memStoreScanners is immutable so create a new list for closing.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/98c6f8a3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/98c6f8a3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/98c6f8a3

Branch: refs/heads/branch-1.4
Commit: 98c6f8a3f6a48610d825351717d82053db045b3a
Parents: 7e2d7ed
Author: Thiruvel Thirumoolan 
Authored: Fri Mar 30 13:12:53 2018 -0700
Committer: Andrew Purtell 
Committed: Tue Apr 3 13:22:39 2018 -0700

--
 .../java/org/apache/hadoop/hbase/regionserver/StoreScanner.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/98c6f8a3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 0280906..678308b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -859,7 +859,8 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
 try {
   if (this.closing) {
 // Lets close scanners created by caller, since close() won't notice 
this.
-clearAndClose(memStoreScanners);
+// memStoreScanners is immutable, so lets create a new list.
+clearAndClose(new ArrayList<>(memStoreScanners));
 return;
   }
   flushed = true;



[3/3] hbase git commit: Amend HBASE-20322 CME in StoreScanner causes region server crash

2018-04-03 Thread apurtell
Amend HBASE-20322 CME in StoreScanner causes region server crash

memStoreScanners is immutable so create a new list for closing.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9ced0c93
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9ced0c93
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9ced0c93

Branch: refs/heads/branch-1
Commit: 9ced0c936f4cdea0580482278c24ebecb7c18bb0
Parents: 875a12d
Author: Thiruvel Thirumoolan 
Authored: Fri Mar 30 13:12:53 2018 -0700
Committer: Andrew Purtell 
Committed: Tue Apr 3 13:22:43 2018 -0700

--
 .../java/org/apache/hadoop/hbase/regionserver/StoreScanner.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9ced0c93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 0280906..678308b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -859,7 +859,8 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
 try {
   if (this.closing) {
 // Lets close scanners created by caller, since close() won't notice 
this.
-clearAndClose(memStoreScanners);
+// memStoreScanners is immutable, so lets create a new list.
+clearAndClose(new ArrayList<>(memStoreScanners));
 return;
   }
   flushed = true;



hbase git commit: HBASE-20299 Update MOB in hbase refguide

2018-04-03 Thread huaxiangsun
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 586f1ea0e -> 89e977dbc


HBASE-20299 Update MOB in hbase refguide


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/89e977db
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/89e977db
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/89e977db

Branch: refs/heads/branch-2.0
Commit: 89e977dbc3cfd4e61f2e5fc26d7b4d5e8a7cc0b1
Parents: 586f1ea
Author: Huaxiang Sun 
Authored: Tue Apr 3 13:12:45 2018 -0700
Committer: Huaxiang Sun 
Committed: Tue Apr 3 13:29:40 2018 -0700

--
 src/main/asciidoc/_chapters/hbase_mob.adoc | 42 +
 1 file changed, 42 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/89e977db/src/main/asciidoc/_chapters/hbase_mob.adoc
--
diff --git a/src/main/asciidoc/_chapters/hbase_mob.adoc 
b/src/main/asciidoc/_chapters/hbase_mob.adoc
index 5da0343..9730529 100644
--- a/src/main/asciidoc/_chapters/hbase_mob.adoc
+++ b/src/main/asciidoc/_chapters/hbase_mob.adoc
@@ -46,6 +46,12 @@ configure the MOB file reader's cache settings for each 
RegionServer (see
 Client code does not need to change to take advantage of HBase MOB support. The
 feature is transparent to the client.
 
+MOB compaction
+
+MOB data is flushed into MOB files after MemStore flush. There will be lots of 
MOB files
+after some time. To reduce MOB file count, there is a periodic task which 
compacts
+small MOB files into a large one (MOB compaction).
+
 === Configuring Columns for MOB
 
 You can configure columns to support MOB during table creation or alteration,
@@ -75,6 +81,42 @@ hcd.setMobThreshold(102400L);
 
 
 
+=== Configure MOB Compaction Policy
+
+By default, MOB files for one specific day are compacted into one large MOB 
file.
+To reduce MOB file count more, there are other MOB Compaction policies 
supported.
+
+daily policy  - compact MOB Files for one day into one large MOB file (default 
policy)
+weekly policy - compact MOB Files for one week into one large MOB file
+monthly policy - compact MOB Files for one month into one large MOB file
+
+.Configure MOB compaction policy Using HBase Shell
+
+
+hbase> create 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'daily'}
+hbase> create 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'weekly'}
+hbase> create 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'monthly'}
+
+hbase> alter 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'daily'}
+hbase> alter 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'weekly'}
+hbase> alter 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'monthly'}
+
+
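For users who prefer the Java client API over the shell, a rough equivalent is sketched below. It assumes the 2.x ColumnFamilyDescriptorBuilder methods setMobEnabled, setMobThreshold and setMobCompactPartitionPolicy; check the client javadocs of your release for the exact signatures.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobPolicyExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f1"))
          .setMobEnabled(true)                                            // IS_MOB => true
          .setMobThreshold(102400L)                                       // MOB_THRESHOLD => 102400
          .setMobCompactPartitionPolicy(MobCompactPartitionPolicy.WEEKLY) // weekly policy
          .build();
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
          .setColumnFamily(cf)
          .build());
    }
  }
}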
+
+=== Configure MOB Compaction mergeable threshold
+
+If the size of a mob file is less than this value, it's regarded as a small 
file and needs to
+be merged in mob compaction. The default value is 1280MB.
+
+
+[source,xml]
+
+
+<property>
+  <name>hbase.mob.compaction.mergeable.threshold</name>
+  <value>100</value>
+</property>
+
+
+
 
 === Testing MOB
 



hbase git commit: HBASE-20299 Update MOB in hbase refguide

2018-04-03 Thread huaxiangsun
Repository: hbase
Updated Branches:
  refs/heads/branch-2 8df45d33c -> 40924bb4a


HBASE-20299 Update MOB in hbase refguide


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/40924bb4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/40924bb4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/40924bb4

Branch: refs/heads/branch-2
Commit: 40924bb4afc9f0780783f8aabf58fcb92c188cea
Parents: 8df45d3
Author: Huaxiang Sun 
Authored: Tue Apr 3 13:12:45 2018 -0700
Committer: Huaxiang Sun 
Committed: Tue Apr 3 13:26:12 2018 -0700

--
 src/main/asciidoc/_chapters/hbase_mob.adoc | 42 +
 1 file changed, 42 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/40924bb4/src/main/asciidoc/_chapters/hbase_mob.adoc
--
diff --git a/src/main/asciidoc/_chapters/hbase_mob.adoc 
b/src/main/asciidoc/_chapters/hbase_mob.adoc
index 5da0343..9730529 100644
--- a/src/main/asciidoc/_chapters/hbase_mob.adoc
+++ b/src/main/asciidoc/_chapters/hbase_mob.adoc
@@ -46,6 +46,12 @@ configure the MOB file reader's cache settings for each 
RegionServer (see
 Client code does not need to change to take advantage of HBase MOB support. The
 feature is transparent to the client.
 
+MOB compaction
+
+MOB data is flushed into MOB files after MemStore flush. There will be lots of 
MOB files
+after some time. To reduce MOB file count, there is a periodic task which 
compacts
+small MOB files into a large one (MOB compaction).
+
 === Configuring Columns for MOB
 
 You can configure columns to support MOB during table creation or alteration,
@@ -75,6 +81,42 @@ hcd.setMobThreshold(102400L);
 
 
 
+=== Configure MOB Compaction Policy
+
+By default, MOB files for one specific day are compacted into one large MOB 
file.
+To reduce MOB file count more, there are other MOB Compaction policies 
supported.
+
+daily policy  - compact MOB Files for one day into one large MOB file (default 
policy)
+weekly policy - compact MOB Files for one week into one large MOB file
+monthly policy - compact MOB Files for one month into one large MOB file
+
+.Configure MOB compaction policy Using HBase Shell
+
+
+hbase> create 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'daily'}
+hbase> create 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'weekly'}
+hbase> create 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'monthly'}
+
+hbase> alter 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'daily'}
+hbase> alter 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'weekly'}
+hbase> alter 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'monthly'}
+
+
+
+=== Configure MOB Compaction mergeable threshold
+
+If the size of a mob file is less than this value, it's regarded as a small 
file and needs to
+be merged in mob compaction. The default value is 1280MB.
+
+
+[source,xml]
+
+
+<property>
+  <name>hbase.mob.compaction.mergeable.threshold</name>
+  <value>100</value>
+</property>
+
+
+
 
 === Testing MOB
 



hbase git commit: HBASE-20299 Update MOB in hbase refguide

2018-04-03 Thread huaxiangsun
Repository: hbase
Updated Branches:
  refs/heads/master f92fb0aff -> d9e64aa6b


HBASE-20299 Update MOB in hbase refguide


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d9e64aa6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d9e64aa6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d9e64aa6

Branch: refs/heads/master
Commit: d9e64aa6b83fb6ed5230b0fde86fdf8d8732e1a4
Parents: f92fb0a
Author: Huaxiang Sun 
Authored: Tue Apr 3 13:12:45 2018 -0700
Committer: Huaxiang Sun 
Committed: Tue Apr 3 13:13:16 2018 -0700

--
 src/main/asciidoc/_chapters/hbase_mob.adoc | 42 +
 1 file changed, 42 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d9e64aa6/src/main/asciidoc/_chapters/hbase_mob.adoc
--
diff --git a/src/main/asciidoc/_chapters/hbase_mob.adoc 
b/src/main/asciidoc/_chapters/hbase_mob.adoc
index 5da0343..9730529 100644
--- a/src/main/asciidoc/_chapters/hbase_mob.adoc
+++ b/src/main/asciidoc/_chapters/hbase_mob.adoc
@@ -46,6 +46,12 @@ configure the MOB file reader's cache settings for each 
RegionServer (see
 Client code does not need to change to take advantage of HBase MOB support. The
 feature is transparent to the client.
 
+MOB compaction
+
+MOB data is flushed into MOB files after MemStore flush. There will be lots of 
MOB files
+after some time. To reduce MOB file count, there is a periodic task which 
compacts
+small MOB files into a large one (MOB compaction).
+
 === Configuring Columns for MOB
 
 You can configure columns to support MOB during table creation or alteration,
@@ -75,6 +81,42 @@ hcd.setMobThreshold(102400L);
 
 
 
+=== Configure MOB Compaction Policy
+
+By default, MOB files for one specific day are compacted into one large MOB 
file.
+To reduce MOB file count more, there are other MOB Compaction policies 
supported.
+
+daily policy  - compact MOB Files for one day into one large MOB file (default 
policy)
+weekly policy - compact MOB Files for one week into one large MOB file
+monthly policy - compact MOB Files for one month into one large MOB file
+
+.Configure MOB compaction policy Using HBase Shell
+
+
+hbase> create 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'daily'}
+hbase> create 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'weekly'}
+hbase> create 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'monthly'}
+
+hbase> alter 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'daily'}
+hbase> alter 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'weekly'}
+hbase> alter 't1', {NAME => 'f1', IS_MOB => true, MOB_THRESHOLD => 102400, 
MOB_COMPACT_PARTITION_POLICY => 'monthly'}
+
+
+
+=== Configure MOB Compaction mergeable threshold
+
+If the size of a mob file is less than this value, it's regarded as a small 
file and needs to
+be merged in mob compaction. The default value is 1280MB.
+
+
+[source,xml]
+
+
+<property>
+  <name>hbase.mob.compaction.mergeable.threshold</name>
+  <value>100</value>
+</property>
+
+
+
 
 === Testing MOB
 



[hbase] Git Push Summary

2018-04-03 Thread toffer
Repository: hbase
Updated Tags:  refs/tags/rel/1.3.2 [created] 1f9620f96


hbase git commit: HBASE-20224 Web UI is broken in standalone mode - addendum for hbase-spark and hbase-spark-it modules

2018-04-03 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master bf29a1fee -> f92fb0aff


HBASE-20224 Web UI is broken in standalone mode - addendum for hbase-spark and 
hbase-spark-it modules


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f92fb0af
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f92fb0af
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f92fb0af

Branch: refs/heads/master
Commit: f92fb0affd81094e4c3a1f1f056a06984a34535b
Parents: bf29a1f
Author: tedyu 
Authored: Tue Apr 3 11:43:53 2018 -0700
Committer: tedyu 
Committed: Tue Apr 3 11:43:53 2018 -0700

--
 .../src/test/resources/hbase-site.xml   | 39 
 hbase-spark/src/test/resources/hbase-site.xml   |  7 
 2 files changed, 46 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f92fb0af/hbase-spark-it/src/test/resources/hbase-site.xml
--
diff --git a/hbase-spark-it/src/test/resources/hbase-site.xml 
b/hbase-spark-it/src/test/resources/hbase-site.xml
new file mode 100644
index 000..858d428
--- /dev/null
+++ b/hbase-spark-it/src/test/resources/hbase-site.xml
@@ -0,0 +1,39 @@
+
+
+
+
+  
+hbase.defaults.for.version.skip
+true
+  
+  
+hbase.hconnection.threads.keepalivetime
+3
+  
+  
+hbase.localcluster.assign.random.ports
+true
+
+  Assign random ports to master and RS info server (UI).
+
+  
+

http://git-wip-us.apache.org/repos/asf/hbase/blob/f92fb0af/hbase-spark/src/test/resources/hbase-site.xml
--
diff --git a/hbase-spark/src/test/resources/hbase-site.xml 
b/hbase-spark/src/test/resources/hbase-site.xml
index b3fb0d9..b354a2b 100644
--- a/hbase-spark/src/test/resources/hbase-site.xml
+++ b/hbase-spark/src/test/resources/hbase-site.xml
@@ -154,4 +154,11 @@
   Enable replay sanity checks on procedure tests.
 
   
+  
+hbase.localcluster.assign.random.ports
+true
+
+  Assign random ports to master and RS info server (UI).
+
+  
 



[1/3] hbase git commit: HBASE-20322 CME in StoreScanner causes region server crash

2018-04-03 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 18c54b02b -> 875a12de0
  refs/heads/branch-1.3 988f6d1f8 -> 9eaafe1ee
  refs/heads/branch-1.4 52ea97986 -> 7e2d7edbc


HBASE-20322 CME in StoreScanner causes region server crash

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9eaafe1e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9eaafe1e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9eaafe1e

Branch: refs/heads/branch-1.3
Commit: 9eaafe1ee91bc12d8933e17ad6cab7af8251c0f5
Parents: 988f6d1
Author: Thiruvel Thirumoolan 
Authored: Fri Mar 30 13:12:53 2018 -0700
Committer: Andrew Purtell 
Committed: Mon Apr 2 19:36:03 2018 -0700

--
 .../hadoop/hbase/regionserver/StoreScanner.java | 35 +++-
 1 file changed, 26 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9eaafe1e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index c95151b..1b2361d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -443,17 +443,29 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
 
   @Override
   public void close() {
-if (this.closing) return;
-this.closing = true;
-clearAndClose(scannersForDelayedClose);
-clearAndClose(memStoreScannersAfterFlush);
-// clear them at any case. In case scanner.next() was never called
-// and there were some lease expiry we need to close all the scanners
-// on the flushed files which are open
-clearAndClose(flushedstoreFileScanners);
+if (this.closing) {
+  return;
+}
+// Lets remove from observers as early as possible
 // Under test, we dont have a this.store
-if (this.store != null)
+if (this.store != null) {
   this.store.deleteChangedReaderObserver(this);
+}
+// There is a race condition between close() and updateReaders(), during 
region flush. So,
+// even though its just close, we will still acquire the flush lock, as a
+// ConcurrentModificationException will abort the regionserver.
+flushLock.lock();
+try {
+  this.closing = true;
+  clearAndClose(scannersForDelayedClose);
+  clearAndClose(memStoreScannersAfterFlush);
+  // clear them at any case. In case scanner.next() was never called
+  // and there were some lease expiry we need to close all the scanners
+  // on the flushed files which are open
+  clearAndClose(flushedstoreFileScanners);
+} finally {
+  flushLock.unlock();
+}
 if (this.heap != null)
   this.heap.close();
 this.heap = null; // CLOSED!
@@ -809,6 +821,11 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
 }
 flushLock.lock();
 try {
+  if (this.closing) {
+// Lets close scanners created by caller, since close() won't notice 
this.
+clearAndClose(memStoreScanners);
+return;
+  }
   flushed = true;
   final boolean isCompaction = false;
   boolean usePread = get || scanUsePread;



[3/3] hbase git commit: HBASE-20322 CME in StoreScanner causes region server crash

2018-04-03 Thread apurtell
HBASE-20322 CME in StoreScanner causes region server crash

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/875a12de
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/875a12de
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/875a12de

Branch: refs/heads/branch-1
Commit: 875a12de054e8867fc5f70f470cf17251e862ee3
Parents: 18c54b0
Author: Thiruvel Thirumoolan 
Authored: Fri Mar 30 13:21:26 2018 -0700
Committer: Andrew Purtell 
Committed: Mon Apr 2 19:36:16 2018 -0700

--
 .../hadoop/hbase/regionserver/StoreScanner.java | 35 +++-
 1 file changed, 26 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/875a12de/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index bb761ba..0280906 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -462,17 +462,29 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
 
   @Override
   public void close() {
-if (this.closing) return;
-this.closing = true;
-clearAndClose(scannersForDelayedClose);
-clearAndClose(memStoreScannersAfterFlush);
-// clear them at any case. In case scanner.next() was never called
-// and there were some lease expiry we need to close all the scanners
-// on the flushed files which are open
-clearAndClose(flushedstoreFileScanners);
+if (this.closing) {
+  return;
+}
+// Lets remove from observers as early as possible
 // Under test, we dont have a this.store
-if (this.store != null)
+if (this.store != null) {
   this.store.deleteChangedReaderObserver(this);
+}
+// There is a race condition between close() and updateReaders(), during 
region flush. So,
+// even though its just close, we will still acquire the flush lock, as a
+// ConcurrentModificationException will abort the regionserver.
+flushLock.lock();
+try {
+  this.closing = true;
+  clearAndClose(scannersForDelayedClose);
+  clearAndClose(memStoreScannersAfterFlush);
+  // clear them at any case. In case scanner.next() was never called
+  // and there were some lease expiry we need to close all the scanners
+  // on the flushed files which are open
+  clearAndClose(flushedstoreFileScanners);
+} finally {
+  flushLock.unlock();
+}
 if (this.heap != null)
   this.heap.close();
 this.heap = null; // CLOSED!
@@ -845,6 +857,11 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
 }
 flushLock.lock();
 try {
+  if (this.closing) {
+// Lets close scanners created by caller, since close() won't notice 
this.
+clearAndClose(memStoreScanners);
+return;
+  }
   flushed = true;
   final boolean isCompaction = false;
   boolean usePread = get || scanUsePread;



[2/3] hbase git commit: HBASE-20322 CME in StoreScanner causes region server crash

2018-04-03 Thread apurtell
HBASE-20322 CME in StoreScanner causes region server crash

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7e2d7edb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7e2d7edb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7e2d7edb

Branch: refs/heads/branch-1.4
Commit: 7e2d7edbcc7e01880bc6899703807472f6939106
Parents: 52ea979
Author: Thiruvel Thirumoolan 
Authored: Fri Mar 30 13:21:26 2018 -0700
Committer: Andrew Purtell 
Committed: Mon Apr 2 19:36:12 2018 -0700

--
 .../hadoop/hbase/regionserver/StoreScanner.java | 35 +++-
 1 file changed, 26 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7e2d7edb/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index bb761ba..0280906 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -462,17 +462,29 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
 
   @Override
   public void close() {
-if (this.closing) return;
-this.closing = true;
-clearAndClose(scannersForDelayedClose);
-clearAndClose(memStoreScannersAfterFlush);
-// clear them at any case. In case scanner.next() was never called
-// and there were some lease expiry we need to close all the scanners
-// on the flushed files which are open
-clearAndClose(flushedstoreFileScanners);
+if (this.closing) {
+  return;
+}
+// Lets remove from observers as early as possible
 // Under test, we dont have a this.store
-if (this.store != null)
+if (this.store != null) {
   this.store.deleteChangedReaderObserver(this);
+}
+// There is a race condition between close() and updateReaders(), during 
region flush. So,
+// even though its just close, we will still acquire the flush lock, as a
+// ConcurrentModificationException will abort the regionserver.
+flushLock.lock();
+try {
+  this.closing = true;
+  clearAndClose(scannersForDelayedClose);
+  clearAndClose(memStoreScannersAfterFlush);
+  // clear them at any case. In case scanner.next() was never called
+  // and there were some lease expiry we need to close all the scanners
+  // on the flushed files which are open
+  clearAndClose(flushedstoreFileScanners);
+} finally {
+  flushLock.unlock();
+}
 if (this.heap != null)
   this.heap.close();
 this.heap = null; // CLOSED!
@@ -845,6 +857,11 @@ public class StoreScanner extends 
NonReversedNonLazyKeyValueScanner
 }
 flushLock.lock();
 try {
+  if (this.closing) {
+// Lets close scanners created by caller, since close() won't notice 
this.
+clearAndClose(memStoreScanners);
+return;
+  }
   flushed = true;
   final boolean isCompaction = false;
   boolean usePread = get || scanUsePread;



hbase git commit: HBASE-20259 Doc configs for in-memory-compaction and add detail to ... ADDENDUM to fix compile

2018-04-03 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 80724e9ba -> 586f1ea0e


HBASE-20259 Doc configs for in-memory-compaction and add detail to ... ADDENDUM 
to fix compile


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/586f1ea0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/586f1ea0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/586f1ea0

Branch: refs/heads/branch-2.0
Commit: 586f1ea0e42d82ac40278af38b2d08655f1a8fb2
Parents: 80724e9
Author: Michael Stack 
Authored: Tue Apr 3 09:01:51 2018 -0700
Committer: Michael Stack 
Committed: Tue Apr 3 09:01:51 2018 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/586f1ea0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 3a7218e..351a847 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -340,7 +340,6 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
 this.getHRegion().getRegionServicesForStores(), 
inMemoryCompaction});
 }
 return ms;
->>> c88ca54c16... HBASE-20259 Doc configs for in-memory-compaction and add 
detail to
   }
 
   /**



hbase git commit: HBASE-20329 Add note for operators to refguide on AsyncFSWAL

2018-04-03 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 219625233 -> bf29a1fee


HBASE-20329 Add note for operators to refguide on AsyncFSWAL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bf29a1fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bf29a1fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bf29a1fe

Branch: refs/heads/master
Commit: bf29a1fee93c9a681a3b8f91b86ea3db528f53aa
Parents: 2196252
Author: Michael Stack 
Authored: Mon Apr 2 15:35:59 2018 -0700
Committer: Michael Stack 
Committed: Tue Apr 3 08:51:59 2018 -0700

--
 src/main/asciidoc/_chapters/architecture.adoc | 45 --
 1 file changed, 41 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bf29a1fe/src/main/asciidoc/_chapters/architecture.adoc
--
diff --git a/src/main/asciidoc/_chapters/architecture.adoc 
b/src/main/asciidoc/_chapters/architecture.adoc
index f35e118..1f4b77c 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -951,8 +951,11 @@ However, if a RegionServer crashes or becomes unavailable 
before the MemStore is
 If writing to the WAL fails, the entire operation to modify the data fails.
 
 HBase uses an implementation of the 
link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/wal/WAL.html[WAL]
 interface.
-Usually, there is only one instance of a WAL per RegionServer.
-The RegionServer records Puts and Deletes to it, before recording them to the 
<> for the affected <>.
+Usually, there is only one instance of a WAL per RegionServer. An exception
+is the RegionServer that is carrying _hbase:meta_; the _meta_ table gets its
+own dedicated WAL.
+The RegionServer records Puts and Deletes to its WAL, before recording
+these Mutations to the <> for the affected <>.
 
 .The HLog
 [NOTE]
@@ -962,9 +965,30 @@ In 0.94, HLog was the name of the implementation of the 
WAL.
 You will likely find references to the HLog in documentation tailored to these 
older versions.
 
 
-The WAL resides in HDFS in the _/hbase/WALs/_ directory (prior to HBase 0.94, 
they were stored in _/hbase/.logs/_), with subdirectories per region.
+The WAL resides in HDFS in the _/hbase/WALs/_ directory, with subdirectories 
per region.
+
+For more general information about the concept of write ahead logs, see the 
Wikipedia
+link:http://en.wikipedia.org/wiki/Write-ahead_logging[Write-Ahead Log] article.
+
+
+[[wal.providers]]
+ WAL Providers
+In HBase, there are a number of WAL implementations (or 'Providers'). Each is known
+by a short name label (which unfortunately is not always descriptive). You set the provider in
+_hbase-site.xml_, passing the WAL provider short-name as the value of the
+_hbase.wal.provider_ property (set the provider for _hbase:meta_ using the
+_hbase.wal.meta_provider_ property).
+
+ * _asyncfs_: The *default*. New since hbase-2.0.0 (HBASE-15536, HBASE-14790). This _AsyncFSWAL_ provider, as it identifies itself in RegionServer logs, is built on a new non-blocking dfsclient implementation. It is currently resident in the hbase codebase but the intent is to move it back up into HDFS itself. WAL edits are written concurrently ("fan-out" style) to each of the WAL-block replicas on each DataNode rather than in a chained pipeline as the default client does. Latencies should be better. See link:https://www.slideshare.net/HBaseCon/apache-hbase-improvements-and-practices-at-xiaomi[Apache HBase Improvements and Practices at Xiaomi] at slide 14 onward for more detail on the implementation.
+ * _filesystem_: This was the default in hbase-1.x releases. It is built on the blocking _DFSClient_ and writes to replicas in classic _DFSClient_ pipeline mode. In logs it identifies as _FSHLog_ or _FSHLogProvider_.
+ * _multiwal_: This provider is made of multiple instances of _asyncfs_ or _filesystem_. See the next section for more on _multiwal_.
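As a concrete, illustrative fragment for the properties mentioned above (the property names come from the text; the values are just one possible choice), an _hbase-site.xml_ entry selecting a provider could look like:

<property>
  <name>hbase.wal.provider</name>
  <value>filesystem</value>
</property>
<property>
  <name>hbase.wal.meta_provider</name>
  <value>filesystem</value>
</property>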
+
+Look for lines like the one below in the RegionServer log to see which provider is in place (the example below shows the default AsyncFSWALProvider):
+
+
+2018-04-02 13:22:37,983 INFO  [regionserver/ve0528:16020] wal.WALFactory: 
Instantiating WALProvider of type class 
org.apache.hadoop.hbase.wal.AsyncFSWALProvider
+
 
-For more general information about the concept of write ahead logs, see the 
Wikipedia link:http://en.wikipedia.org/wiki/Write-ahead_logging[Write-Ahead 
Log] article.
 
  MultiWAL
 With a single WAL per RegionServer, the RegionServer must write to the WAL 
serially, because HDFS files must be sequential. This causes the WAL to be a 
performance bottleneck.
@@ -1219,6 +1243,18 @@ A possible downside to WAL compression is that we lose 
more data from the last b
 mid-write.

hbase git commit: HBASE-20224 Web UI is broken in standalone mode - addendum for hbase-endpoint and hbase-rsgroup modules

2018-04-03 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 1c898cbbe -> 8df45d33c


HBASE-20224 Web UI is broken in standalone mode - addendum for hbase-endpoint 
and hbase-rsgroup modules


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8df45d33
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8df45d33
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8df45d33

Branch: refs/heads/branch-2
Commit: 8df45d33cd0c8250998db719478dcb36c9e1ea4b
Parents: 1c898cb
Author: tedyu 
Authored: Tue Apr 3 08:34:17 2018 -0700
Committer: tedyu 
Committed: Tue Apr 3 08:34:17 2018 -0700

--
 .../src/test/resources/hbase-site.xml   | 39 
 hbase-rsgroup/src/test/resources/hbase-site.xml | 39 
 2 files changed, 78 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8df45d33/hbase-endpoint/src/test/resources/hbase-site.xml
--
diff --git a/hbase-endpoint/src/test/resources/hbase-site.xml 
b/hbase-endpoint/src/test/resources/hbase-site.xml
new file mode 100644
index 000..858d428
--- /dev/null
+++ b/hbase-endpoint/src/test/resources/hbase-site.xml
@@ -0,0 +1,39 @@
+
+
+
+
+  
+hbase.defaults.for.version.skip
+true
+  
+  
+hbase.hconnection.threads.keepalivetime
+3
+  
+  
+hbase.localcluster.assign.random.ports
+true
+
+  Assign random ports to master and RS info server (UI).
+
+  
+

http://git-wip-us.apache.org/repos/asf/hbase/blob/8df45d33/hbase-rsgroup/src/test/resources/hbase-site.xml
--
diff --git a/hbase-rsgroup/src/test/resources/hbase-site.xml 
b/hbase-rsgroup/src/test/resources/hbase-site.xml
new file mode 100644
index 000..858d428
--- /dev/null
+++ b/hbase-rsgroup/src/test/resources/hbase-site.xml
@@ -0,0 +1,39 @@
+
+
+
+
+  
+hbase.defaults.for.version.skip
+true
+  
+  
+hbase.hconnection.threads.keepalivetime
+3
+  
+  
+hbase.localcluster.assign.random.ports
+true
+
+  Assign random ports to master and RS info server (UI).
+
+  
+



hbase git commit: HBASE-20224 Web UI is broken in standalone mode - addendum for hbase-archetypes module

2018-04-03 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 d4e115bf3 -> 1c898cbbe


HBASE-20224 Web UI is broken in standalone mode - addendum for hbase-archetypes 
module


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1c898cbb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1c898cbb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1c898cbb

Branch: refs/heads/branch-2
Commit: 1c898cbbe51271fedcf1020cdbed12e9252ef8ca
Parents: d4e115b
Author: tedyu 
Authored: Tue Apr 3 08:33:31 2018 -0700
Committer: tedyu 
Committed: Tue Apr 3 08:33:31 2018 -0700

--
 .../src/test/resources/hbase-site.xml   | 39 
 .../src/test/resources/hbase-site.xml   | 39 
 2 files changed, 78 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1c898cbb/hbase-archetypes/hbase-client-project/src/test/resources/hbase-site.xml
--
diff --git 
a/hbase-archetypes/hbase-client-project/src/test/resources/hbase-site.xml 
b/hbase-archetypes/hbase-client-project/src/test/resources/hbase-site.xml
new file mode 100644
index 000..858d428
--- /dev/null
+++ b/hbase-archetypes/hbase-client-project/src/test/resources/hbase-site.xml
@@ -0,0 +1,39 @@
+
+
+
+
+  
+hbase.defaults.for.version.skip
+true
+  
+  
+hbase.hconnection.threads.keepalivetime
+3
+  
+  
+hbase.localcluster.assign.random.ports
+true
+
+  Assign random ports to master and RS info server (UI).
+
+  
+

http://git-wip-us.apache.org/repos/asf/hbase/blob/1c898cbb/hbase-archetypes/hbase-shaded-client-project/src/test/resources/hbase-site.xml
--
diff --git 
a/hbase-archetypes/hbase-shaded-client-project/src/test/resources/hbase-site.xml
 
b/hbase-archetypes/hbase-shaded-client-project/src/test/resources/hbase-site.xml
new file mode 100644
index 000..858d428
--- /dev/null
+++ 
b/hbase-archetypes/hbase-shaded-client-project/src/test/resources/hbase-site.xml
@@ -0,0 +1,39 @@
+
+
+
+
+  
+hbase.defaults.for.version.skip
+true
+  
+  
+hbase.hconnection.threads.keepalivetime
+3
+  
+  
+hbase.localcluster.assign.random.ports
+true
+
+  Assign random ports to master and RS info server (UI).
+
+  
+



hbase-site git commit: INFRA-10751 Empty commit

2018-04-03 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 21347dff7 -> 35735602f


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/35735602
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/35735602
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/35735602

Branch: refs/heads/asf-site
Commit: 35735602f0e3cb78be936e9a9dd50db360b82634
Parents: 21347df
Author: jenkins 
Authored: Tue Apr 3 14:47:49 2018 +
Committer: jenkins 
Committed: Tue Apr 3 14:47:49 2018 +

--

--




[21/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

2018-04-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
index 72accd6..f0895d6 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
@@ -1503,7 +1503,7 @@ implements 
 
 sanityCheckUncompressed
-void sanityCheckUncompressed()
+void sanityCheckUncompressed()
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 An additional sanity-check in case no compression or 
encryption is being used.
 
@@ -1518,7 +1518,7 @@ implements 
 
 getSerializedLength
-public int getSerializedLength()
+public int getSerializedLength()
 Description copied from 
interface: Cacheable
 Returns the length of the ByteBuffer required to serialized 
the object. If the
  object cannot be serialized, it should return 0.
@@ -1536,7 +1536,7 @@ implements 
 
 serialize
-public void serialize(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination)
+public void serialize(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination)
 Description copied from 
interface: Cacheable
 Serializes its data into destination.
 
@@ -1553,7 +1553,7 @@ implements 
 
 getMetaData
-public https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer getMetaData()
+public https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer getMetaData()
 For use by bucketcache. This exposes internals.
 
 
@@ -1563,7 +1563,7 @@ implements 
 
 addMetaData
-private https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer addMetaData(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination)
+private https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer addMetaData(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer destination)
 Adds metadata at current position (position is moved 
forward). Does not flip or reset.
 
 Returns:
@@ -1577,7 +1577,7 @@ implements 
 
 getDeserializer
-public CacheableDeserializer getDeserializer()
+public CacheableDeserializer getDeserializer()
 Description copied from 
interface: Cacheable
 Returns CacheableDeserializer instance which reconstructs 
original object from ByteBuffer.
 
@@ -1594,7 +1594,7 @@ implements 
 
 hashCode
-public int hashCode()
+public int hashCode()
 
 Overrides:
 https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--";
 title="class or interface in java.lang">hashCode in 
class https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
@@ -1607,7 +1607,7 @@ implements 
 
 equals
-public boolean equals(https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object comparison)
+public boolean equals(https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object comparison)
 
 Overrides:
 https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals in 
class https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
@@ -1620,7 +1620,7 @@ implements 
 
 getDataBlockEncoding
-DataBlockEncoding getDataBlockEncoding()
+DataBlockEncoding getDataBlockEncoding()
 
 
 
@@ -1629,7 +1629,7 @@ implements 
 
 getChecksumType
-byte getChecksumType()
+byte getChecksumType()
 
 
 
@@ -1638,7 +1638,7 @@ implements 
 
 getBytesPerChecksum
-int getBytesPerChecksum()
+int getBytesPerChecksum()
 
 
 
@@ -1647,7 +1647,7 @@ implements 
 
 getOnDiskDataSizeWithHeader
-int getOnDiskDataSizeWithHeader()
+int getOnDiskDataSizeWithHeader()
 
 Returns:
 the size of data on disk + header. Excludes checksum.
@@ -1660,7 +1660,7 @@ implements 
 
 totalChecksumBytes
-int totalChecksumBytes()
+int totalChecksumBytes()
 Calculate the number of bytes required to store all the 
checksums
  for this block.
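As an aside, the serialize/getSerializedLength contract documented above can be illustrated with a small, self-contained sketch; the interface below is a simplified stand-in, not the real org.apache.hadoop.hbase.io.hfile.Cacheable.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

interface SimpleCacheable {
  int getSerializedLength();              // 0 means "cannot be serialized"
  void serialize(ByteBuffer destination); // writes exactly getSerializedLength() bytes
}

class StringBlock implements SimpleCacheable {
  private final byte[] payload;

  StringBlock(String s) {
    this.payload = s.getBytes(StandardCharsets.UTF_8);
  }

  @Override
  public int getSerializedLength() {
    return Integer.BYTES + payload.length; // length prefix + data
  }

  @Override
  public void serialize(ByteBuffer destination) {
    destination.putInt(payload.length);
    destination.put(payload);
  }

  public static void main(String[] args) {
    StringBlock block = new StringBlock("hfile-block");
    ByteBuffer buf = ByteBuffer.allocate(block.getSerializedLength());
    block.serialize(buf);
    System.out.println("wrote " + buf.position() + " bytes");
  }
}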

[02/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

2018-04-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
index cc35a46..cca21a9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -190,1967 +190,1967 @@
 182
 183  private final boolean 
verifyBulkLoads;
 184
-185  private final AtomicInteger 
currentParallelPutCount = new AtomicInteger(0);
-186  private final int 
parallelPutCountPrintThreshold;
-187
-188  private ScanInfo scanInfo;
-189
-190  // All access must be synchronized.
-191  // TODO: ideally, this should be part 
of storeFileManager, as we keep passing this to it.
-192  private final List 
filesCompacting = Lists.newArrayList();
-193
-194  // All access must be synchronized.
-195  private final 
Set changedReaderObservers =
-196Collections.newSetFromMap(new 
ConcurrentHashMap());
-197
-198  protected final int blocksize;
-199  private HFileDataBlockEncoder 
dataBlockEncoder;
-200
-201  /** Checksum configuration */
-202  protected ChecksumType checksumType;
-203  protected int bytesPerChecksum;
-204
-205  // Comparing KeyValues
-206  protected final CellComparator 
comparator;
-207
-208  final StoreEngine 
storeEngine;
+185  /**
+186   * Use this counter to track concurrent 
puts. If TRACE-log is enabled, if we are over the
+187   * threshold set by 
hbase.region.store.parallel.put.print.threshold (Default is 50) we will
+188   * log a message that identifies the 
Store experience this high-level of concurrency.
+189   */
+190  private final AtomicInteger 
currentParallelPutCount = new AtomicInteger(0);
+191  private final int 
parallelPutCountPrintThreshold;
+192
+193  private ScanInfo scanInfo;
+194
+195  // All access must be synchronized.
+196  // TODO: ideally, this should be part 
of storeFileManager, as we keep passing this to it.
+197  private final List 
filesCompacting = Lists.newArrayList();
+198
+199  // All access must be synchronized.
+200  private final 
Set changedReaderObservers =
+201Collections.newSetFromMap(new 
ConcurrentHashMap());
+202
+203  protected final int blocksize;
+204  private HFileDataBlockEncoder 
dataBlockEncoder;
+205
+206  /** Checksum configuration */
+207  protected ChecksumType checksumType;
+208  protected int bytesPerChecksum;
 209
-210  private static final AtomicBoolean 
offPeakCompactionTracker = new AtomicBoolean();
-211  private volatile OffPeakHours 
offPeakHours;
+210  // Comparing KeyValues
+211  protected final CellComparator 
comparator;
 212
-213  private static final int 
DEFAULT_FLUSH_RETRIES_NUMBER = 10;
-214  private int flushRetriesNumber;
-215  private int pauseTime;
-216
-217  private long blockingFileCount;
-218  private int 
compactionCheckMultiplier;
-219  protected Encryption.Context 
cryptoContext = Encryption.Context.NONE;
-220
-221  private AtomicLong flushedCellsCount = 
new AtomicLong();
-222  private AtomicLong compactedCellsCount 
= new AtomicLong();
-223  private AtomicLong 
majorCompactedCellsCount = new AtomicLong();
-224  private AtomicLong flushedCellsSize = 
new AtomicLong();
-225  private AtomicLong 
flushedOutputFileSize = new AtomicLong();
-226  private AtomicLong compactedCellsSize = 
new AtomicLong();
-227  private AtomicLong 
majorCompactedCellsSize = new AtomicLong();
-228
-229  /**
-230   * Constructor
-231   * @param region
-232   * @param family HColumnDescriptor for 
this column
-233   * @param confParam configuration 
object
-234   * failed.  Can be null.
-235   * @throws IOException
-236   */
-237  protected HStore(final HRegion region, 
final ColumnFamilyDescriptor family,
-238  final Configuration confParam) 
throws IOException {
-239
-240this.fs = 
region.getRegionFileSystem();
-241
-242// Assemble the store's home 
directory and Ensure it exists.
-243
fs.createStoreDir(family.getNameAsString());
-244this.region = region;
-245this.family = family;
-246// 'conf' renamed to 'confParam' b/c 
we use this.conf in the constructor
-247// CompoundConfiguration will look 
for keys in reverse order of addition, so we'd
-248// add global config first, then 
table and cf overrides, then cf metadata.
-249this.conf = new 
CompoundConfiguration()
-250  .add(confParam)
-251  
.addBytesMap(region.getTableDescriptor().getValues())
-252  
.addStringMap(family.getConfiguration())
-253  .addBytesMap(family.getValues());
-254this.blocksize = 
family.getBlocksize();
-255
-256// set block storage policy for store 
directory
-257String policyName = 
fam
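
The new javadoc added in the hunk above documents a small mechanism: an AtomicInteger counts in-flight puts, and when TRACE logging is enabled and the count crosses hbase.region.store.parallel.put.print.threshold (default 50) the store logs that it is seeing that level of concurrency. Below is a minimal stand-alone sketch of that counter-and-threshold pattern only; the class, method and logger names are invented for the example and are not the HStore code.

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.logging.Level;
    import java.util.logging.Logger;

    // Illustrative only: mirrors the counter/threshold idea described in the
    // javadoc above, not the actual HStore implementation.
    public class ParallelPutCounterSketch {
      private static final Logger LOG = Logger.getLogger("store");
      // Default of 50 matches the default quoted in the javadoc above.
      private final int printThreshold =
          Integer.getInteger("hbase.region.store.parallel.put.print.threshold", 50);
      private final AtomicInteger currentParallelPutCount = new AtomicInteger(0);

      void put(Runnable write) {
        // Count this put as "in flight"; note (at trace level) if too many overlap.
        if (currentParallelPutCount.incrementAndGet() > printThreshold
            && LOG.isLoggable(Level.FINEST)) {
          LOG.finest("Store experiencing " + currentParallelPutCount.get()
              + " concurrent puts (threshold=" + printThreshold + ")");
        }
        try {
          write.run();
        } finally {
          currentParallelPutCount.decrementAndGet();
        }
      }

      public static void main(String[] args) {
        new ParallelPutCounterSketch().put(() -> { /* pretend to write a cell */ });
      }
    }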

[14/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

2018-04-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
index 12d10e1..97ceefd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReaderImpl.html
@@ -1740,384 +1740,380 @@
 1732  // and will save us having to seek 
the stream backwards to reread the header we
 1733  // read the last time through 
here.
 1734  ByteBuffer headerBuf = 
getCachedHeader(offset);
-1735  if (LOG.isTraceEnabled()) {
-1736LOG.trace("Reading " + 
this.fileContext.getHFileName() + " at offset=" + offset +
-1737  ", pread=" + pread + ", 
verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-1738  headerBuf + ", 
onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-1739  }
-1740  // This is NOT same as 
verifyChecksum. This latter is whether to do hbase
-1741  // checksums. Can change with 
circumstances. The below flag is whether the
-1742  // file has support for checksums 
(version 2+).
-1743  boolean checksumSupport = 
this.fileContext.isUseHBaseChecksum();
-1744  long startTime = 
System.currentTimeMillis();
-1745  if (onDiskSizeWithHeader <= 0) 
{
-1746// We were not passed the block 
size. Need to get it from the header. If header was
-1747// not cached (see 
getCachedHeader above), need to seek to pull it in. This is costly
-1748// and should happen very 
rarely. Currently happens on open of a hfile reader where we
-1749// read the trailer blocks to 
pull in the indices. Otherwise, we are reading block sizes
-1750// out of the hfile index. To 
check, enable TRACE in this file and you'll get an exception
-1751// in a LOG every time we seek. 
See HBASE-17072 for more detail.
-1752if (headerBuf == null) {
-1753  if (LOG.isTraceEnabled()) {
-1754LOG.trace("Extra see to get 
block size!", new RuntimeException());
-1755  }
-1756  headerBuf = 
ByteBuffer.allocate(hdrSize);
-1757  readAtOffset(is, 
headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-1758  offset, pread);
-1759}
-1760onDiskSizeWithHeader = 
getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-1761  }
-1762  int preReadHeaderSize = headerBuf 
== null? 0 : hdrSize;
-1763  // Allocate enough space to fit 
the next block's header too; saves a seek next time through.
-1764  // onDiskBlock is whole block + 
header + checksums then extra hdrSize to read next header;
-1765  // onDiskSizeWithHeader is header, 
body, and any checksums if present. preReadHeaderSize
-1766  // says where to start reading. If 
we have the header cached, then we don't need to read
-1767  // it again and we can likely read 
from last place we left off w/o need to backup and reread
-1768  // the header we read last time 
through here.
-1769  // TODO: Make this 
ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-1770  byte [] onDiskBlock = new 
byte[onDiskSizeWithHeader + hdrSize];
-1771  int nextBlockOnDiskSize = 
readAtOffset(is, onDiskBlock, preReadHeaderSize,
-1772  onDiskSizeWithHeader - 
preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-1773  if (headerBuf != null) {
-1774// The header has been read when 
reading the previous block OR in a distinct header-only
-1775// read. Copy to this block's 
header.
-1776
System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, 
hdrSize);
-1777  } else {
-1778headerBuf = 
ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-1779  }
-1780  // Do a few checks before we go 
instantiate HFileBlock.
-1781  assert onDiskSizeWithHeader > 
this.hdrSize;
-1782  
verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, 
checksumSupport);
-1783  ByteBuffer onDiskBlockByteBuffer = 
ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
-1784  // Verify checksum of the data 
before using it for building HFileBlock.
-1785  if (verifyChecksum &&
-1786  !validateChecksum(offset, 
onDiskBlockByteBuffer, hdrSize)) {
-1787return null;
-1788  }
-1789  long duration = 
System.currentTimeMillis() - startTime;
-1790  if (updateMetrics) {
-1791
HFile.updateReadLatency(duration, pread);
-1792  }
-1793  // The onDiskBlock will become the 
headerAndDataBuffer for this block.
-1794  // If 
nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-1795  // contains the header of next 
block, so no 
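
The comments in the hunk above sketch the read path: if the block size is unknown, the header is read first (or taken from a cached copy); otherwise the reader fetches the whole block plus one extra header's worth of bytes so the next block's header is already in hand and the following read can skip a seek, and the checksum is validated before the block is built. A rough, self-contained illustration of just the read-plus-next-header idea, assuming an invented fixed header size and helper names (not HFileBlock's actual on-disk format):

    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.util.Arrays;

    // Toy illustration of reading a block plus the following block's header in
    // one positional read, so the next read can skip a seek. Sizes are invented.
    public class ReadAheadSketch {
      static final int HDR_SIZE = 8; // pretend fixed header size

      static byte[] readBlock(RandomAccessFile f, long offset, int onDiskSizeWithHeader,
                              byte[] nextHeaderOut) throws IOException {
        byte[] buf = new byte[onDiskSizeWithHeader + HDR_SIZE];
        f.seek(offset);
        int read = f.read(buf, 0, buf.length); // may be short at end of file
        if (read >= onDiskSizeWithHeader + HDR_SIZE) {
          // Keep the trailing bytes: they are the next block's header.
          System.arraycopy(buf, onDiskSizeWithHeader, nextHeaderOut, 0, HDR_SIZE);
        }
        return Arrays.copyOf(buf, onDiskSizeWithHeader);
      }

      public static void main(String[] args) throws IOException {
        java.io.File tmp = java.io.File.createTempFile("blocks", ".bin");
        tmp.deleteOnExit();
        try (RandomAccessFile raf = new RandomAccessFile(tmp, "rw")) {
          raf.write(new byte[64]); // fake file content: plenty of zero bytes
          byte[] nextHeader = new byte[HDR_SIZE];
          byte[] block = readBlock(raf, 0, 24, nextHeader);
          System.out.println("block bytes=" + block.length
              + ", prefetched header bytes=" + nextHeader.length);
        }
      }
    }

In the quoted code the cached header plays the same role as nextHeaderOut here: it is what allows the next call to avoid seeking backwards to reread a header.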

[19/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

2018-04-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index 7ae1c82..3735b20 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":9,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":9,"i35":10,"i36":9,"i37":9,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":41,"i94":41,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":9,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":9,"i35":10,"i36":9,"i37":9,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":41,"i95":41,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -243,7 +243,9 @@ implements 
 private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">AtomicInteger
-currentParallelPutCount 
+currentParallelPutCount
+Use this counter to track concurrent puts.
+
 
 
 private HFileDataBlockEncoder
@@ -753,42 +755,46 @@ implements getMaxStoreFileAge() 
 
 
+private MemStore
+getMemstore() 
+
+
 long
 getMemStoreFlushSize() 
 
-
+
 MemStoreSize
 getMemStoreSize() 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/OptionalLong.html?is-external=true";
 title="class or interface in java.util">OptionalLong
 getMinStoreFileAge() 
 
-
+
 long
 getNumHFiles() 
 
-
+
 long
 getNumReferenceFiles() 
 
-
+
 protected OffPeakHours
 getOffPeakHours() 
 
-
+
 HRegionFileSystem
 getRegionFileSystem() 
 
-
+
 RegionInfo
 getRegionInfo() 
 
-
+
 ScanInfo
 getScanInfo() 
 
-
+
 KeyValueScanner
 getScanner(Scan scan,
   https://docs.oracle.com/javase/8/docs/api/java/util/NavigableSet.html?is-external=true";
 title="class or interface in 
java.util">NavigableSet targetCols,
@@ -796,7 +802,7 @@ implements Return a scanner for both the memstore and the HStore 
files.
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 getScanners(boolean cacheBlocks,
boolean isGet,
@@ -809,7 +815,7 @@ implements Get all scanners with no filt

[22/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

2018-04-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index ec1a372..0edf3af 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase –  
   Bulk Loads in Apache HBase (TM)
@@ -296,7 +296,7 @@ under the License. -->
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-04-01
+  Last Published: 
2018-04-03
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index d012532..5628338 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -10312,12 +10312,12 @@
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation";>JavadocTagContinuationIndentation
 
 offset: "2"
-798
+784
  Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription";>NonEmptyAtclauseDescription
-3833
+3847
  Error
 
 misc
@@ -14526,7 +14526,7 @@
 
  Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 26 has parse error. Missed HTML close tag 'arg'. 
Sometimes it means that close tag missed for one of previous tags.
 44
 
@@ -15162,7 +15162,7 @@
 
  Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 4 has parse error. Missed HTML close tag 'pre'. 
Sometimes it means that close tag missed for one of previous tags.
 59
 
@@ -16917,7 +16917,7 @@
 
  Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 19 has parse error. Details: no viable 
alternative at input '\n   *   List

[05/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
index 3dbdec3..22e7059 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
@@ -127,468 +127,470 @@
 119// initialization of the flush size 
should happen after initialization of the index type
 120// so do not transfer the following 
method
 121initInmemoryFlushSize(conf);
-122  }
-123
-124  @VisibleForTesting
-125  protected MemStoreCompactor 
createMemStoreCompactor(MemoryCompactionPolicy compactionPolicy)
-126  throws IllegalArgumentIOException 
{
-127return new MemStoreCompactor(this, 
compactionPolicy);
-128  }
-129
-130  private void 
initInmemoryFlushSize(Configuration conf) {
-131double factor = 0;
-132long memstoreFlushSize = 
getRegionServices().getMemStoreFlushSize();
-133int numStores = 
getRegionServices().getNumStores();
-134if (numStores <= 1) {
-135  // Family number might also be zero 
in some of our unit test case
-136  numStores = 1;
-137}
-138inmemoryFlushSize = memstoreFlushSize 
/ numStores;
-139// multiply by a factor (the same 
factor for all index types)
-140factor = 
conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
-141  
IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
-142
-143inmemoryFlushSize = (long) 
(inmemoryFlushSize * factor);
-144LOG.info("Setting in-memory flush 
size threshold to {} and immutable segments index to type={}",
-145
StringUtils.byteDesc(inmemoryFlushSize), indexType);
-146  }
-147
-148  /**
-149   * @return Total memory occupied by 
this MemStore. This won't include any size occupied by the
-150   * snapshot. We assume the 
snapshot will get cleared soon. This is not thread safe and
-151   * the memstore may be changed 
while computing its size. It is the responsibility of the
-152   * caller to make sure this 
doesn't happen.
-153   */
-154  @Override
-155  public MemStoreSize size() {
-156MemStoreSizing memstoreSizing = new 
MemStoreSizing();
-157
memstoreSizing.incMemStoreSize(active.getMemStoreSize());
-158for (Segment item : 
pipeline.getSegments()) {
-159  
memstoreSizing.incMemStoreSize(item.getMemStoreSize());
-160}
-161return memstoreSizing;
-162  }
-163
-164  /**
-165   * This method is called before the 
flush is executed.
-166   * @return an estimation (lower bound) 
of the unflushed sequence id in memstore after the flush
-167   * is executed. if memstore will be 
cleared returns {@code HConstants.NO_SEQNUM}.
-168   */
-169  @Override
-170  public long preFlushSeqIDEstimation() 
{
-171if(compositeSnapshot) {
-172  return HConstants.NO_SEQNUM;
-173}
-174Segment segment = getLastSegment();
-175if(segment == null) {
-176  return HConstants.NO_SEQNUM;
-177}
-178return segment.getMinSequenceId();
-179  }
-180
-181  @Override
-182  public boolean isSloppy() {
-183return true;
-184  }
-185
-186  /**
-187   * Push the current active memstore 
segment into the pipeline
-188   * and create a snapshot of the tail of 
current compaction pipeline
-189   * Snapshot must be cleared by call to 
{@link #clearSnapshot}.
-190   * {@link #clearSnapshot(long)}.
-191   * @return {@link MemStoreSnapshot}
-192   */
-193  @Override
-194  public MemStoreSnapshot snapshot() {
-195// If snapshot currently has entries, 
then flusher failed or didn't call
-196// cleanup.  Log a warning.
-197if (!this.snapshot.isEmpty()) {
-198  LOG.warn("Snapshot called again 
without clearing previous. " +
-199  "Doing nothing. Another ongoing 
flush or did we fail last attempt?");
-200} else {
-201  LOG.debug("FLUSHING TO DISK {}, 
store={}",
-202
getRegionServices().getRegionInfo().getEncodedName(), getFamilyName());
-203  stopCompaction();
-204  
pushActiveToPipeline(this.active);
-205  snapshotId = 
EnvironmentEdgeManager.currentTime();
-206  // in both cases whatever is pushed 
to snapshot is cleared from the pipeline
-207  if (compositeSnapshot) {
-208pushPipelineToSnapshot();
-209  } else {
-210pushTailToSnapshot();
-211  }
-212  compactor.resetStats();
-213}
-214return new 
MemStoreSnapshot(snapshotId, this.snapshot);
-215  }
-216
-217  /**
-218   * On flush, how much memory we will 
clear.
-219   * @return size of data that is going 
to be flushed
-220   */
-221  @Override
-222  public MemStoreSize getFlushableSize() 
{
-223MemStoreSizing snapshotSizing = 
getSnapshotSizing();
-224if (snapshotS
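
The initInmemoryFlushSize code quoted above derives the in-memory flush threshold by dividing the region's memstore flush size by the number of stores (floored at one) and scaling by the factor read from IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY. A worked version of just that arithmetic, with the input values made up for illustration:

    // Stand-alone arithmetic from initInmemoryFlushSize above; only the formula
    // is taken from the quoted code, the inputs here are example values.
    public class InMemoryFlushSizeSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // e.g. 128 MB region flush size
        int numStores = 3;                           // column families in the region
        double factor = 0.014;                       // example threshold factor
        if (numStores <= 1) {
          numStores = 1; // family count can be zero in some unit tests
        }
        long inmemoryFlushSize = (long) ((memstoreFlushSize / numStores) * factor);
        System.out.println("in-memory flush threshold = " + inmemoryFlushSize + " bytes");
      }
    }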

[13/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
index 12d10e1..97ceefd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Header.html
@@ -1740,384 +1740,380 @@
 1732  // and will save us having to seek 
the stream backwards to reread the header we
 1733  // read the last time through 
here.
 1734  ByteBuffer headerBuf = 
getCachedHeader(offset);
-1735  if (LOG.isTraceEnabled()) {
-1736LOG.trace("Reading " + 
this.fileContext.getHFileName() + " at offset=" + offset +
-1737  ", pread=" + pread + ", 
verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-1738  headerBuf + ", 
onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-1739  }
-1740  // This is NOT same as 
verifyChecksum. This latter is whether to do hbase
-1741  // checksums. Can change with 
circumstances. The below flag is whether the
-1742  // file has support for checksums 
(version 2+).
-1743  boolean checksumSupport = 
this.fileContext.isUseHBaseChecksum();
-1744  long startTime = 
System.currentTimeMillis();
-1745  if (onDiskSizeWithHeader <= 0) 
{
-1746// We were not passed the block 
size. Need to get it from the header. If header was
-1747// not cached (see 
getCachedHeader above), need to seek to pull it in. This is costly
-1748// and should happen very 
rarely. Currently happens on open of a hfile reader where we
-1749// read the trailer blocks to 
pull in the indices. Otherwise, we are reading block sizes
-1750// out of the hfile index. To 
check, enable TRACE in this file and you'll get an exception
-1751// in a LOG every time we seek. 
See HBASE-17072 for more detail.
-1752if (headerBuf == null) {
-1753  if (LOG.isTraceEnabled()) {
-1754LOG.trace("Extra see to get 
block size!", new RuntimeException());
-1755  }
-1756  headerBuf = 
ByteBuffer.allocate(hdrSize);
-1757  readAtOffset(is, 
headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-1758  offset, pread);
-1759}
-1760onDiskSizeWithHeader = 
getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-1761  }
-1762  int preReadHeaderSize = headerBuf 
== null? 0 : hdrSize;
-1763  // Allocate enough space to fit 
the next block's header too; saves a seek next time through.
-1764  // onDiskBlock is whole block + 
header + checksums then extra hdrSize to read next header;
-1765  // onDiskSizeWithHeader is header, 
body, and any checksums if present. preReadHeaderSize
-1766  // says where to start reading. If 
we have the header cached, then we don't need to read
-1767  // it again and we can likely read 
from last place we left off w/o need to backup and reread
-1768  // the header we read last time 
through here.
-1769  // TODO: Make this 
ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-1770  byte [] onDiskBlock = new 
byte[onDiskSizeWithHeader + hdrSize];
-1771  int nextBlockOnDiskSize = 
readAtOffset(is, onDiskBlock, preReadHeaderSize,
-1772  onDiskSizeWithHeader - 
preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-1773  if (headerBuf != null) {
-1774// The header has been read when 
reading the previous block OR in a distinct header-only
-1775// read. Copy to this block's 
header.
-1776
System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, 
hdrSize);
-1777  } else {
-1778headerBuf = 
ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-1779  }
-1780  // Do a few checks before we go 
instantiate HFileBlock.
-1781  assert onDiskSizeWithHeader > 
this.hdrSize;
-1782  
verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, 
checksumSupport);
-1783  ByteBuffer onDiskBlockByteBuffer = 
ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
-1784  // Verify checksum of the data 
before using it for building HFileBlock.
-1785  if (verifyChecksum &&
-1786  !validateChecksum(offset, 
onDiskBlockByteBuffer, hdrSize)) {
-1787return null;
-1788  }
-1789  long duration = 
System.currentTimeMillis() - startTime;
-1790  if (updateMetrics) {
-1791
HFile.updateReadLatency(duration, pread);
-1792  }
-1793  // The onDiskBlock will become the 
headerAndDataBuffer for this block.
-1794  // If 
nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-1795  // contains the header of next 
block, so no need to set next block's header

[01/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

Repository: hbase-site
Updated Branches:
  refs/heads/asf-site e8a9b2eef -> 21347dff7


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html
index 4234db1..d760e64 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html
@@ -81,43 +81,49 @@
 073}
 074  }
 075
-076  // get next compaction action to apply 
on compaction pipeline
-077  public abstract Action 
getAction(VersionedSegmentsList versionedList);
-078  // update policy stats based on the 
segment that replaced previous versioned list (in
-079  // compaction pipeline)
-080  public void updateStats(Segment 
replacement) {}
-081  // resets policy stats
-082  public void resetStats() {}
-083
-084  protected Action 
simpleMergeOrFlatten(VersionedSegmentsList versionedList, String strategy) {
-085int numOfSegments = 
versionedList.getNumOfSegments();
-086if (numOfSegments > 
pipelineThreshold) {
-087  // to avoid too many segments, 
merge now
-088  LOG.trace("Strategy={}, store={}; 
merging {} segments", strategy, cfName, numOfSegments);
-089  return getMergingAction();
-090}
-091
-092// just flatten a segment
-093LOG.trace("Strategy={}, store={}; 
flattening a segment", strategy, cfName);
-094return getFlattenAction();
-095  }
-096
-097  protected Action getMergingAction() {
-098return Action.MERGE;
-099  }
-100
-101  protected Action getFlattenAction() {
-102return Action.FLATTEN;
-103  }
-104
-105  protected Action 
compact(VersionedSegmentsList versionedList, String strategyInfo) {
-106int numOfSegments = 
versionedList.getNumOfSegments();
-107LOG.trace("{} in-memory compaction 
for store={} compacting {} segments", strategyInfo,
-108cfName, numOfSegments);
-109return Action.COMPACT;
+076  @Override
+077  public String toString() {
+078return getName() + ", 
pipelineThreshold=" + this.pipelineThreshold;
+079  }
+080
+081  protected abstract String getName();
+082
+083  // get next compaction action to apply 
on compaction pipeline
+084  public abstract Action 
getAction(VersionedSegmentsList versionedList);
+085  // update policy stats based on the 
segment that replaced previous versioned list (in
+086  // compaction pipeline)
+087  public void updateStats(Segment 
replacement) {}
+088  // resets policy stats
+089  public void resetStats() {}
+090
+091  protected Action 
simpleMergeOrFlatten(VersionedSegmentsList versionedList, String strategy) {
+092int numOfSegments = 
versionedList.getNumOfSegments();
+093if (numOfSegments > 
pipelineThreshold) {
+094  // to avoid too many segments, 
merge now
+095  LOG.trace("Strategy={}, store={}; 
merging {} segments", strategy, cfName, numOfSegments);
+096  return getMergingAction();
+097}
+098
+099// just flatten a segment
+100LOG.trace("Strategy={}, store={}; 
flattening a segment", strategy, cfName);
+101return getFlattenAction();
+102  }
+103
+104  protected Action getMergingAction() {
+105return Action.MERGE;
+106  }
+107
+108  protected Action getFlattenAction() {
+109return Action.FLATTEN;
 110  }
 111
-112}
+112  protected Action 
compact(VersionedSegmentsList versionedList, String strategyInfo) {
+113int numOfSegments = 
versionedList.getNumOfSegments();
+114LOG.trace("{} in-memory compaction 
for store={} compacting {} segments", strategyInfo,
+115cfName, numOfSegments);
+116return Action.COMPACT;
+117  }
+118}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.html
index 4234db1..d760e64 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.html
@@ -81,43 +81,49 @@
 073}
 074  }
 075
-076  // get next compaction action to apply 
on compaction pipeline
-077  public abstract Action 
getAction(VersionedSegmentsList versionedList);
-078  // update policy stats based on the 
segment that replaced previous versioned list (in
-079  // compaction pipeline)
-080  public void updateStats(Segment 
replacement) {}
-081  // resets policy st
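
The refactored strategy above adds getName()/toString() for logging and keeps the core decision in simpleMergeOrFlatten: once the pipeline holds more segments than pipelineThreshold it merges them, otherwise it only flattens a segment. A compact restatement of that decision, with the class name and threshold invented for the example:

    // Restates the simpleMergeOrFlatten decision shown above; not the HBase class.
    public class MergeOrFlattenSketch {
      enum Action { FLATTEN, MERGE, COMPACT }

      private final int pipelineThreshold;

      MergeOrFlattenSketch(int pipelineThreshold) {
        this.pipelineThreshold = pipelineThreshold;
      }

      Action simpleMergeOrFlatten(int numOfSegments) {
        if (numOfSegments > pipelineThreshold) {
          return Action.MERGE;   // too many segments piled up: merge them now
        }
        return Action.FLATTEN;   // otherwise just flatten the newest segment
      }

      public static void main(String[] args) {
        MergeOrFlattenSketch s = new MergeOrFlattenSketch(2); // example threshold
        System.out.println(s.simpleMergeOrFlatten(1)); // FLATTEN
        System.out.println(s.simpleMergeOrFlatten(3)); // MERGE
      }
    }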

[08/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/BasicMemStoreCompactionStrategy.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/BasicMemStoreCompactionStrategy.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/BasicMemStoreCompactionStrategy.html
index c24fb8f..d1de577 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/BasicMemStoreCompactionStrategy.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/BasicMemStoreCompactionStrategy.html
@@ -37,18 +37,22 @@
 029 */
 030@InterfaceAudience.Private
 031public class 
BasicMemStoreCompactionStrategy extends MemStoreCompactionStrategy{
-032
-033  private static final String name = 
"BASIC";
-034
-035  public 
BasicMemStoreCompactionStrategy(Configuration conf, String cfName) {
-036super(conf, cfName);
-037  }
-038
-039  @Override
-040  public Action 
getAction(VersionedSegmentsList versionedList) {
-041return 
simpleMergeOrFlatten(versionedList, name);
-042  }
-043}
+032  private static final String NAME = 
"BASIC";
+033
+034  public 
BasicMemStoreCompactionStrategy(Configuration conf, String cfName) {
+035super(conf, cfName);
+036  }
+037
+038  @Override
+039  public Action 
getAction(VersionedSegmentsList versionedList) {
+040return 
simpleMergeOrFlatten(versionedList, getName());
+041  }
+042
+043  @Override
+044  protected String getName() {
+045return NAME;
+046  }
+047}
 
 
 



[10/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
index 12d10e1..97ceefd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
@@ -1740,384 +1740,380 @@
 1732  // and will save us having to seek 
the stream backwards to reread the header we
 1733  // read the last time through 
here.
 1734  ByteBuffer headerBuf = 
getCachedHeader(offset);
-1735  if (LOG.isTraceEnabled()) {
-1736LOG.trace("Reading " + 
this.fileContext.getHFileName() + " at offset=" + offset +
-1737  ", pread=" + pread + ", 
verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-1738  headerBuf + ", 
onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-1739  }
-1740  // This is NOT same as 
verifyChecksum. This latter is whether to do hbase
-1741  // checksums. Can change with 
circumstances. The below flag is whether the
-1742  // file has support for checksums 
(version 2+).
-1743  boolean checksumSupport = 
this.fileContext.isUseHBaseChecksum();
-1744  long startTime = 
System.currentTimeMillis();
-1745  if (onDiskSizeWithHeader <= 0) 
{
-1746// We were not passed the block 
size. Need to get it from the header. If header was
-1747// not cached (see 
getCachedHeader above), need to seek to pull it in. This is costly
-1748// and should happen very 
rarely. Currently happens on open of a hfile reader where we
-1749// read the trailer blocks to 
pull in the indices. Otherwise, we are reading block sizes
-1750// out of the hfile index. To 
check, enable TRACE in this file and you'll get an exception
-1751// in a LOG every time we seek. 
See HBASE-17072 for more detail.
-1752if (headerBuf == null) {
-1753  if (LOG.isTraceEnabled()) {
-1754LOG.trace("Extra see to get 
block size!", new RuntimeException());
-1755  }
-1756  headerBuf = 
ByteBuffer.allocate(hdrSize);
-1757  readAtOffset(is, 
headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-1758  offset, pread);
-1759}
-1760onDiskSizeWithHeader = 
getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-1761  }
-1762  int preReadHeaderSize = headerBuf 
== null? 0 : hdrSize;
-1763  // Allocate enough space to fit 
the next block's header too; saves a seek next time through.
-1764  // onDiskBlock is whole block + 
header + checksums then extra hdrSize to read next header;
-1765  // onDiskSizeWithHeader is header, 
body, and any checksums if present. preReadHeaderSize
-1766  // says where to start reading. If 
we have the header cached, then we don't need to read
-1767  // it again and we can likely read 
from last place we left off w/o need to backup and reread
-1768  // the header we read last time 
through here.
-1769  // TODO: Make this 
ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-1770  byte [] onDiskBlock = new 
byte[onDiskSizeWithHeader + hdrSize];
-1771  int nextBlockOnDiskSize = 
readAtOffset(is, onDiskBlock, preReadHeaderSize,
-1772  onDiskSizeWithHeader - 
preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-1773  if (headerBuf != null) {
-1774// The header has been read when 
reading the previous block OR in a distinct header-only
-1775// read. Copy to this block's 
header.
-1776
System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, 
hdrSize);
-1777  } else {
-1778headerBuf = 
ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-1779  }
-1780  // Do a few checks before we go 
instantiate HFileBlock.
-1781  assert onDiskSizeWithHeader > 
this.hdrSize;
-1782  
verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, 
checksumSupport);
-1783  ByteBuffer onDiskBlockByteBuffer = 
ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
-1784  // Verify checksum of the data 
before using it for building HFileBlock.
-1785  if (verifyChecksum &&
-1786  !validateChecksum(offset, 
onDiskBlockByteBuffer, hdrSize)) {
-1787return null;
-1788  }
-1789  long duration = 
System.currentTimeMillis() - startTime;
-1790  if (updateMetrics) {
-1791
HFile.updateReadLatency(duration, pread);
-1792  }
-1793  // The onDiskBlock will become the 
headerAndDataBuffer for this block.
-1794  // If 
nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-1795  // contains the header of next 
block, so no need to set next block's header

[11/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
index 12d10e1..97ceefd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.State.html
@@ -1740,384 +1740,380 @@
 1732  // and will save us having to seek 
the stream backwards to reread the header we
 1733  // read the last time through 
here.
 1734  ByteBuffer headerBuf = 
getCachedHeader(offset);
-1735  if (LOG.isTraceEnabled()) {
-1736LOG.trace("Reading " + 
this.fileContext.getHFileName() + " at offset=" + offset +
-1737  ", pread=" + pread + ", 
verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-1738  headerBuf + ", 
onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-1739  }
-1740  // This is NOT same as 
verifyChecksum. This latter is whether to do hbase
-1741  // checksums. Can change with 
circumstances. The below flag is whether the
-1742  // file has support for checksums 
(version 2+).
-1743  boolean checksumSupport = 
this.fileContext.isUseHBaseChecksum();
-1744  long startTime = 
System.currentTimeMillis();
-1745  if (onDiskSizeWithHeader <= 0) 
{
-1746// We were not passed the block 
size. Need to get it from the header. If header was
-1747// not cached (see 
getCachedHeader above), need to seek to pull it in. This is costly
-1748// and should happen very 
rarely. Currently happens on open of a hfile reader where we
-1749// read the trailer blocks to 
pull in the indices. Otherwise, we are reading block sizes
-1750// out of the hfile index. To 
check, enable TRACE in this file and you'll get an exception
-1751// in a LOG every time we seek. 
See HBASE-17072 for more detail.
-1752if (headerBuf == null) {
-1753  if (LOG.isTraceEnabled()) {
-1754LOG.trace("Extra see to get 
block size!", new RuntimeException());
-1755  }
-1756  headerBuf = 
ByteBuffer.allocate(hdrSize);
-1757  readAtOffset(is, 
headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-1758  offset, pread);
-1759}
-1760onDiskSizeWithHeader = 
getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-1761  }
-1762  int preReadHeaderSize = headerBuf 
== null? 0 : hdrSize;
-1763  // Allocate enough space to fit 
the next block's header too; saves a seek next time through.
-1764  // onDiskBlock is whole block + 
header + checksums then extra hdrSize to read next header;
-1765  // onDiskSizeWithHeader is header, 
body, and any checksums if present. preReadHeaderSize
-1766  // says where to start reading. If 
we have the header cached, then we don't need to read
-1767  // it again and we can likely read 
from last place we left off w/o need to backup and reread
-1768  // the header we read last time 
through here.
-1769  // TODO: Make this 
ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-1770  byte [] onDiskBlock = new 
byte[onDiskSizeWithHeader + hdrSize];
-1771  int nextBlockOnDiskSize = 
readAtOffset(is, onDiskBlock, preReadHeaderSize,
-1772  onDiskSizeWithHeader - 
preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-1773  if (headerBuf != null) {
-1774// The header has been read when 
reading the previous block OR in a distinct header-only
-1775// read. Copy to this block's 
header.
-1776
System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, 
hdrSize);
-1777  } else {
-1778headerBuf = 
ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-1779  }
-1780  // Do a few checks before we go 
instantiate HFileBlock.
-1781  assert onDiskSizeWithHeader > 
this.hdrSize;
-1782  
verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, 
checksumSupport);
-1783  ByteBuffer onDiskBlockByteBuffer = 
ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
-1784  // Verify checksum of the data 
before using it for building HFileBlock.
-1785  if (verifyChecksum &&
-1786  !validateChecksum(offset, 
onDiskBlockByteBuffer, hdrSize)) {
-1787return null;
-1788  }
-1789  long duration = 
System.currentTimeMillis() - startTime;
-1790  if (updateMetrics) {
-1791
HFile.updateReadLatency(duration, pread);
-1792  }
-1793  // The onDiskBlock will become the 
headerAndDataBuffer for this block.
-1794  // If 
nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-1795  // contains the header of next 
block, so no 

[18/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html
index 3d9a087..b48d11e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.Action.html
@@ -287,7 +287,7 @@ the order they are declared.
 
 
 values
-public static MemStoreCompactionStrategy.Action[] values()
+public static MemStoreCompactionStrategy.Action[] values()
 Returns an array containing the constants of this enum type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -307,7 +307,7 @@ for (MemStoreCompactionStrategy.Action c : 
MemStoreCompactionStrategy.Action.val
 
 
 valueOf
-public static MemStoreCompactionStrategy.Action valueOf(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
+public static MemStoreCompactionStrategy.Action valueOf(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.html
index 371bde3..a5c2872 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":6,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
+var methods = 
{"i0":10,"i1":6,"i2":10,"i3":10,"i4":6,"i5":10,"i6":10,"i7":10,"i8":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -236,15 +236,23 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 getMergingAction() 
 
 
+protected abstract https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
+getName() 
+
+
 void
 resetStats() 
 
-
+
 protected MemStoreCompactionStrategy.Action
 simpleMergeOrFlatten(VersionedSegmentsList versionedList,
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in 
java.lang">String strategy) 
 
-
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
+toString() 
+
+
 void
 updateStats(Segment replacement) 
 
@@ -254,7 +262,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 Methods inherited from class java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
-https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--";
 title="class or interface in java.lang">hashCode, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--";
 title="class or interface in java.lang">notify, https://docs.oracle.com/javase/8/docs/api/ja
 va/lang/Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--";
 title="class or interface in java.lang">toString, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--";
 title="class or interfac

[20/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
index 2a7cd58..ef42115 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
@@ -697,7 +697,7 @@ extends 
 
 createMemStoreCompactor
-protected MemStoreCompactor createMemStoreCompactor(MemoryCompactionPolicy compactionPolicy)
+protected MemStoreCompactor createMemStoreCompactor(MemoryCompactionPolicy compactionPolicy)
  throws IllegalArgumentIOException
 
 Throws:
@@ -711,7 +711,7 @@ extends 
 
 initInmemoryFlushSize
-private void initInmemoryFlushSize(org.apache.hadoop.conf.Configuration conf)
+private void initInmemoryFlushSize(org.apache.hadoop.conf.Configuration conf)
 
 
 
@@ -720,7 +720,7 @@ extends 
 
 size
-public MemStoreSize size()
+public MemStoreSize size()
 
 Returns:
 Total memory occupied by this MemStore. This won't include any size 
occupied by the
@@ -736,7 +736,7 @@ extends 
 
 preFlushSeqIDEstimation
-public long preFlushSeqIDEstimation()
+public long preFlushSeqIDEstimation()
 This method is called before the flush is executed.
 
 Returns:
@@ -751,7 +751,7 @@ extends 
 
 isSloppy
-public boolean isSloppy()
+public boolean isSloppy()
 
 
 
@@ -760,7 +760,7 @@ extends 
 
 snapshot
-public MemStoreSnapshot snapshot()
+public MemStoreSnapshot snapshot()
 Push the current active memstore segment into the pipeline
  and create a snapshot of the tail of current compaction pipeline
  Snapshot must be cleared by call to AbstractMemStore.clearSnapshot(long).
@@ -777,7 +777,7 @@ extends 
 
 getFlushableSize
-public MemStoreSize getFlushableSize()
+public MemStoreSize getFlushableSize()
 On flush, how much memory we will clear.
 
 Returns:
@@ -791,7 +791,7 @@ extends 
 
 keySize
-protected long keySize()
+protected long keySize()
 
 Specified by:
 keySize in
 class AbstractMemStore
@@ -806,7 +806,7 @@ extends 
 
 heapSize
-protected long heapSize()
+protected long heapSize()
 
 Specified by:
 heapSize in
 class AbstractMemStore
@@ -822,7 +822,7 @@ extends 
 
 updateLowestUnflushedSequenceIdInWAL
-public void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfGreater)
+public void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfGreater)
 Description copied from 
class: AbstractMemStore
 Updates the wal with the lowest sequence id (oldest entry) 
that is still in memory
 
@@ -840,7 +840,7 @@ extends 
 
 startReplayingFromWAL
-public void startReplayingFromWAL()
+public void startReplayingFromWAL()
 This message intends to inform the MemStore that next 
coming updates
  are going to be part of the replaying edits from WAL
 
@@ -851,7 +851,7 @@ extends 
 
 stopReplayingFromWAL
-public void stopReplayingFromWAL()
+public void stopReplayingFromWAL()
 This message intends to inform the MemStore that the 
replaying edits from WAL
  are done
 
@@ -862,7 +862,7 @@ extends 
 
 getSegments
-protected https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getSegments()
+protected https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getSegments()
 
 Specified by:
 getSegments in
 class AbstractMemStore
@@ -877,7 +877,7 @@ extends 
 
 setCompositeSnapshot
-public void setCompositeSnapshot(boolean useCompositeSnapshot)
+public void setCompositeSnapshot(boolean useCompositeSnapshot)
 
 
 
@@ -886,7 +886,7 @@ extends 
 
 swapCompactedSegments
-public boolean swapCompactedSegments(VersionedSegmentsList versionedList,
+public boolean swapCompactedSegments(VersionedSegmentsList versionedList,
  ImmutableSegment result,
  boolean merge)
 
@@ -897,7 +897,7 @@ extends 
 
 flattenOneSegment
-public void flattenOneSegment(long requesterVersion,
+public void flattenOneSegment(long requesterVersion,
   MemStoreCompactionStrategy.Action action)
 
 Parameters:
@@ -913,7 +913,7 @@ extends 
 
 setIndexType
-void setIndexType(CompactingMemStore.IndexType type)
+void setIndexType(CompactingMemStore.IndexType type)
 
 
 
@@ -922,7 +922,7 @@ extends 
 
 getIndexType
-public CompactingMemStore.IndexType getIndexType()
+public CompactingMemStore.IndexType getIndexType()
 
 
 
@@ -931,7 +931,7 @@ extends 
 
 hasImmutableSegments
-public boolean hasImmutableSegments()
+public boolean hasImmutableSegments()
 
 
 
@@ -940,7 +940,7 @@ extends 
 
 getImmutableSegments
-public VersionedSegmentsList getImmutableSegments()
+public VersionedSegmentsList getImmutableSegme
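
The method descriptions above outline the snapshot lifecycle: snapshot() pushes the active segment into the pipeline and snapshots the pipeline tail, the snapshot must later be cleared with clearSnapshot(long), and calling snapshot() again before clearing indicates a failed or unfinished flush. A deliberately simplified, hypothetical restatement of that contract (the interface and demo class below are invented for illustration, not HBase types):

    // Hypothetical, minimal restatement of the snapshot lifecycle described above.
    public class SnapshotLifecycleSketch {
      interface SimpleMemStore {
        long snapshot();             // push active data aside, return a snapshot id
        void clearSnapshot(long id); // must be called once the flush has completed
      }

      static class DemoMemStore implements SimpleMemStore {
        private boolean snapshotOutstanding;

        public long snapshot() {
          if (snapshotOutstanding) {
            // Mirrors the warning in the quoted docs: a second snapshot before
            // cleanup means the previous flush failed or never cleaned up.
            System.out.println("Snapshot called again without clearing previous");
            return -1L;
          }
          snapshotOutstanding = true;
          return System.currentTimeMillis();
        }

        public void clearSnapshot(long id) {
          snapshotOutstanding = false;
        }
      }

      public static void main(String[] args) {
        SimpleMemStore m = new DemoMemStore();
        long id = m.snapshot();   // flusher takes a snapshot...
        m.clearSnapshot(id);      // ...and clears it when the flush is done
      }
    }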

[17/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
index 12d10e1..97ceefd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
@@ -1740,384 +1740,380 @@
 1732  // and will save us having to seek 
the stream backwards to reread the header we
 1733  // read the last time through 
here.
 1734  ByteBuffer headerBuf = 
getCachedHeader(offset);
-1735  if (LOG.isTraceEnabled()) {
-1736LOG.trace("Reading " + 
this.fileContext.getHFileName() + " at offset=" + offset +
-1737  ", pread=" + pread + ", 
verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-1738  headerBuf + ", 
onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-1739  }
-1740  // This is NOT same as 
verifyChecksum. This latter is whether to do hbase
-1741  // checksums. Can change with 
circumstances. The below flag is whether the
-1742  // file has support for checksums 
(version 2+).
-1743  boolean checksumSupport = 
this.fileContext.isUseHBaseChecksum();
-1744  long startTime = 
System.currentTimeMillis();
-1745  if (onDiskSizeWithHeader <= 0) 
{
-1746// We were not passed the block 
size. Need to get it from the header. If header was
-1747// not cached (see 
getCachedHeader above), need to seek to pull it in. This is costly
-1748// and should happen very 
rarely. Currently happens on open of a hfile reader where we
-1749// read the trailer blocks to 
pull in the indices. Otherwise, we are reading block sizes
-1750// out of the hfile index. To 
check, enable TRACE in this file and you'll get an exception
-1751// in a LOG every time we seek. 
See HBASE-17072 for more detail.
-1752if (headerBuf == null) {
-1753  if (LOG.isTraceEnabled()) {
-1754LOG.trace("Extra see to get 
block size!", new RuntimeException());
-1755  }
-1756  headerBuf = 
ByteBuffer.allocate(hdrSize);
-1757  readAtOffset(is, 
headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-1758  offset, pread);
-1759}
-1760onDiskSizeWithHeader = 
getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-1761  }
-1762  int preReadHeaderSize = headerBuf 
== null? 0 : hdrSize;
-1763  // Allocate enough space to fit 
the next block's header too; saves a seek next time through.
-1764  // onDiskBlock is whole block + 
header + checksums then extra hdrSize to read next header;
-1765  // onDiskSizeWithHeader is header, 
body, and any checksums if present. preReadHeaderSize
-1766  // says where to start reading. If 
we have the header cached, then we don't need to read
-1767  // it again and we can likely read 
from last place we left off w/o need to backup and reread
-1768  // the header we read last time 
through here.
-1769  // TODO: Make this 
ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-1770  byte [] onDiskBlock = new 
byte[onDiskSizeWithHeader + hdrSize];
-1771  int nextBlockOnDiskSize = 
readAtOffset(is, onDiskBlock, preReadHeaderSize,
-1772  onDiskSizeWithHeader - 
preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-1773  if (headerBuf != null) {
-1774// The header has been read when 
reading the previous block OR in a distinct header-only
-1775// read. Copy to this block's 
header.
-1776
System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, 
hdrSize);
-1777  } else {
-1778headerBuf = 
ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-1779  }
-1780  // Do a few checks before we go 
instantiate HFileBlock.
-1781  assert onDiskSizeWithHeader > 
this.hdrSize;
-1782  
verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, 
checksumSupport);
-1783  ByteBuffer onDiskBlockByteBuffer = 
ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
-1784  // Verify checksum of the data 
before using it for building HFileBlock.
-1785  if (verifyChecksum &&
-1786  !validateChecksum(offset, 
onDiskBlockByteBuffer, hdrSize)) {
-1787return null;
-1788  }
-1789  long duration = 
System.currentTimeMillis() - startTime;
-1790  if (updateMetrics) {
-1791
HFile.updateReadLatency(duration, pread);
-1792  }
-1793  // The onDiskBlock will become the 
headerAndDataBuffer for this block.
-1794  // If 
nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-1795  // contains the header of next 
block, s

[12/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
index 12d10e1..97ceefd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
@@ -1740,384 +1740,380 @@
 1732  // and will save us having to seek 
the stream backwards to reread the header we
 1733  // read the last time through 
here.
 1734  ByteBuffer headerBuf = 
getCachedHeader(offset);
-1735  if (LOG.isTraceEnabled()) {
-1736LOG.trace("Reading " + 
this.fileContext.getHFileName() + " at offset=" + offset +
-1737  ", pread=" + pread + ", 
verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-1738  headerBuf + ", 
onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-1739  }
-1740  // This is NOT same as 
verifyChecksum. This latter is whether to do hbase
-1741  // checksums. Can change with 
circumstances. The below flag is whether the
-1742  // file has support for checksums 
(version 2+).
-1743  boolean checksumSupport = 
this.fileContext.isUseHBaseChecksum();
-1744  long startTime = 
System.currentTimeMillis();
-1745  if (onDiskSizeWithHeader <= 0) 
{
-1746// We were not passed the block 
size. Need to get it from the header. If header was
-1747// not cached (see 
getCachedHeader above), need to seek to pull it in. This is costly
-1748// and should happen very 
rarely. Currently happens on open of a hfile reader where we
-1749// read the trailer blocks to 
pull in the indices. Otherwise, we are reading block sizes
-1750// out of the hfile index. To 
check, enable TRACE in this file and you'll get an exception
-1751// in a LOG every time we seek. 
See HBASE-17072 for more detail.
-1752if (headerBuf == null) {
-1753  if (LOG.isTraceEnabled()) {
-1754LOG.trace("Extra see to get 
block size!", new RuntimeException());
-1755  }
-1756  headerBuf = 
ByteBuffer.allocate(hdrSize);
-1757  readAtOffset(is, 
headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-1758  offset, pread);
-1759}
-1760onDiskSizeWithHeader = 
getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-1761  }
-1762  int preReadHeaderSize = headerBuf 
== null? 0 : hdrSize;
-1763  // Allocate enough space to fit 
the next block's header too; saves a seek next time through.
-1764  // onDiskBlock is whole block + 
header + checksums then extra hdrSize to read next header;
-1765  // onDiskSizeWithHeader is header, 
body, and any checksums if present. preReadHeaderSize
-1766  // says where to start reading. If 
we have the header cached, then we don't need to read
-1767  // it again and we can likely read 
from last place we left off w/o need to backup and reread
-1768  // the header we read last time 
through here.
-1769  // TODO: Make this 
ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-1770  byte [] onDiskBlock = new 
byte[onDiskSizeWithHeader + hdrSize];
-1771  int nextBlockOnDiskSize = 
readAtOffset(is, onDiskBlock, preReadHeaderSize,
-1772  onDiskSizeWithHeader - 
preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-1773  if (headerBuf != null) {
-1774// The header has been read when 
reading the previous block OR in a distinct header-only
-1775// read. Copy to this block's 
header.
-1776
System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, 
hdrSize);
-1777  } else {
-1778headerBuf = 
ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-1779  }
-1780  // Do a few checks before we go 
instantiate HFileBlock.
-1781  assert onDiskSizeWithHeader > 
this.hdrSize;
-1782  
verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, 
checksumSupport);
-1783  ByteBuffer onDiskBlockByteBuffer = 
ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
-1784  // Verify checksum of the data 
before using it for building HFileBlock.
-1785  if (verifyChecksum &&
-1786  !validateChecksum(offset, 
onDiskBlockByteBuffer, hdrSize)) {
-1787return null;
-1788  }
-1789  long duration = 
System.currentTimeMillis() - startTime;
-1790  if (updateMetrics) {
-1791
HFile.updateReadLatency(duration, pread);
-1792  }
-1793  // The onDiskBlock will become the 
headerAndDataBuffer for this block.
-1794  // If 
nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-1795  // contains the header of

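Editor's note: the hunk above covers the HFileBlock read path: reuse a header cached from the previous read when one is available, otherwise read just the header to learn the block's on-disk size, then pull in the block body plus the next block's header in a single call and validate its checksum. A standalone sketch of that control flow follows; the helper methods (readAt, parseOnDiskSize, checksumOk) and the header size are illustrative stand-ins, not the HBase API.

import java.io.IOException;
import java.nio.ByteBuffer;

/** Minimal sketch of the block-read flow above; helpers are hypothetical stand-ins. */
abstract class BlockReadSketch {
  static final int HDR_SIZE = 33; // assumed header size, for illustration only

  byte[] readBlock(long offset, int onDiskSizeWithHeader, ByteBuffer cachedHeader)
      throws IOException {
    ByteBuffer headerBuf = cachedHeader;
    // Size unknown: read only the header (an extra seek) and parse the size out of it.
    if (onDiskSizeWithHeader <= 0) {
      if (headerBuf == null) {
        headerBuf = ByteBuffer.allocate(HDR_SIZE);
        readAt(offset, headerBuf.array(), 0, HDR_SIZE);
      }
      onDiskSizeWithHeader = parseOnDiskSize(headerBuf);
    }
    // Read the body (skipping the header if it is already cached) plus the next block's
    // header, so the next trip through here can avoid a header-only seek.
    int preRead = headerBuf == null ? 0 : HDR_SIZE;
    byte[] onDiskBlock = new byte[onDiskSizeWithHeader + HDR_SIZE];
    readAt(offset + preRead, onDiskBlock, preRead, onDiskSizeWithHeader - preRead + HDR_SIZE);
    if (headerBuf != null) {
      // Header was read earlier; copy it into place instead of rereading it.
      System.arraycopy(headerBuf.array(), 0, onDiskBlock, 0, HDR_SIZE);
    }
    // Reject the block when checksum validation fails so the caller can retry or fall back.
    if (!checksumOk(onDiskBlock, onDiskSizeWithHeader)) {
      return null;
    }
    return onDiskBlock;
  }

  abstract void readAt(long offset, byte[] dest, int destOff, int len) throws IOException;
  abstract int parseOnDiskSize(ByteBuffer header);
  abstract boolean checksumOk(byte[] block, int onDiskSizeWithHeader);
}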
[25/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index 1391d55..4b723bf 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,16 +5,16 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20180401144554+00'00')
-/CreationDate (D:20180401144554+00'00')
+/ModDate (D:20180403144547+00'00')
+/CreationDate (D:20180403144547+00'00')
 >>
 endobj
 2 0 obj
 << /Type /Catalog
 /Pages 3 0 R
 /Names 26 0 R
-/Outlines 4513 0 R
-/PageLabels 4736 0 R
+/Outlines 4533 0 R
+/PageLabels 4759 0 R
 /PageMode /UseOutlines
 /OpenAction [7 0 R /FitH 842.89]
 /ViewerPreferences << /DisplayDocTitle true
@@ -23,8 +23,8 @@ endobj
 endobj
 3 0 obj
 << /Type /Pages
-/Count 706

[24/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/apidocs/index-all.html
--
diff --git a/apidocs/index-all.html b/apidocs/index-all.html
index 5a8c6c3..c5562f9 100644
--- a/apidocs/index-all.html
+++ b/apidocs/index-all.html
@@ -3756,8 +3756,8 @@
 
 DEFAULT_MEMSTORE_FLUSH_SIZE
 - Static variable in class org.apache.hadoop.hbase.client.TableDescriptorBuilder
 
-Constant that denotes the maximum default size of the 
memstore after which
- the contents are flushed to the store files
+Constant that denotes the maximum default size of the 
memstore in bytes after which
+ the contents are flushed to the store files.
 
 DEFAULT_MEMSTORE_FLUSH_SIZE
 - Static variable in class org.apache.hadoop.hbase.HTableDescriptor
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html 
b/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
index e5e2a7a..0bf1b4d 100644
--- a/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
+++ b/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
@@ -143,8 +143,8 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 static long
 DEFAULT_MEMSTORE_FLUSH_SIZE
-Constant that denotes the maximum default size of the 
memstore after which
- the contents are flushed to the store files
+Constant that denotes the maximum default size of the 
memstore in bytes after which
+ the contents are flushed to the store files.
 
 
 
@@ -422,8 +422,8 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 DEFAULT_MEMSTORE_FLUSH_SIZE
 public static final long DEFAULT_MEMSTORE_FLUSH_SIZE
-Constant that denotes the maximum default size of the 
memstore after which
- the contents are flushed to the store files
+Constant that denotes the maximum default size of the 
memstore in bytes after which
+ the contents are flushed to the store files.
 
 See Also:
 Constant
 Field Values

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/apidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
index 64a63e6..fc17f3d 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
@@ -190,8 +190,8 @@
 182  public static final boolean 
DEFAULT_NORMALIZATION_ENABLED = false;
 183
 184  /**
-185   * Constant that denotes the maximum 
default size of the memstore after which
-186   * the contents are flushed to the 
store files
+185   * Constant that denotes the maximum 
default size of the memstore in bytes after which
+186   * the contents are flushed to the 
store files.
 187   */
 188  public static final long 
DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L;
 189
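Editor's note: the corrected javadoc makes the unit explicit: DEFAULT_MEMSTORE_FLUSH_SIZE is a byte count, 1024 * 1024 * 128L = 134,217,728 bytes, i.e. 128 MB. A table can override it at creation time; the sketch below assumes an open Admin handle and that the setMemStoreFlushSize builder method is available in this HBase version, and the table and family names are made up.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Sketch: create a table whose memstore flushes at 256 MB instead of the 128 MB default.
final class FlushSizeExample {
  static void createWithLargerFlushSize(Admin admin) throws java.io.IOException {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setMemStoreFlushSize(256L * 1024 * 1024) // bytes, same unit as the constant
        .build();
    admin.createTable(td);
  }
}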



[06/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
index 3dbdec3..22e7059 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
@@ -127,468 +127,470 @@
 119// initialization of the flush size 
should happen after initialization of the index type
 120// so do not transfer the following 
method
 121initInmemoryFlushSize(conf);
-122  }
-123
-124  @VisibleForTesting
-125  protected MemStoreCompactor 
createMemStoreCompactor(MemoryCompactionPolicy compactionPolicy)
-126  throws IllegalArgumentIOException 
{
-127return new MemStoreCompactor(this, 
compactionPolicy);
-128  }
-129
-130  private void 
initInmemoryFlushSize(Configuration conf) {
-131double factor = 0;
-132long memstoreFlushSize = 
getRegionServices().getMemStoreFlushSize();
-133int numStores = 
getRegionServices().getNumStores();
-134if (numStores <= 1) {
-135  // Family number might also be zero 
in some of our unit test case
-136  numStores = 1;
-137}
-138inmemoryFlushSize = memstoreFlushSize 
/ numStores;
-139// multiply by a factor (the same 
factor for all index types)
-140factor = 
conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
-141  
IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
-142
-143inmemoryFlushSize = (long) 
(inmemoryFlushSize * factor);
-144LOG.info("Setting in-memory flush 
size threshold to {} and immutable segments index to type={}",
-145
StringUtils.byteDesc(inmemoryFlushSize), indexType);
-146  }
-147
-148  /**
-149   * @return Total memory occupied by 
this MemStore. This won't include any size occupied by the
-150   * snapshot. We assume the 
snapshot will get cleared soon. This is not thread safe and
-151   * the memstore may be changed 
while computing its size. It is the responsibility of the
-152   * caller to make sure this 
doesn't happen.
-153   */
-154  @Override
-155  public MemStoreSize size() {
-156MemStoreSizing memstoreSizing = new 
MemStoreSizing();
-157
memstoreSizing.incMemStoreSize(active.getMemStoreSize());
-158for (Segment item : 
pipeline.getSegments()) {
-159  
memstoreSizing.incMemStoreSize(item.getMemStoreSize());
-160}
-161return memstoreSizing;
-162  }
-163
-164  /**
-165   * This method is called before the 
flush is executed.
-166   * @return an estimation (lower bound) 
of the unflushed sequence id in memstore after the flush
-167   * is executed. if memstore will be 
cleared returns {@code HConstants.NO_SEQNUM}.
-168   */
-169  @Override
-170  public long preFlushSeqIDEstimation() 
{
-171if(compositeSnapshot) {
-172  return HConstants.NO_SEQNUM;
-173}
-174Segment segment = getLastSegment();
-175if(segment == null) {
-176  return HConstants.NO_SEQNUM;
-177}
-178return segment.getMinSequenceId();
-179  }
-180
-181  @Override
-182  public boolean isSloppy() {
-183return true;
-184  }
-185
-186  /**
-187   * Push the current active memstore 
segment into the pipeline
-188   * and create a snapshot of the tail of 
current compaction pipeline
-189   * Snapshot must be cleared by call to 
{@link #clearSnapshot}.
-190   * {@link #clearSnapshot(long)}.
-191   * @return {@link MemStoreSnapshot}
-192   */
-193  @Override
-194  public MemStoreSnapshot snapshot() {
-195// If snapshot currently has entries, 
then flusher failed or didn't call
-196// cleanup.  Log a warning.
-197if (!this.snapshot.isEmpty()) {
-198  LOG.warn("Snapshot called again 
without clearing previous. " +
-199  "Doing nothing. Another ongoing 
flush or did we fail last attempt?");
-200} else {
-201  LOG.debug("FLUSHING TO DISK {}, 
store={}",
-202
getRegionServices().getRegionInfo().getEncodedName(), getFamilyName());
-203  stopCompaction();
-204  
pushActiveToPipeline(this.active);
-205  snapshotId = 
EnvironmentEdgeManager.currentTime();
-206  // in both cases whatever is pushed 
to snapshot is cleared from the pipeline
-207  if (compositeSnapshot) {
-208pushPipelineToSnapshot();
-209  } else {
-210pushTailToSnapshot();
-211  }
-212  compactor.resetStats();
-213}
-214return new 
MemStoreSnapshot(snapshotId, this.snapshot);
-215  }
-216
-217  /**
-218   * On flush, how much memory we will 
clear.
-219   * @return size of data that is going 
to be flushed
-220   */
-221  @Override
-222  public MemStoreSize getFlushableSize() 
{
-223MemStoreSizing snapshotSi
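Editor's note: the initInmemoryFlushSize hunk above boils down to simple arithmetic: the region's memstore flush size is split evenly across its stores and then scaled by the configured in-memory flush threshold factor. A self-contained sketch, using made-up numbers rather than HBase defaults:

// Sketch of the in-memory flush threshold computation from initInmemoryFlushSize above.
public final class InMemoryFlushSizeSketch {
  static long inMemoryFlushSize(long memstoreFlushSize, int numStores, double factor) {
    if (numStores <= 1) {
      numStores = 1; // family count can be zero in some unit tests
    }
    long perStore = memstoreFlushSize / numStores;
    return (long) (perStore * factor); // the same factor applies to all index types
  }

  public static void main(String[] args) {
    // Example only: a 128 MB region flush size, 4 stores, and an illustrative 0.25 factor.
    long threshold = inMemoryFlushSize(128L * 1024 * 1024, 4, 0.25);
    System.out.println(threshold); // 8388608 bytes = 8 MB
  }
}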

[04/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/EagerMemStoreCompactionStrategy.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/EagerMemStoreCompactionStrategy.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/EagerMemStoreCompactionStrategy.html
index 6ab9121..8d0867b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/EagerMemStoreCompactionStrategy.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/EagerMemStoreCompactionStrategy.html
@@ -31,17 +31,22 @@
 023
 024@InterfaceAudience.Private
 025public class 
EagerMemStoreCompactionStrategy extends MemStoreCompactionStrategy{
-026
-027  private static final String name = 
"EAGER";
+026  private static final String NAME = 
"EAGER";
+027
 028  public 
EagerMemStoreCompactionStrategy(Configuration conf, String cfName) {
 029super(conf, cfName);
 030  }
 031
 032  @Override
 033  public Action 
getAction(VersionedSegmentsList versionedList) {
-034return compact(versionedList, 
name);
+034return compact(versionedList, 
getName());
 035  }
-036}
+036
+037  @Override
+038  protected String getName() {
+039return NAME;
+040  }
+041}
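Editor's note: the EagerMemStoreCompactionStrategy change above is a small refactor: the hard-coded name string moves behind a getName() hook so logic shared with the base class can report which concrete strategy made a decision. The sketch below shows the pattern with illustrative bodies; only the class name, the NAME constant, and the getName()/getAction() shape come from the diff.

// Sketch of the strategy-name refactor shown in the hunk above.
abstract class CompactionStrategySketch {
  enum Action { COMPACT, MERGE, FLATTEN } // illustrative subset of actions

  protected abstract String getName();

  Action compact(int segmentCount) {
    // Base-class logic can now attribute the decision to the concrete strategy.
    System.out.println(getName() + " strategy compacting " + segmentCount + " segments");
    return Action.COMPACT;
  }
}

final class EagerStrategySketch extends CompactionStrategySketch {
  private static final String NAME = "EAGER";

  @Override
  protected String getName() {
    return NAME;
  }

  Action getAction(int segmentCount) {
    return compact(segmentCount); // always compacts, hence "eager"
  }
}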
 
 
 



[16/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
index 12d10e1..97ceefd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockWritable.html
@@ -1740,384 +1740,380 @@
 1732  // and will save us having to seek 
the stream backwards to reread the header we
 1733  // read the last time through 
here.
 1734  ByteBuffer headerBuf = 
getCachedHeader(offset);
-1735  if (LOG.isTraceEnabled()) {
-1736LOG.trace("Reading " + 
this.fileContext.getHFileName() + " at offset=" + offset +
-1737  ", pread=" + pread + ", 
verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-1738  headerBuf + ", 
onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-1739  }
-1740  // This is NOT same as 
verifyChecksum. This latter is whether to do hbase
-1741  // checksums. Can change with 
circumstances. The below flag is whether the
-1742  // file has support for checksums 
(version 2+).
-1743  boolean checksumSupport = 
this.fileContext.isUseHBaseChecksum();
-1744  long startTime = 
System.currentTimeMillis();
-1745  if (onDiskSizeWithHeader <= 0) 
{
-1746// We were not passed the block 
size. Need to get it from the header. If header was
-1747// not cached (see 
getCachedHeader above), need to seek to pull it in. This is costly
-1748// and should happen very 
rarely. Currently happens on open of a hfile reader where we
-1749// read the trailer blocks to 
pull in the indices. Otherwise, we are reading block sizes
-1750// out of the hfile index. To 
check, enable TRACE in this file and you'll get an exception
-1751// in a LOG every time we seek. 
See HBASE-17072 for more detail.
-1752if (headerBuf == null) {
-1753  if (LOG.isTraceEnabled()) {
-1754LOG.trace("Extra see to get 
block size!", new RuntimeException());
-1755  }
-1756  headerBuf = 
ByteBuffer.allocate(hdrSize);
-1757  readAtOffset(is, 
headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-1758  offset, pread);
-1759}
-1760onDiskSizeWithHeader = 
getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-1761  }
-1762  int preReadHeaderSize = headerBuf 
== null? 0 : hdrSize;
-1763  // Allocate enough space to fit 
the next block's header too; saves a seek next time through.
-1764  // onDiskBlock is whole block + 
header + checksums then extra hdrSize to read next header;
-1765  // onDiskSizeWithHeader is header, 
body, and any checksums if present. preReadHeaderSize
-1766  // says where to start reading. If 
we have the header cached, then we don't need to read
-1767  // it again and we can likely read 
from last place we left off w/o need to backup and reread
-1768  // the header we read last time 
through here.
-1769  // TODO: Make this 
ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-1770  byte [] onDiskBlock = new 
byte[onDiskSizeWithHeader + hdrSize];
-1771  int nextBlockOnDiskSize = 
readAtOffset(is, onDiskBlock, preReadHeaderSize,
-1772  onDiskSizeWithHeader - 
preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-1773  if (headerBuf != null) {
-1774// The header has been read when 
reading the previous block OR in a distinct header-only
-1775// read. Copy to this block's 
header.
-1776
System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, 
hdrSize);
-1777  } else {
-1778headerBuf = 
ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-1779  }
-1780  // Do a few checks before we go 
instantiate HFileBlock.
-1781  assert onDiskSizeWithHeader > 
this.hdrSize;
-1782  
verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, 
checksumSupport);
-1783  ByteBuffer onDiskBlockByteBuffer = 
ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
-1784  // Verify checksum of the data 
before using it for building HFileBlock.
-1785  if (verifyChecksum &&
-1786  !validateChecksum(offset, 
onDiskBlockByteBuffer, hdrSize)) {
-1787return null;
-1788  }
-1789  long duration = 
System.currentTimeMillis() - startTime;
-1790  if (updateMetrics) {
-1791
HFile.updateReadLatency(duration, pread);
-1792  }
-1793  // The onDiskBlock will become the 
headerAndDataBuffer for this block.
-1794  // If 
nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-1795  // contains the header of next 
block, s

[26/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/21347dff
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/21347dff
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/21347dff

Branch: refs/heads/asf-site
Commit: 21347dff7da261edf5b5c805f2d14c24bfd883ee
Parents: e8a9b2e
Author: jenkins 
Authored: Tue Apr 3 14:47:32 2018 +
Committer: jenkins 
Committed: Tue Apr 3 14:47:32 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 30756 ++---
 apidocs/index-all.html  | 4 +-
 .../hbase/client/TableDescriptorBuilder.html| 8 +-
 .../hbase/client/TableDescriptorBuilder.html| 4 +-
 book.html   |  1405 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   |   134 +-
 coc.html| 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html |18 +-
 devapidocs/index-all.html   |28 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../hbase/client/TableDescriptorBuilder.html| 8 +-
 .../hadoop/hbase/client/package-tree.html   |24 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   | 6 +-
 .../hbase/io/hfile/HFileBlock.FSReaderImpl.html |16 +-
 .../hadoop/hbase/io/hfile/HFileBlock.html   |44 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 4 +-
 .../hadoop/hbase/master/package-tree.html   | 6 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |16 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 2 +-
 .../hadoop/hbase/quotas/package-tree.html   | 8 +-
 .../AdaptiveMemStoreCompactionStrategy.html |63 +-
 .../BasicMemStoreCompactionStrategy.html|39 +-
 ...ompactingMemStore.InMemoryFlushRunnable.html | 6 +-
 .../hbase/regionserver/CompactingMemStore.html  |82 +-
 .../EagerMemStoreCompactionStrategy.html|35 +-
 .../regionserver/HStore.StoreFlusherImpl.html   |34 +-
 .../hadoop/hbase/regionserver/HStore.html   |   532 +-
 .../MemStoreCompactionStrategy.Action.html  | 4 +-
 .../MemStoreCompactionStrategy.html |52 +-
 .../hbase/regionserver/MemStoreCompactor.html   |35 +-
 .../hbase/regionserver/class-use/MemStore.html  |13 +
 .../hadoop/hbase/regionserver/package-tree.html |20 +-
 .../regionserver/querymatcher/package-tree.html | 2 +-
 .../hbase/regionserver/wal/package-tree.html| 2 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../hadoop/hbase/security/package-tree.html | 2 +-
 .../apache/hadoop/hbase/util/package-tree.html  | 8 +-
 .../apache/hadoop/hbase/wal/package-tree.html   | 2 +-
 .../org/apache/hadoop/hbase/Version.html| 6 +-
 ...riptorBuilder.ModifyableTableDescriptor.html | 4 +-
 .../hbase/client/TableDescriptorBuilder.html| 4 +-
 .../io/hfile/HFileBlock.BlockIterator.html  |   738 +-
 .../io/hfile/HFileBlock.BlockWritable.html  |   738 +-
 .../hbase/io/hfile/HFileBlock.FSReader.html |   738 +-
 .../hbase/io/hfile/HFileBlock.FSReaderImpl.html |   738 +-
 .../hbase/io/hfile/HFileBlock.Header.html   |   738 +-
 .../io/hfile/HFileBlock.PrefetchedHeader.html   |   738 +-
 .../hbase/io/hfile/HFileBlock.Writer.State.html |   738 +-
 .../hbase/io/hfile/HFileBlock.Writer.html   |   738 +-
 .../hadoop/hbase/io/hfile/HFileBlock.html   |   738 +-
 .../AdaptiveMemStoreCompactionStrategy.html |74 +-
 .../BasicMemStoreCompactionStrategy.html|28 +-
 ...ompactingMemStore.InMemoryFlushRunnable.html |   926 +-
 .../CompactingMemStore.IndexType.html   |   926 +-
 .../hbase/regionserver/CompactingMemStore.html  |   926 +-
 .../EagerMemStoreCompactionStrategy.html|13 +-
 .../regionserver/HStore.StoreFlusherImpl.html   |  4961 +--
 .../hadoop/hbase/regionserver/HStore.html   |  4961 +--
 .../MemStoreCompactionStrategy.Action.html  |76 +-
 .../MemStoreCompactionStrategy.html |76 +-
 .../hbase/regionserver/MemStoreCompactor.html   |   345 +-
 export_control.html | 4 +-
 index.html  | 4 +-
 integration.html 

[03/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index cc35a46..cca21a9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -190,1967 +190,1967 @@
 182
 183  private final boolean 
verifyBulkLoads;
 184
-185  private final AtomicInteger 
currentParallelPutCount = new AtomicInteger(0);
-186  private final int 
parallelPutCountPrintThreshold;
-187
-188  private ScanInfo scanInfo;
-189
-190  // All access must be synchronized.
-191  // TODO: ideally, this should be part 
of storeFileManager, as we keep passing this to it.
-192  private final List 
filesCompacting = Lists.newArrayList();
-193
-194  // All access must be synchronized.
-195  private final 
Set changedReaderObservers =
-196Collections.newSetFromMap(new 
ConcurrentHashMap());
-197
-198  protected final int blocksize;
-199  private HFileDataBlockEncoder 
dataBlockEncoder;
-200
-201  /** Checksum configuration */
-202  protected ChecksumType checksumType;
-203  protected int bytesPerChecksum;
-204
-205  // Comparing KeyValues
-206  protected final CellComparator 
comparator;
-207
-208  final StoreEngine 
storeEngine;
+185  /**
+186   * Use this counter to track concurrent 
puts. If TRACE-log is enabled, if we are over the
+187   * threshold set by 
hbase.region.store.parallel.put.print.threshold (Default is 50) we will
+188   * log a message that identifies the 
Store experience this high-level of concurrency.
+189   */
+190  private final AtomicInteger 
currentParallelPutCount = new AtomicInteger(0);
+191  private final int 
parallelPutCountPrintThreshold;
+192
+193  private ScanInfo scanInfo;
+194
+195  // All access must be synchronized.
+196  // TODO: ideally, this should be part 
of storeFileManager, as we keep passing this to it.
+197  private final List 
filesCompacting = Lists.newArrayList();
+198
+199  // All access must be synchronized.
+200  private final 
Set changedReaderObservers =
+201Collections.newSetFromMap(new 
ConcurrentHashMap());
+202
+203  protected final int blocksize;
+204  private HFileDataBlockEncoder 
dataBlockEncoder;
+205
+206  /** Checksum configuration */
+207  protected ChecksumType checksumType;
+208  protected int bytesPerChecksum;
 209
-210  private static final AtomicBoolean 
offPeakCompactionTracker = new AtomicBoolean();
-211  private volatile OffPeakHours 
offPeakHours;
+210  // Comparing KeyValues
+211  protected final CellComparator 
comparator;
 212
-213  private static final int 
DEFAULT_FLUSH_RETRIES_NUMBER = 10;
-214  private int flushRetriesNumber;
-215  private int pauseTime;
-216
-217  private long blockingFileCount;
-218  private int 
compactionCheckMultiplier;
-219  protected Encryption.Context 
cryptoContext = Encryption.Context.NONE;
-220
-221  private AtomicLong flushedCellsCount = 
new AtomicLong();
-222  private AtomicLong compactedCellsCount 
= new AtomicLong();
-223  private AtomicLong 
majorCompactedCellsCount = new AtomicLong();
-224  private AtomicLong flushedCellsSize = 
new AtomicLong();
-225  private AtomicLong 
flushedOutputFileSize = new AtomicLong();
-226  private AtomicLong compactedCellsSize = 
new AtomicLong();
-227  private AtomicLong 
majorCompactedCellsSize = new AtomicLong();
-228
-229  /**
-230   * Constructor
-231   * @param region
-232   * @param family HColumnDescriptor for 
this column
-233   * @param confParam configuration 
object
-234   * failed.  Can be null.
-235   * @throws IOException
-236   */
-237  protected HStore(final HRegion region, 
final ColumnFamilyDescriptor family,
-238  final Configuration confParam) 
throws IOException {
-239
-240this.fs = 
region.getRegionFileSystem();
-241
-242// Assemble the store's home 
directory and Ensure it exists.
-243
fs.createStoreDir(family.getNameAsString());
-244this.region = region;
-245this.family = family;
-246// 'conf' renamed to 'confParam' b/c 
we use this.conf in the constructor
-247// CompoundConfiguration will look 
for keys in reverse order of addition, so we'd
-248// add global config first, then 
table and cf overrides, then cf metadata.
-249this.conf = new 
CompoundConfiguration()
-250  .add(confParam)
-251  
.addBytesMap(region.getTableDescriptor().getValues())
-252  
.addStringMap(family.getConfiguration())
-253  .addBytesMap(family.getValues());
-254this.blocksize = 
family.getBlocksize();
-255
-
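Editor's note: the new javadoc in this hunk describes currentParallelPutCount: a counter of in-flight puts that, when it exceeds the hbase.region.store.parallel.put.print.threshold setting (default 50), triggers a trace-level message identifying the busy store. The pattern is sketched below with plain JDK types; it is an illustration of the idea, not the HStore implementation.

import java.util.concurrent.atomic.AtomicInteger;

// Sketch: count concurrent puts and report when the configured threshold is crossed.
final class ParallelPutTracker {
  private final AtomicInteger currentParallelPutCount = new AtomicInteger(0);
  private final int printThreshold; // e.g. hbase.region.store.parallel.put.print.threshold

  ParallelPutTracker(int printThreshold) {
    this.printThreshold = printThreshold;
  }

  void put(Runnable writeToMemstore) {
    if (currentParallelPutCount.incrementAndGet() > printThreshold) {
      System.out.println("store is handling " + currentParallelPutCount.get()
          + " concurrent puts (threshold " + printThreshold + ")");
    }
    try {
      writeToMemstore.run();
    } finally {
      currentParallelPutCount.decrementAndGet();
    }
  }
}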

[23/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/book.html
--
diff --git a/book.html b/book.html
index cbb..233b28d 100644
--- a/book.html
+++ b/book.html
@@ -138,166 +138,172 @@
 75. Storing Medium-sized Objects (MOB)
 
 
+In-memory Compaction
+
+76. Overview
+77. Enabling
+
+
 Backup and Restore
 
-76. Overview
-77. Terminology
-78. Planning
-79. First-time configuration steps
-80. Backup and Restore 
commands
-81. Administration of Backup Images
-82. Configuration keys
-83. Best Practices
-84. Scenario: Safeguarding Application 
Datasets on Amazon S3
-85. Security of Backup Data
-86. Technical Details of Incremental 
Backup and Restore
-87. A Warning on File System 
Growth
-88. Capacity Planning
-89. Limitations of the Backup and Restore 
Utility
+78. Overview
+79. Terminology
+80. Planning
+81. First-time configuration steps
+82. Backup and Restore 
commands
+83. Administration of Backup Images
+84. Configuration keys
+85. Best Practices
+86. Scenario: Safeguarding Application 
Datasets on Amazon S3
+87. Security of Backup Data
+88. Technical Details of Incremental 
Backup and Restore
+89. A Warning on File System 
Growth
+90. Capacity Planning
+91. Limitations of the Backup and Restore 
Utility
 
 
 Apache HBase APIs
 
-90. Examples
+92. Examples
 
 
 Apache HBase External APIs
 
-91. REST
-92. Thrift
-93. C/C++ Apache HBase Client
-94. Using Java Data Objects (JDO) with HBase
-95. Scala
-96. Jython
+93. REST
+94. Thrift
+95. C/C++ Apache HBase Client
+96. Using Java Data Objects (JDO) with HBase
+97. Scala
+98. Jython
 
 
 Thrift API and Filter Language
 
-97. Filter Language
+99. Filter Language
 
 
 HBase and Spark
 
-98. Basic Spark
-99. Spark Streaming
-100. Bulk Load
-101. SparkSQL/DataFrames
+100. Basic Spark
+101. Spark Streaming
+102. Bulk Load
+103. SparkSQL/DataFrames
 
 
 Apache HBase Coprocessors
 
-102. Coprocessor Overview
-103. Types of Coprocessors
-104. Loading Coprocessors
-105. Examples
-106. Guidelines For 
Deploying A Coprocessor
-107. Restricting Coprocessor 
Usage
+104. Coprocessor Overview
+105. Types of Coprocessors
+106. Loading Coprocessors
+107. Examples
+108. Guidelines For 
Deploying A Coprocessor
+109. Restricting Coprocessor 
Usage
 
 
 Apache HBase Performance Tuning
 
-108. Operating System
-109. Network
-110. Java
-111. HBase Configurations
-112. ZooKeeper
-113. Schema Design
-114. HBase General Patterns
-115. Writing to HBase
-116. Reading from HBase
-117. Deleting from HBase
-118. HDFS
-119. Amazon EC2
-120. Collocating HBase and 
MapReduce
-121. Case Studies
+110. Operating System
+111. Network
+112. Java
+113. HBase Configurations
+114. ZooKeeper
+115. Schema Design
+116. HBase General Patterns
+117. Writing to HBase
+118. Reading from HBase
+119. Deleting from HBase
+120. HDFS
+121. Amazon EC2
+122. Collocating HBase and 
MapReduce
+123. Case Studies
 
 
 Troubleshooting and Debugging Apache HBase
 
-122. General Guidelines
-123. Logs
-124. Resources
-125. Tools
-126. Client
-127. MapReduce
-128. NameNode
-129. Network
-130. RegionServer
-131. Master
-132. ZooKeeper
-133. Amazon EC2
-134. HBase and Hadoop version issues
-135. HBase and HDFS
-136. Running unit or integration tests
-137. Case Studies
-138. Cryptographic Features
-139. Operating System 
Specific Issues
-140. JDK Issues
+124. General Guidelines
+125. Logs
+126. Resources
+127. Tools
+128. Client
+129. MapReduce
+130. NameNode
+131. Network
+132. RegionServer
+133. Master
+134. ZooKeeper
+135. Amazon EC2
+136. HBase and Hadoop version issues
+137. HBase and HDFS
+138. Running unit or integration tests
+139. Case Studies
+140. Cryptographic Features
+141. Operating System 
Specific Issues
+142. JDK Issues
 
 
 Apache HBase Case Studies
 
-141. Overview
-142. Schema Design
-143. Performance/Troubleshooting
+143. Overview
+144. Schema Design
+145. Performance/Troubleshooting
 
 
 Apache HBase Operational Management
 
-144. HBase Tools and Utilities
-145. Region Management
-146. Node Management
-147. HBase Metrics
-148. HBase Monitoring
-149. Cluster Replication
-150. Running 
Multiple Workloads On a Single Cluster
-151. HBase Backup
-152. HBase Snapshots
-153. Storing Snapshots in Microsoft Azure Blob 
Storage
-154. Capacity Planning and Region Sizing
-155. Table Rename
-156. RegionServer Grouping
-157. Region Normalizer
+146. HBase Tools and Utilities
+147. Region Management
+148. Node Management
+149. HBase Metrics
+150. HBase Monitoring
+151. Cluster Replication
+152. Running 
Multiple Workloads On a Single Cluster
+153. HBase Backup
+154. HBase Snapshots
+155. Storing Snapshots in Microsoft Azure Blob 
Storage
+156. Capacity Planning and Region Sizing
+157. Table Rename
+158. RegionServer Grouping
+159. Region Normalizer
 
 
 Building and Developing Apache HBase
 
-158. Getting Involved
-159. Apache HBase Repositories
-160. IDEs
-161. Building Apache HBase
-162. Releasing Apache HBase
-163. Voting on Release Cand

[09/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
index 12d10e1..97ceefd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
@@ -1740,384 +1740,380 @@
 1732  // and will save us having to seek 
the stream backwards to reread the header we
 1733  // read the last time through 
here.
 1734  ByteBuffer headerBuf = 
getCachedHeader(offset);
-1735  if (LOG.isTraceEnabled()) {
-1736LOG.trace("Reading " + 
this.fileContext.getHFileName() + " at offset=" + offset +
-1737  ", pread=" + pread + ", 
verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-1738  headerBuf + ", 
onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-1739  }
-1740  // This is NOT same as 
verifyChecksum. This latter is whether to do hbase
-1741  // checksums. Can change with 
circumstances. The below flag is whether the
-1742  // file has support for checksums 
(version 2+).
-1743  boolean checksumSupport = 
this.fileContext.isUseHBaseChecksum();
-1744  long startTime = 
System.currentTimeMillis();
-1745  if (onDiskSizeWithHeader <= 0) 
{
-1746// We were not passed the block 
size. Need to get it from the header. If header was
-1747// not cached (see 
getCachedHeader above), need to seek to pull it in. This is costly
-1748// and should happen very 
rarely. Currently happens on open of a hfile reader where we
-1749// read the trailer blocks to 
pull in the indices. Otherwise, we are reading block sizes
-1750// out of the hfile index. To 
check, enable TRACE in this file and you'll get an exception
-1751// in a LOG every time we seek. 
See HBASE-17072 for more detail.
-1752if (headerBuf == null) {
-1753  if (LOG.isTraceEnabled()) {
-1754LOG.trace("Extra see to get 
block size!", new RuntimeException());
-1755  }
-1756  headerBuf = 
ByteBuffer.allocate(hdrSize);
-1757  readAtOffset(is, 
headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-1758  offset, pread);
-1759}
-1760onDiskSizeWithHeader = 
getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-1761  }
-1762  int preReadHeaderSize = headerBuf 
== null? 0 : hdrSize;
-1763  // Allocate enough space to fit 
the next block's header too; saves a seek next time through.
-1764  // onDiskBlock is whole block + 
header + checksums then extra hdrSize to read next header;
-1765  // onDiskSizeWithHeader is header, 
body, and any checksums if present. preReadHeaderSize
-1766  // says where to start reading. If 
we have the header cached, then we don't need to read
-1767  // it again and we can likely read 
from last place we left off w/o need to backup and reread
-1768  // the header we read last time 
through here.
-1769  // TODO: Make this 
ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-1770  byte [] onDiskBlock = new 
byte[onDiskSizeWithHeader + hdrSize];
-1771  int nextBlockOnDiskSize = 
readAtOffset(is, onDiskBlock, preReadHeaderSize,
-1772  onDiskSizeWithHeader - 
preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-1773  if (headerBuf != null) {
-1774// The header has been read when 
reading the previous block OR in a distinct header-only
-1775// read. Copy to this block's 
header.
-1776
System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, 
hdrSize);
-1777  } else {
-1778headerBuf = 
ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-1779  }
-1780  // Do a few checks before we go 
instantiate HFileBlock.
-1781  assert onDiskSizeWithHeader > 
this.hdrSize;
-1782  
verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, 
checksumSupport);
-1783  ByteBuffer onDiskBlockByteBuffer = 
ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
-1784  // Verify checksum of the data 
before using it for building HFileBlock.
-1785  if (verifyChecksum &&
-1786  !validateChecksum(offset, 
onDiskBlockByteBuffer, hdrSize)) {
-1787return null;
-1788  }
-1789  long duration = 
System.currentTimeMillis() - startTime;
-1790  if (updateMetrics) {
-1791
HFile.updateReadLatency(duration, pread);
-1792  }
-1793  // The onDiskBlock will become the 
headerAndDataBuffer for this block.
-1794  // If 
nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-1795  // contains the header of next 
block, so no need to set next block's header in it.
-1796  HFileBlock hFileBl

[15/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
index 12d10e1..97ceefd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
@@ -1740,384 +1740,380 @@
 1732  // and will save us having to seek 
the stream backwards to reread the header we
 1733  // read the last time through 
here.
 1734  ByteBuffer headerBuf = 
getCachedHeader(offset);
-1735  if (LOG.isTraceEnabled()) {
-1736LOG.trace("Reading " + 
this.fileContext.getHFileName() + " at offset=" + offset +
-1737  ", pread=" + pread + ", 
verifyChecksum=" + verifyChecksum + ", cachedHeader=" +
-1738  headerBuf + ", 
onDiskSizeWithHeader=" + onDiskSizeWithHeader);
-1739  }
-1740  // This is NOT same as 
verifyChecksum. This latter is whether to do hbase
-1741  // checksums. Can change with 
circumstances. The below flag is whether the
-1742  // file has support for checksums 
(version 2+).
-1743  boolean checksumSupport = 
this.fileContext.isUseHBaseChecksum();
-1744  long startTime = 
System.currentTimeMillis();
-1745  if (onDiskSizeWithHeader <= 0) 
{
-1746// We were not passed the block 
size. Need to get it from the header. If header was
-1747// not cached (see 
getCachedHeader above), need to seek to pull it in. This is costly
-1748// and should happen very 
rarely. Currently happens on open of a hfile reader where we
-1749// read the trailer blocks to 
pull in the indices. Otherwise, we are reading block sizes
-1750// out of the hfile index. To 
check, enable TRACE in this file and you'll get an exception
-1751// in a LOG every time we seek. 
See HBASE-17072 for more detail.
-1752if (headerBuf == null) {
-1753  if (LOG.isTraceEnabled()) {
-1754LOG.trace("Extra see to get 
block size!", new RuntimeException());
-1755  }
-1756  headerBuf = 
ByteBuffer.allocate(hdrSize);
-1757  readAtOffset(is, 
headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
-1758  offset, pread);
-1759}
-1760onDiskSizeWithHeader = 
getOnDiskSizeWithHeader(headerBuf, checksumSupport);
-1761  }
-1762  int preReadHeaderSize = headerBuf 
== null? 0 : hdrSize;
-1763  // Allocate enough space to fit 
the next block's header too; saves a seek next time through.
-1764  // onDiskBlock is whole block + 
header + checksums then extra hdrSize to read next header;
-1765  // onDiskSizeWithHeader is header, 
body, and any checksums if present. preReadHeaderSize
-1766  // says where to start reading. If 
we have the header cached, then we don't need to read
-1767  // it again and we can likely read 
from last place we left off w/o need to backup and reread
-1768  // the header we read last time 
through here.
-1769  // TODO: Make this 
ByteBuffer-based. Will make it easier to go to HDFS with BBPool (offheap).
-1770  byte [] onDiskBlock = new 
byte[onDiskSizeWithHeader + hdrSize];
-1771  int nextBlockOnDiskSize = 
readAtOffset(is, onDiskBlock, preReadHeaderSize,
-1772  onDiskSizeWithHeader - 
preReadHeaderSize, true, offset + preReadHeaderSize, pread);
-1773  if (headerBuf != null) {
-1774// The header has been read when 
reading the previous block OR in a distinct header-only
-1775// read. Copy to this block's 
header.
-1776
System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, 
hdrSize);
-1777  } else {
-1778headerBuf = 
ByteBuffer.wrap(onDiskBlock, 0, hdrSize);
-1779  }
-1780  // Do a few checks before we go 
instantiate HFileBlock.
-1781  assert onDiskSizeWithHeader > 
this.hdrSize;
-1782  
verifyOnDiskSizeMatchesHeader(onDiskSizeWithHeader, headerBuf, offset, 
checksumSupport);
-1783  ByteBuffer onDiskBlockByteBuffer = 
ByteBuffer.wrap(onDiskBlock, 0, onDiskSizeWithHeader);
-1784  // Verify checksum of the data 
before using it for building HFileBlock.
-1785  if (verifyChecksum &&
-1786  !validateChecksum(offset, 
onDiskBlockByteBuffer, hdrSize)) {
-1787return null;
-1788  }
-1789  long duration = 
System.currentTimeMillis() - startTime;
-1790  if (updateMetrics) {
-1791
HFile.updateReadLatency(duration, pread);
-1792  }
-1793  // The onDiskBlock will become the 
headerAndDataBuffer for this block.
-1794  // If 
nextBlockOnDiskSizeWithHeader is not zero, the onDiskBlock already
-1795  // contains the header of next 
block, so no need to set next bloc

[07/26] hbase-site git commit: Published site at 219625233c1e8ad9daf2c35bc2e3a0844e1b97ba.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21347dff/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
index 3dbdec3..22e7059 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactingMemStore.InMemoryFlushRunnable.html
@@ -127,468 +127,470 @@
 119// initialization of the flush size 
should happen after initialization of the index type
 120// so do not transfer the following 
method
 121initInmemoryFlushSize(conf);
-122  }
-123
-124  @VisibleForTesting
-125  protected MemStoreCompactor 
createMemStoreCompactor(MemoryCompactionPolicy compactionPolicy)
-126  throws IllegalArgumentIOException 
{
-127return new MemStoreCompactor(this, 
compactionPolicy);
-128  }
-129
-130  private void 
initInmemoryFlushSize(Configuration conf) {
-131double factor = 0;
-132long memstoreFlushSize = 
getRegionServices().getMemStoreFlushSize();
-133int numStores = 
getRegionServices().getNumStores();
-134if (numStores <= 1) {
-135  // Family number might also be zero 
in some of our unit test case
-136  numStores = 1;
-137}
-138inmemoryFlushSize = memstoreFlushSize 
/ numStores;
-139// multiply by a factor (the same 
factor for all index types)
-140factor = 
conf.getDouble(IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY,
-141  
IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT);
-142
-143inmemoryFlushSize = (long) 
(inmemoryFlushSize * factor);
-144LOG.info("Setting in-memory flush 
size threshold to {} and immutable segments index to type={}",
-145
StringUtils.byteDesc(inmemoryFlushSize), indexType);
-146  }
-147
-148  /**
-149   * @return Total memory occupied by 
this MemStore. This won't include any size occupied by the
-150   * snapshot. We assume the 
snapshot will get cleared soon. This is not thread safe and
-151   * the memstore may be changed 
while computing its size. It is the responsibility of the
-152   * caller to make sure this 
doesn't happen.
-153   */
-154  @Override
-155  public MemStoreSize size() {
-156MemStoreSizing memstoreSizing = new 
MemStoreSizing();
-157
memstoreSizing.incMemStoreSize(active.getMemStoreSize());
-158for (Segment item : 
pipeline.getSegments()) {
-159  
memstoreSizing.incMemStoreSize(item.getMemStoreSize());
-160}
-161return memstoreSizing;
-162  }
-163
-164  /**
-165   * This method is called before the 
flush is executed.
-166   * @return an estimation (lower bound) 
of the unflushed sequence id in memstore after the flush
-167   * is executed. if memstore will be 
cleared returns {@code HConstants.NO_SEQNUM}.
-168   */
-169  @Override
-170  public long preFlushSeqIDEstimation() 
{
-171if(compositeSnapshot) {
-172  return HConstants.NO_SEQNUM;
-173}
-174Segment segment = getLastSegment();
-175if(segment == null) {
-176  return HConstants.NO_SEQNUM;
-177}
-178return segment.getMinSequenceId();
-179  }
-180
-181  @Override
-182  public boolean isSloppy() {
-183return true;
-184  }
-185
-186  /**
-187   * Push the current active memstore 
segment into the pipeline
-188   * and create a snapshot of the tail of 
current compaction pipeline
-189   * Snapshot must be cleared by call to 
{@link #clearSnapshot}.
-190   * {@link #clearSnapshot(long)}.
-191   * @return {@link MemStoreSnapshot}
-192   */
-193  @Override
-194  public MemStoreSnapshot snapshot() {
-195// If snapshot currently has entries, 
then flusher failed or didn't call
-196// cleanup.  Log a warning.
-197if (!this.snapshot.isEmpty()) {
-198  LOG.warn("Snapshot called again 
without clearing previous. " +
-199  "Doing nothing. Another ongoing 
flush or did we fail last attempt?");
-200} else {
-201  LOG.debug("FLUSHING TO DISK {}, 
store={}",
-202
getRegionServices().getRegionInfo().getEncodedName(), getFamilyName());
-203  stopCompaction();
-204  
pushActiveToPipeline(this.active);
-205  snapshotId = 
EnvironmentEdgeManager.currentTime();
-206  // in both cases whatever is pushed 
to snapshot is cleared from the pipeline
-207  if (compositeSnapshot) {
-208pushPipelineToSnapshot();
-209  } else {
-210pushTailToSnapshot();
-211  }
-212  compactor.resetStats();
-213}
-214return new 
MemStoreSnapshot(snapshotId, this.snapshot);
-215  }
-216
-217  /**
-218   * On flush, how much memory we will 
clear.
-219   * @return size of data that is going 
to be flushed
-220   */
-221  @Override
-222  public MemStore