hbase git commit: HBASE-16261 MultiHFileOutputFormat Enhancement (Yi Liang)

2017-06-01 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 123086eda -> c7a7f880d


HBASE-16261 MultiHFileOutputFormat Enhancement (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c7a7f880
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c7a7f880
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c7a7f880

Branch: refs/heads/master
Commit: c7a7f880dd99a29183e54f0092c10e7a70186d9d
Parents: 123086e
Author: Jerry He 
Authored: Thu Jun 1 10:44:17 2017 -0700
Committer: Jerry He 
Committed: Thu Jun 1 10:44:17 2017 -0700

--
 .../hbase/mapreduce/HFileOutputFormat2.java |   2 +-
 .../hbase/mapreduce/MultiHFileOutputFormat.java |  99 
 .../mapreduce/MultiTableHFileOutputFormat.java  | 509 +++
 .../mapreduce/TestMultiHFileOutputFormat.java   | 224 
 .../TestMultiTableHFileOutputFormat.java| 382 ++
 5 files changed, 892 insertions(+), 324 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c7a7f880/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 5b1f13c..da507b1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -166,7 +166,7 @@ public class HFileOutputFormat2
 /**
  * Mapredue job will create a temp path for outputting results. If out != 
null, it means that
  * the caller has set the temp working dir; If out == null, it means we 
need to set it here.
- * Used by HFileOutputFormat2 and MultiHFileOutputFormat. 
MultiHFileOutputFormat will give us
+ * Used by HFileOutputFormat2 and MultiTableHFileOutputFormat. 
MultiTableHFileOutputFormat will give us
  * temp working dir at the table level and HFileOutputFormat2 has to set 
it here within this
  * constructor.
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/c7a7f880/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
deleted file mode 100644
index 3c90b59..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiHFileOutputFormat.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
- * agreements. See the NOTICE file distributed with this work for additional 
information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache 
License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the 
License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless 
required by applicable
- * law or agreed to in writing, software distributed under the License is 
distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. See the License
- * for the specific language governing permissions and limitations under the 
License.
- */
-package org.apache.hadoop.hbase.mapreduce;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.mapreduce.RecordWriter;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
-
-import com.google.common.annotations.VisibleForTesting;
-/**
- * Create 3 level tree directory, first level is using table name as parent 
directory and then use
- * family name as child directory, and all related HFiles for one family are 
under child directory
- * -tableName1
- *   -columnFamilyName1
- *   -columnFamilyName2
- * -HFiles
- * -tableName2
- *   -columnFamilyName1
- * -HFiles
- *   -col

hbase git commit: HBASE-18740 Upgrade Zookeeper version to 3.4.10

2017-09-03 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 433b42b41 -> 6a5bb3b48


HBASE-18740 Upgrade Zookeeper version to 3.4.10


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6a5bb3b4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6a5bb3b4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6a5bb3b4

Branch: refs/heads/branch-1
Commit: 6a5bb3b48c12e3f441fb221d2f954e8a67706334
Parents: 433b42b
Author: Jerry He 
Authored: Sun Sep 3 21:35:20 2017 -0700
Committer: Jerry He 
Committed: Sun Sep 3 21:35:20 2017 -0700

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6a5bb3b4/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 455db83..e3449a5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1273,7 +1273,7 @@
 2.5.0
 thrift
 0.9.3
-3.4.6
+3.4.10
 1.7.7
 4.0.3
 2.4.1



hbase git commit: HBASE-18740 Upgrade Zookeeper version to 3.4.10

2017-09-03 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 a210ca8f2 -> fd9a29a23


HBASE-18740 Upgrade Zookeeper version to 3.4.10


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fd9a29a2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fd9a29a2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fd9a29a2

Branch: refs/heads/branch-1.4
Commit: fd9a29a235cf33602ce938e25de6647558800ed2
Parents: a210ca8
Author: Jerry He 
Authored: Sun Sep 3 21:35:20 2017 -0700
Committer: Jerry He 
Committed: Sun Sep 3 21:38:08 2017 -0700

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fd9a29a2/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 8d3922e..51dc05a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1273,7 +1273,7 @@
 2.5.0
 thrift
 0.9.3
-3.4.6
+3.4.10
 1.7.7
 4.0.3
 2.4.1



hbase git commit: HBASE-18740 Upgrade Zookeeper version to 3.4.10

2017-09-03 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 91ab25b46 -> 2d5012e7c


HBASE-18740 Upgrade Zookeeper version to 3.4.10


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2d5012e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2d5012e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2d5012e7

Branch: refs/heads/branch-2
Commit: 2d5012e7cf11f6939894846e3dea81c033a637a2
Parents: 91ab25b
Author: Jerry He 
Authored: Sun Sep 3 21:47:02 2017 -0700
Committer: Jerry He 
Committed: Sun Sep 3 21:47:02 2017 -0700

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2d5012e7/pom.xml
--
diff --git a/pom.xml b/pom.xml
index a341cd1..b3b1d34 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1379,7 +1379,7 @@
 0.5.0
 thrift
 0.9.3
-3.4.9
+3.4.10
 1.7.24
 4.0.3
 2.4.1



hbase git commit: HBASE-18740 Upgrade Zookeeper version to 3.4.10

2017-09-03 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 0e95a8a0a -> 2305510b7


HBASE-18740 Upgrade Zookeeper version to 3.4.10


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2305510b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2305510b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2305510b

Branch: refs/heads/master
Commit: 2305510b7a81451d0a2c9bea0007bd36b7758118
Parents: 0e95a8a
Author: Jerry He 
Authored: Sun Sep 3 21:47:02 2017 -0700
Committer: Jerry He 
Committed: Sun Sep 3 21:47:54 2017 -0700

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2305510b/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 510928f..ae7aec5 100755
--- a/pom.xml
+++ b/pom.xml
@@ -1417,7 +1417,7 @@
 0.5.0
 thrift
 0.9.3
-3.4.9
+3.4.10
 1.7.24
 4.0.3
 2.4.1



hbase git commit: HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

2017-08-16 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 7149f9997 -> 4057552ed


HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4057552e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4057552e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4057552e

Branch: refs/heads/branch-2
Commit: 4057552ed6192c7b2e61035636dc8c7a897917c6
Parents: 7149f99
Author: Xiang Li 
Authored: Thu Aug 17 00:39:35 2017 +0800
Committer: Jerry He 
Committed: Wed Aug 16 14:45:33 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Append.java  |  9 +++---
 .../org/apache/hadoop/hbase/client/Delete.java  | 31 
 2 files changed, 10 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4057552e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 2bd0860..6947313 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -134,11 +134,10 @@ public class Append extends Mutation {
   public Append add(final Cell cell) {
 // Presume it is KeyValue for now.
 byte [] family = CellUtil.cloneFamily(cell);
-List list = this.familyMap.get(family);
-if (list == null) {
-  list  = new ArrayList<>(1);
-  this.familyMap.put(family, list);
-}
+
+// Get cell list for the family
+List list = getCellList(family);
+
 // find where the new entry should be placed in the List
 list.add(cell);
 return this;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4057552e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index bf5241c..66b6cfc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -180,11 +180,7 @@ public class Delete extends Mutation implements 
Comparable {
 " doesn't match the original one " +  Bytes.toStringBinary(this.row));
 }
 byte [] family = CellUtil.cloneFamily(kv);
-List list = familyMap.get(family);
-if (list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(kv);
 return this;
   }
@@ -216,11 +212,8 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-} else if(!list.isEmpty()) {
+List list = getCellList(family);
+if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
@@ -236,11 +229,7 @@ public class Delete extends Mutation implements 
Comparable {
* @return this for invocation chaining
*/
   public Delete addFamilyVersion(final byte [] family, final long timestamp) {
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
 return this;
@@ -269,11 +258,7 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if (list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
 return this;
@@ -304,11 +289,7 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List list = getCell

hbase git commit: HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

2017-08-16 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 5d2c3ddf5 -> 4c3a64db1


HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4c3a64db
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4c3a64db
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4c3a64db

Branch: refs/heads/master
Commit: 4c3a64db13b086ad3d8a6ffa1be8ba2f5a24719c
Parents: 5d2c3dd
Author: Xiang Li 
Authored: Thu Aug 17 00:39:35 2017 +0800
Committer: Jerry He 
Committed: Wed Aug 16 14:50:46 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Append.java  |  9 +++---
 .../org/apache/hadoop/hbase/client/Delete.java  | 31 
 2 files changed, 10 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4c3a64db/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 2bd0860..6947313 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -134,11 +134,10 @@ public class Append extends Mutation {
   public Append add(final Cell cell) {
 // Presume it is KeyValue for now.
 byte [] family = CellUtil.cloneFamily(cell);
-List list = this.familyMap.get(family);
-if (list == null) {
-  list  = new ArrayList<>(1);
-  this.familyMap.put(family, list);
-}
+
+// Get cell list for the family
+List list = getCellList(family);
+
 // find where the new entry should be placed in the List
 list.add(cell);
 return this;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4c3a64db/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index bf5241c..66b6cfc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -180,11 +180,7 @@ public class Delete extends Mutation implements 
Comparable {
 " doesn't match the original one " +  Bytes.toStringBinary(this.row));
 }
 byte [] family = CellUtil.cloneFamily(kv);
-List list = familyMap.get(family);
-if (list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(kv);
 return this;
   }
@@ -216,11 +212,8 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-} else if(!list.isEmpty()) {
+List list = getCellList(family);
+if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
@@ -236,11 +229,7 @@ public class Delete extends Mutation implements 
Comparable {
* @return this for invocation chaining
*/
   public Delete addFamilyVersion(final byte [] family, final long timestamp) {
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
 return this;
@@ -269,11 +258,7 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if (list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
 return this;
@@ -304,11 +289,7 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList<>(1);
-  familyMap.put(family, list);
-}
+List list = getCellList

hbase git commit: HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

2017-08-16 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 1f1ab8c87 -> 54aaf6bfb


HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/54aaf6bf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/54aaf6bf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/54aaf6bf

Branch: refs/heads/branch-1
Commit: 54aaf6bfb72815c343885f7ec40edbe20e4bc394
Parents: 1f1ab8c
Author: Xiang Li 
Authored: Thu Aug 17 00:39:35 2017 +0800
Committer: Jerry He 
Committed: Wed Aug 16 15:12:56 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Append.java  |  9 +++---
 .../org/apache/hadoop/hbase/client/Delete.java  | 31 
 2 files changed, 10 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/54aaf6bf/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 0741a0d..efc958d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -122,11 +122,10 @@ public class Append extends Mutation {
   public Append add(final Cell cell) {
 // Presume it is KeyValue for now.
 byte [] family = CellUtil.cloneFamily(cell);
-List list = this.familyMap.get(family);
-if (list == null) {
-  list  = new ArrayList();
-  this.familyMap.put(family, list);
-}
+
+// Get cell list for the family
+List list = getCellList(family);
+
 // find where the new entry should be placed in the List
 list.add(cell);
 return this;

http://git-wip-us.apache.org/repos/asf/hbase/blob/54aaf6bf/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 8682eae..e45ae59 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -170,11 +170,7 @@ public class Delete extends Mutation implements 
Comparable {
 " doesn't match the original one " +  Bytes.toStringBinary(this.row));
 }
 byte [] family = CellUtil.cloneFamily(kv);
-List list = familyMap.get(family);
-if (list == null) {
-  list = new ArrayList();
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(kv);
 return this;
   }
@@ -236,11 +232,8 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList();
-  familyMap.put(family, list);
-} else if(!list.isEmpty()) {
+List list = getCellList(family);
+if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
@@ -269,11 +262,7 @@ public class Delete extends Mutation implements 
Comparable {
* @return this for invocation chaining
*/
   public Delete addFamilyVersion(final byte [] family, final long timestamp) {
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList();
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
 return this;
@@ -328,11 +317,7 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if (list == null) {
-  list = new ArrayList();
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
 return this;
@@ -391,11 +376,7 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList();
-  familyMap.put(family, list);
-}
+List list = getCellList(family);

hbase git commit: HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

2017-08-16 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 255319a0e -> 60f88a970


HBASE-18573 Update Append and Delete to use Mutation#getCellList(family)

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/60f88a97
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/60f88a97
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/60f88a97

Branch: refs/heads/branch-1.4
Commit: 60f88a97084656ad18e4b890d311cb110d4b6fa8
Parents: 255319a
Author: Xiang Li 
Authored: Thu Aug 17 00:39:35 2017 +0800
Committer: Jerry He 
Committed: Wed Aug 16 15:13:43 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Append.java  |  9 +++---
 .../org/apache/hadoop/hbase/client/Delete.java  | 31 
 2 files changed, 10 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/60f88a97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 0741a0d..efc958d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -122,11 +122,10 @@ public class Append extends Mutation {
   public Append add(final Cell cell) {
 // Presume it is KeyValue for now.
 byte [] family = CellUtil.cloneFamily(cell);
-List list = this.familyMap.get(family);
-if (list == null) {
-  list  = new ArrayList();
-  this.familyMap.put(family, list);
-}
+
+// Get cell list for the family
+List list = getCellList(family);
+
 // find where the new entry should be placed in the List
 list.add(cell);
 return this;

http://git-wip-us.apache.org/repos/asf/hbase/blob/60f88a97/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 8682eae..e45ae59 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -170,11 +170,7 @@ public class Delete extends Mutation implements 
Comparable {
 " doesn't match the original one " +  Bytes.toStringBinary(this.row));
 }
 byte [] family = CellUtil.cloneFamily(kv);
-List list = familyMap.get(family);
-if (list == null) {
-  list = new ArrayList();
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(kv);
 return this;
   }
@@ -236,11 +232,8 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList();
-  familyMap.put(family, list);
-} else if(!list.isEmpty()) {
+List list = getCellList(family);
+if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
@@ -269,11 +262,7 @@ public class Delete extends Mutation implements 
Comparable {
* @return this for invocation chaining
*/
   public Delete addFamilyVersion(final byte [] family, final long timestamp) {
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList();
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
 return this;
@@ -328,11 +317,7 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if (list == null) {
-  list = new ArrayList();
-  familyMap.put(family, list);
-}
+List list = getCellList(family);
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
 return this;
@@ -391,11 +376,7 @@ public class Delete extends Mutation implements 
Comparable {
 if (timestamp < 0) {
   throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + 
timestamp);
 }
-List list = familyMap.get(family);
-if(list == null) {
-  list = new ArrayList();
-  familyMap.put(family, list);
-}
+List list = getCellList(family);

hbase git commit: HBASE-17869 UnsafeAvailChecker wrongly returns false on ppc

2017-04-06 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 910980389 -> af604f0c0


HBASE-17869 UnsafeAvailChecker wrongly returns false on ppc


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/af604f0c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/af604f0c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/af604f0c

Branch: refs/heads/master
Commit: af604f0c0cf3c40c56746150ffa860aad07f128a
Parents: 9109803
Author: Jerry He 
Authored: Thu Apr 6 16:04:47 2017 -0700
Committer: Jerry He 
Committed: Thu Apr 6 16:04:47 2017 -0700

--
 .../hadoop/hbase/util/UnsafeAvailChecker.java   | 24 
 1 file changed, 15 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/af604f0c/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
index 90e6ec8..886cb3c 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
@@ -51,15 +51,21 @@ public class UnsafeAvailChecker {
 });
 // When Unsafe itself is not available/accessible consider unaligned as 
false.
 if (avail) {
-  try {
-// Using java.nio.Bits#unaligned() to check for unaligned-access 
capability
-Class clazz = Class.forName("java.nio.Bits");
-Method m = clazz.getDeclaredMethod("unaligned");
-m.setAccessible(true);
-unaligned = (Boolean) m.invoke(null);
-  } catch (Exception e) {
-LOG.warn("java.nio.Bits#unaligned() check failed."
-+ "Unsafe based read/write of primitive types won't be used", e);
+  String arch = System.getProperty("os.arch");
+  if ("ppc64".equals(arch) || "ppc64le".equals(arch)) {
+// java.nio.Bits.unaligned() wrongly returns false on ppc 
(JDK-8165231),
+unaligned = true;
+  } else {
+try {
+  // Using java.nio.Bits#unaligned() to check for unaligned-access 
capability
+  Class clazz = Class.forName("java.nio.Bits");
+  Method m = clazz.getDeclaredMethod("unaligned");
+  m.setAccessible(true);
+  unaligned = (Boolean) m.invoke(null);
+} catch (Exception e) {
+  LOG.warn("java.nio.Bits#unaligned() check failed."
+  + "Unsafe based read/write of primitive types won't be used", e);
+}
   }
 }
   }



hbase git commit: HBASE-17869 UnsafeAvailChecker wrongly returns false on ppc

2017-04-06 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 a6e9de3a0 -> b6a2c02b9


HBASE-17869 UnsafeAvailChecker wrongly returns false on ppc


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b6a2c02b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b6a2c02b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b6a2c02b

Branch: refs/heads/branch-1
Commit: b6a2c02b935ab22ec4c86accc3b1015fe715c675
Parents: a6e9de3
Author: Jerry He 
Authored: Thu Apr 6 16:04:47 2017 -0700
Committer: Jerry He 
Committed: Thu Apr 6 16:13:52 2017 -0700

--
 .../hadoop/hbase/util/UnsafeAvailChecker.java   | 24 
 1 file changed, 15 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b6a2c02b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
index 90e6ec8..886cb3c 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/UnsafeAvailChecker.java
@@ -51,15 +51,21 @@ public class UnsafeAvailChecker {
 });
 // When Unsafe itself is not available/accessible consider unaligned as 
false.
 if (avail) {
-  try {
-// Using java.nio.Bits#unaligned() to check for unaligned-access 
capability
-Class clazz = Class.forName("java.nio.Bits");
-Method m = clazz.getDeclaredMethod("unaligned");
-m.setAccessible(true);
-unaligned = (Boolean) m.invoke(null);
-  } catch (Exception e) {
-LOG.warn("java.nio.Bits#unaligned() check failed."
-+ "Unsafe based read/write of primitive types won't be used", e);
+  String arch = System.getProperty("os.arch");
+  if ("ppc64".equals(arch) || "ppc64le".equals(arch)) {
+// java.nio.Bits.unaligned() wrongly returns false on ppc 
(JDK-8165231),
+unaligned = true;
+  } else {
+try {
+  // Using java.nio.Bits#unaligned() to check for unaligned-access 
capability
+  Class clazz = Class.forName("java.nio.Bits");
+  Method m = clazz.getDeclaredMethod("unaligned");
+  m.setAccessible(true);
+  unaligned = (Boolean) m.invoke(null);
+} catch (Exception e) {
+  LOG.warn("java.nio.Bits#unaligned() check failed."
+  + "Unsafe based read/write of primitive types won't be used", e);
+}
   }
 }
   }



hbase git commit: HBASE-17816 HRegion#mutateRowWithLocks should update writeRequestCount metric (Weizhan Zeng)

2017-04-06 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master af604f0c0 -> 48b2502a5


HBASE-17816 HRegion#mutateRowWithLocks should update writeRequestCount metric 
(Weizhan Zeng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/48b2502a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/48b2502a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/48b2502a

Branch: refs/heads/master
Commit: 48b2502a5fcd4d3cd954c3abf6703422da7cdc2f
Parents: af604f0
Author: Jerry He 
Authored: Thu Apr 6 16:45:45 2017 -0700
Committer: Jerry He 
Committed: Thu Apr 6 16:45:45 2017 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  |  1 +
 .../hadoop/hbase/regionserver/TestHRegion.java  | 24 
 2 files changed, 25 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/48b2502a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 7f889ce..a87b679 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -6966,6 +6966,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   @Override
   public void mutateRowsWithLocks(Collection mutations,
   Collection rowsToLock, long nonceGroup, long nonce) throws 
IOException {
+writeRequestsCount.add(mutations.size());
 MultiRowMutationProcessor proc = new MultiRowMutationProcessor(mutations, 
rowsToLock);
 processRowsWithLocks(proc, -1, nonceGroup, nonce);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/48b2502a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index eac3c77..d56d6ec 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -6391,4 +6391,28 @@ public class TestHRegion {
   this.region = null;
 }
   }
+
+  @Test
+  public void testMutateRow_WriteRequestCount() throws Exception {
+byte[] row1 = Bytes.toBytes("row1");
+byte[] fam1 = Bytes.toBytes("fam1");
+byte[] qf1 = Bytes.toBytes("qualifier");
+byte[] val1 = Bytes.toBytes("value1");
+
+RowMutations rm = new RowMutations(row1);
+Put put = new Put(row1);
+put.addColumn(fam1, qf1, val1);
+rm.add(put);
+
+this.region = initHRegion(tableName, method, CONF, fam1);
+try {
+  long wrcBeforeMutate = this.region.writeRequestsCount.longValue();
+  this.region.mutateRow(rm);
+  long wrcAfterMutate = this.region.writeRequestsCount.longValue();
+  Assert.assertEquals(wrcBeforeMutate + rm.getMutations().size(), 
wrcAfterMutate);
+} finally {
+  HBaseTestingUtility.closeRegionAndWAL(this.region);
+  this.region = null;
+}
+  }
 }



hbase git commit: HBASE-15201 Add hbase-spark to hbase assembly

2016-02-08 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master ec92a8a70 -> 3aff98c75


HBASE-15201 Add hbase-spark to hbase assembly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3aff98c7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3aff98c7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3aff98c7

Branch: refs/heads/master
Commit: 3aff98c75b5e23a5010be17eecef3140d2bf70bb
Parents: ec92a8a
Author: Jerry He 
Authored: Mon Feb 8 14:13:46 2016 -0800
Committer: Jerry He 
Committed: Mon Feb 8 14:13:46 2016 -0800

--
 hbase-assembly/pom.xml | 5 +
 hbase-assembly/src/main/assembly/hadoop-two-compat.xml | 1 +
 hbase-spark/pom.xml| 1 +
 3 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3aff98c7/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 4851391..87e82ad 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -201,5 +201,10 @@
${project.version}
true
 
+
+  org.apache.hbase
+  hbase-spark
+  ${project.version}
+
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aff98c7/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
--
diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml 
b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
index 9ef624c..2033e9c 100644
--- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
+++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
@@ -45,6 +45,7 @@
 org.apache.hbase:hbase-rest
 org.apache.hbase:hbase-server
 org.apache.hbase:hbase-shell
+org.apache.hbase:hbase-spark
 org.apache.hbase:hbase-thrift
 org.apache.hbase:hbase-external-blockcache
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aff98c7/hbase-spark/pom.xml
--
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
index 8f71a89..251ea59 100644
--- a/hbase-spark/pom.xml
+++ b/hbase-spark/pom.xml
@@ -88,6 +88,7 @@
 org.apache.spark
 spark-streaming_${scala.binary.version}
 ${spark.version}
+provided
 
 
 org.apache.spark



hbase git commit: HBASE-15223 Make convertScanToString public for Spark

2016-02-10 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 a34db9383 -> bc1d83673


HBASE-15223 Make convertScanToString public for Spark


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bc1d8367
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bc1d8367
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bc1d8367

Branch: refs/heads/branch-1
Commit: bc1d8367389e14724cb6d79f3b0abac69266f0b0
Parents: a34db93
Author: Jerry He 
Authored: Wed Feb 10 14:55:38 2016 -0800
Committer: Jerry He 
Committed: Wed Feb 10 14:55:38 2016 -0800

--
 .../hadoop/hbase/mapreduce/TableInputFormatBase.java  | 14 +++---
 .../hadoop/hbase/mapreduce/TableMapReduceUtil.java|  4 ++--
 .../org/apache/hadoop/hbase/mapreduce/TableSplit.java | 13 -
 .../hadoop/hbase/regionserver/RSRpcServices.java  |  1 -
 4 files changed, 21 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bc1d8367/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index 82378d1..d72c177 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -264,7 +264,7 @@ extends InputFormat {
   }
   List splits = new ArrayList(1);
   long regionSize = 
sizeCalculator.getRegionSize(regLoc.getRegionInfo().getRegionName());
-  TableSplit split = new TableSplit(table.getName(),
+  TableSplit split = new TableSplit(table.getName(), scan,
   HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc
   .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], 
regionSize);
   splits.add(split);
@@ -307,7 +307,7 @@ extends InputFormat {
 
 byte[] regionName = location.getRegionInfo().getRegionName();
 long regionSize = sizeCalculator.getRegionSize(regionName);
-TableSplit split = new TableSplit(table.getName(),
+TableSplit split = new TableSplit(table.getName(), scan,
   splitStart, splitStop, regionLocation, regionSize);
 splits.add(split);
 if (LOG.isDebugEnabled()) {
@@ -398,10 +398,10 @@ extends InputFormat {
 byte[] splitKey = getSplitKey(ts.getStartRow(), ts.getEndRow(), 
isTextKey);
  //Set the size of child TableSplit as 1/2 of the region size. The 
exact size of the
  // MapReduce input splits is not far off.
-TableSplit t1 = new TableSplit(table.getName(), ts.getStartRow(), 
splitKey, regionLocation,
-regionSize / 2);
-TableSplit t2 = new TableSplit(table.getName(), splitKey, 
ts.getEndRow(), regionLocation,
-regionSize - regionSize / 2);
+TableSplit t1 = new TableSplit(table.getName(), scan, 
ts.getStartRow(), splitKey,
+regionLocation,  regionSize / 2);
+TableSplit t2 = new TableSplit(table.getName(), scan, splitKey, 
ts.getEndRow(),
+regionLocation, regionSize - regionSize / 2);
 resultList.add(t1);
 resultList.add(t2);
 count++;
@@ -427,7 +427,7 @@ extends InputFormat {
 break;
   }
 }
-TableSplit t = new TableSplit(table.getName(), splitStartKey, 
splitEndKey,
+TableSplit t = new TableSplit(table.getName(), scan, splitStartKey, 
splitEndKey,
 regionLocation, totalSize);
 resultList.add(t);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc1d8367/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 8cad7ab..458464f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -552,7 +552,7 @@ public class TableMapReduceUtil {
* @return The scan saved in a Base64 encoded string.
* @throws IOException When writing the scan fails.
*/
-  static String convertScanToString(Scan scan) throws IOException {
+  public static String convertScanToString(Scan scan) throws IOException {
 ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
 return Base64.encodeBytes(proto.toByteArray());
   }
@@ -564,7 +564,

hbase git commit: HBASE-15223 Make convertScanToString public for Spark

2016-02-10 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master abb6cdce7 -> 1942a99b8


HBASE-15223 Make convertScanToString public for Spark


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1942a99b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1942a99b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1942a99b

Branch: refs/heads/master
Commit: 1942a99b831bb4c41c0e09d6b93df5e1d060f58e
Parents: abb6cdc
Author: Jerry He 
Authored: Wed Feb 10 15:02:58 2016 -0800
Committer: Jerry He 
Committed: Wed Feb 10 15:02:58 2016 -0800

--
 .../hadoop/hbase/mapreduce/TableInputFormatBase.java   | 10 +-
 .../hadoop/hbase/mapreduce/TableMapReduceUtil.java |  4 ++--
 .../org/apache/hadoop/hbase/mapreduce/TableSplit.java  | 13 -
 3 files changed, 19 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1942a99b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index 918232f..b2f115c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -266,7 +266,7 @@ extends InputFormat {
 }
 List splits = new ArrayList(1);
 long regionSize = 
sizeCalculator.getRegionSize(regLoc.getRegionInfo().getRegionName());
-TableSplit split = new TableSplit(tableName,
+TableSplit split = new TableSplit(tableName, scan,
 HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc
 
.getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize);
 splits.add(split);
@@ -309,7 +309,7 @@ extends InputFormat {
   
   byte[] regionName = location.getRegionInfo().getRegionName();
   long regionSize = sizeCalculator.getRegionSize(regionName);
-  TableSplit split = new TableSplit(tableName,
+  TableSplit split = new TableSplit(tableName, scan,
 splitStart, splitStop, regionLocation, regionSize);
   splits.add(split);
   if (LOG.isDebugEnabled()) {
@@ -397,9 +397,9 @@ extends InputFormat {
 byte[] splitKey = getSplitKey(ts.getStartRow(), ts.getEndRow(), 
isTextKey);
  //Set the size of child TableSplit as 1/2 of the region size. The 
exact size of the
  // MapReduce input splits is not far off.
-TableSplit t1 = new TableSplit(tableName, ts.getStartRow(), splitKey, 
regionLocation,
+TableSplit t1 = new TableSplit(tableName, scan, ts.getStartRow(), 
splitKey, regionLocation,
 regionSize / 2);
-TableSplit t2 = new TableSplit(tableName, splitKey, ts.getEndRow(), 
regionLocation,
+TableSplit t2 = new TableSplit(tableName, scan, splitKey, 
ts.getEndRow(), regionLocation,
 regionSize - regionSize / 2);
 resultList.add(t1);
 resultList.add(t2);
@@ -426,7 +426,7 @@ extends InputFormat {
 break;
   }
 }
-TableSplit t = new TableSplit(tableName, splitStartKey, splitEndKey,
+TableSplit t = new TableSplit(tableName, scan, splitStartKey, 
splitEndKey,
 regionLocation, totalSize);
 resultList.add(t);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1942a99b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index d43c4d9..37e4e44 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -561,7 +561,7 @@ public class TableMapReduceUtil {
* @return The scan saved in a Base64 encoded string.
* @throws IOException When writing the scan fails.
*/
-  static String convertScanToString(Scan scan) throws IOException {
+  public static String convertScanToString(Scan scan) throws IOException {
 ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
 return Base64.encodeBytes(proto.toByteArray());
   }
@@ -573,7 +573,7 @@ public class TableMapReduceUtil {
* @return The newly created Scan instance.
* @throws IOException When reading the scan instance fails.
*/
-  static Scan convertStringToScan(String 

hbase git commit: HBASE-16257 Move staging dir to be under hbase root dir

2016-09-23 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master d2ed74cbc -> 50b051ade


HBASE-16257 Move staging dir to be under hbase root dir


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/50b051ad
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/50b051ad
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/50b051ad

Branch: refs/heads/master
Commit: 50b051ade1d38bba0b936db1c751e5045bc103cb
Parents: d2ed74c
Author: Jerry He 
Authored: Fri Sep 23 10:07:58 2016 -0700
Committer: Jerry He 
Committed: Fri Sep 23 10:07:58 2016 -0700

--
 .../hbase/client/SecureBulkLoadClient.java  |   5 -
 .../hbase/security/SecureBulkLoadUtil.java  |  46 --
 .../org/apache/hadoop/hbase/HConstants.java |   3 +
 .../src/main/resources/hbase-default.xml|   9 +-
 .../hadoop/hbase/HBaseCommonTestingUtility.java |   1 -
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |   4 +-
 .../hadoop/hbase/master/MasterFileSystem.java   | 141 ---
 .../hadoop/hbase/master/MasterWalManager.java   |   5 -
 .../regionserver/SecureBulkLoadManager.java |  36 ++---
 .../regionserver/HFileReplicator.java   |   7 +-
 .../hbase/util/hbck/HFileCorruptionChecker.java |   8 +-
 .../apache/hadoop/hbase/wal/WALSplitter.java|   8 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |   1 -
 .../mapreduce/TestLoadIncrementalHFiles.java|   4 +-
 .../SecureBulkLoadEndpointClient.java   |   4 -
 .../apache/hadoop/hbase/wal/TestWALSplit.java   |   3 +-
 16 files changed, 153 insertions(+), 132 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
index f460bdb..eddf8f1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
@@ -36,7 +36,6 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadRe
 import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.security.token.Token;
 
@@ -125,8 +124,4 @@ public class SecureBulkLoadClient {
   throw ProtobufUtil.handleRemoteException(se);
 }
   }
-
-  public Path getStagingPath(String bulkToken, byte[] family) throws 
IOException {
-return SecureBulkLoadUtil.getStagingPath(table.getConfiguration(), 
bulkToken, family);
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/50b051ad/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
deleted file mode 100644
index 5af6891..000
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.security;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@InterfaceAudience.Private
-public class SecureBulkLoadUtil {
-  private final static

hbase git commit: HBASE-16667 Building with JDK 8: ignoring option MaxPermSize=256m (Niels Basjes)

2016-09-24 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 7ed93f8f7 -> 2765b9d9d


HBASE-16667 Building with JDK 8: ignoring option MaxPermSize=256m (Niels Basjes)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2765b9d9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2765b9d9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2765b9d9

Branch: refs/heads/master
Commit: 2765b9d9d965c61b3c40f81752cadb8ad536b501
Parents: 7ed93f8
Author: Jerry He 
Authored: Sat Sep 24 16:07:25 2016 -0700
Committer: Jerry He 
Committed: Sat Sep 24 16:07:25 2016 -0700

--
 conf/hbase-env.cmd| 4 
 conf/hbase-env.sh | 5 +
 dev-support/jenkinsEnv.sh | 2 +-
 hbase-it/pom.xml  | 5 +
 hbase-spark/pom.xml   | 2 +-
 pom.xml   | 6 +++---
 6 files changed, 7 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2765b9d9/conf/hbase-env.cmd
--
diff --git a/conf/hbase-env.cmd b/conf/hbase-env.cmd
index eb31b1b..d16de55 100644
--- a/conf/hbase-env.cmd
+++ b/conf/hbase-env.cmd
@@ -42,10 +42,6 @@
 @rem @rem See TestIPv6NIOServerSocketChannel.
 set HBASE_OPTS="-XX:+UseConcMarkSweepGC" "-Djava.net.preferIPv4Stack=true"
 
-@rem Configure PermSize. Only needed in JDK7. You can safely remove it for 
JDK8+
-set HBASE_MASTER_OPTS=%HBASE_MASTER_OPTS% "-XX:PermSize=128m" 
"-XX:MaxPermSize=128m"
-set HBASE_REGIONSERVER_OPTS=%HBASE_REGIONSERVER_OPTS% "-XX:PermSize=128m" 
"-XX:MaxPermSize=128m"
-
 @rem Uncomment below to enable java garbage collection logging for the 
server-side processes
 @rem this enables basic gc logging for the server processes to the .out file
 @rem set SERVER_GC_OPTS="-verbose:gc" "-XX:+PrintGCDetails" 
"-XX:+PrintGCDateStamps" %HBASE_GC_OPTS%

http://git-wip-us.apache.org/repos/asf/hbase/blob/2765b9d9/conf/hbase-env.sh
--
diff --git a/conf/hbase-env.sh b/conf/hbase-env.sh
index c45e7a5..31e8441 100644
--- a/conf/hbase-env.sh
+++ b/conf/hbase-env.sh
@@ -1,3 +1,4 @@
+#!/usr/bin/env bash
 #
 #/**
 # * Licensed to the Apache Software Foundation (ASF) under one
@@ -42,10 +43,6 @@
 # see http://hbase.apache.org/book.html#performance
 export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
 
-# Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m 
-XX:MaxPermSize=128m"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m 
-XX:MaxPermSize=128m"
-
 # Uncomment one of the below three options to enable java garbage collection 
logging for the server-side processes.
 
 # This enables basic gc logging to the .out file.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2765b9d9/dev-support/jenkinsEnv.sh
--
diff --git a/dev-support/jenkinsEnv.sh b/dev-support/jenkinsEnv.sh
index 6961437..d7fe873 100755
--- a/dev-support/jenkinsEnv.sh
+++ b/dev-support/jenkinsEnv.sh
@@ -30,7 +30,7 @@ export CLOVER_HOME=/home/jenkins/tools/clover/latest
 export MAVEN_HOME=/home/jenkins/tools/maven/latest
 
 export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin:
-export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData 
-XX:MaxPermSize=256m"}"
+export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData"}"
 
 ulimit -n
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2765b9d9/hbase-it/pom.xml
--
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index f27b47a..07b5683 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -91,9 +91,6 @@
 
   ${unittest.include}
   **/*$*
-  
-  
 
 
${test.output.tofile}
 
@@ -174,7 +171,7 @@
 
 1800
--enableassertions -Xmx${failsafe.Xmx} -XX:MaxPermSize=368m
+-enableassertions -Xmx${failsafe.Xmx}
 -Djava.security.egd=file:/dev/./urandom 
-XX:+CMSClassUnloadingEnabled
 -verbose:gc -XX:+PrintCommandLineFlags  
-XX:+PrintFlagsFinal
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/2765b9d9/hbase-spark/pom.xml
--
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
index aa03854..7cd78f0 100644
--- a/hbase-spark/pom.xml
+++ b/hbase-spark/pom.xml
@@ -631,7 +631,7 @@
 
 Integration-Test
 
--Xmx1536m -XX:MaxPermSize=512m 
-XX:ReservedCodeCacheSize=512m
+-Xmx1536m -XX:Reserved

hbase git commit: HBASE-16667 Building with JDK 8: ignoring option MaxPermSize=256m (Niels Basjes)

2016-09-24 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 9a510e9ba -> 92b1b5ac8


HBASE-16667 Building with JDK 8: ignoring option MaxPermSize=256m (Niels Basjes)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/92b1b5ac
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/92b1b5ac
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/92b1b5ac

Branch: refs/heads/branch-1
Commit: 92b1b5ac80982f7617653ae98a3c3cce1705deb3
Parents: 9a510e9
Author: Jerry He 
Authored: Sat Sep 24 16:29:41 2016 -0700
Committer: Jerry He 
Committed: Sat Sep 24 16:29:41 2016 -0700

--
 hbase-it/pom.xml |  2 +-
 pom.xml  | 28 +---
 2 files changed, 26 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/92b1b5ac/hbase-it/pom.xml
--
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index 845ccc5..62f7f28 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -172,7 +172,7 @@
 
 1800
--enableassertions -Xmx${failsafe.Xmx} -XX:MaxPermSize=368m
+-enableassertions -Xmx${failsafe.Xmx} ${maxpermsize}
 -Djava.security.egd=file:/dev/./urandom 
-XX:+CMSClassUnloadingEnabled
 -verbose:gc -XX:+PrintCommandLineFlags  
-XX:+PrintFlagsFinal
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/92b1b5ac/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 322823e..fb7c20a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1254,10 +1254,10 @@
 
 -enableassertions -Dhbase.test -Xmx${surefire.Xmx}
-  -XX:MaxPermSize=256m -Djava.security.egd=file:/dev/./urandom 
-Djava.net.preferIPv4Stack=true
+  ${maxpermsize} -Djava.security.egd=file:/dev/./urandom 
-Djava.net.preferIPv4Stack=true
   -Djava.awt.headless=true
 
--enableassertions -Xmx${surefire.cygwinXmx} 
-XX:MaxPermSize=256m
+-enableassertions -Xmx${surefire.cygwinXmx} 
${maxpermsize}
   -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true
   "-Djava.library.path=${hadoop.library.path};${java.library.path}"
 
@@ -1843,6 +1843,28 @@
 
   
 
+
+
+
+
+  old-jdk
+  
+(,1.8)
+  
+  
+-XX:MaxPermSize=512m
+  
+
+
+  new-jdk
+  
+[1.8,)
+  
+  
+
+  
+
+
 
   jacoco
   
@@ -2458,7 +2480,7 @@
  or you can provide the license with 
-Dmaven.clover.licenseLocation=/path/to/license. Committers can find
  the license under 
https://svn.apache.org/repos/private/committers/donated-licenses/clover/
  The report will be generated under target/site/clover/index.html when 
you run
- MAVEN_OPTS="-Xmx2048m -XX:MaxPermSize=512m" mvn clean package 
-Pclover site -->
+ MAVEN_OPTS="-Xmx2048m ${maxpermsize}" mvn clean package -Pclover site 
-->
 
   clover
   



hbase git commit: HBASE-16667 Building with JDK 8: ignoring option MaxPermSize=256m (Niels Basjes)

2016-09-24 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 d307ad19f -> f224e09ad


HBASE-16667 Building with JDK 8: ignoring option MaxPermSize=256m (Niels Basjes)

Conflicts:
pom.xml


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f224e09a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f224e09a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f224e09a

Branch: refs/heads/branch-1.2
Commit: f224e09ad9e5e18a31e14e2606bdefba5b901216
Parents: d307ad1
Author: Jerry He 
Authored: Sat Sep 24 16:29:41 2016 -0700
Committer: Jerry He 
Committed: Sat Sep 24 16:56:32 2016 -0700

--
 hbase-it/pom.xml |  2 +-
 pom.xml  | 28 +---
 2 files changed, 26 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f224e09a/hbase-it/pom.xml
--
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index f7d8241..cf63a9d 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -172,7 +172,7 @@
 
 1800
--enableassertions -Xmx${failsafe.Xmx} -XX:MaxPermSize=368m
+-enableassertions -Xmx${failsafe.Xmx} ${maxpermsize}
 -Djava.security.egd=file:/dev/./urandom 
-XX:+CMSClassUnloadingEnabled
 -verbose:gc -XX:+PrintCommandLineFlags  
-XX:+PrintFlagsFinal
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/f224e09a/pom.xml
--
diff --git a/pom.xml b/pom.xml
index a4be2ba..8bde325 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1271,10 +1271,10 @@
 2800m
 2800m
 -enableassertions -XX:MaxDirectMemorySize=1G 
-Xmx${surefire.Xmx}
-  -XX:MaxPermSize=256m -Djava.security.egd=file:/dev/./urandom 
-Djava.net.preferIPv4Stack=true
+  ${maxpermsize} -Djava.security.egd=file:/dev/./urandom 
-Djava.net.preferIPv4Stack=true
   -Djava.awt.headless=true
 
--enableassertions -Xmx${surefire.cygwinXmx} 
-XX:MaxPermSize=256m
+-enableassertions -Xmx${surefire.cygwinXmx} 
${maxpermsize}
   -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true
   "-Djava.library.path=${hadoop.library.path};${java.library.path}"
 
@@ -1845,6 +1845,28 @@
 
   
 
+
+
+
+
+  old-jdk
+  
+(,1.8)
+  
+  
+-XX:MaxPermSize=512m
+  
+
+
+  new-jdk
+  
+[1.8,)
+  
+  
+
+  
+
+
 
   jacoco
   
@@ -2460,7 +2482,7 @@
  or you can provide the license with 
-Dmaven.clover.licenseLocation=/path/to/license. Committers can find
  the license under 
https://svn.apache.org/repos/private/committers/donated-licenses/clover/
  The report will be generated under target/site/clover/index.html when 
you run
- MAVEN_OPTS="-Xmx2048m -XX:MaxPermSize=512m" mvn clean package 
-Pclover site -->
+ MAVEN_OPTS="-Xmx2048m ${maxpermsize}" mvn clean package -Pclover site 
-->
 
   clover
   



hbase git commit: HBASE-16667 Building with JDK 8: ignoring option MaxPermSize=256m (Niels Basjes)

2016-09-24 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 b4f7ad62e -> 2926a665a


HBASE-16667 Building with JDK 8: ignoring option MaxPermSize=256m (Niels Basjes)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2926a665
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2926a665
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2926a665

Branch: refs/heads/branch-1.3
Commit: 2926a665ab75bc8da6c57a65f9c12528cd4ff992
Parents: b4f7ad6
Author: Jerry He 
Authored: Sat Sep 24 16:29:41 2016 -0700
Committer: Jerry He 
Committed: Sat Sep 24 16:59:35 2016 -0700

--
 hbase-it/pom.xml |  2 +-
 pom.xml  | 28 +---
 2 files changed, 26 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2926a665/hbase-it/pom.xml
--
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index 3225b1e..3dcdfdb 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -172,7 +172,7 @@
 
 1800
--enableassertions -Xmx${failsafe.Xmx} -XX:MaxPermSize=368m
+-enableassertions -Xmx${failsafe.Xmx} ${maxpermsize}
 -Djava.security.egd=file:/dev/./urandom 
-XX:+CMSClassUnloadingEnabled
 -verbose:gc -XX:+PrintCommandLineFlags  
-XX:+PrintFlagsFinal
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/2926a665/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 6d58c92..d11fe87 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1252,10 +1252,10 @@
 
 -enableassertions -Dhbase.test -Xmx${surefire.Xmx}
-  -XX:MaxPermSize=256m -Djava.security.egd=file:/dev/./urandom 
-Djava.net.preferIPv4Stack=true
+  ${maxpermsize} -Djava.security.egd=file:/dev/./urandom 
-Djava.net.preferIPv4Stack=true
   -Djava.awt.headless=true
 
--enableassertions -Xmx${surefire.cygwinXmx} 
-XX:MaxPermSize=256m
+-enableassertions -Xmx${surefire.cygwinXmx} 
${maxpermsize}
   -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true
   "-Djava.library.path=${hadoop.library.path};${java.library.path}"
 
@@ -1826,6 +1826,28 @@
 
   
 
+
+
+
+
+  old-jdk
+  
+(,1.8)
+  
+  
+-XX:MaxPermSize=512m
+  
+
+
+  new-jdk
+  
+[1.8,)
+  
+  
+
+  
+
+
 
   jacoco
   
@@ -2441,7 +2463,7 @@
  or you can provide the license with 
-Dmaven.clover.licenseLocation=/path/to/license. Committers can find
  the license under 
https://svn.apache.org/repos/private/committers/donated-licenses/clover/
  The report will be generated under target/site/clover/index.html when 
you run
- MAVEN_OPTS="-Xmx2048m -XX:MaxPermSize=512m" mvn clean package 
-Pclover site -->
+ MAVEN_OPTS="-Xmx2048m ${maxpermsize}" mvn clean package -Pclover site 
-->
 
   clover
   



hbase git commit: HBASE-16604 Scanner retries on IOException can cause the scans to miss data - addendum

2016-09-24 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 2926a665a -> 49a4980e6


HBASE-16604 Scanner retries on IOException can cause the scans to miss data - 
addendum


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/49a4980e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/49a4980e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/49a4980e

Branch: refs/heads/branch-1.3
Commit: 49a4980e6dac1e74275ae5b042b01cd27efc8ebd
Parents: 2926a66
Author: Jerry He 
Authored: Sat Sep 24 17:47:27 2016 -0700
Committer: Jerry He 
Committed: Sat Sep 24 17:47:27 2016 -0700

--
 .../hbase/regionserver/DelegatingKeyValueScanner.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/49a4980e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
index 10432b9..9804a32 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DelegatingKeyValueScanner.java
@@ -53,11 +53,6 @@ public class DelegatingKeyValueScanner implements 
KeyValueScanner {
   }
 
   @Override
-  public long getScannerOrder() {
-return delegate.getScannerOrder();
-  }
-
-  @Override
   public void close() {
 delegate.close();
   }
@@ -73,6 +68,11 @@ public class DelegatingKeyValueScanner implements 
KeyValueScanner {
   }
 
   @Override
+  public long getSequenceID() {
+return delegate.getSequenceID();
+  }
+
+  @Override
   public boolean realSeekDone() {
 return delegate.realSeekDone();
   }



hbase git commit: HBASE-16732 Avoid possible NPE in MetaTableLocator

2016-09-29 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master bf3c928b7 -> 3757da643


HBASE-16732 Avoid possible NPE in MetaTableLocator


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3757da64
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3757da64
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3757da64

Branch: refs/heads/master
Commit: 3757da643d43bf0eaf8a0bd4c30b56f24c95fb6c
Parents: bf3c928
Author: Jerry He 
Authored: Thu Sep 29 13:44:59 2016 -0700
Committer: Jerry He 
Committed: Thu Sep 29 14:00:46 2016 -0700

--
 .../hadoop/hbase/zookeeper/MetaTableLocator.java   | 13 -
 .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java   |  8 +---
 2 files changed, 13 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3757da64/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index 359617a..7b64e0c 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -550,17 +550,20 @@ public class MetaTableLocator {
   final long timeout, Configuration conf)
   throws InterruptedException {
 int numReplicasConfigured = 1;
+
+List<ServerName> servers = new ArrayList<ServerName>();
+// Make the blocking call first so that we do the wait to know
+// the znodes are all in place or timeout.
+ServerName server = blockUntilAvailable(zkw, timeout);
+if (server == null) return null;
+servers.add(server);
+
 try {
   List<String> metaReplicaNodes = zkw.getMetaReplicaNodes();
   numReplicasConfigured = metaReplicaNodes.size();
 } catch (KeeperException e) {
   LOG.warn("Got ZK exception " + e);
 }
-List servers = new 
ArrayList(numReplicasConfigured);
-ServerName server = blockUntilAvailable(zkw, timeout);
-if (server == null) return null;
-servers.add(server);
-
 for (int replicaId = 1; replicaId < numReplicasConfigured; replicaId++) {
   // return all replica locations for the meta
   servers.add(getMetaRegionLocation(zkw, replicaId));

http://git-wip-us.apache.org/repos/asf/hbase/blob/3757da64/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index f7d7e26..1f3afe4 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -481,9 +481,11 @@ public class ZooKeeperWatcher implements Watcher, 
Abortable, Closeable {
   public List<String> getMetaReplicaNodes() throws KeeperException {
 List<String> childrenOfBaseNode = ZKUtil.listChildrenNoWatch(this, 
 baseZNode);
 List<String> metaReplicaNodes = new ArrayList<String>(2);
-String pattern = 
conf.get("zookeeper.znode.metaserver","meta-region-server");
-for (String child : childrenOfBaseNode) {
-  if (child.startsWith(pattern)) metaReplicaNodes.add(child);
+if (childrenOfBaseNode != null) {
+  String pattern = 
conf.get("zookeeper.znode.metaserver","meta-region-server");
+  for (String child : childrenOfBaseNode) {
+if (child.startsWith(pattern)) metaReplicaNodes.add(child);
+  }
 }
 return metaReplicaNodes;
   }



hbase git commit: HBASE-16732 Avoid possible NPE in MetaTableLocator

2016-09-29 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 39a79d50f -> 728f58ad5


HBASE-16732 Avoid possible NPE in MetaTableLocator


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/728f58ad
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/728f58ad
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/728f58ad

Branch: refs/heads/branch-1.3
Commit: 728f58ad5f1e52264df58161fcbcea4ce8527a9d
Parents: 39a79d5
Author: Jerry He 
Authored: Thu Sep 29 13:44:59 2016 -0700
Committer: Jerry He 
Committed: Thu Sep 29 14:05:24 2016 -0700

--
 .../hadoop/hbase/zookeeper/MetaTableLocator.java   | 13 -
 .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java   |  8 +---
 2 files changed, 13 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/728f58ad/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index be5bf6e..c13e212 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -553,17 +553,20 @@ public class MetaTableLocator {
   final long timeout, Configuration conf)
   throws InterruptedException {
 int numReplicasConfigured = 1;
+
+List<ServerName> servers = new ArrayList<ServerName>();
+// Make the blocking call first so that we do the wait to know
+// the znodes are all in place or timeout.
+ServerName server = blockUntilAvailable(zkw, timeout);
+if (server == null) return null;
+servers.add(server);
+
 try {
   List metaReplicaNodes = zkw.getMetaReplicaNodes();
   numReplicasConfigured = metaReplicaNodes.size();
 } catch (KeeperException e) {
   LOG.warn("Got ZK exception " + e);
 }
-List servers = new 
ArrayList(numReplicasConfigured);
-ServerName server = blockUntilAvailable(zkw, timeout);
-if (server == null) return null;
-servers.add(server);
-
 for (int replicaId = 1; replicaId < numReplicasConfigured; replicaId++) {
   // return all replica locations for the meta
   servers.add(getMetaRegionLocation(zkw, replicaId));

http://git-wip-us.apache.org/repos/asf/hbase/blob/728f58ad/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index d89041d..f5fa0b7 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -481,9 +481,11 @@ public class ZooKeeperWatcher implements Watcher, 
Abortable, Closeable {
   public List getMetaReplicaNodes() throws KeeperException {
 List childrenOfBaseNode = ZKUtil.listChildrenNoWatch(this, 
baseZNode);
 List metaReplicaNodes = new ArrayList(2);
-String pattern = 
conf.get("zookeeper.znode.metaserver","meta-region-server");
-for (String child : childrenOfBaseNode) {
-  if (child.startsWith(pattern)) metaReplicaNodes.add(child);
+if (childrenOfBaseNode != null) {
+  String pattern = 
conf.get("zookeeper.znode.metaserver","meta-region-server");
+  for (String child : childrenOfBaseNode) {
+if (child.startsWith(pattern)) metaReplicaNodes.add(child);
+  }
 }
 return metaReplicaNodes;
   }



hbase git commit: HBASE-16732 Avoid possible NPE in MetaTableLocator

2016-09-29 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 df25ebf84 -> 5ac2776d2


HBASE-16732 Avoid possible NPE in MetaTableLocator


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5ac2776d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5ac2776d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5ac2776d

Branch: refs/heads/branch-1
Commit: 5ac2776d2394c339f4bfee99de1150387e0d92e4
Parents: df25ebf
Author: Jerry He 
Authored: Thu Sep 29 13:44:59 2016 -0700
Committer: Jerry He 
Committed: Thu Sep 29 14:14:01 2016 -0700

--
 .../hadoop/hbase/zookeeper/MetaTableLocator.java   | 13 -
 .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java   |  8 +---
 2 files changed, 13 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5ac2776d/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index 40b84cf..1630d83 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -554,17 +554,20 @@ public class MetaTableLocator {
   final long timeout, Configuration conf)
   throws InterruptedException {
 int numReplicasConfigured = 1;
+
+List<ServerName> servers = new ArrayList<ServerName>();
+// Make the blocking call first so that we do the wait to know
+// the znodes are all in place or timeout.
+ServerName server = blockUntilAvailable(zkw, timeout);
+if (server == null) return null;
+servers.add(server);
+
 try {
   List metaReplicaNodes = zkw.getMetaReplicaNodes();
   numReplicasConfigured = metaReplicaNodes.size();
 } catch (KeeperException e) {
   LOG.warn("Got ZK exception " + e);
 }
-List servers = new 
ArrayList(numReplicasConfigured);
-ServerName server = blockUntilAvailable(zkw, timeout);
-if (server == null) return null;
-servers.add(server);
-
 for (int replicaId = 1; replicaId < numReplicasConfigured; replicaId++) {
   // return all replica locations for the meta
   servers.add(getMetaRegionLocation(zkw, replicaId));

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ac2776d/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index ce209d6..f333cd5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -496,9 +496,11 @@ public class ZooKeeperWatcher implements Watcher, 
Abortable, Closeable {
   public List getMetaReplicaNodes() throws KeeperException {
 List childrenOfBaseNode = ZKUtil.listChildrenNoWatch(this, 
baseZNode);
 List metaReplicaNodes = new ArrayList(2);
-String pattern = 
conf.get("zookeeper.znode.metaserver","meta-region-server");
-for (String child : childrenOfBaseNode) {
-  if (child.startsWith(pattern)) metaReplicaNodes.add(child);
+if (childrenOfBaseNode != null) {
+  String pattern = 
conf.get("zookeeper.znode.metaserver","meta-region-server");
+  for (String child : childrenOfBaseNode) {
+if (child.startsWith(pattern)) metaReplicaNodes.add(child);
+  }
 }
 return metaReplicaNodes;
   }



hbase git commit: HBASE-16732 Avoid possible NPE in MetaTableLocator

2016-09-29 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 2733e24d3 -> bfb20c0c1


HBASE-16732 Avoid possible NPE in MetaTableLocator


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bfb20c0c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bfb20c0c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bfb20c0c

Branch: refs/heads/branch-1.2
Commit: bfb20c0c1fa40f0580d440747a16852d2deeb78e
Parents: 2733e24
Author: Jerry He 
Authored: Thu Sep 29 13:44:59 2016 -0700
Committer: Jerry He 
Committed: Thu Sep 29 14:19:03 2016 -0700

--
 .../hadoop/hbase/zookeeper/MetaTableLocator.java   | 13 -
 .../hadoop/hbase/zookeeper/ZooKeeperWatcher.java   |  8 +---
 2 files changed, 13 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bfb20c0c/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index ac6c6f0..0b7d42a 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -554,17 +554,20 @@ public class MetaTableLocator {
   final long timeout, Configuration conf)
   throws InterruptedException {
 int numReplicasConfigured = 1;
+
+List<ServerName> servers = new ArrayList<ServerName>();
+// Make the blocking call first so that we do the wait to know
+// the znodes are all in place or timeout.
+ServerName server = blockUntilAvailable(zkw, timeout);
+if (server == null) return null;
+servers.add(server);
+
 try {
   List metaReplicaNodes = zkw.getMetaReplicaNodes();
   numReplicasConfigured = metaReplicaNodes.size();
 } catch (KeeperException e) {
   LOG.warn("Got ZK exception " + e);
 }
-List servers = new 
ArrayList(numReplicasConfigured);
-ServerName server = blockUntilAvailable(zkw, timeout);
-if (server == null) return null;
-servers.add(server);
-
 for (int replicaId = 1; replicaId < numReplicasConfigured; replicaId++) {
   // return all replica locations for the meta
   servers.add(getMetaRegionLocation(zkw, replicaId));

http://git-wip-us.apache.org/repos/asf/hbase/blob/bfb20c0c/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 5b6385f..73a3a9e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -485,9 +485,11 @@ public class ZooKeeperWatcher implements Watcher, 
Abortable, Closeable {
   public List getMetaReplicaNodes() throws KeeperException {
 List childrenOfBaseNode = ZKUtil.listChildrenNoWatch(this, 
baseZNode);
 List metaReplicaNodes = new ArrayList(2);
-String pattern = 
conf.get("zookeeper.znode.metaserver","meta-region-server");
-for (String child : childrenOfBaseNode) {
-  if (child.startsWith(pattern)) metaReplicaNodes.add(child);
+if (childrenOfBaseNode != null) {
+  String pattern = 
conf.get("zookeeper.znode.metaserver","meta-region-server");
+  for (String child : childrenOfBaseNode) {
+if (child.startsWith(pattern)) metaReplicaNodes.add(child);
+  }
 }
 return metaReplicaNodes;
   }



hbase git commit: HBASE-17816 HRegion#mutateRowWithLocks should update writeRequestCount metric

2017-04-11 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4030facc9 -> 1afb86a1c


HBASE-17816 HRegion#mutateRowWithLocks should update writeRequestCount metric

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1afb86a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1afb86a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1afb86a1

Branch: refs/heads/branch-1
Commit: 1afb86a1c1568d77448c22651bf0027a4ede
Parents: 4030fac
Author: qgxiaozhan 
Authored: Tue Apr 11 12:29:27 2017 +0800
Committer: Jerry He 
Committed: Tue Apr 11 18:08:06 2017 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  |  1 +
 .../hadoop/hbase/regionserver/TestHRegion.java  | 24 
 2 files changed, 25 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1afb86a1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index c75dda1..81547d5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7277,6 +7277,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   @Override
   public void mutateRowsWithLocks(Collection<Mutation> mutations,
   Collection<byte[]> rowsToLock, long nonceGroup, long nonce) throws 
 IOException {
+writeRequestsCount.add(mutations.size());
 MultiRowMutationProcessor proc = new MultiRowMutationProcessor(mutations, 
rowsToLock);
 processRowsWithLocks(proc, -1, nonceGroup, nonce);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1afb86a1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index e1af83a..0b7e3b5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -6779,4 +6779,28 @@ public class TestHRegion {
   this.region = null;
 }
   }
+
+  @Test
+  public void testMutateRow_WriteRequestCount() throws Exception {
+byte[] row1 = Bytes.toBytes("row1");
+byte[] fam1 = Bytes.toBytes("fam1");
+byte[] qf1 = Bytes.toBytes("qualifier");
+byte[] val1 = Bytes.toBytes("value1");
+
+RowMutations rm = new RowMutations(row1);
+Put put = new Put(row1);
+put.addColumn(fam1, qf1, val1);
+rm.add(put);
+
+this.region = initHRegion(tableName, method, CONF, fam1);
+try {
+  long wrcBeforeMutate = this.region.writeRequestsCount.get();
+  this.region.mutateRow(rm);
+  long wrcAfterMutate = this.region.writeRequestsCount.get();
+  Assert.assertEquals(wrcBeforeMutate + rm.getMutations().size(), 
wrcAfterMutate);
+} finally {
+  HBaseTestingUtility.closeRegionAndWAL(this.region);
+  this.region = null;
+}
+  }
 }



hbase git commit: HBASE-14798 NPE reporting server load causes regionserver abort; causes TestAcidGuarantee to fail

2016-02-25 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 4743fde0a -> 6cb16e93d


HBASE-14798 NPE reporting server load causes regionserver abort; causes 
TestAcidGuarantee to fail


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6cb16e93
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6cb16e93
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6cb16e93

Branch: refs/heads/branch-1.1
Commit: 6cb16e93dd1b48ee80c8b15115055eefdc03e571
Parents: 4743fde
Author: stack 
Authored: Sat Nov 14 09:07:39 2015 -0800
Committer: Jerry He 
Committed: Thu Feb 25 21:29:01 2016 -0800

--
 .../regionserver/DefaultStoreFileManager.java   |  1 +
 .../hadoop/hbase/regionserver/HRegion.java  | 53 
 2 files changed, 34 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb16e93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
index 8305b99..4b2ec50 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
@@ -70,6 +70,7 @@ class DefaultStoreFileManager implements StoreFileManager {
 
   @Override
   public final Collection getStorefiles() {
+// TODO: I can return a null list of StoreFiles? That'll mess up clients. 
St.Ack 2015
 return storefiles;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb16e93/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6fd88b8..aeebb1c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -968,16 +968,26 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 initializeRegionStores(reporter, status, true);
   }
 
-  private void writeRegionOpenMarker(WAL wal, long openSeqId) throws 
IOException {
-Map<byte[], List<Path>> storeFiles = new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
+  /**
+   * @return Map of StoreFiles by column family
+   */
+  private NavigableMap<byte[], List<Path>> getStoreFiles() {
+NavigableMap<byte[], List<Path>> allStoreFiles =
+  new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
 for (Store store: getStores()) {
-  ArrayList<Path> storeFileNames = new ArrayList<Path>();
-  for (StoreFile storeFile: store.getStorefiles()) {
+  Collection<StoreFile> storeFiles = store.getStorefiles();
+  if (storeFiles == null) continue;
+  List<Path> storeFileNames = new ArrayList<Path>();
+  for (StoreFile storeFile: storeFiles) {
 storeFileNames.add(storeFile.getPath());
   }
-  storeFiles.put(store.getFamily().getName(), storeFileNames);
+  allStoreFiles.put(store.getFamily().getName(), storeFileNames);
 }
+return allStoreFiles;
+  }
 
+  private void writeRegionOpenMarker(WAL wal, long openSeqId) throws 
IOException {
+Map<byte[], List<Path>> storeFiles = getStoreFiles();
 RegionEventDescriptor regionOpenDesc = 
ProtobufUtil.toRegionEventDescriptor(
   RegionEventDescriptor.EventType.REGION_OPEN, getRegionInfo(), openSeqId,
   getRegionServerServices().getServerName(), storeFiles);
@@ -986,15 +996,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   }
 
   private void writeRegionCloseMarker(WAL wal) throws IOException {
-Map<byte[], List<Path>> storeFiles = new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
-for (Store store: getStores()) {
-  ArrayList<Path> storeFileNames = new ArrayList<Path>();
-  for (StoreFile storeFile: store.getStorefiles()) {
-storeFileNames.add(storeFile.getPath());
-  }
-  storeFiles.put(store.getFamily().getName(), storeFileNames);
-}
-
+Map<byte[], List<Path>> storeFiles = getStoreFiles();
 RegionEventDescriptor regionEventDesc = 
ProtobufUtil.toRegionEventDescriptor(
   RegionEventDescriptor.EventType.REGION_CLOSE, getRegionInfo(), 
getSequenceId().get(),
   getRegionServerServices().getServerName(), storeFiles);
@@ -1026,7 +1028,9 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   new HDFSBlocksDistribution();
 synchronized (this.stores) {
   for (Store store : this.stores.values()) {
-for (StoreFile sf : store.getStorefiles()) {
+   

hbase git commit: HBASE-14963 Remove use of Guava Stopwatch from HBase client code (Devaraj Das)

2016-03-19 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 ce00fc1bc -> c2cd23fee


HBASE-14963 Remove use of Guava Stopwatch from HBase client code (Devaraj Das)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c2cd23fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c2cd23fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c2cd23fe

Branch: refs/heads/branch-1.3
Commit: c2cd23fee98f34b846b4c43a9141f9bab598ae6a
Parents: ce00fc1
Author: Jerry He 
Authored: Sat Mar 19 13:21:53 2016 -0700
Committer: Jerry He 
Committed: Sat Mar 19 13:21:53 2016 -0700

--
 .../hbase/zookeeper/MetaTableLocator.java   | 23 
 1 file changed, 9 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c2cd23fe/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index 0975c14..dc25bab 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.zookeeper.KeeperException;
 
-import com.google.common.base.Stopwatch;
 import com.google.protobuf.InvalidProtocolBufferException;
 
 /**
@@ -228,11 +227,11 @@ public class MetaTableLocator {
* @throws InterruptedException if interrupted while waiting
*/
   public void waitMetaRegionLocation(ZooKeeperWatcher zkw) throws 
InterruptedException {
-Stopwatch stopwatch = new Stopwatch().start();
+long startTime = System.currentTimeMillis();
 while (!stopped) {
   try {
 if (waitMetaRegionLocation(zkw, 100) != null) break;
-long sleepTime = stopwatch.elapsedMillis();
+long sleepTime = System.currentTimeMillis() - startTime;
 // +1 in case sleepTime=0
 if ((sleepTime + 1) % 10000 == 0) {
   LOG.warn("Have been waiting for meta to be assigned for " + 
sleepTime + "ms");
@@ -590,19 +589,15 @@ public class MetaTableLocator {
   throws InterruptedException {
 if (timeout < 0) throw new IllegalArgumentException();
 if (zkw == null) throw new IllegalArgumentException();
-Stopwatch sw = new Stopwatch().start();
+long startTime = System.currentTimeMillis();
 ServerName sn = null;
-try {
-  while (true) {
-sn = getMetaRegionLocation(zkw, replicaId);
-if (sn != null || sw.elapsedMillis()
-> timeout - HConstants.SOCKET_RETRY_WAIT_MS) {
-  break;
-}
-Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
+while (true) {
+  sn = getMetaRegionLocation(zkw, replicaId);
+  if (sn != null || (System.currentTimeMillis() - startTime)
+  > timeout - HConstants.SOCKET_RETRY_WAIT_MS) {
+break;
   }
-} finally {
-  sw.stop();
+  Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
 }
 return sn;
   }



hbase git commit: HBASE-14963 Remove use of Guava Stopwatch from HBase client code (Devaraj Das)

2016-03-19 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 463374ecf -> 8151503fa


HBASE-14963 Remove use of Guava Stopwatch from HBase client code (Devaraj Das)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8151503f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8151503f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8151503f

Branch: refs/heads/branch-1
Commit: 8151503fa075bb7388019770bccb28703d23935d
Parents: 463374e
Author: Jerry He 
Authored: Sat Mar 19 13:21:53 2016 -0700
Committer: Jerry He 
Committed: Sat Mar 19 13:23:05 2016 -0700

--
 .../hbase/zookeeper/MetaTableLocator.java   | 23 
 1 file changed, 9 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8151503f/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index 0975c14..dc25bab 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.zookeeper.KeeperException;
 
-import com.google.common.base.Stopwatch;
 import com.google.protobuf.InvalidProtocolBufferException;
 
 /**
@@ -228,11 +227,11 @@ public class MetaTableLocator {
* @throws InterruptedException if interrupted while waiting
*/
   public void waitMetaRegionLocation(ZooKeeperWatcher zkw) throws 
InterruptedException {
-Stopwatch stopwatch = new Stopwatch().start();
+long startTime = System.currentTimeMillis();
 while (!stopped) {
   try {
 if (waitMetaRegionLocation(zkw, 100) != null) break;
-long sleepTime = stopwatch.elapsedMillis();
+long sleepTime = System.currentTimeMillis() - startTime;
 // +1 in case sleepTime=0
 if ((sleepTime + 1) % 10000 == 0) {
   LOG.warn("Have been waiting for meta to be assigned for " + 
sleepTime + "ms");
@@ -590,19 +589,15 @@ public class MetaTableLocator {
   throws InterruptedException {
 if (timeout < 0) throw new IllegalArgumentException();
 if (zkw == null) throw new IllegalArgumentException();
-Stopwatch sw = new Stopwatch().start();
+long startTime = System.currentTimeMillis();
 ServerName sn = null;
-try {
-  while (true) {
-sn = getMetaRegionLocation(zkw, replicaId);
-if (sn != null || sw.elapsedMillis()
-> timeout - HConstants.SOCKET_RETRY_WAIT_MS) {
-  break;
-}
-Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
+while (true) {
+  sn = getMetaRegionLocation(zkw, replicaId);
+  if (sn != null || (System.currentTimeMillis() - startTime)
+  > timeout - HConstants.SOCKET_RETRY_WAIT_MS) {
+break;
   }
-} finally {
-  sw.stop();
+  Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
 }
 return sn;
   }



hbase git commit: HBASE-14256 Flush task message may be confusing when region is recovered (Gabor Liptak)

2016-03-24 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 ca819e1ef -> 1361a25e6


HBASE-14256 Flush task message may be confusing when region is recovered (Gabor 
Liptak)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1361a25e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1361a25e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1361a25e

Branch: refs/heads/branch-1
Commit: 1361a25e6dad87ad41cd1502adc511a5b9263c39
Parents: ca819e1
Author: Jerry He 
Authored: Thu Mar 24 10:01:53 2016 -0700
Committer: Jerry He 
Committed: Thu Mar 24 10:01:53 2016 -0700

--
 .../org/apache/hadoop/hbase/regionserver/HRegion.java| 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1361a25e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d3a31a3..cabfc39 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1210,11 +1210,12 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   // force a flush only if region replication is set up for this region. 
Otherwise no need.
   boolean forceFlush = getTableDesc().getRegionReplication() > 1;
 
-  // force a flush first
-  MonitoredTask status = TaskMonitor.get().createStatus(
-"Flushing region " + this + " because recovery is finished");
+  MonitoredTask status = TaskMonitor.get().createStatus("Recovering region 
" + this);
+
   try {
+// force a flush first
 if (forceFlush) {
+  status.setStatus("Flushing region " + this + " because recovery is 
finished");
   internalFlushcache(status);
 }
 
@@ -1230,13 +1231,13 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   // We cannot rethrow this exception since we are being called from 
the zk thread. The
   // region has already opened. In this case we log the error, but 
continue
   LOG.warn(getRegionInfo().getEncodedName() + " : was not able to 
write region opening "
-  + "event to WAL, continueing", e);
+  + "event to WAL, continuing", e);
 }
   } catch (IOException ioe) {
 // Distributed log replay semantics does not necessarily require a 
flush, since the replayed
 // data is already written again in the WAL. So failed flush should be 
fine.
 LOG.warn(getRegionInfo().getEncodedName() + " : was not able to flush "
-+ "event to WAL, continueing", ioe);
++ "event to WAL, continuing", ioe);
   } finally {
 status.cleanup();
   }



hbase git commit: HBASE-14256 Flush task message may be confusing when region is recovered (Gabor Liptak)

2016-03-24 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 625da2153 -> ee7ac7ea3


HBASE-14256 Flush task message may be confusing when region is recovered (Gabor 
Liptak)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ee7ac7ea
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ee7ac7ea
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ee7ac7ea

Branch: refs/heads/branch-1.3
Commit: ee7ac7ea3b6ffdf20e4d09defe18a7078eb3b29e
Parents: 625da21
Author: Jerry He 
Authored: Thu Mar 24 10:01:53 2016 -0700
Committer: Jerry He 
Committed: Thu Mar 24 10:03:11 2016 -0700

--
 .../org/apache/hadoop/hbase/regionserver/HRegion.java| 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ee7ac7ea/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d3a31a3..cabfc39 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1210,11 +1210,12 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   // force a flush only if region replication is set up for this region. 
Otherwise no need.
   boolean forceFlush = getTableDesc().getRegionReplication() > 1;
 
-  // force a flush first
-  MonitoredTask status = TaskMonitor.get().createStatus(
-"Flushing region " + this + " because recovery is finished");
+  MonitoredTask status = TaskMonitor.get().createStatus("Recovering region 
" + this);
+
   try {
+// force a flush first
 if (forceFlush) {
+  status.setStatus("Flushing region " + this + " because recovery is 
finished");
   internalFlushcache(status);
 }
 
@@ -1230,13 +1231,13 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   // We cannot rethrow this exception since we are being called from 
the zk thread. The
   // region has already opened. In this case we log the error, but 
continue
   LOG.warn(getRegionInfo().getEncodedName() + " : was not able to 
write region opening "
-  + "event to WAL, continueing", e);
+  + "event to WAL, continuing", e);
 }
   } catch (IOException ioe) {
 // Distributed log replay semantics does not necessarily require a 
flush, since the replayed
 // data is already written again in the WAL. So failed flush should be 
fine.
 LOG.warn(getRegionInfo().getEncodedName() + " : was not able to flush "
-+ "event to WAL, continueing", ioe);
++ "event to WAL, continuing", ioe);
   } finally {
 status.cleanup();
   }



hbase git commit: HBASE-14256 Flush task message may be confusing when region is recovered (Gabor Liptak)

2016-03-24 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master e71bc71c8 -> 7c9309821


HBASE-14256 Flush task message may be confusing when region is recovered (Gabor 
Liptak)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c930982
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c930982
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c930982

Branch: refs/heads/master
Commit: 7c93098210d5f17242857a9a0148b4b26ff7aaae
Parents: e71bc71
Author: Jerry He 
Authored: Thu Mar 24 10:01:53 2016 -0700
Committer: Jerry He 
Committed: Thu Mar 24 10:06:38 2016 -0700

--
 .../org/apache/hadoop/hbase/regionserver/HRegion.java| 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c930982/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index ab19d08..4da0f13 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1217,11 +1217,12 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   // force a flush only if region replication is set up for this region. 
Otherwise no need.
   boolean forceFlush = getTableDesc().getRegionReplication() > 1;
 
-  // force a flush first
-  MonitoredTask status = TaskMonitor.get().createStatus(
-"Flushing region " + this + " because recovery is finished");
+  MonitoredTask status = TaskMonitor.get().createStatus("Recovering region 
" + this);
+
   try {
+// force a flush first
 if (forceFlush) {
+  status.setStatus("Flushing region " + this + " because recovery is 
finished");
   internalFlushcache(status);
 }
 
@@ -1237,13 +1238,13 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   // We cannot rethrow this exception since we are being called from 
the zk thread. The
   // region has already opened. In this case we log the error, but 
continue
   LOG.warn(getRegionInfo().getEncodedName() + " : was not able to 
write region opening "
-  + "event to WAL, continueing", e);
+  + "event to WAL, continuing", e);
 }
   } catch (IOException ioe) {
 // Distributed log replay semantics does not necessarily require a 
flush, since the replayed
 // data is already written again in the WAL. So failed flush should be 
fine.
 LOG.warn(getRegionInfo().getEncodedName() + " : was not able to flush "
-+ "event to WAL, continueing", ioe);
++ "event to WAL, continuing", ioe);
   } finally {
 status.cleanup();
   }



hbase git commit: HBASE-14256 Flush task message may be confusing when region is recovered (Gabor Liptak)

2016-03-24 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 9725a5bd7 -> 1be381e7d


HBASE-14256 Flush task message may be confusing when region is recovered (Gabor 
Liptak)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1be381e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1be381e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1be381e7

Branch: refs/heads/branch-1.2
Commit: 1be381e7d98545d8745ffac7c6df22d486f31e01
Parents: 9725a5b
Author: Jerry He 
Authored: Thu Mar 24 10:01:53 2016 -0700
Committer: Jerry He 
Committed: Thu Mar 24 10:13:28 2016 -0700

--
 .../org/apache/hadoop/hbase/regionserver/HRegion.java| 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1be381e7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index da155fb..18024d9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1189,11 +1189,12 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   // force a flush only if region replication is set up for this region. 
Otherwise no need.
   boolean forceFlush = getTableDesc().getRegionReplication() > 1;
 
-  // force a flush first
-  MonitoredTask status = TaskMonitor.get().createStatus(
-"Flushing region " + this + " because recovery is finished");
+  MonitoredTask status = TaskMonitor.get().createStatus("Recovering region 
" + this);
+
   try {
+// force a flush first
 if (forceFlush) {
+  status.setStatus("Flushing region " + this + " because recovery is 
finished");
   internalFlushcache(status);
 }
 
@@ -1209,13 +1210,13 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   // We cannot rethrow this exception since we are being called from 
the zk thread. The
   // region has already opened. In this case we log the error, but 
continue
   LOG.warn(getRegionInfo().getEncodedName() + " : was not able to 
write region opening "
-  + "event to WAL, continueing", e);
+  + "event to WAL, continuing", e);
 }
   } catch (IOException ioe) {
 // Distributed log replay semantics does not necessarily require a 
flush, since the replayed
 // data is already written again in the WAL. So failed flush should be 
fine.
 LOG.warn(getRegionInfo().getEncodedName() + " : was not able to flush "
-+ "event to WAL, continueing", ioe);
++ "event to WAL, continuing", ioe);
   } finally {
 status.cleanup();
   }



hbase git commit: HBASE-18986 Remove unnecessary null check after CellUtil.cloneQualifier()

2017-10-15 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 e04b15c68 -> aeaf222e3


HBASE-18986 Remove unnecessary null check after CellUtil.cloneQualifier()

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/aeaf222e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/aeaf222e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/aeaf222e

Branch: refs/heads/branch-2
Commit: aeaf222e35e8a7d5c751477e7774417654062e54
Parents: e04b15c
Author: Xiang Li 
Authored: Wed Oct 11 20:55:27 2017 +0800
Committer: Jerry He 
Committed: Sun Oct 15 13:11:31 2017 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/aeaf222e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6901e3f..e3d88f6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2956,7 +2956,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 //  This is expensive.
 if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && 
CellUtil.isDeleteType(cell)) {
   byte[] qual = CellUtil.cloneQualifier(cell);
-  if (qual == null) qual = HConstants.EMPTY_BYTE_ARRAY;
 
   Integer count = kvCount.get(qual);
   if (count == null) {



hbase git commit: HBASE-18986 Remove unnecessary null check after CellUtil.cloneQualifier()

2017-10-15 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 202e414eb -> 83af5f2c6


HBASE-18986 Remove unnecessary null check after CellUtil.cloneQualifier()

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/83af5f2c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/83af5f2c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/83af5f2c

Branch: refs/heads/master
Commit: 83af5f2c623cb3180ab21f17f5681d4328acdc76
Parents: 202e414
Author: Xiang Li 
Authored: Wed Oct 11 20:55:27 2017 +0800
Committer: Jerry He 
Committed: Sun Oct 15 13:20:09 2017 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/83af5f2c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6901e3f..e3d88f6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2956,7 +2956,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 //  This is expensive.
 if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && 
CellUtil.isDeleteType(cell)) {
   byte[] qual = CellUtil.cloneQualifier(cell);
-  if (qual == null) qual = HConstants.EMPTY_BYTE_ARRAY;
 
   Integer count = kvCount.get(qual);
   if (count == null) {



hbase git commit: HBASE-10367 RegionServer graceful stop / decommissioning

2017-10-19 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master af479c580 -> a43a00e89


HBASE-10367 RegionServer graceful stop / decommissioning

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a43a00e8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a43a00e8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a43a00e8

Branch: refs/heads/master
Commit: a43a00e89c5c99968a205208ab9a5307c89730b3
Parents: af479c5
Author: Jerry He 
Authored: Thu Oct 19 21:44:38 2017 -0700
Committer: Jerry He 
Committed: Thu Oct 19 21:54:45 2017 -0700

--
 bin/draining_servers.rb |   2 +
 .../org/apache/hadoop/hbase/client/Admin.java   |  26 +++--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  25 ++--
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  14 ++-
 .../hbase/client/ConnectionImplementation.java  |  30 ++---
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  23 ++--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  66 +--
 .../client/ShortCircuitMasterConnection.java|  30 ++---
 .../hbase/shaded/protobuf/RequestConverter.java |  24 ++--
 .../src/main/protobuf/Master.proto  |  38 +++---
 .../hbase/coprocessor/MasterObserver.java   |  36 ++
 .../org/apache/hadoop/hbase/master/HMaster.java | 117 +--
 .../hbase/master/MasterCoprocessorHost.java |  56 +
 .../hadoop/hbase/master/MasterRpcServices.java  |  71 ++-
 .../hadoop/hbase/master/MasterServices.java |  19 +--
 .../hadoop/hbase/master/ServerManager.java  |  14 ++-
 .../hbase/security/access/AccessController.java |  17 +++
 .../hbase/zookeeper/DrainingServerTracker.java  |   3 +
 .../apache/hadoop/hbase/client/TestAdmin2.java  | 103 
 .../client/TestAsyncDecommissionAdminApi.java   |  95 +++
 .../hbase/client/TestAsyncDrainAdminApi.java| 101 
 .../hbase/master/MockNoopMasterServices.java|  15 ---
 .../hbase/zookeeper/TestZooKeeperACL.java   |  18 +--
 23 files changed, 556 insertions(+), 387 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a43a00e8/bin/draining_servers.rb
--
diff --git a/bin/draining_servers.rb b/bin/draining_servers.rb
index ea74c30..588bac4 100644
--- a/bin/draining_servers.rb
+++ b/bin/draining_servers.rb
@@ -17,6 +17,8 @@
 #
 
 # Add or remove servers from draining mode via zookeeper
+# Deprecated in 2.0, and will be removed in 3.0. Use Admin decommission
+# API instead.
 
 require 'optparse'
 include Java

http://git-wip-us.apache.org/repos/asf/hbase/blob/a43a00e8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 64d5e53..540b7c8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2425,22 +2425,30 @@ public interface Admin extends Abortable, Closeable {
   }
 
   /**
-   * Mark a region server as draining to prevent additional regions from 
getting assigned to it.
-   * @param servers List of region servers to drain.
+   * Mark region server(s) as decommissioned to prevent additional regions 
from getting
+   * assigned to them. Optionally unload the regions on the servers. If there 
are multiple servers
+   * to be decommissioned, decommissioning them at the same time can prevent 
wasteful region
+   * movements. Region unloading is asynchronous.
+   * @param servers The list of servers to decommission.
+   * @param offload True to offload the regions from the decommissioned servers
*/
-  void drainRegionServers(List servers) throws IOException;
+  void decommissionRegionServers(List servers, boolean offload) 
throws IOException;
 
   /**
-   * List region servers marked as draining to not get additional regions 
assigned to them.
-   * @return List of draining region servers.
+   * List region servers marked as decommissioned, which can not be assigned 
regions.
+   * @return List of decommissioned region servers.
*/
-  List listDrainingRegionServers() throws IOException;
+  List listDecommissionedRegionServers() throws IOException;
 
   /**
-   * Remove drain from a region server to allow additional regions assignments.
-   * @param servers List of region servers to remove drain from.
+   * Remove decommission marker from a region server to allow regions 
assignments.
+   * Load regions onto the server if a list of regions is given. Region 
loading is
+   * asynchronous.
+   * @param server The server 

hbase git commit: HBASE-10367 RegionServer graceful stop / decommissioning

2017-10-19 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 64f024a76 -> 75d2bba73


HBASE-10367 RegionServer graceful stop / decommissioning

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75d2bba7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75d2bba7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75d2bba7

Branch: refs/heads/branch-2
Commit: 75d2bba73969d84834f5cf15560ad0341af31d48
Parents: 64f024a
Author: Jerry He 
Authored: Thu Oct 19 21:44:38 2017 -0700
Committer: Jerry He 
Committed: Thu Oct 19 22:10:52 2017 -0700

--
 bin/draining_servers.rb |   2 +
 .../org/apache/hadoop/hbase/client/Admin.java   |  26 +++--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  25 ++--
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  14 ++-
 .../hbase/client/ConnectionImplementation.java  |  30 ++---
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  23 ++--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  66 +--
 .../client/ShortCircuitMasterConnection.java|  30 ++---
 .../hbase/shaded/protobuf/RequestConverter.java |  24 ++--
 .../src/main/protobuf/Master.proto  |  38 +++---
 .../hbase/coprocessor/MasterObserver.java   |  36 ++
 .../org/apache/hadoop/hbase/master/HMaster.java | 117 +--
 .../hbase/master/MasterCoprocessorHost.java |  56 +
 .../hadoop/hbase/master/MasterRpcServices.java  |  71 ++-
 .../hadoop/hbase/master/MasterServices.java |  19 +--
 .../hadoop/hbase/master/ServerManager.java  |  14 ++-
 .../hbase/security/access/AccessController.java |  17 +++
 .../hbase/zookeeper/DrainingServerTracker.java  |   3 +
 .../apache/hadoop/hbase/client/TestAdmin2.java  | 103 
 .../client/TestAsyncDecommissionAdminApi.java   |  95 +++
 .../hbase/client/TestAsyncDrainAdminApi.java| 101 
 .../hbase/master/MockNoopMasterServices.java|  15 ---
 .../hbase/zookeeper/TestZooKeeperACL.java   |  18 +--
 23 files changed, 556 insertions(+), 387 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/75d2bba7/bin/draining_servers.rb
--
diff --git a/bin/draining_servers.rb b/bin/draining_servers.rb
index ea74c30..588bac4 100644
--- a/bin/draining_servers.rb
+++ b/bin/draining_servers.rb
@@ -17,6 +17,8 @@
 #
 
 # Add or remove servers from draining mode via zookeeper
+# Deprecated in 2.0, and will be removed in 3.0. Use Admin decommission
+# API instead.
 
 require 'optparse'
 include Java

http://git-wip-us.apache.org/repos/asf/hbase/blob/75d2bba7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 64d5e53..540b7c8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2425,22 +2425,30 @@ public interface Admin extends Abortable, Closeable {
   }
 
   /**
-   * Mark a region server as draining to prevent additional regions from 
getting assigned to it.
-   * @param servers List of region servers to drain.
+   * Mark region server(s) as decommissioned to prevent additional regions 
from getting
+   * assigned to them. Optionally unload the regions on the servers. If there 
are multiple servers
+   * to be decommissioned, decommissioning them at the same time can prevent 
wasteful region
+   * movements. Region unloading is asynchronous.
+   * @param servers The list of servers to decommission.
+   * @param offload True to offload the regions from the decommissioned servers
*/
-  void drainRegionServers(List servers) throws IOException;
+  void decommissionRegionServers(List servers, boolean offload) 
throws IOException;
 
   /**
-   * List region servers marked as draining to not get additional regions 
assigned to them.
-   * @return List of draining region servers.
+   * List region servers marked as decommissioned, which can not be assigned 
regions.
+   * @return List of decommissioned region servers.
*/
-  List listDrainingRegionServers() throws IOException;
+  List listDecommissionedRegionServers() throws IOException;
 
   /**
-   * Remove drain from a region server to allow additional regions assignments.
-   * @param servers List of region servers to remove drain from.
+   * Remove decommission marker from a region server to allow regions 
assignments.
+   * Load regions onto the server if a list of regions is given. Region 
loading is
+   * asynchronous.
+   * @param server The ser

hbase git commit: HBASE-19096 Add RowMutions batch support in AsyncTable

2017-11-28 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 93b91e2cc -> e67a3699c


HBASE-19096 Add RowMutions batch support in AsyncTable

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e67a3699
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e67a3699
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e67a3699

Branch: refs/heads/master
Commit: e67a3699c463a9f222e5d1319d35994fea2a153d
Parents: 93b91e2
Author: Jerry He 
Authored: Tue Nov 28 18:41:23 2017 -0800
Committer: Jerry He 
Committed: Tue Nov 28 18:42:17 2017 -0800

--
 .../client/AsyncBatchRpcRetryingCaller.java |  36 +++--
 .../apache/hadoop/hbase/client/AsyncTable.java  |  12 +-
 .../hbase/client/MultiServerCallable.java   |  62 +++--
 .../hbase/shaded/protobuf/RequestConverter.java | 136 +++
 .../hbase/client/TestAsyncTableBatch.java   |  19 ++-
 5 files changed, 161 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e67a3699/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
index 2ae68c4..52eb821 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
@@ -29,6 +29,7 @@ import 
org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.IdentityHashMap;
 import java.util.List;
 import java.util.Map;
@@ -58,7 +59,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
@@ -232,27 +232,19 @@ class AsyncBatchRpcRetryingCaller {
   }
 
   private ClientProtos.MultiRequest buildReq(Map 
actionsByRegion,
-  List cells) throws IOException {
+  List cells, Map rowMutationsIndexMap) 
throws IOException {
 ClientProtos.MultiRequest.Builder multiRequestBuilder = 
ClientProtos.MultiRequest.newBuilder();
 ClientProtos.RegionAction.Builder regionActionBuilder = 
ClientProtos.RegionAction.newBuilder();
 ClientProtos.Action.Builder actionBuilder = 
ClientProtos.Action.newBuilder();
 ClientProtos.MutationProto.Builder mutationBuilder = 
ClientProtos.MutationProto.newBuilder();
 for (Map.Entry entry : actionsByRegion.entrySet()) {
-  // TODO: remove the extra for loop as we will iterate it in 
mutationBuilder.
-  if (!multiRequestBuilder.hasNonceGroup()) {
-for (Action action : entry.getValue().actions) {
-  if (action.hasNonce()) {
-
multiRequestBuilder.setNonceGroup(conn.getNonceGenerator().getNonceGroup());
-break;
-  }
-}
-  }
-  regionActionBuilder.clear();
-  regionActionBuilder.setRegion(
-RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, 
entry.getKey()));
-  regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(entry.getKey(),
-entry.getValue().actions, cells, regionActionBuilder, actionBuilder, 
mutationBuilder);
-  multiRequestBuilder.addRegionAction(regionActionBuilder.build());
+  long nonceGroup = conn.getNonceGenerator().getNonceGroup();
+  // multiRequestBuilder will be populated with region actions.
+  // rowMutationsIndexMap will be non-empty after the call if there is 
RowMutations in the
+  // action list.
+  RequestConverter.buildNoDataRegionActions(entry.getKey(),
+entry.getValue().actions, cells, multiRequestBuilder, 
regionActionBuilder, actionBuilder,
+mutationBuilder, nonceGroup, rowMutationsIndexMap);
 }
 return multiRequestBuilder.build();
   }
@@ -337,8 +329,12 @@ class AsyncBatchRpcRetryingCaller {
   }
   ClientProtos.MultiRequest req;
   List cells = new ArrayList<>();
+  // Map from a created RegionAction to the original index for a 
RowMutations within
+  // the original list of actions. This will be used to process the 
results when there
+  // is RowMutations in the a

hbase git commit: HBASE-19096 Add RowMutions batch support in AsyncTable

2017-11-28 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 8688da9e9 -> 0c4c39553


HBASE-19096 Add RowMutions batch support in AsyncTable

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0c4c3955
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0c4c3955
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0c4c3955

Branch: refs/heads/branch-2
Commit: 0c4c3955380e1927311a8f4b092e23532d2e795f
Parents: 8688da9
Author: Jerry He 
Authored: Tue Nov 28 18:41:23 2017 -0800
Committer: Jerry He 
Committed: Tue Nov 28 18:49:08 2017 -0800

--
 .../client/AsyncBatchRpcRetryingCaller.java |  36 +++--
 .../apache/hadoop/hbase/client/AsyncTable.java  |  12 +-
 .../hbase/client/MultiServerCallable.java   |  62 +++--
 .../hbase/shaded/protobuf/RequestConverter.java | 136 +++
 .../hbase/client/TestAsyncTableBatch.java   |  19 ++-
 5 files changed, 161 insertions(+), 104 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0c4c3955/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
index 2ae68c4..52eb821 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
@@ -29,6 +29,7 @@ import 
org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.IdentityHashMap;
 import java.util.List;
 import java.util.Map;
@@ -58,7 +59,6 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
@@ -232,27 +232,19 @@ class AsyncBatchRpcRetryingCaller {
   }
 
   private ClientProtos.MultiRequest buildReq(Map 
actionsByRegion,
-  List cells) throws IOException {
+  List cells, Map rowMutationsIndexMap) 
throws IOException {
 ClientProtos.MultiRequest.Builder multiRequestBuilder = 
ClientProtos.MultiRequest.newBuilder();
 ClientProtos.RegionAction.Builder regionActionBuilder = 
ClientProtos.RegionAction.newBuilder();
 ClientProtos.Action.Builder actionBuilder = 
ClientProtos.Action.newBuilder();
 ClientProtos.MutationProto.Builder mutationBuilder = 
ClientProtos.MutationProto.newBuilder();
 for (Map.Entry entry : actionsByRegion.entrySet()) {
-  // TODO: remove the extra for loop as we will iterate it in 
mutationBuilder.
-  if (!multiRequestBuilder.hasNonceGroup()) {
-for (Action action : entry.getValue().actions) {
-  if (action.hasNonce()) {
-
multiRequestBuilder.setNonceGroup(conn.getNonceGenerator().getNonceGroup());
-break;
-  }
-}
-  }
-  regionActionBuilder.clear();
-  regionActionBuilder.setRegion(
-RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, 
entry.getKey()));
-  regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(entry.getKey(),
-entry.getValue().actions, cells, regionActionBuilder, actionBuilder, 
mutationBuilder);
-  multiRequestBuilder.addRegionAction(regionActionBuilder.build());
+  long nonceGroup = conn.getNonceGenerator().getNonceGroup();
+  // multiRequestBuilder will be populated with region actions.
+  // rowMutationsIndexMap will be non-empty after the call if there is 
RowMutations in the
+  // action list.
+  RequestConverter.buildNoDataRegionActions(entry.getKey(),
+entry.getValue().actions, cells, multiRequestBuilder, 
regionActionBuilder, actionBuilder,
+mutationBuilder, nonceGroup, rowMutationsIndexMap);
 }
 return multiRequestBuilder.build();
   }
@@ -337,8 +329,12 @@ class AsyncBatchRpcRetryingCaller {
   }
   ClientProtos.MultiRequest req;
   List cells = new ArrayList<>();
+  // Map from a created RegionAction to the original index for a 
RowMutations within
+  // the original list of actions. This will be used to process the 
results when there
+  // is RowMutations in t

hbase git commit: HBASE-19021 Restore a few important missing logics for balancer in 2.0

2017-10-24 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 1c1906e03 -> 9716f62f4


HBASE-19021 Restore a few important missing logics for balancer in 2.0

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9716f62f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9716f62f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9716f62f

Branch: refs/heads/master
Commit: 9716f62f43195ef024ac7a4bafb93a4716a7323e
Parents: 1c1906e
Author: Jerry He 
Authored: Tue Oct 24 07:53:17 2017 -0700
Committer: Jerry He 
Committed: Tue Oct 24 07:53:17 2017 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  9 +---
 .../hbase/master/assignment/RegionStates.java   |  8 +++
 .../master/procedure/ServerCrashProcedure.java  |  1 +
 .../hadoop/hbase/TestRegionRebalancing.java | 24 
 4 files changed, 35 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9716f62f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 8f2ae6b..bb36520 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1421,16 +1421,19 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 }
   }
 
+  boolean isByTable = 
getConfiguration().getBoolean("hbase.master.loadbalance.bytable", false);
   Map>> assignmentsByTable =
-this.assignmentManager.getRegionStates().getAssignmentsByTable();
+
this.assignmentManager.getRegionStates().getAssignmentsByTable(!isByTable);
 
   List plans = new ArrayList<>();
 
   //Give the balancer the current cluster state.
   this.balancer.setClusterStatus(getClusterStatus());
-  this.balancer.setClusterLoad(
-  
this.assignmentManager.getRegionStates().getAssignmentsByTable());
+  this.balancer.setClusterLoad(assignmentsByTable);
 
+  for (Map> serverMap : 
assignmentsByTable.values()) {
+
serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
+  }
   for (Entry>> e : 
assignmentsByTable.entrySet()) {
 List partialPlans = 
this.balancer.balanceCluster(e.getKey(), e.getValue());
 if (partialPlans != null) plans.addAll(partialPlans);

http://git-wip-us.apache.org/repos/asf/hbase/blob/9716f62f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
index c13a49d..3b58fe2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
@@ -756,6 +756,14 @@ public class RegionStates {
 
   serverResult.add(node.getRegionInfo());
 }
+// Add online servers with no assignment for the table.
+for (Map> table: result.values()) {
+for (ServerName svr : serverMap.keySet()) {
+  if (!table.containsKey(svr)) {
+table.put(svr, new ArrayList());
+  }
+}
+}
 return result;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9716f62f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index a0ee628..56efaeb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -172,6 +172,7 @@ implements ServerProcedureInterface {
 break;
 
   case SERVER_CRASH_FINISH:
+
services.getAssignmentManager().getRegionStates().removeServer(serverName);
 services.getServerManager().getDeadServers().finish(serverName);
 return Flow.NO_MORE_STATE;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9716f62f/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
--
diff --git 
a/hbase-serve

hbase git commit: HBASE-19021 Restore a few important missing logics for balancer in 2.0

2017-10-24 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 3f9ea98c9 -> a49850e5c


HBASE-19021 Restore a few important missing logics for balancer in 2.0

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a49850e5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a49850e5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a49850e5

Branch: refs/heads/branch-2
Commit: a49850e5c3a648a93ecca848d5d58ca7da9b2d4d
Parents: 3f9ea98
Author: Jerry He 
Authored: Tue Oct 24 07:53:17 2017 -0700
Committer: Jerry He 
Committed: Tue Oct 24 07:58:27 2017 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java |  9 +---
 .../hbase/master/assignment/RegionStates.java   |  8 +++
 .../master/procedure/ServerCrashProcedure.java  |  1 +
 .../hadoop/hbase/TestRegionRebalancing.java | 24 
 4 files changed, 35 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a49850e5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 8f2ae6b..bb36520 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1421,16 +1421,19 @@ public class HMaster extends HRegionServer implements 
MasterServices {
 }
   }
 
+  boolean isByTable = 
getConfiguration().getBoolean("hbase.master.loadbalance.bytable", false);
   Map>> assignmentsByTable =
-this.assignmentManager.getRegionStates().getAssignmentsByTable();
+
this.assignmentManager.getRegionStates().getAssignmentsByTable(!isByTable);
 
   List plans = new ArrayList<>();
 
   //Give the balancer the current cluster state.
   this.balancer.setClusterStatus(getClusterStatus());
-  this.balancer.setClusterLoad(
-  
this.assignmentManager.getRegionStates().getAssignmentsByTable());
+  this.balancer.setClusterLoad(assignmentsByTable);
 
+  for (Map> serverMap : 
assignmentsByTable.values()) {
+
serverMap.keySet().removeAll(this.serverManager.getDrainingServersList());
+  }
   for (Entry>> e : 
assignmentsByTable.entrySet()) {
 List partialPlans = 
this.balancer.balanceCluster(e.getKey(), e.getValue());
 if (partialPlans != null) plans.addAll(partialPlans);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a49850e5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
index c13a49d..3b58fe2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
@@ -756,6 +756,14 @@ public class RegionStates {
 
   serverResult.add(node.getRegionInfo());
 }
+// Add online servers with no assignment for the table.
+for (Map> table: result.values()) {
+for (ServerName svr : serverMap.keySet()) {
+  if (!table.containsKey(svr)) {
+table.put(svr, new ArrayList());
+  }
+}
+}
 return result;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a49850e5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index a0ee628..56efaeb 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -172,6 +172,7 @@ implements ServerProcedureInterface {
 break;
 
   case SERVER_CRASH_FINISH:
+
services.getAssignmentManager().getRegionStates().removeServer(serverName);
 services.getServerManager().getDeadServers().finish(serverName);
 return Flow.NO_MORE_STATE;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a49850e5/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
--
diff --git 
a/hbase-s

hbase git commit: HBASE-18557: Change splitable to mergeable in MergeTableRegionsProcedure

2017-08-11 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b627cfad3 -> b5a4e07c6


HBASE-18557: Change splitable to mergeable in MergeTableRegionsProcedure

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b5a4e07c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b5a4e07c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b5a4e07c

Branch: refs/heads/branch-2
Commit: b5a4e07c6a521b13146daccb7a9d502317fde427
Parents: b627cfa
Author: Yi Liang 
Authored: Thu Aug 10 11:15:59 2017 -0700
Committer: Jerry He 
Committed: Fri Aug 11 13:29:13 2017 -0700

--
 .../hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b5a4e07c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 74d9b75..9aaf297 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -513,7 +513,7 @@ public class MergeTableRegionsProcedure
   throws IOException {
 GetRegionInfoResponse response =
   Util.getRegionInfoResponse(env, rs.getServerName(), rs.getRegion());
-return response.hasSplittable() && response.getSplittable();
+return response.hasMergeable() && response.getMergeable();
   }
 
   /**



hbase git commit: HBASE-18557: Change splitable to mergeable in MergeTableRegionsProcedure

2017-08-11 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master aa8f67a14 -> 95e883967


HBASE-18557: Change splitable to mergeable in MergeTableRegionsProcedure

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/95e88396
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/95e88396
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/95e88396

Branch: refs/heads/master
Commit: 95e883967cbb383b48d8fae548fb55b88c7f0529
Parents: aa8f67a
Author: Yi Liang 
Authored: Thu Aug 10 11:15:59 2017 -0700
Committer: Jerry He 
Committed: Fri Aug 11 22:45:22 2017 -0700

--
 .../hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/95e88396/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index 74d9b75..9aaf297 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -513,7 +513,7 @@ public class MergeTableRegionsProcedure
   throws IOException {
 GetRegionInfoResponse response =
   Util.getRegionInfoResponse(env, rs.getServerName(), rs.getRegion());
-return response.hasSplittable() && response.getSplittable();
+return response.hasMergeable() && response.getMergeable();
   }
 
   /**



hbase git commit: HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of Mutation and Query

2017-08-11 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 95e883967 -> 173dce734


HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of 
Mutation and Query

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/173dce73
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/173dce73
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/173dce73

Branch: refs/heads/master
Commit: 173dce73471da005fb6780a7e7b65b43bad481e2
Parents: 95e8839
Author: Xiang Li 
Authored: Fri Aug 11 00:07:11 2017 +0800
Committer: Jerry He 
Committed: Fri Aug 11 22:49:38 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/client/Append.java  |  2 +-
 .../main/java/org/apache/hadoop/hbase/client/Delete.java  | 10 +-
 .../src/main/java/org/apache/hadoop/hbase/client/Get.java |  2 +-
 .../java/org/apache/hadoop/hbase/client/Increment.java|  2 --
 .../java/org/apache/hadoop/hbase/client/Mutation.java |  1 +
 .../src/main/java/org/apache/hadoop/hbase/client/Put.java |  7 ---
 .../main/java/org/apache/hadoop/hbase/client/Scan.java|  2 +-
 7 files changed, 9 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/173dce73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 02ec770..2bd0860 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -137,10 +137,10 @@ public class Append extends Mutation {
 List list = this.familyMap.get(family);
 if (list == null) {
   list  = new ArrayList<>(1);
+  this.familyMap.put(family, list);
 }
 // find where the new entry should be placed in the List
 list.add(cell);
-this.familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/173dce73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 395c277..bf5241c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -183,9 +183,9 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -219,12 +219,12 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 } else if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -239,10 +239,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
-familyMap.put(family, list);
 return this;
   }
 
@@ -272,10 +272,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
-familyMap.put(family, list);
 return this;
   }
 
@@ -307,10 +307,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, 
KeyValue.Type.Delete);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/173dce73/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Ge

hbase git commit: HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of Mutation and Query

2017-08-11 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b5d4a0aa4 -> 8cebf7f1a


HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of 
Mutation and Query

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8cebf7f1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8cebf7f1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8cebf7f1

Branch: refs/heads/branch-2
Commit: 8cebf7f1a818a419ef32a48a24c3b5df27dd980d
Parents: b5d4a0a
Author: Xiang Li 
Authored: Fri Aug 11 00:07:11 2017 +0800
Committer: Jerry He 
Committed: Fri Aug 11 22:53:37 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/client/Append.java  |  2 +-
 .../main/java/org/apache/hadoop/hbase/client/Delete.java  | 10 +-
 .../src/main/java/org/apache/hadoop/hbase/client/Get.java |  2 +-
 .../java/org/apache/hadoop/hbase/client/Increment.java|  2 --
 .../java/org/apache/hadoop/hbase/client/Mutation.java |  1 +
 .../src/main/java/org/apache/hadoop/hbase/client/Put.java |  7 ---
 .../main/java/org/apache/hadoop/hbase/client/Scan.java|  2 +-
 7 files changed, 9 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8cebf7f1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 02ec770..2bd0860 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -137,10 +137,10 @@ public class Append extends Mutation {
 List list = this.familyMap.get(family);
 if (list == null) {
   list  = new ArrayList<>(1);
+  this.familyMap.put(family, list);
 }
 // find where the new entry should be placed in the List
 list.add(cell);
-this.familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cebf7f1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 395c277..bf5241c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -183,9 +183,9 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -219,12 +219,12 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 } else if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -239,10 +239,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
-familyMap.put(family, list);
 return this;
   }
 
@@ -272,10 +272,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
-familyMap.put(family, list);
 return this;
   }
 
@@ -307,10 +307,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList<>(1);
+  familyMap.put(family, list);
 }
 KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, 
KeyValue.Type.Delete);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cebf7f1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/clien

hbase git commit: HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of Mutation and Query

2017-08-11 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 578e29f96 -> 7bd2795ee


HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of 
Mutation and Query

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7bd2795e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7bd2795e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7bd2795e

Branch: refs/heads/branch-1
Commit: 7bd2795ee927df841a8c0ce3574598e9a6811739
Parents: 578e29f
Author: Xiang Li 
Authored: Fri Aug 11 00:07:11 2017 +0800
Committer: Jerry He 
Committed: Fri Aug 11 23:29:52 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/client/Append.java  |  2 +-
 .../main/java/org/apache/hadoop/hbase/client/Delete.java  | 10 +-
 .../src/main/java/org/apache/hadoop/hbase/client/Get.java |  2 +-
 .../java/org/apache/hadoop/hbase/client/Increment.java|  2 --
 .../java/org/apache/hadoop/hbase/client/Mutation.java |  1 +
 .../src/main/java/org/apache/hadoop/hbase/client/Put.java |  7 ---
 .../main/java/org/apache/hadoop/hbase/client/Scan.java|  2 +-
 7 files changed, 9 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7bd2795e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index ec4ea37..0741a0d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -125,10 +125,10 @@ public class Append extends Mutation {
 List list = this.familyMap.get(family);
 if (list == null) {
   list  = new ArrayList();
+  this.familyMap.put(family, list);
 }
 // find where the new entry should be placed in the List
 list.add(cell);
-this.familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bd2795e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 4e1fe09..8682eae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -173,9 +173,9 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList();
+  familyMap.put(family, list);
 }
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -239,12 +239,12 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList();
+  familyMap.put(family, list);
 } else if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -272,10 +272,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList();
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
-familyMap.put(family, list);
 return this;
   }
 
@@ -331,10 +331,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList();
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
-familyMap.put(family, list);
 return this;
   }
 
@@ -394,10 +394,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList();
+  familyMap.put(family, list);
 }
 KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, 
KeyValue.Type.Delete);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bd2795e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java 
b/hbas

hbase git commit: HBASE-18589 branch-1.4 build compile is broken

2017-08-13 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 aaece0ba5 -> 5ecb1588f


HBASE-18589 branch-1.4 build compile is broken


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5ecb1588
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5ecb1588
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5ecb1588

Branch: refs/heads/branch-1.4
Commit: 5ecb1588f96c7d9fdcd01e53106d3ec3a22aa581
Parents: aaece0b
Author: Jerry He 
Authored: Sun Aug 13 15:06:55 2017 -0700
Committer: Jerry He 
Committed: Sun Aug 13 15:06:55 2017 -0700

--
 .../hadoop/hbase/snapshot/TestRegionSnapshotTask.java   | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5ecb1588/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
index 403b1e6..21853d3 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -109,9 +108,9 @@ public class TestRegionSnapshotTask {
 
 List hRegions = TEST_UTIL.getHBaseCluster().getRegions(tableName);
 
-final HBaseProtos.SnapshotDescription snapshot = 
HBaseProtos.SnapshotDescription.newBuilder()
+final SnapshotProtos.SnapshotDescription snapshot = 
SnapshotProtos.SnapshotDescription.newBuilder()
 .setTable(tableName.getNameAsString())
-.setType(HBaseProtos.SnapshotDescription.Type.FLUSH)
+.setType(SnapshotProtos.SnapshotDescription.Type.FLUSH)
 .setName("test_table_snapshot")
 .setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION)
 .build();
@@ -161,7 +160,7 @@ public class TestRegionSnapshotTask {
 SnapshotReferenceUtil.verifySnapshot(conf, fs, manifest);
   }
 
-  private void addRegionToSnapshot(HBaseProtos.SnapshotDescription snapshot,
+  private void addRegionToSnapshot(SnapshotProtos.SnapshotDescription snapshot,
   HRegion region, SnapshotManifest manifest) throws Exception {
 LOG.info("Adding region to snapshot: " + 
region.getRegionInfo().getRegionNameAsString());
 Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, 
rootDir);
@@ -171,7 +170,7 @@ public class TestRegionSnapshotTask {
   }
 
   private SnapshotManifest.RegionVisitor createRegionVisitorWithDelay(
-  HBaseProtos.SnapshotDescription desc, Path workingDir) {
+  SnapshotProtos.SnapshotDescription desc, Path workingDir) {
 return new SnapshotManifestV2.ManifestBuilder(conf, fs, workingDir) {
   @Override
   public void storeFile(final 
SnapshotProtos.SnapshotRegionManifest.Builder region,



hbase git commit: HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of Mutation and Query

2017-08-13 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 5ecb1588f -> 2523e716f


HBASE-18555: Remove redundant familyMap.put() from addxxx() of sub-classes of 
Mutation and Query

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2523e716
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2523e716
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2523e716

Branch: refs/heads/branch-1.4
Commit: 2523e716f243057360e043b1137f0604c935517d
Parents: 5ecb158
Author: Xiang Li 
Authored: Fri Aug 11 00:07:11 2017 +0800
Committer: Jerry He 
Committed: Sun Aug 13 18:15:39 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/client/Append.java  |  2 +-
 .../main/java/org/apache/hadoop/hbase/client/Delete.java  | 10 +-
 .../src/main/java/org/apache/hadoop/hbase/client/Get.java |  2 +-
 .../java/org/apache/hadoop/hbase/client/Increment.java|  2 --
 .../java/org/apache/hadoop/hbase/client/Mutation.java |  1 +
 .../src/main/java/org/apache/hadoop/hbase/client/Put.java |  7 ---
 .../main/java/org/apache/hadoop/hbase/client/Scan.java|  2 +-
 7 files changed, 9 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2523e716/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index ec4ea37..0741a0d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -125,10 +125,10 @@ public class Append extends Mutation {
 List list = this.familyMap.get(family);
 if (list == null) {
   list  = new ArrayList();
+  this.familyMap.put(family, list);
 }
 // find where the new entry should be placed in the List
 list.add(cell);
-this.familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2523e716/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 4e1fe09..8682eae 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -173,9 +173,9 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList();
+  familyMap.put(family, list);
 }
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -239,12 +239,12 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList();
+  familyMap.put(family, list);
 } else if(!list.isEmpty()) {
   list.clear();
 }
 KeyValue kv = new KeyValue(row, family, null, timestamp, 
KeyValue.Type.DeleteFamily);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 
@@ -272,10 +272,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList();
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(row, family, null, timestamp,
   KeyValue.Type.DeleteFamilyVersion));
-familyMap.put(family, list);
 return this;
   }
 
@@ -331,10 +331,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if (list == null) {
   list = new ArrayList();
+  familyMap.put(family, list);
 }
 list.add(new KeyValue(this.row, family, qualifier, timestamp,
 KeyValue.Type.DeleteColumn));
-familyMap.put(family, list);
 return this;
   }
 
@@ -394,10 +394,10 @@ public class Delete extends Mutation implements 
Comparable {
 List list = familyMap.get(family);
 if(list == null) {
   list = new ArrayList();
+  familyMap.put(family, list);
 }
 KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, 
KeyValue.Type.Delete);
 list.add(kv);
-familyMap.put(family, list);
 return this;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2523e716/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java 
b/

hbase git commit: HBASE-18522 Add RowMutations support to Batch

2017-08-14 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 043211760 -> c6f57e0f3


HBASE-18522 Add RowMutations support to Batch


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c6f57e0f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c6f57e0f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c6f57e0f

Branch: refs/heads/branch-1.4
Commit: c6f57e0f382e9dcef48f05da087d12eb0e47e9ad
Parents: 0432117
Author: Jerry He 
Authored: Sun Aug 13 18:23:49 2017 -0700
Committer: Jerry He 
Committed: Mon Aug 14 09:18:41 2017 -0700

--
 .../hbase/client/MultiServerCallable.java   | 66 +++-
 .../org/apache/hadoop/hbase/client/Table.java   |  6 +-
 .../hadoop/hbase/protobuf/RequestConverter.java |  6 +-
 .../hbase/protobuf/ResponseConverter.java   | 35 ++-
 .../hbase/client/TestFromClientSide3.java   | 46 ++
 .../hadoop/hbase/client/TestMultiParallel.java  | 49 ---
 6 files changed, 178 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c6f57e0f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index 42c63eb..b2ea941 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -97,12 +98,21 @@ class MultiServerCallable extends 
PayloadCarryingServerCallable cells = null;
-// The multi object is a list of Actions by region.  Iterate by region.
+
+// Pre-size. Presume at least a KV per Action. There are likely more.
+List cells =
+(this.cellBlock ? new ArrayList(countOfActions) : null);
+
 long nonceGroup = multiAction.getNonceGroup();
 if (nonceGroup != HConstants.NO_NONCE) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
+// Index to track RegionAction within the MultiRequest
+int regionActionIndex = -1;
+// Map from a created RegionAction for a RowMutations to the original 
index within
+// its original list of actions
+Map rowMutationsIndexMap = new HashMap<>();
+// The multi object is a list of Actions by region.  Iterate by region.
 for (Map.Entry>> e: 
this.multiAction.actions.entrySet()) {
   final byte [] regionName = e.getKey();
   final List> actions = e.getValue();
@@ -110,19 +120,46 @@ class MultiServerCallable extends 
PayloadCarryingServerCallable action : actions) {
+Row row = action.getAction();
+// Row Mutations are a set of Puts and/or Deletes all to be applied 
atomically
+// on the one row. We do separate RegionAction for each RowMutations.
+// We maintain a map to keep track of this RegionAction and the 
original Action index.
+if (row instanceof RowMutations) {
+  RowMutations rms = (RowMutations)row;
+  if (this.cellBlock) {
+// Build a multi request absent its Cell payload. Send data in 
cellblocks.
+regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(regionName, rms, cells,
+  regionActionBuilder, actionBuilder, mutationBuilder);
+  } else {
+regionActionBuilder = 
RequestConverter.buildRegionAction(regionName, rms);
+  }
+  regionActionBuilder.setAtomic(true);
+  multiRequestBuilder.addRegionAction(regionActionBuilder.build());
+  regionActionIndex++;
+  rowMutationsIndexMap.put(regionActionIndex, 
action.getOriginalIndex());
+  rowMutations++;
+
+  regionActionBuilder.clear();
+  regionActionBuilder.setRegion(RequestConverter.buildRegionSpecifier(
+HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME, 
regionName) );
+}
+  }
 
-  if (this.cellBlock) {
-// Presize.  Presume at least a KV per Action.  There are likely more.
-if (cells == null) cells = new 
ArrayList(countOfActions);
-// Send data in cellblocks. The call to buildNoDataMultiRequest will 
skip RowMutations.
-// They have already been handled above. Guess at count of cells
-regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(regionName, actions, cells,
-  regionActionBuilder, actionBuilder, mutationBuilder);
-  } else {
-regionActionBuilder = RequestConverter.

hbase git commit: HBASE-18522 Add RowMutations support to Batch

2017-08-14 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 7bd2795ee -> 9078a034c


HBASE-18522 Add RowMutations support to Batch


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9078a034
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9078a034
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9078a034

Branch: refs/heads/branch-1
Commit: 9078a034c410d53800e656d6a19f810c30fc102f
Parents: 7bd2795
Author: Jerry He 
Authored: Sun Aug 13 18:23:49 2017 -0700
Committer: Jerry He 
Committed: Mon Aug 14 09:21:53 2017 -0700

--
 .../hbase/client/MultiServerCallable.java   | 66 +++-
 .../org/apache/hadoop/hbase/client/Table.java   |  6 +-
 .../hadoop/hbase/protobuf/RequestConverter.java |  6 +-
 .../hbase/protobuf/ResponseConverter.java   | 35 ++-
 .../hbase/client/TestFromClientSide3.java   | 46 ++
 .../hadoop/hbase/client/TestMultiParallel.java  | 49 ---
 6 files changed, 178 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9078a034/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index 42c63eb..b2ea941 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -97,12 +98,21 @@ class MultiServerCallable extends 
PayloadCarryingServerCallable cells = null;
-// The multi object is a list of Actions by region.  Iterate by region.
+
+// Pre-size. Presume at least a KV per Action. There are likely more.
+List cells =
+(this.cellBlock ? new ArrayList(countOfActions) : null);
+
 long nonceGroup = multiAction.getNonceGroup();
 if (nonceGroup != HConstants.NO_NONCE) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
+// Index to track RegionAction within the MultiRequest
+int regionActionIndex = -1;
+// Map from a created RegionAction for a RowMutations to the original 
index within
+// its original list of actions
+Map rowMutationsIndexMap = new HashMap<>();
+// The multi object is a list of Actions by region.  Iterate by region.
 for (Map.Entry>> e: 
this.multiAction.actions.entrySet()) {
   final byte [] regionName = e.getKey();
   final List> actions = e.getValue();
@@ -110,19 +120,46 @@ class MultiServerCallable extends 
PayloadCarryingServerCallable action : actions) {
+Row row = action.getAction();
+// Row Mutations are a set of Puts and/or Deletes all to be applied 
atomically
+// on the one row. We do separate RegionAction for each RowMutations.
+// We maintain a map to keep track of this RegionAction and the 
original Action index.
+if (row instanceof RowMutations) {
+  RowMutations rms = (RowMutations)row;
+  if (this.cellBlock) {
+// Build a multi request absent its Cell payload. Send data in 
cellblocks.
+regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(regionName, rms, cells,
+  regionActionBuilder, actionBuilder, mutationBuilder);
+  } else {
+regionActionBuilder = 
RequestConverter.buildRegionAction(regionName, rms);
+  }
+  regionActionBuilder.setAtomic(true);
+  multiRequestBuilder.addRegionAction(regionActionBuilder.build());
+  regionActionIndex++;
+  rowMutationsIndexMap.put(regionActionIndex, 
action.getOriginalIndex());
+  rowMutations++;
+
+  regionActionBuilder.clear();
+  regionActionBuilder.setRegion(RequestConverter.buildRegionSpecifier(
+HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME, 
regionName) );
+}
+  }
 
-  if (this.cellBlock) {
-// Presize.  Presume at least a KV per Action.  There are likely more.
-if (cells == null) cells = new 
ArrayList(countOfActions);
-// Send data in cellblocks. The call to buildNoDataMultiRequest will 
skip RowMutations.
-// They have already been handled above. Guess at count of cells
-regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(regionName, actions, cells,
-  regionActionBuilder, actionBuilder, mutationBuilder);
-  } else {
-regionActionBuilder = RequestConverter.buil

hbase git commit: HBASE-18522 Add RowMutations support to Batch

2017-08-14 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master bd4007309 -> 096dac2e8


HBASE-18522 Add RowMutations support to Batch


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/096dac2e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/096dac2e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/096dac2e

Branch: refs/heads/master
Commit: 096dac2e83c675f212bad4f91888d8440ba152ca
Parents: bd40073
Author: Jerry He 
Authored: Mon Aug 14 10:39:46 2017 -0700
Committer: Jerry He 
Committed: Mon Aug 14 10:39:46 2017 -0700

--
 .../hbase/client/MultiServerCallable.java   | 64 +++-
 .../org/apache/hadoop/hbase/client/Table.java   |  4 +-
 .../hbase/shaded/protobuf/RequestConverter.java |  6 +-
 .../shaded/protobuf/ResponseConverter.java  | 37 ++-
 .../hbase/client/TestFromClientSide3.java   | 46 ++
 .../hadoop/hbase/client/TestMultiParallel.java  | 34 ++-
 6 files changed, 168 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/096dac2e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index 33c9a0b..7f6052e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -93,30 +94,64 @@ class MultiServerCallable extends 
CancellableRegionServerCallable
 RegionAction.Builder regionActionBuilder = RegionAction.newBuilder();
 ClientProtos.Action.Builder actionBuilder = 
ClientProtos.Action.newBuilder();
 MutationProto.Builder mutationBuilder = MutationProto.newBuilder();
-List cells = null;
-// The multi object is a list of Actions by region.  Iterate by region.
+
+// Pre-size. Presume at least a KV per Action. There are likely more.
+List cells =
+(this.cellBlock ? new ArrayList(countOfActions) : null);
+
 long nonceGroup = multiAction.getNonceGroup();
 if (nonceGroup != HConstants.NO_NONCE) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
+// Index to track RegionAction within the MultiRequest
+int regionActionIndex = -1;
+// Map from a created RegionAction to the original index for a 
RowMutations within
+// its original list of actions
+Map rowMutationsIndexMap = new HashMap<>();
+// The multi object is a list of Actions by region. Iterate by region.
 for (Map.Entry> e: 
this.multiAction.actions.entrySet()) {
   final byte [] regionName = e.getKey();
   final List actions = e.getValue();
   regionActionBuilder.clear();
   regionActionBuilder.setRegion(RequestConverter.buildRegionSpecifier(
   HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME, 
regionName));
-  if (this.cellBlock) {
-// Pre-size. Presume at least a KV per Action.  There are likely more.
-if (cells == null) cells = new ArrayList<>(countOfActions);
-// Send data in cellblocks. The call to buildNoDataMultiRequest will 
skip RowMutations.
-// They have already been handled above. Guess at count of cells
-regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(regionName, actions, cells,
-  regionActionBuilder, actionBuilder, mutationBuilder);
-  } else {
-regionActionBuilder = RequestConverter.buildRegionAction(regionName, 
actions,
-  regionActionBuilder, actionBuilder, mutationBuilder);
+
+  int rowMutations = 0;
+  for (Action action : actions) {
+Row row = action.getAction();
+// Row Mutations are a set of Puts and/or Deletes all to be applied 
atomically
+// on the one row. We do separate RegionAction for each RowMutations.
+// We maintain a map to keep track of this RegionAction and the 
original Action index.
+if (row instanceof RowMutations) {
+  RowMutations rms = (RowMutations)row;
+  if (this.cellBlock) {
+// Build a multi request absent its Cell payload. Send data in 
cellblocks.
+regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(regionName, rms, cells,
+  regionActionBuilder, actionBuilder, mutationBuilder);
+  } else {
+regionActionBuilder = 
RequestConverter.buildRegionAction(regionName, rms);
+  }
+

hbase git commit: HBASE-18522 Add RowMutations support to Batch

2017-08-14 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-2 add997451 -> cf050de91


HBASE-18522 Add RowMutations support to Batch


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cf050de9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cf050de9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cf050de9

Branch: refs/heads/branch-2
Commit: cf050de9172a36b767eb7e4700787b9d6a32a1b9
Parents: add9974
Author: Jerry He 
Authored: Mon Aug 14 09:28:49 2017 -0700
Committer: Jerry He 
Committed: Mon Aug 14 10:43:10 2017 -0700

--
 .../hbase/client/MultiServerCallable.java   | 64 +++-
 .../org/apache/hadoop/hbase/client/Table.java   |  4 +-
 .../hbase/shaded/protobuf/RequestConverter.java |  6 +-
 .../shaded/protobuf/ResponseConverter.java  | 37 ++-
 .../hbase/client/TestFromClientSide3.java   | 46 ++
 .../hadoop/hbase/client/TestMultiParallel.java  | 34 ++-
 6 files changed, 168 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cf050de9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index 33c9a0b..7f6052e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -93,30 +94,64 @@ class MultiServerCallable extends 
CancellableRegionServerCallable
 RegionAction.Builder regionActionBuilder = RegionAction.newBuilder();
 ClientProtos.Action.Builder actionBuilder = 
ClientProtos.Action.newBuilder();
 MutationProto.Builder mutationBuilder = MutationProto.newBuilder();
-List cells = null;
-// The multi object is a list of Actions by region.  Iterate by region.
+
+// Pre-size. Presume at least a KV per Action. There are likely more.
+List cells =
+(this.cellBlock ? new ArrayList(countOfActions) : null);
+
 long nonceGroup = multiAction.getNonceGroup();
 if (nonceGroup != HConstants.NO_NONCE) {
   multiRequestBuilder.setNonceGroup(nonceGroup);
 }
+// Index to track RegionAction within the MultiRequest
+int regionActionIndex = -1;
+// Map from a created RegionAction to the original index for a 
RowMutations within
+// its original list of actions
+Map rowMutationsIndexMap = new HashMap<>();
+// The multi object is a list of Actions by region. Iterate by region.
 for (Map.Entry> e: 
this.multiAction.actions.entrySet()) {
   final byte [] regionName = e.getKey();
   final List actions = e.getValue();
   regionActionBuilder.clear();
   regionActionBuilder.setRegion(RequestConverter.buildRegionSpecifier(
   HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME, 
regionName));
-  if (this.cellBlock) {
-// Pre-size. Presume at least a KV per Action.  There are likely more.
-if (cells == null) cells = new ArrayList<>(countOfActions);
-// Send data in cellblocks. The call to buildNoDataMultiRequest will 
skip RowMutations.
-// They have already been handled above. Guess at count of cells
-regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(regionName, actions, cells,
-  regionActionBuilder, actionBuilder, mutationBuilder);
-  } else {
-regionActionBuilder = RequestConverter.buildRegionAction(regionName, 
actions,
-  regionActionBuilder, actionBuilder, mutationBuilder);
+
+  int rowMutations = 0;
+  for (Action action : actions) {
+Row row = action.getAction();
+// Row Mutations are a set of Puts and/or Deletes all to be applied 
atomically
+// on the one row. We do separate RegionAction for each RowMutations.
+// We maintain a map to keep track of this RegionAction and the 
original Action index.
+if (row instanceof RowMutations) {
+  RowMutations rms = (RowMutations)row;
+  if (this.cellBlock) {
+// Build a multi request absent its Cell payload. Send data in 
cellblocks.
+regionActionBuilder = 
RequestConverter.buildNoDataRegionAction(regionName, rms, cells,
+  regionActionBuilder, actionBuilder, mutationBuilder);
+  } else {
+regionActionBuilder = 
RequestConverter.buildRegionAction(regionName, rms);
+  }
+

hbase git commit: HBASE-17574 Clean up how to run tests under hbase-spark module (Yi Liang)

2017-02-07 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master d0498d979 -> 8088aa373


HBASE-17574 Clean up how to run tests under hbase-spark module (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8088aa37
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8088aa37
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8088aa37

Branch: refs/heads/master
Commit: 8088aa3733539a09cb258f98cb12c1d96ea2463a
Parents: d0498d9
Author: Jerry He 
Authored: Tue Feb 7 12:31:38 2017 -0800
Committer: Jerry He 
Committed: Tue Feb 7 12:36:19 2017 -0800

--
 hbase-spark/README.txt | 13 +
 hbase-spark/pom.xml| 13 +
 2 files changed, 14 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8088aa37/hbase-spark/README.txt
--
diff --git a/hbase-spark/README.txt b/hbase-spark/README.txt
index fe2c09d..7c658f9 100644
--- a/hbase-spark/README.txt
+++ b/hbase-spark/README.txt
@@ -17,3 +17,16 @@ or
 After you've done the above, check it and then check in changes (or post a 
patch
 on a JIRA with your definition file changes and the generated files). Be 
careful
 to notice new files and files removed and do appropriate git rm/adds.
+
+Running Tests under hbase-spark module
+Tests are run via ScalaTest Maven Plugin and Surefire Maven Plugin
+The following are examples to run the tests:
+
+Run tests under root dir or hbase-spark dir
+ $ mvn test  //run all small and medium java tests, and all 
scala tests
+ $ mvn test -PskipSparkTests //skip all scale and java test in hbase-spark
+ $ mvn test -P runAllTests   //run all tests, including scala and all java 
test including the large test
+
+Run specified test case
+  $ mvn test -Dtest=TestJavaHBaseContext -DwildcardSuites=None 
   //java unit test
+  $ mvn test -Dtest=None 
-DwildcardSuites=org.apache.hadoop.hbase.spark.BulkLoadSuite //scala unit test

http://git-wip-us.apache.org/repos/asf/hbase/blob/8088aa37/hbase-spark/pom.xml
--
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
index 8417d2f..a7997f1 100644
--- a/hbase-spark/pom.xml
+++ b/hbase-spark/pom.xml
@@ -40,7 +40,6 @@
 1.6.0
 2.10.4
 2.10
-true
 ${project.basedir}/..
 1.7.6
 
@@ -611,17 +610,6 @@
 test
 
 
-true
-
-
-
-integration-test
-integration-test
-
-test
-
-
-Integration-Test
 
 -Xmx1536m -XX:ReservedCodeCacheSize=512m
 
@@ -696,6 +684,7 @@
 
 true
 true
+true
 
 
 



hbase git commit: Define public API for spark integration module

2017-03-04 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 404a2883f -> 58b6d9759


Define public API for spark integration module


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/58b6d975
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/58b6d975
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/58b6d975

Branch: refs/heads/master
Commit: 58b6d9759e97f76221ca57a0e399c056fca8ba01
Parents: 404a288
Author: Jerry He 
Authored: Sat Mar 4 12:53:21 2017 -0800
Committer: Jerry He 
Committed: Sat Mar 4 12:53:21 2017 -0800

--
 .../apache/hadoop/hbase/HBaseInterfaceAudience.java   |  2 ++
 .../hadoop/hbase/spark/SparkSQLPushDownFilter.java|  2 ++
 .../hadoop/hbase/spark/BulkLoadPartitioner.scala  |  4 
 .../hadoop/hbase/spark/ByteArrayComparable.scala  |  4 
 .../apache/hadoop/hbase/spark/ByteArrayWrapper.scala  |  4 
 .../spark/ColumnFamilyQualifierMapKeyWrapper.scala|  4 
 .../org/apache/hadoop/hbase/spark/DefaultSource.scala | 14 --
 .../hadoop/hbase/spark/DynamicLogicExpression.scala   | 13 +
 .../hadoop/hbase/spark/FamiliesQualifiersValues.scala |  5 +
 .../hadoop/hbase/spark/FamilyHFileWriteOptions.scala  |  5 +
 .../org/apache/hadoop/hbase/spark/HBaseContext.scala  |  4 
 .../hadoop/hbase/spark/HBaseDStreamFunctions.scala|  4 
 .../apache/hadoop/hbase/spark/HBaseRDDFunctions.scala |  4 
 .../apache/hadoop/hbase/spark/JavaHBaseContext.scala  |  4 
 .../hadoop/hbase/spark/KeyFamilyQualifier.scala   |  4 
 .../org/apache/hadoop/hbase/spark/NewHBaseRDD.scala   |  4 
 .../apache/hadoop/hbase/spark/datasources/Bound.scala |  8 
 .../hbase/spark/datasources/HBaseResources.scala  |  8 
 .../hbase/spark/datasources/HBaseSparkConf.scala  |  5 +
 .../hbase/spark/datasources/HBaseTableScanRDD.scala   |  2 ++
 .../hbase/spark/datasources/JavaBytesEncoder.scala| 11 +++
 .../hadoop/hbase/spark/datasources/NaiveEncoder.scala |  2 ++
 .../hbase/spark/datasources/SchemaConverters.scala|  8 ++--
 .../hadoop/hbase/spark/datasources/SerDes.scala   |  3 ++-
 .../spark/datasources/SerializableConfiguration.scala |  2 ++
 .../sql/datasources/hbase/DataTypeParserWrapper.scala |  1 +
 .../sql/datasources/hbase/HBaseTableCatalog.scala | 11 +++
 .../apache/spark/sql/datasources/hbase/Utils.scala|  3 +++
 28 files changed, 140 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/58b6d975/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
index 2e58913..cb42e48 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
@@ -35,6 +35,8 @@ public final class HBaseInterfaceAudience {
   public static final String COPROC = "Coprocesssor";
   public static final String REPLICATION = "Replication";
   public static final String PHOENIX = "Phoenix";
+  public static final String SPARK = "Spark";
+
   /**
* Denotes class names that appear in user facing configuration files.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/58b6d975/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
--
diff --git 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
index 3e90fe1..398e6a2 100644
--- 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
+++ 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.spark;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.spark.datasources.BytesEncoder;
@@ -43,6 +44,7 @@ import com.google.protobuf.ByteString;
  * by SparkSQL so that we have make the filters at the region server level
  * and avoid sending the data back to the client to be filtered.
  */
+@InterfaceAudience.Private
 public class SparkSQLPushDownFilter extends FilterBase{
   protected static final Log log = 
LogFactory.getLog(SparkSQ

hbase git commit: Revert "Define public API for spark integration module" for missing JIRA number.

2017-03-04 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 58b6d9759 -> a95570cfa


Revert "Define public API for spark integration module" for missing JIRA number.

This reverts commit 58b6d9759e97f76221ca57a0e399c056fca8ba01.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a95570cf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a95570cf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a95570cf

Branch: refs/heads/master
Commit: a95570cfa08927d843f488fa798c08b1c4e47057
Parents: 58b6d97
Author: Jerry He 
Authored: Sat Mar 4 14:08:38 2017 -0800
Committer: Jerry He 
Committed: Sat Mar 4 14:08:38 2017 -0800

--
 .../apache/hadoop/hbase/HBaseInterfaceAudience.java   |  2 --
 .../hadoop/hbase/spark/SparkSQLPushDownFilter.java|  2 --
 .../hadoop/hbase/spark/BulkLoadPartitioner.scala  |  4 
 .../hadoop/hbase/spark/ByteArrayComparable.scala  |  4 
 .../apache/hadoop/hbase/spark/ByteArrayWrapper.scala  |  4 
 .../spark/ColumnFamilyQualifierMapKeyWrapper.scala|  4 
 .../org/apache/hadoop/hbase/spark/DefaultSource.scala | 14 ++
 .../hadoop/hbase/spark/DynamicLogicExpression.scala   | 13 -
 .../hadoop/hbase/spark/FamiliesQualifiersValues.scala |  5 -
 .../hadoop/hbase/spark/FamilyHFileWriteOptions.scala  |  5 -
 .../org/apache/hadoop/hbase/spark/HBaseContext.scala  |  4 
 .../hadoop/hbase/spark/HBaseDStreamFunctions.scala|  4 
 .../apache/hadoop/hbase/spark/HBaseRDDFunctions.scala |  4 
 .../apache/hadoop/hbase/spark/JavaHBaseContext.scala  |  4 
 .../hadoop/hbase/spark/KeyFamilyQualifier.scala   |  4 
 .../org/apache/hadoop/hbase/spark/NewHBaseRDD.scala   |  4 
 .../apache/hadoop/hbase/spark/datasources/Bound.scala |  8 
 .../hbase/spark/datasources/HBaseResources.scala  |  8 
 .../hbase/spark/datasources/HBaseSparkConf.scala  |  5 -
 .../hbase/spark/datasources/HBaseTableScanRDD.scala   |  2 --
 .../hbase/spark/datasources/JavaBytesEncoder.scala| 11 ---
 .../hadoop/hbase/spark/datasources/NaiveEncoder.scala |  2 --
 .../hbase/spark/datasources/SchemaConverters.scala|  8 ++--
 .../hadoop/hbase/spark/datasources/SerDes.scala   |  3 +--
 .../spark/datasources/SerializableConfiguration.scala |  2 --
 .../sql/datasources/hbase/DataTypeParserWrapper.scala |  1 -
 .../sql/datasources/hbase/HBaseTableCatalog.scala | 11 ---
 .../apache/spark/sql/datasources/hbase/Utils.scala|  3 ---
 28 files changed, 5 insertions(+), 140 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a95570cf/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
index cb42e48..2e58913 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
@@ -35,8 +35,6 @@ public final class HBaseInterfaceAudience {
   public static final String COPROC = "Coprocesssor";
   public static final String REPLICATION = "Replication";
   public static final String PHOENIX = "Phoenix";
-  public static final String SPARK = "Spark";
-
   /**
* Denotes class names that appear in user facing configuration files.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/a95570cf/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
--
diff --git 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
index 398e6a2..3e90fe1 100644
--- 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
+++ 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.spark;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.spark.datasources.BytesEncoder;
@@ -44,7 +43,6 @@ import com.google.protobuf.ByteString;
  * by SparkSQL so that we have make the filters at the region server level
  * and avoid sending the data back to the client to be filtered.
  */
-@InterfaceAudience.Private
 public class SparkSQLPushDownFilter extends FilterBase{

hbase git commit: HBASE-14375 Define public API for spark integration module

2017-03-04 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master a95570cfa -> 6bb593822


HBASE-14375 Define public API for spark integration module


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6bb59382
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6bb59382
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6bb59382

Branch: refs/heads/master
Commit: 6bb59382263ad13d9ec2f51087a09266edef170c
Parents: a95570c
Author: Jerry He 
Authored: Sat Mar 4 12:53:21 2017 -0800
Committer: Jerry He 
Committed: Sat Mar 4 14:10:34 2017 -0800

--
 .../apache/hadoop/hbase/HBaseInterfaceAudience.java   |  2 ++
 .../hadoop/hbase/spark/SparkSQLPushDownFilter.java|  2 ++
 .../hadoop/hbase/spark/BulkLoadPartitioner.scala  |  4 
 .../hadoop/hbase/spark/ByteArrayComparable.scala  |  4 
 .../apache/hadoop/hbase/spark/ByteArrayWrapper.scala  |  4 
 .../spark/ColumnFamilyQualifierMapKeyWrapper.scala|  4 
 .../org/apache/hadoop/hbase/spark/DefaultSource.scala | 14 --
 .../hadoop/hbase/spark/DynamicLogicExpression.scala   | 13 +
 .../hadoop/hbase/spark/FamiliesQualifiersValues.scala |  5 +
 .../hadoop/hbase/spark/FamilyHFileWriteOptions.scala  |  5 +
 .../org/apache/hadoop/hbase/spark/HBaseContext.scala  |  4 
 .../hadoop/hbase/spark/HBaseDStreamFunctions.scala|  4 
 .../apache/hadoop/hbase/spark/HBaseRDDFunctions.scala |  4 
 .../apache/hadoop/hbase/spark/JavaHBaseContext.scala  |  4 
 .../hadoop/hbase/spark/KeyFamilyQualifier.scala   |  4 
 .../org/apache/hadoop/hbase/spark/NewHBaseRDD.scala   |  4 
 .../apache/hadoop/hbase/spark/datasources/Bound.scala |  8 
 .../hbase/spark/datasources/HBaseResources.scala  |  8 
 .../hbase/spark/datasources/HBaseSparkConf.scala  |  5 +
 .../hbase/spark/datasources/HBaseTableScanRDD.scala   |  2 ++
 .../hbase/spark/datasources/JavaBytesEncoder.scala| 11 +++
 .../hadoop/hbase/spark/datasources/NaiveEncoder.scala |  2 ++
 .../hbase/spark/datasources/SchemaConverters.scala|  8 ++--
 .../hadoop/hbase/spark/datasources/SerDes.scala   |  3 ++-
 .../spark/datasources/SerializableConfiguration.scala |  2 ++
 .../sql/datasources/hbase/DataTypeParserWrapper.scala |  1 +
 .../sql/datasources/hbase/HBaseTableCatalog.scala | 11 +++
 .../apache/spark/sql/datasources/hbase/Utils.scala|  3 +++
 28 files changed, 140 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6bb59382/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
index 2e58913..cb42e48 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseInterfaceAudience.java
@@ -35,6 +35,8 @@ public final class HBaseInterfaceAudience {
   public static final String COPROC = "Coprocesssor";
   public static final String REPLICATION = "Replication";
   public static final String PHOENIX = "Phoenix";
+  public static final String SPARK = "Spark";
+
   /**
* Denotes class names that appear in user facing configuration files.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/6bb59382/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
--
diff --git 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
index 3e90fe1..398e6a2 100644
--- 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
+++ 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.spark;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.spark.datasources.BytesEncoder;
@@ -43,6 +44,7 @@ import com.google.protobuf.ByteString;
  * by SparkSQL so that we have make the filters at the region server level
  * and avoid sending the data back to the client to be filtered.
  */
+@InterfaceAudience.Private
 public class SparkSQLPushDownFilter extends FilterBase{
   protected static final Log log = 
LogFactory.ge

hbase git commit: HBASE-15597 Clean up configuration keys used in hbase-spark module (Yi Liang)

2017-03-13 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master fee67bcf1 -> 35d7a0cd0


HBASE-15597 Clean up configuration keys used in hbase-spark module (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/35d7a0cd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/35d7a0cd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/35d7a0cd

Branch: refs/heads/master
Commit: 35d7a0cd0798cabe7df5766fcc993512eca6c92e
Parents: fee67bc
Author: Jerry He 
Authored: Mon Mar 13 12:02:07 2017 -0700
Committer: Jerry He 
Committed: Mon Mar 13 12:02:07 2017 -0700

--
 .../hadoop/hbase/spark/DefaultSource.scala  | 28 -
 .../hbase/spark/HBaseConnectionCache.scala  |  2 +-
 .../spark/datasources/HBaseSparkConf.scala  | 62 
 .../hadoop/hbase/spark/DefaultSourceSuite.scala | 16 ++---
 .../spark/DynamicLogicExpressionSuite.scala |  2 +-
 .../hadoop/hbase/spark/HBaseTestSource.scala| 13 ++--
 .../hbase/spark/PartitionFilterSuite.scala  |  6 +-
 7 files changed, 69 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/35d7a0cd/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
--
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
index a8b2ab8..b2b646a 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/DefaultSource.scala
@@ -97,36 +97,36 @@ case class HBaseRelation (
   )(@transient val sqlContext: SQLContext)
   extends BaseRelation with PrunedFilteredScan  with InsertableRelation  with 
Logging {
   val timestamp = parameters.get(HBaseSparkConf.TIMESTAMP).map(_.toLong)
-  val minTimestamp = parameters.get(HBaseSparkConf.MIN_TIMESTAMP).map(_.toLong)
-  val maxTimestamp = parameters.get(HBaseSparkConf.MAX_TIMESTAMP).map(_.toLong)
+  val minTimestamp = 
parameters.get(HBaseSparkConf.TIMERANGE_START).map(_.toLong)
+  val maxTimestamp = parameters.get(HBaseSparkConf.TIMERANGE_END).map(_.toLong)
   val maxVersions = parameters.get(HBaseSparkConf.MAX_VERSIONS).map(_.toInt)
-  val encoderClsName = 
parameters.get(HBaseSparkConf.ENCODER).getOrElse(HBaseSparkConf.defaultEncoder)
+  val encoderClsName = 
parameters.get(HBaseSparkConf.QUERY_ENCODER).getOrElse(HBaseSparkConf.DEFAULT_QUERY_ENCODER)
 
   @transient val encoder = JavaBytesEncoder.create(encoderClsName)
 
   val catalog = HBaseTableCatalog(parameters)
   def tableName = catalog.name
-  val configResources = 
parameters.getOrElse(HBaseSparkConf.HBASE_CONFIG_RESOURCES_LOCATIONS, "")
-  val useHBaseContext =  
parameters.get(HBaseSparkConf.USE_HBASE_CONTEXT).map(_.toBoolean).getOrElse(true)
-  val usePushDownColumnFilter = 
parameters.get(HBaseSparkConf.PUSH_DOWN_COLUMN_FILTER)
-.map(_.toBoolean).getOrElse(true)
+  val configResources = 
parameters.getOrElse(HBaseSparkConf.HBASE_CONFIG_LOCATION, "")
+  val useHBaseContext =  
parameters.get(HBaseSparkConf.USE_HBASECONTEXT).map(_.toBoolean).getOrElse(HBaseSparkConf.DEFAULT_USE_HBASECONTEXT)
+  val usePushDownColumnFilter = 
parameters.get(HBaseSparkConf.PUSHDOWN_COLUMN_FILTER)
+.map(_.toBoolean).getOrElse(HBaseSparkConf.DEFAULT_PUSHDOWN_COLUMN_FILTER)
 
   // The user supplied per table parameter will overwrite global ones in 
SparkConf
-  val blockCacheEnable = 
parameters.get(HBaseSparkConf.BLOCK_CACHE_ENABLE).map(_.toBoolean)
+  val blockCacheEnable = 
parameters.get(HBaseSparkConf.QUERY_CACHEBLOCKS).map(_.toBoolean)
 .getOrElse(
   sqlContext.sparkContext.getConf.getBoolean(
-HBaseSparkConf.BLOCK_CACHE_ENABLE, 
HBaseSparkConf.defaultBlockCacheEnable))
-  val cacheSize = parameters.get(HBaseSparkConf.CACHE_SIZE).map(_.toInt)
+HBaseSparkConf.QUERY_CACHEBLOCKS, 
HBaseSparkConf.DEFAULT_QUERY_CACHEBLOCKS))
+  val cacheSize = parameters.get(HBaseSparkConf.QUERY_CACHEDROWS).map(_.toInt)
 .getOrElse(
   sqlContext.sparkContext.getConf.getInt(
-  HBaseSparkConf.CACHE_SIZE, HBaseSparkConf.defaultCachingSize))
-  val batchNum = parameters.get(HBaseSparkConf.BATCH_NUM).map(_.toInt)
+  HBaseSparkConf.QUERY_CACHEDROWS, -1))
+  val batchNum = parameters.get(HBaseSparkConf.QUERY_BATCHSIZE).map(_.toInt)
 .getOrElse(sqlContext.sparkContext.getConf.getInt(
-HBaseSparkConf.BATCH_NUM,  HBaseSparkConf.defaultBatchNum))
+HBaseSparkConf.QUERY_BATCHSIZE,  -1))
 
   val bulkGetSize =  parameters.get(HBaseSparkConf.BULKGET_SIZE).map(_.toInt)
 .getOrElse(sqlContext.sparkContext.getConf.getInt(
-HBaseSparkConf.BULKGET_SIZE,  HBaseSparkConf.defaultBulkGetSize))
+HBaseSparkConf.BU

hbase git commit: HBASE-15592 Print Procedure WAL content

2016-04-06 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 3826894f8 -> ac8cd373e


HBASE-15592 Print Procedure WAL content


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ac8cd373
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ac8cd373
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ac8cd373

Branch: refs/heads/master
Commit: ac8cd373ebe81ed24cab6737154c6902c05ff059
Parents: 3826894
Author: Jerry He 
Authored: Wed Apr 6 21:42:38 2016 -0700
Committer: Jerry He 
Committed: Wed Apr 6 21:49:07 2016 -0700

--
 .../hadoop/hbase/procedure2/Procedure.java  |  33 
 .../store/wal/ProcedureWALPrettyPrinter.java| 189 +++
 2 files changed, 222 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ac8cd373/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index aff2b15..781bad9 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -205,6 +205,16 @@ public abstract class Procedure implements 
Comparable {
 
   @Override
   public String toString() {
+// Return the simple String presentation of the procedure.
+return toStringSimpleSB().toString();
+  }
+
+  /**
+   * Build the StringBuilder for the simple form of
+   * procedure string.
+   * @return the StringBuilder
+   */
+  protected StringBuilder toStringSimpleSB() {
 StringBuilder sb = new StringBuilder();
 toStringClassDetails(sb);
 
@@ -225,6 +235,29 @@ public abstract class Procedure implements 
Comparable {
 
 sb.append(" state=");
 toStringState(sb);
+
+return sb;
+  }
+
+  /**
+   * Extend the toString() information with more procedure
+   * details
+   */
+  public String toStringDetails() {
+StringBuilder sb = toStringSimpleSB();
+
+sb.append(" startTime=");
+sb.append(getStartTime());
+
+sb.append(" lastUpdate=");
+sb.append(getLastUpdate());
+
+if (stackIndexes != null) {
+  sb.append("\n");
+  sb.append("stackIndexes=");
+  sb.append(Arrays.toString(getStackIndexes()));
+}
+
 return sb.toString();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ac8cd373/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
new file mode 100644
index 000..9c33ac2
--- /dev/null
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceS

hbase git commit: HBASE-15592 Print Procedure WAL content

2016-04-06 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 ff075fd9d -> 0727d0f41


HBASE-15592 Print Procedure WAL content


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0727d0f4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0727d0f4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0727d0f4

Branch: refs/heads/branch-1
Commit: 0727d0f414ef9dccae586e019059c6f95cfb2ba1
Parents: ff075fd
Author: Jerry He 
Authored: Wed Apr 6 22:00:52 2016 -0700
Committer: Jerry He 
Committed: Wed Apr 6 22:00:52 2016 -0700

--
 .../hadoop/hbase/procedure2/Procedure.java  |  33 
 .../store/wal/ProcedureWALPrettyPrinter.java| 189 +++
 2 files changed, 222 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0727d0f4/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index aff2b15..781bad9 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -205,6 +205,16 @@ public abstract class Procedure implements 
Comparable {
 
   @Override
   public String toString() {
+// Return the simple String presentation of the procedure.
+return toStringSimpleSB().toString();
+  }
+
+  /**
+   * Build the StringBuilder for the simple form of
+   * procedure string.
+   * @return the StringBuilder
+   */
+  protected StringBuilder toStringSimpleSB() {
 StringBuilder sb = new StringBuilder();
 toStringClassDetails(sb);
 
@@ -225,6 +235,29 @@ public abstract class Procedure implements 
Comparable {
 
 sb.append(" state=");
 toStringState(sb);
+
+return sb;
+  }
+
+  /**
+   * Extend the toString() information with more procedure
+   * details
+   */
+  public String toStringDetails() {
+StringBuilder sb = toStringSimpleSB();
+
+sb.append(" startTime=");
+sb.append(getStartTime());
+
+sb.append(" lastUpdate=");
+sb.append(getLastUpdate());
+
+if (stackIndexes != null) {
+  sb.append("\n");
+  sb.append("stackIndexes=");
+  sb.append(Arrays.toString(getStackIndexes()));
+}
+
 return sb.toString();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0727d0f4/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
new file mode 100644
index 000..9c33ac2
--- /dev/null
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.Interf

hbase git commit: HBASE-15592 Print Procedure WAL content

2016-04-06 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 fcfda192f -> db3b46eb7


HBASE-15592 Print Procedure WAL content


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/db3b46eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/db3b46eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/db3b46eb

Branch: refs/heads/branch-1.2
Commit: db3b46eb7c22b705ee2c8a398e4832e60ba792c9
Parents: fcfda19
Author: Jerry He 
Authored: Wed Apr 6 22:06:16 2016 -0700
Committer: Jerry He 
Committed: Wed Apr 6 22:06:16 2016 -0700

--
 .../hadoop/hbase/procedure2/Procedure.java  |  33 
 .../store/wal/ProcedureWALPrettyPrinter.java| 189 +++
 2 files changed, 222 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/db3b46eb/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index aff2b15..781bad9 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -205,6 +205,16 @@ public abstract class Procedure implements 
Comparable {
 
   @Override
   public String toString() {
+// Return the simple String presentation of the procedure.
+return toStringSimpleSB().toString();
+  }
+
+  /**
+   * Build the StringBuilder for the simple form of
+   * procedure string.
+   * @return the StringBuilder
+   */
+  protected StringBuilder toStringSimpleSB() {
 StringBuilder sb = new StringBuilder();
 toStringClassDetails(sb);
 
@@ -225,6 +235,29 @@ public abstract class Procedure implements 
Comparable {
 
 sb.append(" state=");
 toStringState(sb);
+
+return sb;
+  }
+
+  /**
+   * Extend the toString() information with more procedure
+   * details
+   */
+  public String toStringDetails() {
+StringBuilder sb = toStringSimpleSB();
+
+sb.append(" startTime=");
+sb.append(getStartTime());
+
+sb.append(" lastUpdate=");
+sb.append(getLastUpdate());
+
+if (stackIndexes != null) {
+  sb.append("\n");
+  sb.append("stackIndexes=");
+  sb.append(Arrays.toString(getStackIndexes()));
+}
+
 return sb.toString();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/db3b46eb/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
new file mode 100644
index 000..e685822
--- /dev/null
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.In

hbase git commit: HBASE-15592 Print Procedure WAL content

2016-04-06 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 92afd3cd5 -> 2a0288dd8


HBASE-15592 Print Procedure WAL content


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2a0288dd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2a0288dd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2a0288dd

Branch: refs/heads/branch-1.3
Commit: 2a0288dd89b03230cb406a7dbc54314a8dd55ec5
Parents: 92afd3c
Author: Jerry He 
Authored: Wed Apr 6 22:00:52 2016 -0700
Committer: Jerry He 
Committed: Wed Apr 6 22:08:20 2016 -0700

--
 .../hadoop/hbase/procedure2/Procedure.java  |  33 
 .../store/wal/ProcedureWALPrettyPrinter.java| 189 +++
 2 files changed, 222 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2a0288dd/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index aff2b15..781bad9 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -205,6 +205,16 @@ public abstract class Procedure implements 
Comparable {
 
   @Override
   public String toString() {
+// Return the simple String presentation of the procedure.
+return toStringSimpleSB().toString();
+  }
+
+  /**
+   * Build the StringBuilder for the simple form of
+   * procedure string.
+   * @return the StringBuilder
+   */
+  protected StringBuilder toStringSimpleSB() {
 StringBuilder sb = new StringBuilder();
 toStringClassDetails(sb);
 
@@ -225,6 +235,29 @@ public abstract class Procedure implements 
Comparable {
 
 sb.append(" state=");
 toStringState(sb);
+
+return sb;
+  }
+
+  /**
+   * Extend the toString() information with more procedure
+   * details
+   */
+  public String toStringDetails() {
+StringBuilder sb = toStringSimpleSB();
+
+sb.append(" startTime=");
+sb.append(getStartTime());
+
+sb.append(" lastUpdate=");
+sb.append(getLastUpdate());
+
+if (stackIndexes != null) {
+  sb.append("\n");
+  sb.append("stackIndexes=");
+  sb.append(Arrays.toString(getStackIndexes()));
+}
+
 return sb.toString();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2a0288dd/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
new file mode 100644
index 000..9c33ac2
--- /dev/null
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.In

hbase git commit: HBASE-15592 Print Procedure WAL content

2016-04-06 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 1b60100b5 -> 0d2c8bde9


HBASE-15592 Print Procedure WAL content


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0d2c8bde
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0d2c8bde
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0d2c8bde

Branch: refs/heads/branch-1.1
Commit: 0d2c8bde97cc9c3b982b7293f57be06170e1e9f8
Parents: 1b60100
Author: Jerry He 
Authored: Wed Apr 6 22:06:16 2016 -0700
Committer: Jerry He 
Committed: Wed Apr 6 22:39:33 2016 -0700

--
 .../hadoop/hbase/procedure2/Procedure.java  |  33 
 .../store/wal/ProcedureWALPrettyPrinter.java| 189 +++
 2 files changed, 222 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0d2c8bde/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 304c225..813bbf5 100644
--- 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -175,6 +175,16 @@ public abstract class Procedure implements 
Comparable {
 
   @Override
   public String toString() {
+// Return the simple String presentation of the procedure.
+return toStringSimpleSB().toString();
+  }
+
+  /**
+   * Build the StringBuilder for the simple form of
+   * procedure string.
+   * @return the StringBuilder
+   */
+  protected StringBuilder toStringSimpleSB() {
 StringBuilder sb = new StringBuilder();
 toStringClassDetails(sb);
 
@@ -195,6 +205,29 @@ public abstract class Procedure implements 
Comparable {
 
 sb.append(" state=");
 sb.append(getState());
+
+return sb;
+  }
+
+  /**
+   * Extend the toString() information with more procedure
+   * details
+   */
+  public String toStringDetails() {
+StringBuilder sb = toStringSimpleSB();
+
+sb.append(" startTime=");
+sb.append(getStartTime());
+
+sb.append(" lastUpdate=");
+sb.append(getLastUpdate());
+
+if (stackIndexes != null) {
+  sb.append("\n");
+  sb.append("stackIndexes=");
+  sb.append(Arrays.toString(getStackIndexes()));
+}
+
 return sb.toString();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0d2c8bde/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
--
diff --git 
a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
new file mode 100644
index 000..e685822
--- /dev/null
+++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.procedure2.store.wal;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;

hbase git commit: HBASE-15591 ServerCrashProcedure not yielding

2016-04-10 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master f7d44e929 -> 80df1cb7b


HBASE-15591 ServerCrashProcedure not yielding


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/80df1cb7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/80df1cb7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/80df1cb7

Branch: refs/heads/master
Commit: 80df1cb7b6c3eaae20c3f1390e60f1d35be004b9
Parents: f7d44e9
Author: Jerry He 
Authored: Sun Apr 10 17:02:39 2016 -0700
Committer: Jerry He 
Committed: Sun Apr 10 17:02:39 2016 -0700

--
 .../master/procedure/ServerCrashProcedure.java | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/80df1cb7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 19e05fd..7de694c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -172,7 +172,7 @@ implements ServerProcedureInterface {
 
   @Override
   protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState 
state)
-  throws ProcedureYieldException {
+  throws ProcedureYieldException {
 if (LOG.isTraceEnabled()) {
   LOG.trace(state);
 }
@@ -208,10 +208,17 @@ implements ServerProcedureInterface {
   case SERVER_CRASH_GET_REGIONS:
 // If hbase:meta is not assigned, yield.
 if (!isMetaAssignedQuickTest(env)) {
+  // isMetaAssignedQuickTest does not really wait. Let's delay a 
little before
+  // another round of execution.
+  long wait =
+  env.getMasterConfiguration().getLong(KEY_SHORT_WAIT_ON_META,
+DEFAULT_SHORT_WAIT_ON_META);
+  wait = wait / 10;
+  Thread.sleep(wait);
   throwProcedureYieldException("Waiting on hbase:meta assignment");
 }
 this.regionsOnCrashedServer =
-  
services.getAssignmentManager().getRegionStates().getServerRegions(this.serverName);
+
services.getAssignmentManager().getRegionStates().getServerRegions(this.serverName);
 // Where to go next? Depends on whether we should split logs at all or 
if we should do
 // distributed log splitting (DLS) vs distributed log replay (DLR).
 if (!this.shouldSplitWal) {
@@ -291,8 +298,12 @@ implements ServerProcedureInterface {
 return Flow.NO_MORE_STATE;
 
   default:
-  throw new UnsupportedOperationException("unhandled state=" + state);
+throw new UnsupportedOperationException("unhandled state=" + state);
   }
+} catch (ProcedureYieldException e) {
+  LOG.warn("Failed serverName=" + this.serverName + ", state=" + state + 
"; retry "
+  + e.getMessage());
+  throw e;
 } catch (IOException e) {
   LOG.warn("Failed serverName=" + this.serverName + ", state=" + state + 
"; retry", e);
 } catch (InterruptedException e) {



hbase git commit: HBASE-15591 ServerCrashProcedure not yielding

2016-04-10 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 7303c7e47 -> ff835d5ae


HBASE-15591 ServerCrashProcedure not yielding


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ff835d5a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ff835d5a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ff835d5a

Branch: refs/heads/branch-1
Commit: ff835d5ae66da954f3a05f8cdcd80539396a694b
Parents: 7303c7e
Author: Jerry He 
Authored: Sun Apr 10 17:02:39 2016 -0700
Committer: Jerry He 
Committed: Sun Apr 10 17:07:39 2016 -0700

--
 .../master/procedure/ServerCrashProcedure.java | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ff835d5a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 0e35ddb..90addb2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -173,7 +173,7 @@ implements ServerProcedureInterface {
 
   @Override
   protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState 
state)
-  throws ProcedureYieldException {
+  throws ProcedureYieldException {
 if (LOG.isTraceEnabled()) {
   LOG.trace(state);
 }
@@ -209,10 +209,17 @@ implements ServerProcedureInterface {
   case SERVER_CRASH_GET_REGIONS:
 // If hbase:meta is not assigned, yield.
 if (!isMetaAssignedQuickTest(env)) {
+  // isMetaAssignedQuickTest does not really wait. Let's delay a 
little before
+  // another round of execution.
+  long wait =
+  env.getMasterConfiguration().getLong(KEY_SHORT_WAIT_ON_META,
+DEFAULT_SHORT_WAIT_ON_META);
+  wait = wait / 10;
+  Thread.sleep(wait);
   throwProcedureYieldException("Waiting on hbase:meta assignment");
 }
 this.regionsOnCrashedServer =
-  
services.getAssignmentManager().getRegionStates().getServerRegions(this.serverName);
+
services.getAssignmentManager().getRegionStates().getServerRegions(this.serverName);
 // Where to go next? Depends on whether we should split logs at all or 
if we should do
 // distributed log splitting (DLS) vs distributed log replay (DLR).
 if (!this.shouldSplitWal) {
@@ -292,8 +299,12 @@ implements ServerProcedureInterface {
 return Flow.NO_MORE_STATE;
 
   default:
-  throw new UnsupportedOperationException("unhandled state=" + state);
+throw new UnsupportedOperationException("unhandled state=" + state);
   }
+} catch (ProcedureYieldException e) {
+  LOG.warn("Failed serverName=" + this.serverName + ", state=" + state + 
"; retry "
+  + e.getMessage());
+  throw e;
 } catch (IOException e) {
   LOG.warn("Failed serverName=" + this.serverName + ", state=" + state + 
"; retry", e);
 } catch (InterruptedException e) {



hbase git commit: HBASE-15591 ServerCrashProcedure not yielding

2016-04-10 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 db3b46eb7 -> 19d110051


HBASE-15591 ServerCrashProcedure not yielding


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/19d11005
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/19d11005
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/19d11005

Branch: refs/heads/branch-1.2
Commit: 19d110051692c7e3cb187d08afdffc12ed621bd0
Parents: db3b46e
Author: Jerry He 
Authored: Sun Apr 10 17:02:39 2016 -0700
Committer: Jerry He 
Committed: Sun Apr 10 17:10:40 2016 -0700

--
 .../master/procedure/ServerCrashProcedure.java | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/19d11005/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 0e35ddb..90addb2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -173,7 +173,7 @@ implements ServerProcedureInterface {
 
   @Override
   protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState 
state)
-  throws ProcedureYieldException {
+  throws ProcedureYieldException {
 if (LOG.isTraceEnabled()) {
   LOG.trace(state);
 }
@@ -209,10 +209,17 @@ implements ServerProcedureInterface {
   case SERVER_CRASH_GET_REGIONS:
 // If hbase:meta is not assigned, yield.
 if (!isMetaAssignedQuickTest(env)) {
+  // isMetaAssignedQuickTest does not really wait. Let's delay a 
little before
+  // another round of execution.
+  long wait =
+  env.getMasterConfiguration().getLong(KEY_SHORT_WAIT_ON_META,
+DEFAULT_SHORT_WAIT_ON_META);
+  wait = wait / 10;
+  Thread.sleep(wait);
   throwProcedureYieldException("Waiting on hbase:meta assignment");
 }
 this.regionsOnCrashedServer =
-  
services.getAssignmentManager().getRegionStates().getServerRegions(this.serverName);
+
services.getAssignmentManager().getRegionStates().getServerRegions(this.serverName);
 // Where to go next? Depends on whether we should split logs at all or 
if we should do
 // distributed log splitting (DLS) vs distributed log replay (DLR).
 if (!this.shouldSplitWal) {
@@ -292,8 +299,12 @@ implements ServerProcedureInterface {
 return Flow.NO_MORE_STATE;
 
   default:
-  throw new UnsupportedOperationException("unhandled state=" + state);
+throw new UnsupportedOperationException("unhandled state=" + state);
   }
+} catch (ProcedureYieldException e) {
+  LOG.warn("Failed serverName=" + this.serverName + ", state=" + state + 
"; retry "
+  + e.getMessage());
+  throw e;
 } catch (IOException e) {
   LOG.warn("Failed serverName=" + this.serverName + ", state=" + state + 
"; retry", e);
 } catch (InterruptedException e) {



hbase git commit: HBASE-15591 ServerCrashProcedure not yielding

2016-04-10 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 c9a00ff8f -> b29074292


HBASE-15591 ServerCrashProcedure not yielding


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b2907429
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b2907429
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b2907429

Branch: refs/heads/branch-1.3
Commit: b2907429273432c550a5c930bc452d6555f2571a
Parents: c9a00ff
Author: Jerry He 
Authored: Sun Apr 10 17:02:39 2016 -0700
Committer: Jerry He 
Committed: Sun Apr 10 17:14:46 2016 -0700

--
 .../master/procedure/ServerCrashProcedure.java | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b2907429/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 0e35ddb..90addb2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -173,7 +173,7 @@ implements ServerProcedureInterface {
 
   @Override
   protected Flow executeFromState(MasterProcedureEnv env, ServerCrashState 
state)
-  throws ProcedureYieldException {
+  throws ProcedureYieldException {
 if (LOG.isTraceEnabled()) {
   LOG.trace(state);
 }
@@ -209,10 +209,17 @@ implements ServerProcedureInterface {
   case SERVER_CRASH_GET_REGIONS:
 // If hbase:meta is not assigned, yield.
 if (!isMetaAssignedQuickTest(env)) {
+  // isMetaAssignedQuickTest does not really wait. Let's delay a 
little before
+  // another round of execution.
+  long wait =
+  env.getMasterConfiguration().getLong(KEY_SHORT_WAIT_ON_META,
+DEFAULT_SHORT_WAIT_ON_META);
+  wait = wait / 10;
+  Thread.sleep(wait);
   throwProcedureYieldException("Waiting on hbase:meta assignment");
 }
 this.regionsOnCrashedServer =
-  
services.getAssignmentManager().getRegionStates().getServerRegions(this.serverName);
+
services.getAssignmentManager().getRegionStates().getServerRegions(this.serverName);
 // Where to go next? Depends on whether we should split logs at all or 
if we should do
 // distributed log splitting (DLS) vs distributed log replay (DLR).
 if (!this.shouldSplitWal) {
@@ -292,8 +299,12 @@ implements ServerProcedureInterface {
 return Flow.NO_MORE_STATE;
 
   default:
-  throw new UnsupportedOperationException("unhandled state=" + state);
+throw new UnsupportedOperationException("unhandled state=" + state);
   }
+} catch (ProcedureYieldException e) {
+  LOG.warn("Failed serverName=" + this.serverName + ", state=" + state + 
"; retry "
+  + e.getMessage());
+  throw e;
 } catch (IOException e) {
   LOG.warn("Failed serverName=" + this.serverName + ", state=" + state + 
"; retry", e);
 } catch (InterruptedException e) {



hbase git commit: Correct Bloom filter documentation in the book (yi liang)

2016-04-18 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 70687c18b -> 31b85e73d


Correct Bloom filter documentation in the book (yi liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/31b85e73
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/31b85e73
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/31b85e73

Branch: refs/heads/master
Commit: 31b85e73da8bee80e8aa918de09fd85d1d7a724a
Parents: 70687c1
Author: Jerry He 
Authored: Mon Apr 18 09:57:46 2016 -0700
Committer: Jerry He 
Committed: Mon Apr 18 09:57:46 2016 -0700

--
 src/main/asciidoc/_chapters/performance.adoc | 22 +++---
 1 file changed, 11 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/31b85e73/src/main/asciidoc/_chapters/performance.adoc
--
diff --git a/src/main/asciidoc/_chapters/performance.adoc 
b/src/main/asciidoc/_chapters/performance.adoc
index 66dd489..01956d5 100644
--- a/src/main/asciidoc/_chapters/performance.adoc
+++ b/src/main/asciidoc/_chapters/performance.adoc
@@ -361,7 +361,7 @@ Bloom filters need to be rebuilt upon deletion, so may not 
be appropriate in env
 
 Bloom filters are enabled on a Column Family.
 You can do this by using the setBloomFilterType method of HColumnDescriptor or 
using the HBase API.
-Valid values are `NONE` (the default), `ROW`, or `ROWCOL`.
+Valid values are `NONE`, `ROW` (default), or `ROWCOL`.
 See <> for more information on `ROW` versus `ROWCOL`.
 See also the API documentation for 
link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/HColumnDescriptor.html[HColumnDescriptor].
 
@@ -382,17 +382,17 @@ You can configure the following settings in the 
_hbase-site.xml_.
 | Default
 | Description
 
-| io.hfile.bloom.enabled
+| io.storefile.bloom.enabled
 | yes
 | Set to no to kill bloom filters server-wide if something goes wrong
 
-| io.hfile.bloom.error.rate
+| io.storefile.bloom.error.rate
 | .01
 | The average false positive rate for bloom filters. Folding is used to
   maintain the false positive rate. Expressed as a decimal 
representation of a
   percentage.
 
-| io.hfile.bloom.max.fold
+| io.storefile.bloom.max.fold
 | 7
 | The guaranteed maximum fold rate. Changing this setting should not be
   necessary and is not recommended.
@@ -406,7 +406,7 @@ You can configure the following settings in the 
_hbase-site.xml_.
 | Master switch to enable Delete Family Bloom filters and store them in the 
StoreFile.
 
 | io.storefile.bloom.block.size
-| 65536
+| 131072
 | Target Bloom block size. Bloom filter blocks of approximately this size
   are interleaved with data blocks.
 
@@ -713,20 +713,20 @@ Stored in the LRU cache, if it is enabled (It's enabled 
by default).
 [[config.bloom]]
  Bloom Filter Configuration
 
-= `io.hfile.bloom.enabled` global kill switch
+= `io.storefile.bloom.enabled` global kill switch
 
-`io.hfile.bloom.enabled` in `Configuration` serves as the kill switch in case 
something goes wrong.
+`io.storefile.bloom.enabled` in `Configuration` serves as the kill switch in 
case something goes wrong.
 Default = `true`.
 
-= `io.hfile.bloom.error.rate`
+= `io.storefile.bloom.error.rate`
 
-`io.hfile.bloom.error.rate` = average false positive rate.
+`io.storefile.bloom.error.rate` = average false positive rate.
 Default = 1%. Decrease rate by ½ (e.g.
 to .5%) == +1 bit per bloom entry.
 
-= `io.hfile.bloom.max.fold`
+= `io.storefile.bloom.max.fold`
 
-`io.hfile.bloom.max.fold` = guaranteed minimum fold rate.
+`io.storefile.bloom.max.fold` = guaranteed minimum fold rate.
 Most people should leave this alone.
 Default = 7, or can collapse to at least 1/128th of original size.
 See the _Development Process_ section of the document 
link:https://issues.apache.org/jira/secure/attachment/12444007/Bloom_Filters_in_HBase.pdf[BloomFilters
 in HBase] for more on what this option means.



hbase git commit: HBASE-16311 Audit log for delete snapshot operation is missing in case of snapshot owner deleting the same (Yi Liang)

2016-09-02 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master f6ccae350 -> 0b6eccf4c


HBASE-16311 Audit log for delete snapshot operation is missing in case of 
snapshot owner deleting the same (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b6eccf4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b6eccf4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b6eccf4

Branch: refs/heads/master
Commit: 0b6eccf4c3273dbe4355e179e94814dc6131d87b
Parents: f6ccae3
Author: Jerry He 
Authored: Fri Sep 2 09:47:28 2016 -0700
Committer: Jerry He 
Committed: Fri Sep 2 09:47:28 2016 -0700

--
 .../hbase/security/access/AccessController.java | 20 
 .../security/access/TestAccessController.java   |  4 ++--
 2 files changed, 14 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0b6eccf4/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 0e69060..ff27b41 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -1314,7 +1314,7 @@ public class AccessController extends 
BaseMasterAndRegionObserver
   public void preSnapshot(final ObserverContext 
ctx,
   final SnapshotDescription snapshot, final HTableDescriptor 
hTableDescriptor)
   throws IOException {
-requirePermission(getActiveUser(ctx), "snapshot", 
hTableDescriptor.getTableName(), null, null,
+requirePermission(getActiveUser(ctx), "snapshot " + snapshot.getName(), 
hTableDescriptor.getTableName(), null, null,
   Permission.Action.ADMIN);
   }
 
@@ -1324,9 +1324,11 @@ public class AccessController extends 
BaseMasterAndRegionObserver
 User user = getActiveUser(ctx);
 if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) {
   // list it, if user is the owner of snapshot
-  // TODO: We are not logging this for audit
+  AuthResult result = AuthResult.allow("listSnapshot " + 
snapshot.getName(),
+  "Snapshot owner check allowed", user, null, null, null);
+  logResult(result);
 } else {
-  requirePermission(user, "listSnapshot", Action.ADMIN);
+  requirePermission(user, "listSnapshot " + snapshot.getName(), 
Action.ADMIN);
 }
   }
 
@@ -1334,7 +1336,7 @@ public class AccessController extends 
BaseMasterAndRegionObserver
   public void preCloneSnapshot(final 
ObserverContext ctx,
   final SnapshotDescription snapshot, final HTableDescriptor 
hTableDescriptor)
   throws IOException {
-requirePermission(getActiveUser(ctx), "clone", Action.ADMIN);
+requirePermission(getActiveUser(ctx), "cloneSnapshot " + 
snapshot.getName(), Action.ADMIN);
   }
 
   @Override
@@ -1343,10 +1345,10 @@ public class AccessController extends 
BaseMasterAndRegionObserver
   throws IOException {
 User user = getActiveUser(ctx);
 if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) {
-  requirePermission(user, "restoreSnapshot", 
hTableDescriptor.getTableName(), null, null,
+  requirePermission(user, "restoreSnapshot " + snapshot.getName(), 
hTableDescriptor.getTableName(), null, null,
 Permission.Action.ADMIN);
 } else {
-  requirePermission(user, "restoreSnapshot", Action.ADMIN);
+  requirePermission(user, "restoreSnapshot " + snapshot.getName(), 
Action.ADMIN);
 }
   }
 
@@ -1356,9 +1358,11 @@ public class AccessController extends 
BaseMasterAndRegionObserver
 User user = getActiveUser(ctx);
 if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) {
   // Snapshot owner is allowed to delete the snapshot
-  // TODO: We are not logging this for audit
+  AuthResult result = AuthResult.allow("deleteSnapshot " + 
snapshot.getName(),
+  "Snapshot owner check allowed", user, null, null, null);
+  logResult(result);
 } else {
-  requirePermission(user, "deleteSnapshot", Action.ADMIN);
+  requirePermission(user, "deleteSnapshot " + snapshot.getName(), 
Action.ADMIN);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0b6eccf4/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController

hbase git commit: HBASE-16311 Audit log for delete snapshot operation is missing in case of snapshot owner deleting the same (Yi Liang)

2016-09-02 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4147dcb81 -> 8be6f95f9


HBASE-16311 Audit log for delete snapshot operation is missing in case of 
snapshot owner deleting the same (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8be6f95f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8be6f95f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8be6f95f

Branch: refs/heads/branch-1
Commit: 8be6f95f99e776c5b46845cb42d1c2c03c25f802
Parents: 4147dcb
Author: Jerry He 
Authored: Fri Sep 2 10:09:44 2016 -0700
Committer: Jerry He 
Committed: Fri Sep 2 10:09:44 2016 -0700

--
 .../hbase/security/access/AccessController.java | 25 +---
 .../security/access/TestAccessController.java   |  4 ++--
 2 files changed, 18 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8be6f95f/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index a147b12..7d1345f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -1313,17 +1313,21 @@ public class AccessController extends 
BaseMasterAndRegionObserver
   public void preSnapshot(final ObserverContext 
ctx,
   final SnapshotDescription snapshot, final HTableDescriptor 
hTableDescriptor)
   throws IOException {
-requirePermission("snapshot", hTableDescriptor.getTableName(), null, null,
+requirePermission("snapshot " + snapshot.getName(), 
hTableDescriptor.getTableName(), null, null,
   Permission.Action.ADMIN);
   }
 
   @Override
   public void preListSnapshot(ObserverContext 
ctx,
   final SnapshotDescription snapshot) throws IOException {
-if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, getActiveUser())) {
+User user = getActiveUser();
+if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) {
   // list it, if user is the owner of snapshot
+  AuthResult result = AuthResult.allow("listSnapshot " + 
snapshot.getName(),
+  "Snapshot owner check allowed", user, null, null, null);
+  logResult(result);
 } else {
-  requirePermission("listSnapshot", Action.ADMIN);
+  requirePermission("listSnapshot " + snapshot.getName(), Action.ADMIN);
 }
   }
   
@@ -1331,7 +1335,7 @@ public class AccessController extends 
BaseMasterAndRegionObserver
   public void preCloneSnapshot(final 
ObserverContext ctx,
   final SnapshotDescription snapshot, final HTableDescriptor 
hTableDescriptor)
   throws IOException {
-requirePermission("clone", Action.ADMIN);
+requirePermission("cloneSnapshot " + snapshot.getName(), Action.ADMIN);
   }
 
   @Override
@@ -1339,21 +1343,24 @@ public class AccessController extends 
BaseMasterAndRegionObserver
   final SnapshotDescription snapshot, final HTableDescriptor 
hTableDescriptor)
   throws IOException {
 if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, getActiveUser())) {
-  requirePermission("restoreSnapshot", hTableDescriptor.getTableName(), 
null, null,
+  requirePermission("restoreSnapshot " + snapshot.getName(), 
hTableDescriptor.getTableName(), null, null,
 Permission.Action.ADMIN);
 } else {
-  requirePermission("restore", Action.ADMIN);
+  requirePermission("restoreSnapshot " + snapshot.getName(), Action.ADMIN);
 }
   }
 
   @Override
   public void preDeleteSnapshot(final 
ObserverContext ctx,
   final SnapshotDescription snapshot) throws IOException {
-if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, getActiveUser())) {
+User user = getActiveUser();
+if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, user)) {
   // Snapshot owner is allowed to delete the snapshot
-  // TODO: We are not logging this for audit
+  AuthResult result = AuthResult.allow("deleteSnapshot " + 
snapshot.getName(),
+  "Snapshot owner check allowed", user, null, null, null);
+  logResult(result);
 } else {
-  requirePermission("deleteSnapshot", Action.ADMIN);
+  requirePermission("deleteSnapshot " + snapshot.getName(), Action.ADMIN);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8be6f95f/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access

hbase git commit: HBASE-16544 Remove or Clarify 'Using Amazon S3 Storage' section in the reference guide (Yi Liang)

2016-09-07 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master e9cfbfd10 -> e65817ef1


HBASE-16544 Remove or Clarify 'Using Amazon S3 Storage' section in the 
reference guide (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e65817ef
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e65817ef
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e65817ef

Branch: refs/heads/master
Commit: e65817ef15078e262c00d097996852be2716bb87
Parents: e9cfbfd
Author: Jerry He 
Authored: Wed Sep 7 18:50:02 2016 -0700
Committer: Jerry He 
Committed: Wed Sep 7 18:50:02 2016 -0700

--
 src/main/asciidoc/_chapters/configuration.adoc | 31 -
 1 file changed, 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e65817ef/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc 
b/src/main/asciidoc/_chapters/configuration.adoc
index 8dc3e8a..89820ca 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -1090,37 +1090,6 @@ Only a subset of all configurations can currently be 
changed in the running serv
 Here is an incomplete list: `hbase.regionserver.thread.compaction.large`, 
`hbase.regionserver.thread.compaction.small`, 
`hbase.regionserver.thread.split`, `hbase.regionserver.thread.merge`, as well 
as compaction policy and configurations and adjustment to offpeak hours.
 For the full list consult the patch attached to  
link:https://issues.apache.org/jira/browse/HBASE-12147[HBASE-12147 Porting 
Online Config Change from 89-fb].
 
-[[amazon_s3_configuration]]
-== Using Amazon S3 Storage
-
-HBase is designed to be tightly coupled with HDFS, and testing of other 
filesystems
-has not been thorough.
-
-The following limitations have been reported:
-
-- RegionServers should be deployed in Amazon EC2 to mitigate latency and 
bandwidth
-limitations when accessing the filesystem, and RegionServers must remain 
available
-to preserve data locality.
-- S3 writes each inbound and outbound file to disk, which adds overhead to 
each operation.
-- The best performance is achieved when all clients and servers are in the 
Amazon
-cloud, rather than a heterogenous architecture.
-- You must be aware of the location of `hadoop.tmp.dir` so that the local 
`/tmp/`
-directory is not filled to capacity.
-- HBase has a different file usage pattern than MapReduce jobs and has been 
optimized for
-HDFS, rather than distant networked storage.
-- The `s3a://` protocol is strongly recommended. The `s3n://` and `s3://` 
protocols have serious
-limitations and do not use the Amazon AWS SDK. The `s3a://` protocol is 
supported
-for use with HBase if you use Hadoop 2.6.1 or higher with HBase 1.2 or higher. 
Hadoop
-2.6.0 is not supported with HBase at all.
-
-Configuration details for Amazon S3 and associated Amazon services such as EMR 
are
-out of the scope of the HBase documentation. See the
-link:https://wiki.apache.org/hadoop/AmazonS3[Hadoop Wiki entry on Amazon S3 
Storage]
-and
-link:http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-hbase.html[Amazon's
 documentation for deploying HBase in EMR].
-
-One use case that is well-suited for Amazon S3 is storing snapshots. See 
<<snapshots_s3>>.
-
 ifdef::backend-docbook[]
 [index]
 == Index



hbase git commit: HBASE-16544 Remove or Clarify Using Amazon S3 Storage section in the reference guide -addendum (Yi Liang)

2016-09-16 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 1a1003a48 -> bb3d9ccd4


HBASE-16544 Remove or Clarify Using Amazon S3 Storage section in the reference 
guide -addendum (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bb3d9ccd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bb3d9ccd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bb3d9ccd

Branch: refs/heads/master
Commit: bb3d9ccd489fb64e3cb2020583935a393382a678
Parents: 1a1003a
Author: Jerry He 
Authored: Fri Sep 16 18:34:23 2016 -0700
Committer: Jerry He 
Committed: Fri Sep 16 18:34:23 2016 -0700

--
 src/main/asciidoc/_chapters/ops_mgt.adoc | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bb3d9ccd/src/main/asciidoc/_chapters/ops_mgt.adoc
--
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc 
b/src/main/asciidoc/_chapters/ops_mgt.adoc
index 0d184a7..550d5f7 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -2056,9 +2056,7 @@ $ bin/hbase 
org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot
 [[snapshots_s3]]
 === Storing Snapshots in an Amazon S3 Bucket
 
-For general information and limitations of using Amazon S3 storage with HBase, 
see
-<>. You can also store and retrieve snapshots from 
Amazon
-S3, using the following procedure.
+You can store and retrieve snapshots from Amazon S3, using the following 
procedure.
 
 NOTE: You can also store snapshots in Microsoft Azure Blob Storage. See 
<>.
 



hbase git commit: HBASE-16598 Enable zookeeper useMulti always and clean up in HBase code

2016-09-17 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master bb3d9ccd4 -> edc0ef3fe


HBASE-16598 Enable zookeeper useMulti always and clean up in HBase code


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/edc0ef3f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/edc0ef3f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/edc0ef3f

Branch: refs/heads/master
Commit: edc0ef3fe4b818da29ed0e581139dd4faf1cd591
Parents: bb3d9cc
Author: Jerry He 
Authored: Sat Sep 17 16:51:26 2016 -0700
Committer: Jerry He 
Committed: Sat Sep 17 16:51:26 2016 -0700

--
 .../replication/ReplicationPeersZKImpl.java |   1 -
 .../replication/ReplicationQueuesZKImpl.java| 160 +-
 .../apache/hadoop/hbase/zookeeper/ZKUtil.java   |  77 +--
 .../org/apache/hadoop/hbase/HConstants.java |   3 -
 .../src/main/resources/hbase-default.xml|  10 -
 .../protobuf/generated/ZooKeeperProtos.java | 559 +--
 .../src/main/protobuf/ZooKeeper.proto   |   7 -
 .../hadoop/hbase/rsgroup/TestRSGroups.java  |   3 -
 .../org/apache/hadoop/hbase/master/HMaster.java |  15 +-
 .../cleaner/ReplicationZKLockCleanerChore.java  | 112 
 .../replication/TestMultiSlaveReplication.java  |  38 --
 .../TestReplicationSourceManager.java   |   1 -
 .../TestReplicationSourceManagerZkImpl.java |   2 -
 .../hadoop/hbase/zookeeper/TestZKMulti.java |  47 --
 .../hbase/client/rsgroup/TestShellRSGroups.java |   3 -
 src/main/asciidoc/_chapters/configuration.adoc  |   5 +-
 src/main/asciidoc/_chapters/zookeeper.adoc  |   4 +-
 17 files changed, 47 insertions(+), 1000 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/edc0ef3f/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 90b1347..d4b93c0 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -131,7 +131,6 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
   List listOfOps = new ArrayList();
   ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(getPeerNode(id),
 ReplicationSerDeHelper.toByteArray(peerConfig));
-  // There is a race (if hbase.zookeeper.useMulti is false)
   // b/w PeerWatcher and ReplicationZookeeper#add method to create the
   // peer-state znode. This happens while adding a peer
   // The peer state data is set as "ENABLED" by default.

http://git-wip-us.apache.org/repos/asf/hbase/blob/edc0ef3f/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index 1c579ab..40c9140 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -23,7 +23,6 @@ import java.util.List;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -31,8 +30,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -67,8 +64,6 @@ public class ReplicationQueuesZKImpl extends 
ReplicationStateZKBase implements R
 
   /** Znode containing all replication queues for this region server. */
   private String myQueuesZnode;
-  /** Name of znode we use to lock during failover */
-  private final static String RS_LOCK_ZNODE = "lock";
 
   private static final Log LOG = 
LogFactory.getLog(ReplicationQueuesZKImpl.class);
 
@@ -189,42 +184,13 @@ public class ReplicationQueuesZKImpl extends 
ReplicationStateZKBase implements R
 } catch (KeeperException e) {
 

hbase git commit: HBASE-16647 hbck should do offline reference repair before online repair

2016-09-20 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 22dcce571 -> 66821206b


HBASE-16647 hbck should do offline reference repair before online repair


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/66821206
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/66821206
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/66821206

Branch: refs/heads/master
Commit: 66821206b85d85fba3012d6bb0d6b98c8ee23cb8
Parents: 22dcce5
Author: Jerry He 
Authored: Tue Sep 20 11:49:09 2016 -0700
Committer: Jerry He 
Committed: Tue Sep 20 11:49:09 2016 -0700

--
 .../main/java/org/apache/hadoop/hbase/util/HBaseFsck.java | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/66821206/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index d483c71..6a56f8e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -630,7 +630,6 @@ public class HBaseFsck extends Configured implements 
Closeable {
*/
   public int onlineConsistencyRepair() throws IOException, KeeperException,
 InterruptedException {
-clearState();
 
 // get regions according to what is online on each RegionServer
 loadDeployedRegions();
@@ -748,8 +747,12 @@ public class HBaseFsck extends Configured implements 
Closeable {
   throws IOException, KeeperException, InterruptedException, 
ServiceException {
 // print hbase server version
 errors.print("Version: " + status.getHBaseVersion());
-offlineHdfsIntegrityRepair();
 
+// Clean start
+clearState();
+// Do offline check and repair first
+offlineHdfsIntegrityRepair();
+offlineReferenceFileRepair();
 // If Master runs maintenance tasks (such as balancer, catalog janitor, 
etc) during online
 // hbck, it is likely that hbck would be misled and report transient 
errors.  Therefore, it
 // is better to set Master into maintenance mode during online hbck.
@@ -765,8 +768,6 @@ public class HBaseFsck extends Configured implements 
Closeable {
   checkRegionBoundaries();
 }
 
-offlineReferenceFileRepair();
-
 checkAndFixTableLocks();
 
 checkAndFixReplication();
@@ -1068,6 +1069,7 @@ public class HBaseFsck extends Configured implements 
Closeable {
* be fixed before a cluster can start properly.
*/
   private void offlineReferenceFileRepair() throws IOException, 
InterruptedException {
+clearState();
 Configuration conf = getConf();
 Path hbaseRoot = FSUtils.getRootDir(conf);
 FileSystem fs = hbaseRoot.getFileSystem(conf);



hbase git commit: HBASE-16647 hbck should do offline reference repair before online repair

2016-09-20 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 91d03697d -> abfb20206


HBASE-16647 hbck should do offline reference repair before online repair


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/abfb2020
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/abfb2020
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/abfb2020

Branch: refs/heads/branch-1
Commit: abfb20206d59dbbe3d85b2d1ced44b61fd37d58c
Parents: 91d0369
Author: Jerry He 
Authored: Tue Sep 20 11:49:09 2016 -0700
Committer: Jerry He 
Committed: Tue Sep 20 11:53:01 2016 -0700

--
 .../main/java/org/apache/hadoop/hbase/util/HBaseFsck.java | 10 ++
 1 file changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/abfb2020/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 92aed7c..c115e7b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -638,7 +638,6 @@ public class HBaseFsck extends Configured implements 
Closeable {
*/
   public int onlineConsistencyRepair() throws IOException, KeeperException,
 InterruptedException {
-clearState();
 
 // get regions according to what is online on each RegionServer
 loadDeployedRegions();
@@ -756,8 +755,12 @@ public class HBaseFsck extends Configured implements 
Closeable {
   throws IOException, KeeperException, InterruptedException, 
ServiceException {
 // print hbase server version
 errors.print("Version: " + status.getHBaseVersion());
-offlineHdfsIntegrityRepair();
 
+// Clean start
+clearState();
+// Do offline check and repair first
+offlineHdfsIntegrityRepair();
+offlineReferenceFileRepair();
 // If Master runs maintenance tasks (such as balancer, catalog janitor, 
etc) during online
 // hbck, it is likely that hbck would be misled and report transient 
errors.  Therefore, it
 // is better to set Master into maintenance mode during online hbck.
@@ -773,8 +776,6 @@ public class HBaseFsck extends Configured implements 
Closeable {
   checkRegionBoundaries();
 }
 
-offlineReferenceFileRepair();
-
 checkAndFixTableLocks();
 
 // Check (and fix if requested) orphaned table ZNodes
@@ -1079,6 +1080,7 @@ public class HBaseFsck extends Configured implements 
Closeable {
* be fixed before a cluster can start properly.
*/
   private void offlineReferenceFileRepair() throws IOException, 
InterruptedException {
+clearState();
 Configuration conf = getConf();
 Path hbaseRoot = FSUtils.getRootDir(conf);
 FileSystem fs = hbaseRoot.getFileSystem(conf);



hbase git commit: HBASE-12949 Scanner can be stuck in infinite loop if the HFile is corrupted

2016-09-21 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 5568929dd -> b6b01a790


HBASE-12949 Scanner can be stuck in infinite loop if the HFile is corrupted


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b6b01a79
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b6b01a79
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b6b01a79

Branch: refs/heads/master
Commit: b6b01a790f600d4699450e2e97349599c9efc0e1
Parents: 5568929
Author: Jerry He 
Authored: Wed Sep 21 13:25:23 2016 -0700
Committer: Jerry He 
Committed: Wed Sep 21 13:25:23 2016 -0700

--
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java   | 15 +++
 1 file changed, 11 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b6b01a79/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index c9e6aea..bd9715b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -709,8 +709,7 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
 long ll = blockBuffer.getLongAfterPosition(offsetFromPos);
 klen = (int)(ll >> Integer.SIZE);
 vlen = (int)(Bytes.MASK_FOR_LOWER_INT_IN_LONG ^ ll);
-if (klen < 0 || vlen < 0 || klen > blockBuffer.limit()
-|| vlen > blockBuffer.limit()) {
+if (checkKeyLen(klen) || checkLen(vlen)) {
   throw new IllegalStateException("Invalid klen " + klen + " or vlen "
   + vlen + ". Block offset: "
   + curBlock.getOffset() + ", block length: " + 
blockBuffer.limit() + ", position: "
@@ -725,7 +724,7 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
   // Read short as unsigned, high byte first
   tlen = ((blockBuffer.getByteAfterPosition(offsetFromPos) & 0xff) << 
8)
   ^ (blockBuffer.getByteAfterPosition(offsetFromPos + 1) & 0xff);
-  if (tlen < 0 || tlen > blockBuffer.limit()) {
+  if (checkLen(tlen)) {
 throw new IllegalStateException("Invalid tlen " + tlen + ". Block 
offset: "
 + curBlock.getOffset() + ", block length: " + 
blockBuffer.limit() + ", position: "
 + blockBuffer.position() + " (without header).");
@@ -1141,6 +1140,14 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
 
 /**
  * @param v
+ * @return True if v <= 0 or v > current block buffer limit.
+ */
+protected final boolean checkKeyLen(final int v) {
+  return v <= 0 || v > this.blockBuffer.limit();
+}
+
+/**
+ * @param v
  * @return True if v < 0 or v > current block buffer limit.
  */
 protected final boolean checkLen(final int v) {
@@ -1151,7 +1158,7 @@ public class HFileReaderImpl implements HFile.Reader, 
Configurable {
  * Check key and value lengths are wholesome.
  */
 protected final void checkKeyValueLen() {
-  if (checkLen(this.currKeyLen) || checkLen(this.currValueLen)) {
+  if (checkKeyLen(this.currKeyLen) || checkLen(this.currValueLen)) {
 throw new IllegalStateException("Invalid currKeyLen " + this.currKeyLen
 + " or currValueLen " + this.currValueLen + ". Block offset: "
 + this.curBlock.getOffset() + ", block length: "



hbase git commit: HBASE-12949 Scanner can be stuck in infinite loop if the HFile is corrupted

2016-09-21 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 abfb20206 -> c80d671a0


HBASE-12949 Scanner can be stuck in infinite loop if the HFile is corrupted


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c80d671a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c80d671a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c80d671a

Branch: refs/heads/branch-1
Commit: c80d671a062737c806d4e20a992443e9b6b86b02
Parents: abfb202
Author: Jerry He 
Authored: Wed Sep 21 13:31:18 2016 -0700
Committer: Jerry He 
Committed: Wed Sep 21 13:31:18 2016 -0700

--
 .../apache/hadoop/hbase/io/hfile/HFileReaderV2.java | 16 +++-
 1 file changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c80d671a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index 786d00d..0bca8e5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -970,6 +970,14 @@ public class HFileReaderV2 extends AbstractHFileReader {
 
 /**
  * @param v
+ * @return True if v <= 0 or v > current block buffer limit.
+ */
+protected final boolean checkKeyLen(final int v) {
+  return v <= 0 || v > this.blockBuffer.limit();
+}
+
+/**
+ * @param v
  * @return True if v < 0 or v > current block buffer limit.
  */
 protected final boolean checkLen(final int v) {
@@ -980,7 +988,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
  * Check key and value lengths are wholesome.
  */
 protected final void checkKeyValueLen() {
-  if (checkLen(this.currKeyLen) || checkLen(this.currValueLen)) {
+  if (checkKeyLen(this.currKeyLen) || checkLen(this.currValueLen)) {
 throw new IllegalStateException("Invalid currKeyLen " + 
this.currKeyLen +
   " or currValueLen " + this.currValueLen + ". Block offset: " + 
block.getOffset() +
   ", block length: " + this.blockBuffer.limit() + ", position: " +
@@ -1078,6 +1086,12 @@ public class HFileReaderV2 extends AbstractHFileReader {
 blockBuffer.mark();
 klen = blockBuffer.getInt();
 vlen = blockBuffer.getInt();
+if (checkKeyLen(klen) || checkLen(vlen)) {
+  throw new IllegalStateException("Invalid klen " + klen + " or vlen "
+  + vlen + ". Block offset: "
+  + block.getOffset() + ", block length: " + blockBuffer.limit() + 
", position: "
+  + blockBuffer.position() + " (without header).");
+}
 blockBuffer.reset();
 if (this.reader.shouldIncludeMemstoreTS()) {
   if (this.reader.decodeMemstoreTS) {



hbase git commit: HBASE-15808 Reduce potential bulk load intermediate space usage and waste

2016-05-12 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 98d13c745 -> cadc4cf15


HBASE-15808 Reduce potential bulk load intermediate space usage and waste


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cadc4cf1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cadc4cf1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cadc4cf1

Branch: refs/heads/branch-1
Commit: cadc4cf15bc548e853332cc5fd9a116738d71d8f
Parents: 98d13c7
Author: Jerry He 
Authored: Thu May 12 15:22:56 2016 -0700
Committer: Jerry He 
Committed: Thu May 12 15:22:56 2016 -0700

--
 .../hbase/mapreduce/LoadIncrementalHFiles.java  | 26 --
 .../TestLoadIncrementalHFilesSplitRecovery.java | 36 
 2 files changed, 59 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cadc4cf1/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 07059bc..0893b2e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -121,6 +121,10 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   private static final String ASSIGN_SEQ_IDS = 
"hbase.mapreduce.bulkload.assign.sequenceNumbers";
   public final static String CREATE_TABLE_CONF_KEY = "create.table";
 
+  // We use a '.' prefix which is ignored when walking directory trees
+  // above. It is invalid family name.
+  final static String TMP_DIR = ".tmp";
+
   private int maxFilesPerRegionPerFamily;
   private boolean assignSeqIds;
 
@@ -203,6 +207,14 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   }
   Path familyDir = familyStat.getPath();
   byte[] familyName = familyDir.getName().getBytes();
+  // Skip invalid family
+  try {
+HColumnDescriptor.isLegalFamilyName(familyName);
+  }
+  catch (IllegalArgumentException e) {
+LOG.warn("Skipping invalid " + familyStat.getPath());
+continue;
+  }
   TFamily family = visitor.bulkFamily(familyName);
 
   FileStatus[] hfileStatuses = fs.listStatus(familyDir);
@@ -660,9 +672,6 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   byte[] splitKey) throws IOException {
 final Path hfilePath = item.hfilePath;
 
-// We use a '_' prefix which is ignored when walking directory trees
-// above.
-final String TMP_DIR = "_tmp";
 Path tmpDir = item.hfilePath.getParent();
 if (!tmpDir.getName().equals(TMP_DIR)) {
   tmpDir = new Path(tmpDir, TMP_DIR);
@@ -689,6 +698,17 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
 lqis.add(new LoadQueueItem(item.family, botOut));
 lqis.add(new LoadQueueItem(item.family, topOut));
 
+// If the current item is already the result of previous splits,
+// we don't need it anymore. Clean up to save space.
+// It is not part of the original input files.
+try {
+  tmpDir = item.hfilePath.getParent();
+  if (tmpDir.getName().equals(TMP_DIR)) {
+fs.delete(item.hfilePath, false);
+  }
+} catch (IOException e) {
+  LOG.warn("Unable to delete temporary split file " + item.hfilePath);
+}
 LOG.info("Successfully split into new HFiles " + botOut + " and " + 
topOut);
 return lqis;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cadc4cf1/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index e3024d0..26583f3 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.mapreduce;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -61,6 +62,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequ
 import org.apache.hadoop.hbase.regionserver.HRegionServer;

hbase git commit: HBASE-15808 Reduce potential bulk load intermediate space usage and waste

2016-05-12 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 258d96021 -> 6e3770b71


HBASE-15808 Reduce potential bulk load intermediate space usage and waste


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6e3770b7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6e3770b7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6e3770b7

Branch: refs/heads/branch-1.2
Commit: 6e3770b7167ca1b54380603c2e1cf3f232005680
Parents: 258d960
Author: Jerry He 
Authored: Thu May 12 15:22:56 2016 -0700
Committer: Jerry He 
Committed: Thu May 12 15:23:57 2016 -0700

--
 .../hbase/mapreduce/LoadIncrementalHFiles.java  | 26 --
 .../TestLoadIncrementalHFilesSplitRecovery.java | 36 
 2 files changed, 59 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6e3770b7/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 9630a35..15444ff 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -123,6 +123,10 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   private static final String ASSIGN_SEQ_IDS = 
"hbase.mapreduce.bulkload.assign.sequenceNumbers";
   public final static String CREATE_TABLE_CONF_KEY = "create.table";
 
+  // We use a '.' prefix which is ignored when walking directory trees
+  // above. It is invalid family name.
+  final static String TMP_DIR = ".tmp";
+
   private int maxFilesPerRegionPerFamily;
   private boolean assignSeqIds;
 
@@ -202,6 +206,14 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   }
   Path familyDir = familyStat.getPath();
   byte[] familyName = familyDir.getName().getBytes();
+  // Skip invalid family
+  try {
+HColumnDescriptor.isLegalFamilyName(familyName);
+  }
+  catch (IllegalArgumentException e) {
+LOG.warn("Skipping invalid " + familyStat.getPath());
+continue;
+  }
   TFamily family = visitor.bulkFamily(familyName);
 
   FileStatus[] hfileStatuses = fs.listStatus(familyDir);
@@ -611,9 +623,6 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   byte[] splitKey) throws IOException {
 final Path hfilePath = item.hfilePath;
 
-// We use a '_' prefix which is ignored when walking directory trees
-// above.
-final String TMP_DIR = "_tmp";
 Path tmpDir = item.hfilePath.getParent();
 if (!tmpDir.getName().equals(TMP_DIR)) {
   tmpDir = new Path(tmpDir, TMP_DIR);
@@ -640,6 +649,17 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
 lqis.add(new LoadQueueItem(item.family, botOut));
 lqis.add(new LoadQueueItem(item.family, topOut));
 
+// If the current item is already the result of previous splits,
+// we don't need it anymore. Clean up to save space.
+// It is not part of the original input files.
+try {
+  tmpDir = item.hfilePath.getParent();
+  if (tmpDir.getName().equals(TMP_DIR)) {
+fs.delete(item.hfilePath, false);
+  }
+} catch (IOException e) {
+  LOG.warn("Unable to delete temporary split file " + item.hfilePath);
+}
 LOG.info("Successfully split into new HFiles " + botOut + " and " + 
topOut);
 return lqis;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6e3770b7/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index e3024d0..26583f3 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.mapreduce;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -61,6 +62,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequ
 import org.apache.hadoop.hbase.regionserver.HRegionServ

hbase git commit: HBASE-15808 Reduce potential bulk load intermediate space usage and waste

2016-05-12 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 921f745b2 -> 0b59341d2


HBASE-15808 Reduce potential bulk load intermediate space usage and waste


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b59341d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b59341d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b59341d

Branch: refs/heads/branch-1.3
Commit: 0b59341d2442f9d3ac591676f3277eb3a887005b
Parents: 921f745
Author: Jerry He 
Authored: Thu May 12 15:22:56 2016 -0700
Committer: Jerry He 
Committed: Thu May 12 15:29:56 2016 -0700

--
 .../hbase/mapreduce/LoadIncrementalHFiles.java  | 26 --
 .../TestLoadIncrementalHFilesSplitRecovery.java | 36 
 2 files changed, 59 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0b59341d/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 07059bc..0893b2e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -121,6 +121,10 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   private static final String ASSIGN_SEQ_IDS = 
"hbase.mapreduce.bulkload.assign.sequenceNumbers";
   public final static String CREATE_TABLE_CONF_KEY = "create.table";
 
+  // We use a '.' prefix which is ignored when walking directory trees
+  // above. It is invalid family name.
+  final static String TMP_DIR = ".tmp";
+
   private int maxFilesPerRegionPerFamily;
   private boolean assignSeqIds;
 
@@ -203,6 +207,14 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   }
   Path familyDir = familyStat.getPath();
   byte[] familyName = familyDir.getName().getBytes();
+  // Skip invalid family
+  try {
+HColumnDescriptor.isLegalFamilyName(familyName);
+  }
+  catch (IllegalArgumentException e) {
+LOG.warn("Skipping invalid " + familyStat.getPath());
+continue;
+  }
   TFamily family = visitor.bulkFamily(familyName);
 
   FileStatus[] hfileStatuses = fs.listStatus(familyDir);
@@ -660,9 +672,6 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   byte[] splitKey) throws IOException {
 final Path hfilePath = item.hfilePath;
 
-// We use a '_' prefix which is ignored when walking directory trees
-// above.
-final String TMP_DIR = "_tmp";
 Path tmpDir = item.hfilePath.getParent();
 if (!tmpDir.getName().equals(TMP_DIR)) {
   tmpDir = new Path(tmpDir, TMP_DIR);
@@ -689,6 +698,17 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
 lqis.add(new LoadQueueItem(item.family, botOut));
 lqis.add(new LoadQueueItem(item.family, topOut));
 
+// If the current item is already the result of previous splits,
+// we don't need it anymore. Clean up to save space.
+// It is not part of the original input files.
+try {
+  tmpDir = item.hfilePath.getParent();
+  if (tmpDir.getName().equals(TMP_DIR)) {
+fs.delete(item.hfilePath, false);
+  }
+} catch (IOException e) {
+  LOG.warn("Unable to delete temporary split file " + item.hfilePath);
+}
 LOG.info("Successfully split into new HFiles " + botOut + " and " + 
topOut);
 return lqis;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0b59341d/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index e3024d0..26583f3 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.mapreduce;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -61,6 +62,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequ
 import org.apache.hadoop.hbase.regionserver.HRegionServ

hbase git commit: HBASE-15808 Reduce potential bulk load intermediate space usage and waste

2016-05-12 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 1267f76e9 -> acca95fb5


HBASE-15808 Reduce potential bulk load intermediate space usage and waste


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/acca95fb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/acca95fb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/acca95fb

Branch: refs/heads/master
Commit: acca95fb5046a52ad26ca84fae1adeab932472a6
Parents: 1267f76
Author: Jerry He 
Authored: Thu May 12 15:43:48 2016 -0700
Committer: Jerry He 
Committed: Thu May 12 15:43:48 2016 -0700

--
 .../hbase/mapreduce/LoadIncrementalHFiles.java  | 26 --
 .../TestLoadIncrementalHFilesSplitRecovery.java | 38 
 2 files changed, 61 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/acca95fb/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 86a84a4..0084878 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -118,6 +118,10 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   private static final String ASSIGN_SEQ_IDS = 
"hbase.mapreduce.bulkload.assign.sequenceNumbers";
   public final static String CREATE_TABLE_CONF_KEY = "create.table";
 
+  // We use a '.' prefix which is ignored when walking directory trees
+  // above. It is invalid family name.
+  final static String TMP_DIR = ".tmp";
+
   private int maxFilesPerRegionPerFamily;
   private boolean assignSeqIds;
 
@@ -201,6 +205,14 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   }
   Path familyDir = familyStat.getPath();
   byte[] familyName = familyDir.getName().getBytes();
+  // Skip invalid family
+  try {
+HColumnDescriptor.isLegalFamilyName(familyName);
+  }
+  catch (IllegalArgumentException e) {
+LOG.warn("Skipping invalid " + familyStat.getPath());
+continue;
+  }
   TFamily family = visitor.bulkFamily(familyName);
 
   FileStatus[] hfileStatuses = fs.listStatus(familyDir);
@@ -632,9 +644,6 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
   byte[] splitKey) throws IOException {
 final Path hfilePath = item.hfilePath;
 
-// We use a '_' prefix which is ignored when walking directory trees
-// above.
-final String TMP_DIR = "_tmp";
 Path tmpDir = item.hfilePath.getParent();
 if (!tmpDir.getName().equals(TMP_DIR)) {
   tmpDir = new Path(tmpDir, TMP_DIR);
@@ -661,6 +670,17 @@ public class LoadIncrementalHFiles extends Configured 
implements Tool {
 lqis.add(new LoadQueueItem(item.family, botOut));
 lqis.add(new LoadQueueItem(item.family, topOut));
 
+// If the current item is already the result of previous splits,
+// we don't need it anymore. Clean up to save space.
+// It is not part of the original input files.
+try {
+  tmpDir = item.hfilePath.getParent();
+  if (tmpDir.getName().equals(TMP_DIR)) {
+fs.delete(item.hfilePath, false);
+  }
+} catch (IOException e) {
+  LOG.warn("Unable to delete temporary split file " + item.hfilePath);
+}
 LOG.info("Successfully split into new HFiles " + botOut + " and " + 
topOut);
 return lqis;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/acca95fb/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index 32e3058..0975fd2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.mapreduce;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -63,6 +64,7 @@ import 
org.apache.hadoop.hbase.regionserver.TestHRegionServerBulkLoad;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org

hbase git commit: HBASE-15437 Response size calculated in RPCServer for warning tooLarge responses does NOT count CellScanner payload

2016-12-07 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 6f25f838c -> 75567f828


HBASE-15437 Response size calculated in RPCServer for warning tooLarge 
responses does NOT count CellScanner payload


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75567f82
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75567f82
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75567f82

Branch: refs/heads/master
Commit: 75567f828c3c07204f99dd3d2fbbd1686cebcb26
Parents: 6f25f83
Author: Jerry He 
Authored: Wed Dec 7 14:47:10 2016 -0800
Committer: Jerry He 
Committed: Wed Dec 7 14:47:10 2016 -0800

--
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  7 -
 .../hbase/regionserver/RSRpcServices.java   | 27 +---
 2 files changed, 18 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/75567f82/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 8b6379b..96f506f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -2659,8 +2659,13 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 " processingTime: " + processingTime +
 " totalTime: " + totalTime);
   }
-  long requestSize = param.getSerializedSize();
+  // Use the raw request call size for now.
+  long requestSize = call.getSize();
   long responseSize = result.getSerializedSize();
+  if (call.isClientCellBlockSupported()) {
+// Include the payload size in HBaseRpcController
+responseSize += call.getResponseCellSize();
+  }
 
   metrics.dequeuedCall(qTime);
   metrics.processedCall(processingTime);

http://git-wip-us.apache.org/repos/asf/hbase/blob/75567f82/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 07e16c8..7ec4f68 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -460,22 +460,14 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
 }
   }
 
-  /**
-   * @return True if current call supports cellblocks
-   */
-  private boolean isClientCellBlockSupport() {
-RpcCallContext context = RpcServer.getCurrentCall();
-return context != null && context.isClientCellBlockSupported();
-  }
-
   private boolean isClientCellBlockSupport(RpcCallContext context) {
 return context != null && context.isClientCellBlockSupported();
   }
 
   private void addResult(final MutateResponse.Builder builder, final Result 
result,
-  final HBaseRpcController rpcc) {
+  final HBaseRpcController rpcc, boolean clientCellBlockSupported) {
 if (result == null) return;
-if (isClientCellBlockSupport()) {
+if (clientCellBlockSupported) {
   builder.setResult(ProtobufUtil.toResultNoData(result));
   rpcc.setCellScanner(result.cellScanner());
 } else {
@@ -1167,7 +1159,7 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
* @return an object that represents the last referenced block from this 
response.
*/
   Object addSize(RpcCallContext context, Result r, Object lastBlock) {
-if (context != null && !r.isEmpty()) {
+if (context != null && r != null && !r.isEmpty()) {
   for (Cell c : r.rawCells()) {
 context.incrementResponseCellSize(CellUtil.estimatedHeapSizeOf(c));
 
@@ -2299,12 +2291,12 @@ public class RSRpcServices implements 
HBaseRPCErrorHandler,
 builder.setResult(pbr);
   } else if (r != null) {
 ClientProtos.Result pbr;
-RpcCallContext call = RpcServer.getCurrentCall();
-if (isClientCellBlockSupport(call) && controller instanceof 
HBaseRpcController
-&& VersionInfoUtil.hasMinimumVersion(call.getClientVersionInfo(), 
1, 3)) {
+if (isClientCellBlockSupport(context) && controller instanceof 
HBaseRpcController
+&& 
VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 3)) {
   pbr = ProtobufUtil.toResultNoData(r);
   ((HBaseRpcController) 
controller).setCellScanner(CellUtil.createCellScanner(r
   .rawCells()));
+  addSize(cont

[3/3] hbase git commit: HBASE-16010 Put draining function through Admin API (Matt Warhaftig)

2016-12-23 Thread jerryjch
HBASE-16010 Put draining function through Admin API (Matt Warhaftig)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/992e5717
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/992e5717
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/992e5717

Branch: refs/heads/master
Commit: 992e5717d4e4deeef46836acea323a312b1e0851
Parents: 8fb9a91
Author: Jerry He 
Authored: Fri Dec 23 13:41:36 2016 -0800
Committer: Jerry He 
Committed: Fri Dec 23 13:41:36 2016 -0800

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   19 +
 .../hbase/client/ConnectionImplementation.java  |   25 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   58 +
 .../shaded/protobuf/generated/MasterProtos.java | 3995 +-
 .../src/main/protobuf/Master.proto  |   33 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   50 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   51 +
 .../hadoop/hbase/master/MasterServices.java |   20 +
 .../apache/hadoop/hbase/client/TestAdmin2.java  |   77 +
 .../hbase/master/MockNoopMasterServices.java|   15 +
 .../hbase/zookeeper/TestZooKeeperACL.java   |   21 +
 11 files changed, 4217 insertions(+), 147 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index e7ea4d9..fe3960f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1858,4 +1858,23 @@ public interface Admin extends Abortable, Closeable {
*/
   default void disableReplicationPeer(final String peerId) throws IOException {
   }
+
+  /**
+   * Mark a region server as draining to prevent additional regions from 
getting assigned to it.
+   * @param servers List of region servers to drain.
+   */
+  void drainRegionServers(List<ServerName> servers) throws IOException;
+
+  /**
+   * List region servers marked as draining to not get additional regions 
assigned to them.
+   * @return List of draining region servers.
+   */
+  List<ServerName> listDrainingRegionServers() throws IOException;
+
+  /**
+   * Remove drain from a region server to allow additional regions assignments.
+   * @param servers List of region servers to remove drain from.
+   */
+  void removeDrainFromRegionServers(List<ServerName> servers) throws IOException;
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index ff939aa..a597be3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -78,12 +78,18 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse;
 import 
org.apache.hadoop.hb

[2/3] hbase git commit: HBASE-16010 Put draining function through Admin API (Matt Warhaftig)

2016-12-23 Thread jerryjch
http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 2af3982..1794a49 100644
--- 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -65611,6 +65611,3387 @@ public final class MasterProtos {
 
   }
 
+  public interface ListDrainingRegionServersRequestOrBuilder extends
+  // 
@@protoc_insertion_point(interface_extends:hbase.pb.ListDrainingRegionServersRequest)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.ListDrainingRegionServersRequest}
+   */
+  public  static final class ListDrainingRegionServersRequest extends
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+  // 
@@protoc_insertion_point(message_implements:hbase.pb.ListDrainingRegionServersRequest)
+  ListDrainingRegionServersRequestOrBuilder {
+// Use ListDrainingRegionServersRequest.newBuilder() to construct.
+private 
ListDrainingRegionServersRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+  super(builder);
+}
+private ListDrainingRegionServersRequest() {
+}
+
+@java.lang.Override
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private ListDrainingRegionServersRequest(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+  this();
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder 
unknownFields =
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+  }
+}
+  } catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+e).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersRequest_descriptor;
+}
+
+protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.class,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.Builder.class);
+}
+
+private byte memoizedIsInitialized = -1;
+public final boolean isInitialized() {
+  byte isInitialized = memoizedIsInitialized;
+  if (isInitialized == 1) return true;
+  if (isInitialized == 0) return false;
+
+  memoizedIsInitialized = 1;
+  return true;
+}
+
+public void 
writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream 
output)
+throws java.io.IOException {
+  unknownFields.writeTo(output);
+}
+
+public int getSerializedSize() {
+  int size = memoizedSize;
+  if (size != -1) return size;
+
+  size = 0;
+  size += unknownFields.getSerializedSize();
+  memoizedSize = size;
+  return size;
+}
+
+private static final

[1/3] hbase git commit: HBASE-16010 Put draining function through Admin API (Matt Warhaftig)

2016-12-23 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 8fb9a91d4 -> 992e5717d


http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-protocol-shaded/src/main/protobuf/Master.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto 
b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index f4e7da6..0a000ee 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -568,6 +568,27 @@ message SecurityCapabilitiesResponse {
   repeated Capability capabilities = 1;
 }
 
+message ListDrainingRegionServersRequest {
+}
+
+message ListDrainingRegionServersResponse {
+ repeated ServerName server_name = 1;
+}
+
+message DrainRegionServersRequest {
+ repeated ServerName server_name = 1;
+}
+
+message DrainRegionServersResponse {
+}
+
+message RemoveDrainFromRegionServersRequest {
+ repeated ServerName server_name = 1;
+}
+
+message RemoveDrainFromRegionServersResponse {
+}
+
 service MasterService {
   /** Used by the client to get the number of regions that have received the 
updated schema */
   rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
@@ -863,4 +884,16 @@ service MasterService {
   /** Disable a replication peer */
   rpc DisableReplicationPeer(DisableReplicationPeerRequest)
 returns(DisableReplicationPeerResponse);
+
+  /** Returns a list of ServerNames marked as draining. */
+  rpc listDrainingRegionServers(ListDrainingRegionServersRequest)
+returns(ListDrainingRegionServersResponse);
+
+  /** Mark a list of ServerNames as draining. */
+  rpc drainRegionServers(DrainRegionServersRequest)
+returns(DrainRegionServersResponse);
+
+  /** Unmark a list of ServerNames marked as draining. */
+  rpc removeDrainFromRegionServers(RemoveDrainFromRegionServersRequest)
+returns(RemoveDrainFromRegionServersResponse);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 6b135d9..613c5c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3196,4 +3196,54 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   cpHost.postDisableReplicationPeer(peerId);
 }
   }
+
+  @Override
+  public void drainRegionServer(final ServerName server) {
+String parentZnode = getZooKeeper().znodePaths.drainingZNode;
+try {
+  String node = ZKUtil.joinZNode(parentZnode, server.getServerName());
+  ZKUtil.createAndFailSilent(getZooKeeper(), node);
+} catch (KeeperException ke) {
+  LOG.warn(this.zooKeeper.prefix("Unable to add drain for '" + 
server.getServerName() + "'."),
+ke);
+}
+  }
+
+  @Override
+  public List<ServerName> listDrainingRegionServers() {
+String parentZnode = getZooKeeper().znodePaths.drainingZNode;
+List<ServerName> serverNames = new ArrayList<ServerName>();
+List<String> serverStrs = null;
+try {
+  serverStrs = ZKUtil.listChildrenNoWatch(getZooKeeper(), parentZnode);
+} catch (KeeperException ke) {
+  LOG.warn(this.zooKeeper.prefix("Unable to list draining servers."), ke);
+}
+// No nodes is empty draining list or ZK connectivity issues.
+if (serverStrs == null) {
+  return serverNames;
+}
+
+// Skip invalid ServerNames in result
+for (String serverStr : serverStrs) {
+  try {
+serverNames.add(ServerName.parseServerName(serverStr));
+  } catch (IllegalArgumentException iae) {
+LOG.warn("Unable to cast '" + serverStr + "' to ServerName.", iae);
+  }
+}
+return serverNames;
+  }
+
+  @Override
+  public void removeDrainFromRegionServer(ServerName server) {
+String parentZnode = getZooKeeper().znodePaths.drainingZNode;
+String node = ZKUtil.joinZNode(parentZnode, server.getServerName());
+try {
+  ZKUtil.deleteNodeFailSilent(getZooKeeper(), node);
+} catch (KeeperException ke) {
+  LOG.warn(
+this.zooKeeper.prefix("Unable to remove drain for '" + 
server.getServerName() + "'."), ke);
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 8ee72c6..76da838 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpc

hbase git commit: HBASE-16869 Fixed typo in 'Disabling Blockcache' doc

2017-01-01 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 0e4866564 -> 521730ebc


HBASE-16869 Fixed typo in 'Disabling Blockcache' doc

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/521730eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/521730eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/521730eb

Branch: refs/heads/master
Commit: 521730ebc71995df514748b03554e56bfd8beafa
Parents: 0e48665
Author: Jan Hentschel 
Authored: Sun Jan 1 20:53:52 2017 +0100
Committer: Jerry He 
Committed: Sun Jan 1 12:45:21 2017 -0800

--
 src/main/asciidoc/_chapters/configuration.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/521730eb/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc 
b/src/main/asciidoc/_chapters/configuration.adoc
index 6e356bc..baa4d4c 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -938,7 +938,7 @@ See 
<> for more inf
 [[disabling.blockcache]]
  Disabling Blockcache
 
-Do not turn off block cache (You'd do it by setting `hbase.block.cache.size` 
to zero). Currently we do not do well if you do this because the RegionServer 
will spend all its time loading HFile indices over and over again.
+Do not turn off block cache (You'd do it by setting `hfile.block.cache.size` 
to zero). Currently we do not do well if you do this because the RegionServer 
will spend all its time loading HFile indices over and over again.
 If your working set is such that block cache does you no good, at least size 
the block cache such that HFile indices will stay up in the cache (you can get 
a rough idea on the size you need by surveying RegionServer UIs; you'll see 
index block size accounted near the top of the webpage).
 
 [[nagles]]



hbase git commit: HBASE-17390 Added master and backup masters to online update of configuration

2017-01-04 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 20a7ae286 -> dba103e1b


HBASE-17390 Added master and backup masters to online update of configuration

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dba103e1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dba103e1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dba103e1

Branch: refs/heads/master
Commit: dba103e1b6e27330a960513c65af49254d56078b
Parents: 20a7ae2
Author: Jan Hentschel 
Authored: Thu Dec 29 18:55:22 2016 +0100
Committer: Jerry He 
Committed: Wed Jan 4 22:08:05 2017 -0800

--
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  6 +++
 .../hbase/client/TestUpdateConfiguration.java   | 40 +++-
 2 files changed, 45 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dba103e1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 89d1b49..3c84929 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -3033,6 +3033,12 @@ public class HBaseAdmin implements Admin {
 for (ServerName server : this.getClusterStatus().getServers()) {
   updateConfiguration(server);
 }
+
+updateConfiguration(this.getClusterStatus().getMaster());
+
+for (ServerName server : this.getClusterStatus().getBackupMasters()) {
+  updateConfiguration(server);
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/dba103e1/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java
index 73e493b..731e02f 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java
@@ -43,7 +43,7 @@ public class TestUpdateConfiguration {
   
   @BeforeClass
   public static void setup() throws Exception {
-TEST_UTIL.startMiniCluster();
+TEST_UTIL.startMiniCluster(2, 1);
   }
 
   @Test
@@ -74,4 +74,42 @@ public class TestUpdateConfiguration {
 // restore hbase-site.xml
 Files.copy(cnf3Path, cnfPath, StandardCopyOption.REPLACE_EXISTING);
   }
+
+  @Test
+  public void testAllOnlineConfigChange() throws IOException {
+LOG.debug("Starting the test");
+Admin admin = TEST_UTIL.getAdmin();
+admin.updateConfiguration();
+  }
+
+  @Test
+  public void testAllCustomOnlineConfigChange() throws IOException {
+LOG.debug("Starting the test");
+Path cnfPath = 
FileSystems.getDefault().getPath("target/test-classes/hbase-site.xml");
+Path cnf2Path = 
FileSystems.getDefault().getPath("target/test-classes/hbase-site2.xml");
+Path cnf3Path = 
FileSystems.getDefault().getPath("target/test-classes/hbase-site3.xml");
+// make a backup of hbase-site.xml
+Files.copy(cnfPath, cnf3Path, StandardCopyOption.REPLACE_EXISTING);
+// update hbase-site.xml by overwriting it
+Files.copy(cnf2Path, cnfPath, StandardCopyOption.REPLACE_EXISTING);
+
+Admin admin = TEST_UTIL.getAdmin();
+admin.updateConfiguration();
+
+// Check the configuration of the Masters
+Configuration masterConfiguration = 
TEST_UTIL.getMiniHBaseCluster().getMaster(0).getConfiguration();
+int custom = masterConfiguration.getInt("hbase.custom.config", 0);
+assertEquals(custom, 1000);
+Configuration backupMasterConfiguration = 
TEST_UTIL.getMiniHBaseCluster().getMaster(1).getConfiguration();
+custom = backupMasterConfiguration.getInt("hbase.custom.config", 0);
+assertEquals(custom, 1000);
+
+// Check the configuration of the RegionServer
+Configuration regionServerConfiguration = 
TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration();
+custom = regionServerConfiguration.getInt("hbase.custom.config", 0);
+assertEquals(custom, 1000);
+
+// restore hbase-site.xml
+Files.copy(cnf3Path, cnfPath, StandardCopyOption.REPLACE_EXISTING);
+  }
 }



hbase git commit: HBASE-17390 Added master and backup masters to online update of configuration

2017-01-04 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 db410ea7c -> a9bac6a49


HBASE-17390 Added master and backup masters to online update of configuration

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a9bac6a4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a9bac6a4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a9bac6a4

Branch: refs/heads/branch-1
Commit: a9bac6a496f051ac47ce017ba588ada92d6c57c3
Parents: db410ea
Author: Jan Hentschel 
Authored: Thu Dec 29 18:55:22 2016 +0100
Committer: Jerry He 
Committed: Wed Jan 4 22:25:22 2017 -0800

--
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  6 +++
 .../hbase/client/TestUpdateConfiguration.java   | 40 +++-
 2 files changed, 45 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a9bac6a4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index a963921..cbc83dc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -4491,6 +4491,12 @@ public class HBaseAdmin implements Admin {
 for (ServerName server : this.getClusterStatus().getServers()) {
   updateConfiguration(server);
 }
+
+updateConfiguration(this.getClusterStatus().getMaster());
+
+for (ServerName server : this.getClusterStatus().getBackupMasters()) {
+  updateConfiguration(server);
+}
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/a9bac6a4/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java
index e2af1ab..226c9e7 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestUpdateConfiguration.java
@@ -43,7 +43,7 @@ public class TestUpdateConfiguration {
 
   @BeforeClass
   public static void setup() throws Exception {
-TEST_UTIL.startMiniCluster();
+TEST_UTIL.startMiniCluster(2, 1);
   }
 
   @Test
@@ -74,4 +74,42 @@ public class TestUpdateConfiguration {
 // restore hbase-site.xml
 Files.copy(cnf3Path, cnfPath, StandardCopyOption.REPLACE_EXISTING);
   }
+
+  @Test
+  public void testAllOnlineConfigChange() throws IOException {
+LOG.debug("Starting the test");
+Admin admin = TEST_UTIL.getHBaseAdmin();
+admin.updateConfiguration();
+  }
+
+  @Test
+  public void testAllCustomOnlineConfigChange() throws IOException {
+LOG.debug("Starting the test");
+Path cnfPath = 
FileSystems.getDefault().getPath("target/test-classes/hbase-site.xml");
+Path cnf2Path = 
FileSystems.getDefault().getPath("target/test-classes/hbase-site2.xml");
+Path cnf3Path = 
FileSystems.getDefault().getPath("target/test-classes/hbase-site3.xml");
+// make a backup of hbase-site.xml
+Files.copy(cnfPath, cnf3Path, StandardCopyOption.REPLACE_EXISTING);
+// update hbase-site.xml by overwriting it
+Files.copy(cnf2Path, cnfPath, StandardCopyOption.REPLACE_EXISTING);
+
+Admin admin = TEST_UTIL.getHBaseAdmin();
+admin.updateConfiguration();
+
+// Check the configuration of the Masters
+Configuration masterConfiguration = 
TEST_UTIL.getMiniHBaseCluster().getMaster(0).getConfiguration();
+int custom = masterConfiguration.getInt("hbase.custom.config", 0);
+assertEquals(custom, 1000);
+Configuration backupMasterConfiguration = 
TEST_UTIL.getMiniHBaseCluster().getMaster(1).getConfiguration();
+custom = backupMasterConfiguration.getInt("hbase.custom.config", 0);
+assertEquals(custom, 1000);
+
+// Check the configuration of the RegionServer
+Configuration regionServerConfiguration = 
TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration();
+custom = regionServerConfiguration.getInt("hbase.custom.config", 0);
+assertEquals(custom, 1000);
+
+// restore hbase-site.xml
+Files.copy(cnf3Path, cnfPath, StandardCopyOption.REPLACE_EXISTING);
+  }
 }



hbase git commit: HBASE-17430 Changed link from Google search to a direct link in docs

2017-01-08 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master f92a14ade -> 97fd9051f


HBASE-17430 Changed link from Google search to a direct link in docs

Signed-off-by: Jerry He 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/97fd9051
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/97fd9051
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/97fd9051

Branch: refs/heads/master
Commit: 97fd9051f44e8fbe4ca99789bad7c35ede389b88
Parents: f92a14a
Author: Jan Hentschel 
Authored: Fri Jan 6 14:34:36 2017 +0100
Committer: Jerry He 
Committed: Sun Jan 8 20:39:44 2017 -0800

--
 src/main/asciidoc/_chapters/architecture.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/97fd9051/src/main/asciidoc/_chapters/architecture.adoc
--
diff --git a/src/main/asciidoc/_chapters/architecture.adoc 
b/src/main/asciidoc/_chapters/architecture.adoc
index 339566a..e51cb14 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -873,7 +873,7 @@ The compressed BlockCache is disabled by default. To enable 
it, set `hbase.block
 
 As write requests are handled by the region server, they accumulate in an 
in-memory storage system called the _memstore_. Once the memstore fills, its 
content are written to disk as additional store files. This event is called a 
_memstore flush_. As store files accumulate, the RegionServer will 
<<compaction,compact>> them into fewer, larger files. After each flush or 
compaction finishes, the amount of data stored in the region has changed. The 
RegionServer consults the region split policy to determine if the region has 
grown too large or should be split for another policy-specific reason. A region 
split request is enqueued if the policy recommends it.
 
-Logically, the process of splitting a region is simple. We find a suitable 
point in the keyspace of the region where we should divide the region in half, 
then split the region's data into two new regions at that point. The details of 
the process however are not simple.  When a split happens, the newly created 
_daughter regions_ do not rewrite all the data into new files immediately. 
Instead, they create small files similar to symbolic link files, named 
link:http://www.google.com/url?q=http%3A%2F%2Fhbase.apache.org%2Fapidocs%2Forg%2Fapache%2Fhadoop%2Fhbase%2Fio%2FReference.html&sa=D&sntz=1&usg=AFQjCNEkCbADZ3CgKHTtGYI8bJVwp663CA[Reference
 files], which point to either the top or bottom part of the parent store file 
according to the split point. The reference file is used just like a regular 
data file, but only half of the records are considered. The region can only be 
split if there are no more references to the immutable data files of the parent 
region. Those reference files are cleaned gradually by compactions, so that the region will stop referring to its 
parents files, and can be split further.
+Logically, the process of splitting a region is simple. We find a suitable 
point in the keyspace of the region where we should divide the region in half, 
then split the region's data into two new regions at that point. The details of 
the process however are not simple.  When a split happens, the newly created 
_daughter regions_ do not rewrite all the data into new files immediately. 
Instead, they create small files similar to symbolic link files, named 
link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/io/Reference.html[Reference
 files], which point to either the top or bottom part of the parent store file 
according to the split point. The reference file is used just like a regular 
data file, but only half of the records are considered. The region can only be 
split if there are no more references to the immutable data files of the parent 
region. Those reference files are cleaned gradually by compactions, so that the 
region will stop referring to its parents files, and can be split further.
 
 Although splitting the region is a local decision made by the RegionServer, 
the split process itself must coordinate with many actors. The RegionServer 
notifies the Master before and after the split, updates the `.META.` table so 
that clients can discover the new daughter regions, and rearranges the 
directory structure and data files in HDFS. Splitting is a multi-task process. 
To enable rollback in case of an error, the RegionServer keeps an in-memory 
journal about the execution state. The steps taken by the RegionServer to 
execute the split are illustrated in <<regionserver_split_process_image>>. Each 
step is labeled with its step number. Actions from RegionServers or Master are 
shown in red, while actions from the clients are shown in green.
 



hbase git commit: HBASE-17502 Document hadoop pre-2.6.1 and Java 1.8 Kerberos problem in our hadoop support matrix

2017-01-21 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master fb8f9247d -> 9a9e3df85


HBASE-17502 Document hadoop pre-2.6.1 and Java 1.8 Kerberos problem in our 
hadoop support matrix


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a9e3df8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a9e3df8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a9e3df8

Branch: refs/heads/master
Commit: 9a9e3df8565a8ba36779a442907a38772ab59a72
Parents: fb8f924
Author: Jerry He 
Authored: Sat Jan 21 13:24:33 2017 -0800
Committer: Jerry He 
Committed: Sat Jan 21 13:24:33 2017 -0800

--
 src/main/asciidoc/_chapters/configuration.adoc | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a9e3df8/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc 
b/src/main/asciidoc/_chapters/configuration.adoc
index baa4d4c..d189c9f 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -243,6 +243,15 @@ Use the following legend to interpret this table:
 |Hadoop-2.7.1+ | NT | NT | NT | NT | S | S | S
 |===
 
+.Hadoop Pre-2.6.1 and JDK 1.8 Kerberos
+[TIP]
+
+When using pre-2.6.1 Hadoop versions and JDK 1.8 in a Kerberos environment, 
the HBase server can fail
+and abort due to a Kerberos keytab relogin error. Late versions of JDK 1.7 
(1.7.0_80) have the problem too.
+Refer to link:https://issues.apache.org/jira/browse/HADOOP-10786[HADOOP-10786] 
for additional details.
+Consider upgrading to Hadoop 2.6.1+ in this case.
+
+
 .Hadoop 2.6.x
 [TIP]
 



hbase git commit: HBASE-17581 mvn clean test -PskipXXXTests does not work properly for some modules (Yi Liang)

2017-02-02 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master f8b1f57b0 -> bc168b419


HBASE-17581 mvn clean test -PskipXXXTests does not work properly for some 
modules (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bc168b41
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bc168b41
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bc168b41

Branch: refs/heads/master
Commit: bc168b419d20e64512086651eaa9de87d0a4a4ce
Parents: f8b1f57
Author: Jerry He 
Authored: Thu Feb 2 11:05:17 2017 -0800
Committer: Jerry He 
Committed: Thu Feb 2 11:05:17 2017 -0800

--
 hbase-client/pom.xml | 1 +
 hbase-common/pom.xml | 1 +
 hbase-endpoint/pom.xml   | 1 +
 hbase-hadoop-compat/pom.xml  | 1 +
 hbase-hadoop2-compat/pom.xml | 1 +
 hbase-metrics-api/pom.xml| 1 +
 hbase-metrics/pom.xml| 1 +
 hbase-prefix-tree/pom.xml| 1 +
 hbase-protocol/pom.xml   | 1 +
 hbase-rsgroup/pom.xml| 1 +
 10 files changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bc168b41/hbase-client/pom.xml
--
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index b7816f6..d76049f 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -212,6 +212,7 @@
   
   
 true
+true
   
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc168b41/hbase-common/pom.xml
--
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index cf2772e..baabe56 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -309,6 +309,7 @@
   
   
 true
+true
   
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc168b41/hbase-endpoint/pom.xml
--
diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml
index 4ba3936..6601d4b 100644
--- a/hbase-endpoint/pom.xml
+++ b/hbase-endpoint/pom.xml
@@ -162,6 +162,7 @@
 
 
 true
+true
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc168b41/hbase-hadoop-compat/pom.xml
--
diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml
index 7fee08c..ebaf073 100644
--- a/hbase-hadoop-compat/pom.xml
+++ b/hbase-hadoop-compat/pom.xml
@@ -123,6 +123,7 @@
 
 
 true
+true
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc168b41/hbase-hadoop2-compat/pom.xml
--
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index 560b66d..c9705dd 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -198,6 +198,7 @@ limitations under the License.
 
 
 true
+true
 
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc168b41/hbase-metrics-api/pom.xml
--
diff --git a/hbase-metrics-api/pom.xml b/hbase-metrics-api/pom.xml
index b5466e9..4900b8f 100644
--- a/hbase-metrics-api/pom.xml
+++ b/hbase-metrics-api/pom.xml
@@ -106,6 +106,7 @@
   
   
 true
+true
   
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc168b41/hbase-metrics/pom.xml
--
diff --git a/hbase-metrics/pom.xml b/hbase-metrics/pom.xml
index 2ff9470..e2c9781 100644
--- a/hbase-metrics/pom.xml
+++ b/hbase-metrics/pom.xml
@@ -130,6 +130,7 @@
   
   
 true
+true
   
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc168b41/hbase-prefix-tree/pom.xml
--
diff --git a/hbase-prefix-tree/pom.xml b/hbase-prefix-tree/pom.xml
index 6dcb9a9..73c3c5b 100644
--- a/hbase-prefix-tree/pom.xml
+++ b/hbase-prefix-tree/pom.xml
@@ -134,6 +134,7 @@
   
   
 true
+true
   
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc168b41/hbase-protocol/pom.xml
--
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index 3924172..794c6ae 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -139,6 +139,7 @@
 
 
 true
+true
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc168b41/hbase-rsgroup/pom.xml
-

hbase git commit: HBASE-15834 Correct Bloom filter documentation in section 96.4 of Reference Guide

2016-05-17 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 2c22e2003 -> 98fa263b5


HBASE-15834 Correct Bloom filter documentation in section 96.4 of Reference 
Guide


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/98fa263b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/98fa263b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/98fa263b

Branch: refs/heads/master
Commit: 98fa263b514cc12a418751e889cb58791a5fdb04
Parents: 2c22e20
Author: Jerry He 
Authored: Tue May 17 14:39:05 2016 -0700
Committer: Jerry He 
Committed: Tue May 17 14:39:05 2016 -0700

--
 src/main/asciidoc/_chapters/performance.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/98fa263b/src/main/asciidoc/_chapters/performance.adoc
--
diff --git a/src/main/asciidoc/_chapters/performance.adoc 
b/src/main/asciidoc/_chapters/performance.adoc
index efb6ace..a0c00ae 100644
--- a/src/main/asciidoc/_chapters/performance.adoc
+++ b/src/main/asciidoc/_chapters/performance.adoc
@@ -338,7 +338,7 @@ HBase includes some tuning mechanisms for folding the Bloom 
filter to reduce the
 
 Bloom filters were introduced in 
link:https://issues.apache.org/jira/browse/HBASE-1200[HBASE-1200].
 Since HBase 0.96, row-based Bloom filters are enabled by default.
-(link:https://issues.apache.org/jira/browse/HBASE-8450[HBASE-])
+(link:https://issues.apache.org/jira/browse/HBASE-8450[HBASE-8450])
 
 For more information on Bloom filters in relation to HBase, see <<blooms>> for 
more information, or the following Quora discussion: 
link:http://www.quora.com/How-are-bloom-filters-used-in-HBase[How are bloom 
filters used in HBase?].
 



hbase git commit: HBASE-15841 Performance Evaluation tool total rows may not be set correctly

2016-05-18 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master c1ada0a37 -> 393bcd69f


HBASE-15841 Performance Evaluation tool total rows may not be set correctly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/393bcd69
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/393bcd69
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/393bcd69

Branch: refs/heads/master
Commit: 393bcd69f9a6088e2af7751619f973a43671b7aa
Parents: c1ada0a
Author: Jerry He 
Authored: Wed May 18 12:09:13 2016 -0700
Committer: Jerry He 
Committed: Wed May 18 12:09:13 2016 -0700

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/393bcd69/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 02b994a..c85607d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -2143,8 +2143,7 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
   // total size in GB specified
   opts.totalRows = (int) opts.size * rowsPerGB;
   opts.perClientRunRows = opts.totalRows / opts.numClientThreads;
-} else if (opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows) {
-  // number of rows specified
+} else {
   opts.totalRows = opts.perClientRunRows * opts.numClientThreads;
   opts.size = opts.totalRows / rowsPerGB;
 }



hbase git commit: HBASE-15841 Performance Evaluation tool total rows may not be set correctly

2016-05-18 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 49359a4ee -> a9972355c


HBASE-15841 Performance Evaluation tool total rows may not be set correctly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a9972355
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a9972355
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a9972355

Branch: refs/heads/branch-1
Commit: a9972355c4c374f9350be37249b2d7acf84fcf5f
Parents: 49359a4
Author: Jerry He 
Authored: Wed May 18 12:15:18 2016 -0700
Committer: Jerry He 
Committed: Wed May 18 12:15:18 2016 -0700

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a9972355/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 7dced80..12f5073 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -2074,8 +2074,7 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
   // total size in GB specified
   opts.totalRows = (int) opts.size * rowsPerGB;
   opts.perClientRunRows = opts.totalRows / opts.numClientThreads;
-} else if (opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows) {
-  // number of rows specified
+} else {
   opts.totalRows = opts.perClientRunRows * opts.numClientThreads;
   opts.size = opts.totalRows / rowsPerGB;
 }



hbase git commit: HBASE-15841 Performance Evaluation tool total rows may not be set correctly

2016-05-18 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 8566fd0f1 -> 4b1221398


HBASE-15841 Performance Evaluation tool total rows may not be set correctly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4b122139
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4b122139
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4b122139

Branch: refs/heads/branch-1.3
Commit: 4b1221398b18aaf749d522fb4f7b1594a7468aad
Parents: 8566fd0
Author: Jerry He 
Authored: Wed May 18 12:15:18 2016 -0700
Committer: Jerry He 
Committed: Wed May 18 12:18:20 2016 -0700

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4b122139/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 7dced80..12f5073 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -2074,8 +2074,7 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
   // total size in GB specified
   opts.totalRows = (int) opts.size * rowsPerGB;
   opts.perClientRunRows = opts.totalRows / opts.numClientThreads;
-} else if (opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows) {
-  // number of rows specified
+} else {
   opts.totalRows = opts.perClientRunRows * opts.numClientThreads;
   opts.size = opts.totalRows / rowsPerGB;
 }



hbase git commit: HBASE-15841 Performance Evaluation tool total rows may not be set correctly

2016-05-18 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 98aa826b7 -> 58131946d


HBASE-15841 Performance Evaluation tool total rows may not be set correctly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/58131946
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/58131946
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/58131946

Branch: refs/heads/branch-1.2
Commit: 58131946d0160fb9c8184afce929a1268939
Parents: 98aa826
Author: Jerry He 
Authored: Wed May 18 12:15:18 2016 -0700
Committer: Jerry He 
Committed: Wed May 18 12:16:18 2016 -0700

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/58131946/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 05d788e..8af2a7e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -2084,8 +2084,7 @@ public class PerformanceEvaluation extends Configured 
implements Tool {
   // total size in GB specified
   opts.totalRows = (int) opts.size * rowsPerGB;
   opts.perClientRunRows = opts.totalRows / opts.numClientThreads;
-} else if (opts.perClientRunRows != DEFAULT_OPTS.perClientRunRows) {
-  // number of rows specified
+} else {
   opts.totalRows = opts.perClientRunRows * opts.numClientThreads;
   opts.size = opts.totalRows / rowsPerGB;
 }



hbase git commit: HBASE-15465 userPermission returned by getUserPermission() for the selected namespace does not have namespace set (li xiang)

2016-05-19 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 92f5595e7 -> af5146ee0


HBASE-15465 userPermission returned by getUserPermission() for the selected 
namespace does not have namespace set (li xiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/af5146ee
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/af5146ee
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/af5146ee

Branch: refs/heads/master
Commit: af5146ee04ee6f8085e59d93e63a6062c7bb857b
Parents: 92f5595
Author: Jerry He 
Authored: Thu May 19 20:35:58 2016 -0700
Committer: Jerry He 
Committed: Thu May 19 20:35:58 2016 -0700

--
 .../security/access/AccessControlLists.java | 22 +---
 .../security/access/TestAccessController.java   |  4 
 2 files changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/af5146ee/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index f0723c2..2d98919 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -497,11 +497,19 @@ public class AccessControlLists {
 
 List perms = new ArrayList();
 
-for (Map.Entry entry : allPerms.entries()) {
-  UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
-  entry.getValue().getTableName(), entry.getValue().getFamily(),
-  entry.getValue().getQualifier(), entry.getValue().getActions());
-  perms.add(up);
+if(isNamespaceEntry(entryName)) {  // Namespace
+  for (Map.Entry entry : allPerms.entries()) {
+UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
+  entry.getValue().getNamespace(), entry.getValue().getActions());
+perms.add(up);
+  }
+} else {  // Table
+  for (Map.Entry entry : allPerms.entries()) {
+UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
+entry.getValue().getTableName(), entry.getValue().getFamily(),
+entry.getValue().getQualifier(), entry.getValue().getActions());
+perms.add(up);
+  }
 }
 return perms;
   }
@@ -620,11 +628,11 @@ public class AccessControlLists {
   }
 
   public static boolean isNamespaceEntry(String entryName) {
-return entryName.charAt(0) == NAMESPACE_PREFIX;
+return entryName != null && entryName.charAt(0) == NAMESPACE_PREFIX;
   }
 
   public static boolean isNamespaceEntry(byte[] entryName) {
-return entryName[0] == NAMESPACE_PREFIX;
+return entryName != null && entryName.length !=0 && entryName[0] == 
NAMESPACE_PREFIX;
   }
 
   public static String toNamespaceEntry(String namespace) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/af5146ee/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 92d7806..f5d7606 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2601,6 +2601,10 @@ public class TestAccessController extends SecureTestUtil 
{
   systemUserConnection, 
AccessControlLists.toNamespaceEntry(namespace));
   assertTrue(namespacePermissions != null);
   assertTrue(namespacePermissions.size() == 1);
+  for (UserPermission namespacePermission : namespacePermissions) {
+assertFalse(namespacePermission.isGlobal());  // Verify it is not a 
global user permission
+assertEquals(namespace, namespacePermission.getNamespace());  // 
Verify namespace is set
+  }
 } catch (Throwable thw) {
   throw new HBaseException(thw);
 }



hbase git commit: HBASE-15465 userPermission returned by getUserPermission() for the selected namespace does not have namespace set (li xiang)

2016-05-19 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 696a51d34 -> d038b762a


HBASE-15465 userPermission returned by getUserPermission() for the selected 
namespace does not have namespace set (li xiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d038b762
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d038b762
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d038b762

Branch: refs/heads/branch-1
Commit: d038b762a5d8e3cb895d6fbad09cbdc97fc9b408
Parents: 696a51d
Author: Jerry He 
Authored: Thu May 19 20:35:58 2016 -0700
Committer: Jerry He 
Committed: Thu May 19 20:38:24 2016 -0700

--
 .../security/access/AccessControlLists.java | 22 +---
 .../security/access/TestAccessController.java   |  4 
 2 files changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d038b762/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 887af0a..6849790 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -496,11 +496,19 @@ public class AccessControlLists {
 
 List perms = new ArrayList();
 
-for (Map.Entry entry : allPerms.entries()) {
-  UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
-  entry.getValue().getTableName(), entry.getValue().getFamily(),
-  entry.getValue().getQualifier(), entry.getValue().getActions());
-  perms.add(up);
+if(isNamespaceEntry(entryName)) {  // Namespace
+  for (Map.Entry entry : allPerms.entries()) {
+UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
+  entry.getValue().getNamespace(), entry.getValue().getActions());
+perms.add(up);
+  }
+} else {  // Table
+  for (Map.Entry entry : allPerms.entries()) {
+UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
+entry.getValue().getTableName(), entry.getValue().getFamily(),
+entry.getValue().getQualifier(), entry.getValue().getActions());
+perms.add(up);
+  }
 }
 return perms;
   }
@@ -619,11 +627,11 @@ public class AccessControlLists {
   }
 
   public static boolean isNamespaceEntry(String entryName) {
-return entryName.charAt(0) == NAMESPACE_PREFIX;
+return entryName != null && entryName.charAt(0) == NAMESPACE_PREFIX;
   }
 
   public static boolean isNamespaceEntry(byte[] entryName) {
-return entryName[0] == NAMESPACE_PREFIX;
+return entryName != null && entryName.length !=0 && entryName[0] == 
NAMESPACE_PREFIX;
   }
 
   public static String toNamespaceEntry(String namespace) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d038b762/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 173067a..a588a6c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2599,6 +2599,10 @@ public class TestAccessController extends SecureTestUtil 
{
   systemUserConnection, 
AccessControlLists.toNamespaceEntry(namespace));
   assertTrue(namespacePermissions != null);
   assertTrue(namespacePermissions.size() == 1);
+  for (UserPermission namespacePermission : namespacePermissions) {
+assertFalse(namespacePermission.isGlobal());  // Verify it is not a 
global user permission
+assertEquals(namespace, namespacePermission.getNamespace());  // 
Verify namespace is set
+  }
 } catch (Throwable thw) {
   throw new HBaseException(thw);
 }



hbase git commit: HBASE-15465 userPermission returned by getUserPermission() for the selected namespace does not have namespace set (li xiang)

2016-05-19 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 1eb004d88 -> be2252d08


HBASE-15465 userPermission returned by getUserPermission() for the selected 
namespace does not have namespace set (li xiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/be2252d0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/be2252d0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/be2252d0

Branch: refs/heads/branch-1.3
Commit: be2252d085539354916e8574e3c7504fd068f177
Parents: 1eb004d
Author: Jerry He 
Authored: Thu May 19 20:35:58 2016 -0700
Committer: Jerry He 
Committed: Thu May 19 20:41:38 2016 -0700

--
 .../security/access/AccessControlLists.java | 22 +---
 .../security/access/TestAccessController.java   |  4 
 2 files changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/be2252d0/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 887af0a..6849790 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -496,11 +496,19 @@ public class AccessControlLists {
 
 List perms = new ArrayList();
 
-for (Map.Entry entry : allPerms.entries()) {
-  UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
-  entry.getValue().getTableName(), entry.getValue().getFamily(),
-  entry.getValue().getQualifier(), entry.getValue().getActions());
-  perms.add(up);
+if(isNamespaceEntry(entryName)) {  // Namespace
+  for (Map.Entry entry : allPerms.entries()) {
+UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
+  entry.getValue().getNamespace(), entry.getValue().getActions());
+perms.add(up);
+  }
+} else {  // Table
+  for (Map.Entry entry : allPerms.entries()) {
+UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
+entry.getValue().getTableName(), entry.getValue().getFamily(),
+entry.getValue().getQualifier(), entry.getValue().getActions());
+perms.add(up);
+  }
 }
 return perms;
   }
@@ -619,11 +627,11 @@ public class AccessControlLists {
   }
 
   public static boolean isNamespaceEntry(String entryName) {
-return entryName.charAt(0) == NAMESPACE_PREFIX;
+return entryName != null && entryName.charAt(0) == NAMESPACE_PREFIX;
   }
 
   public static boolean isNamespaceEntry(byte[] entryName) {
-return entryName[0] == NAMESPACE_PREFIX;
+return entryName != null && entryName.length !=0 && entryName[0] == 
NAMESPACE_PREFIX;
   }
 
   public static String toNamespaceEntry(String namespace) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/be2252d0/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 173067a..a588a6c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2599,6 +2599,10 @@ public class TestAccessController extends SecureTestUtil 
{
   systemUserConnection, 
AccessControlLists.toNamespaceEntry(namespace));
   assertTrue(namespacePermissions != null);
   assertTrue(namespacePermissions.size() == 1);
+  for (UserPermission namespacePermission : namespacePermissions) {
+assertFalse(namespacePermission.isGlobal());  // Verify it is not a 
global user permission
+assertEquals(namespace, namespacePermission.getNamespace());  // 
Verify namespace is set
+  }
 } catch (Throwable thw) {
   throw new HBaseException(thw);
 }



hbase git commit: HBASE-15465 userPermission returned by getUserPermission() for the selected namespace does not have namespace set (li xiang)

2016-05-20 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 8108fc864 -> ac3cd465f


HBASE-15465 userPermission returned by getUserPermission() for the selected 
namespace does not have namespace set (li xiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ac3cd465
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ac3cd465
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ac3cd465

Branch: refs/heads/branch-1.2
Commit: ac3cd465fed49153607991ed7855cf282d05d425
Parents: 8108fc8
Author: Jerry He 
Authored: Thu May 19 20:35:58 2016 -0700
Committer: Jerry He 
Committed: Thu May 19 20:45:44 2016 -0700

--
 .../security/access/AccessControlLists.java | 22 +---
 .../security/access/TestAccessController.java   |  4 
 2 files changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ac3cd465/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 887af0a..6849790 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -496,11 +496,19 @@ public class AccessControlLists {
 
 List perms = new ArrayList();
 
-for (Map.Entry entry : allPerms.entries()) {
-  UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
-  entry.getValue().getTableName(), entry.getValue().getFamily(),
-  entry.getValue().getQualifier(), entry.getValue().getActions());
-  perms.add(up);
+if(isNamespaceEntry(entryName)) {  // Namespace
+  for (Map.Entry entry : allPerms.entries()) {
+UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
+  entry.getValue().getNamespace(), entry.getValue().getActions());
+perms.add(up);
+  }
+} else {  // Table
+  for (Map.Entry entry : allPerms.entries()) {
+UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
+entry.getValue().getTableName(), entry.getValue().getFamily(),
+entry.getValue().getQualifier(), entry.getValue().getActions());
+perms.add(up);
+  }
 }
 return perms;
   }
@@ -619,11 +627,11 @@ public class AccessControlLists {
   }
 
   public static boolean isNamespaceEntry(String entryName) {
-return entryName.charAt(0) == NAMESPACE_PREFIX;
+return entryName != null && entryName.charAt(0) == NAMESPACE_PREFIX;
   }
 
   public static boolean isNamespaceEntry(byte[] entryName) {
-return entryName[0] == NAMESPACE_PREFIX;
+return entryName != null && entryName.length !=0 && entryName[0] == 
NAMESPACE_PREFIX;
   }
 
   public static String toNamespaceEntry(String namespace) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ac3cd465/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 862572b..9b2921e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2569,6 +2569,10 @@ public class TestAccessController extends SecureTestUtil 
{
   systemUserConnection, 
AccessControlLists.toNamespaceEntry(namespace));
   assertTrue(namespacePermissions != null);
   assertTrue(namespacePermissions.size() == 1);
+  for (UserPermission namespacePermission : namespacePermissions) {
+assertFalse(namespacePermission.isGlobal());  // Verify it is not a 
global user permission
+assertEquals(namespace, namespacePermission.getNamespace());  // 
Verify namespace is set
+  }
 } catch (Throwable thw) {
   throw new HBaseException(thw);
 }



hbase git commit: HBASE-14818 user_permission does not list namespace permissions (li xiang)

2016-05-21 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master 0671cba65 -> 56e4b85d0


HBASE-14818 user_permission does not list namespace permissions (li xiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/56e4b85d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/56e4b85d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/56e4b85d

Branch: refs/heads/master
Commit: 56e4b85d067f63957a91fcf16676274542bb84ee
Parents: 0671cba
Author: Jerry He 
Authored: Sat May 21 20:06:05 2016 -0700
Committer: Jerry He 
Committed: Sat May 21 20:06:05 2016 -0700

--
 .../security/access/AccessControlClient.java| 15 ++
 .../security/access/TestAccessController.java   | 30 +---
 hbase-shell/src/main/ruby/hbase/security.rb |  4 +--
 .../main/ruby/shell/commands/user_permission.rb |  2 ++
 4 files changed, 40 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/56e4b85d/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
index 25ac01f..79dbd05 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java
@@ -217,11 +217,16 @@ public class AccessControlClient {
 HTableDescriptor[] htds = null;
 if (tableRegex == null || tableRegex.isEmpty()) {
   permList = ProtobufUtil.getUserPermissions(controller, protocol);
-} else if (tableRegex.charAt(0) == '@') {
-  String namespace = tableRegex.substring(1);
-  permList = ProtobufUtil.getUserPermissions(controller, protocol,
-Bytes.toBytes(namespace));
-} else {
+} else if (tableRegex.charAt(0) == '@') {  // Namespaces
+  String namespaceRegex = tableRegex.substring(1);
+  for (NamespaceDescriptor nsds : admin.listNamespaceDescriptors()) {  
// Read out all namespaces
+String namespace = nsds.getName();
+if (namespace.matches(namespaceRegex)) {  // Match the given 
namespace regex?
+  permList.addAll(ProtobufUtil.getUserPermissions(controller, 
protocol,
+Bytes.toBytes(namespace)));
+}
+  }
+} else {  // Tables
   htds = admin.listTables(Pattern.compile(tableRegex), true);
   for (HTableDescriptor hd : htds) {
 permList.addAll(ProtobufUtil.getUserPermissions(controller, 
protocol,

http://git-wip-us.apache.org/repos/asf/hbase/blob/56e4b85d/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index f5d7606..f58e24e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2596,19 +2596,41 @@ public class TestAccessController extends 
SecureTestUtil {
 NamespaceDescriptor desc = NamespaceDescriptor.create(namespace).build();
 createNamespace(TEST_UTIL, desc);
 grantOnNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, 
Permission.Action.READ);
+
+// Test 1: A specific namespace
+getNamespacePermissionsAndVerify(namespace, 1, namespace);
+
+// Test 2: '@.*'
+getNamespacePermissionsAndVerify(".*", 1, namespace);
+
+// Test 3: A more complex regex
+getNamespacePermissionsAndVerify("^test[a-zA-Z]*", 1, namespace);
+
+deleteNamespace(TEST_UTIL, namespace);
+  }
+
+  /**
+   * List all user permissions match the given regular expression for namespace
+   * and verify each of them.
+   * @param namespaceRegexWithoutPrefix the regular expression for namespace, 
without NAMESPACE_PREFIX
+   * @param expectedAmount the expected amount of user permissions returned
+   * @param expectedNamespace the expected namespace of each user permission 
returned
+   * @throws HBaseException in the case of any HBase exception when accessing 
hbase:acl table
+   */
+  private void getNamespacePermissionsAndVerify(String 
namespaceRegexWithoutPrefix,
+  int expectedAmount, String expectedNamespace) throws HBaseException {
 try {
   List namespacePermissions = 
AccessControlClient

  1   2   >