[2/2] hbase git commit: HBASE-18147 POC jenkinsfile for nightly checks.

2017-07-19 Thread busbey
HBASE-18147 POC jenkinsfile for nightly checks.

* Jenkinsfile that works for all current branches.
* adds dev-support script for setting parameters for our yetus nightly invocation
* adds ruby tools to dockerfile
* adds rubocop to dockerfile
* adds ruby-lint to dockerfile
* adds perlcritic to dockerfile


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a60afbad
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a60afbad
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a60afbad

Branch: refs/heads/branch-1.1-HBASE-18147
Commit: a60afbad98a53a11eaefc8e9a13cdd0a884c5623
Parents: b03a5e7
Author: Sean Busbey 
Authored: Thu Jul 20 00:51:07 2017 -0500
Committer: Sean Busbey 
Committed: Thu Jul 20 00:56:08 2017 -0500

--
 dev-support/Jenkinsfile| 269 
 dev-support/docker/Dockerfile  |  29 
 dev-support/hbase_nightly_yetus.sh |  86 ++
 3 files changed, 384 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a60afbad/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
new file mode 100644
index 000..5ff3d82
--- /dev/null
+++ b/dev-support/Jenkinsfile
@@ -0,0 +1,269 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+pipeline {
+  agent {
+node {
+  label 'Hadoop'
+}
+  }
+  triggers {
+cron('@daily')
+  }
+  options {
+buildDiscarder(logRotator(numToKeepStr: '30'))
+timeout (time: 6, unit: 'HOURS')
+timestamps()
+  }
+  environment {
+TOOLS = "${env.WORKSPACE}/tools"
+// where we check out to across stages
+BASEDIR = "${env.WORKSPACE}/component"
+YETUS_RELEASE = '0.5.0'
+// where we'll write everything from different steps.
+OUTPUT_RELATIVE_GENERAL = 'output-general'
+OUTPUTDIR_GENERAL = "${env.WORKSPACE}/output-general"
+OUTPUT_RELATIVE_JDK7 = 'output-jdk7'
+OUTPUTDIR_JDK7 = "${env.WORKSPACE}/output-jdk7"
+OUTPUT_RELATIVE_JDK8 = 'output-jdk8'
+OUTPUTDIR_JDK8 = "${env.WORKSPACE}/output-jdk8"
+PROJECT = 'hbase'
+PROJET_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
+// This section of the docs tells folks not to use the javadoc tag. older branches have our old version of the check for said tag.
+AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc,dev-support/test-patch.sh'
+WHITESPACE_IGNORE_LIST = '.*/generated/.*'
+// output from surefire; sadly the archive function in yetus only works on file names.
+ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*-output.txt,org.apache.h*.txt'
+// These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure.
+TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop'
+BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
+EXCLUDE_TESTS_URL = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
+  }
+  parameters {
+booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release.
+
+Should only be used manually when e.g. there is some non-work-aroundable issue in yetus we are checking a fix for.''')
+booleanParam(name: 'DEBUG', defaultValue: false, description: 'Produce a lot more meta-information.')
+  }
+  stages {
+stage ('yetus install') {
+  steps {
+sh  '''#!/usr/bin/env bash
+echo "Ensure we have a copy of Apache Yetus."
+if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
+  YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
+  echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
+  if [ ! -d "${YETUS_DIR}" ]; then
+echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+rm 

[1/2] hbase git commit: HBASE-18147 POC jenkinsfile for nightly checks.

2017-07-19 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1-HBASE-18147 [created] a60afbad9
  refs/heads/branch-1.2-HBASE-18147 [created] ae1e6d26b


HBASE-18147 POC jenkinsfile for nightly checks.

* Jenkinsfile that works for all current branches.
* adds dev-support script for setting parameters for our yetus nightly invocation
* adds ruby tools to dockerfile
* adds rubocop to dockerfile
* adds ruby-lint to dockerfile
* adds perlcritic to dockerfile


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ae1e6d26
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ae1e6d26
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ae1e6d26

Branch: refs/heads/branch-1.2-HBASE-18147
Commit: ae1e6d26bf454cb53382053c1e0430ef50e3f0e0
Parents: 7d2175e
Author: Sean Busbey 
Authored: Thu Jul 20 00:51:07 2017 -0500
Committer: Sean Busbey 
Committed: Thu Jul 20 00:54:20 2017 -0500

--
 dev-support/Jenkinsfile| 269 
 dev-support/docker/Dockerfile  |  29 
 dev-support/hbase_nightly_yetus.sh |  86 ++
 3 files changed, 384 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ae1e6d26/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
new file mode 100644
index 000..5ff3d82
--- /dev/null
+++ b/dev-support/Jenkinsfile
@@ -0,0 +1,269 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+pipeline {
+  agent {
+node {
+  label 'Hadoop'
+}
+  }
+  triggers {
+cron('@daily')
+  }
+  options {
+buildDiscarder(logRotator(numToKeepStr: '30'))
+timeout (time: 6, unit: 'HOURS')
+timestamps()
+  }
+  environment {
+TOOLS = "${env.WORKSPACE}/tools"
+// where we check out to across stages
+BASEDIR = "${env.WORKSPACE}/component"
+YETUS_RELEASE = '0.5.0'
+// where we'll write everything from different steps.
+OUTPUT_RELATIVE_GENERAL = 'output-general'
+OUTPUTDIR_GENERAL = "${env.WORKSPACE}/output-general"
+OUTPUT_RELATIVE_JDK7 = 'output-jdk7'
+OUTPUTDIR_JDK7 = "${env.WORKSPACE}/output-jdk7"
+OUTPUT_RELATIVE_JDK8 = 'output-jdk8'
+OUTPUTDIR_JDK8 = "${env.WORKSPACE}/output-jdk8"
+PROJECT = 'hbase'
+PROJET_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
+// This section of the docs tells folks not to use the javadoc tag. older branches have our old version of the check for said tag.
+AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc,dev-support/test-patch.sh'
+WHITESPACE_IGNORE_LIST = '.*/generated/.*'
+// output from surefire; sadly the archive function in yetus only works on file names.
+ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*-output.txt,org.apache.h*.txt'
+// These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure.
+TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop'
+BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
+EXCLUDE_TESTS_URL = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
+  }
+  parameters {
+booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, 
description: '''Check to use the current HEAD of apache/yetus rather than our 
configured release.
+
+Should only be used manually when e.g. there is some non-work-aroundable 
issue in yetus we are checking a fix for.''')
+booleanParam(name: 'DEBUG', defaultValue: false, description: 'Produce a 
lot more meta-information.')
+  }
+  stages {
+stage ('yetus install') {
+  steps {
+sh  '''#!/usr/bin/env bash
+echo "Ensure we have a copy of Apache Yetus."
+if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
+  YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
+  echo "Checking for Yetus 

[2/6] hbase git commit: HBASE-16488 Starting namespace and quota services in master startup asynchronizely (Stephen Yuan Jiang)

2017-07-19 Thread busbey
HBASE-16488 Starting namespace and quota services in master startup asynchronizely (Stephen Yuan Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/af359d03
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/af359d03
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/af359d03

Branch: refs/heads/branch-1-HBASE-18147
Commit: af359d03b5e2cc798cee8ba52d2a9fcbb1022104
Parents: 5a28437
Author: Stephen Yuan Jiang 
Authored: Tue Jul 18 06:58:29 2017 -0700
Committer: Stephen Yuan Jiang 
Committed: Tue Jul 18 06:58:29 2017 -0700

--
 .../org/apache/hadoop/hbase/master/HMaster.java | 103 ---
 .../hadoop/hbase/master/MasterRpcServices.java  |   4 +-
 .../hbase/master/TableNamespaceManager.java |  17 ++-
 .../procedure/CreateNamespaceProcedure.java |  19 ++--
 .../master/procedure/CreateTableProcedure.java  |   2 +-
 .../procedure/DeleteNamespaceProcedure.java |   4 +-
 .../master/procedure/MasterProcedureEnv.java|   4 +
 .../procedure/ModifyNamespaceProcedure.java |   4 +-
 .../hbase/client/TestRollbackFromClient.java|   2 +-
 .../hadoop/hbase/master/TestMasterFailover.java |   3 +
 .../hadoop/hbase/master/TestMasterMetrics.java  |   3 +
 .../hbase/master/TestMasterNoCluster.java   |   8 --
 .../master/handler/TestCreateTableHandler.java  |   2 +-
 .../procedure/TestCreateNamespaceProcedure.java |   4 +
 .../procedure/TestMasterProcedureEvents.java|  69 -
 .../procedure/TestModifyNamespaceProcedure.java |   4 +
 .../TestRSKilledWhenInitializing.java   |  27 -
 .../hbase/regionserver/TestRegionOpen.java  |   4 +-
 .../wal/TestWALOpenAfterDNRollingStart.java |   1 +
 19 files changed, 233 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/af359d03/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index feda7fd..98a3dfb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -156,6 +156,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.ConfigUtil;
 import org.apache.hadoop.hbase.util.EncryptionTest;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.HasThread;
@@ -309,6 +310,10 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
   // initialization may have not completed yet.
   volatile boolean serviceStarted = false;
 
+  // flag set after we complete asynchorized services and master 
initialization is done,
+  private final ProcedureEvent namespaceManagerInitialized =
+  new ProcedureEvent("master namespace manager initialized");
+
   // flag set after we complete assignMeta.
   private final ProcedureEvent serverCrashProcessingEnabled =
 new ProcedureEvent("server crash processing");
@@ -874,8 +879,8 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
 periodicDoMetricsChore = new PeriodicDoMetrics(msgInterval, this);
 getChoreService().scheduleChore(periodicDoMetricsChore);
 
-status.setStatus("Starting namespace manager");
-initNamespace();
+status.setStatus("Starting namespace manager and quota manager");
+initNamespaceAndQuotaManager();
 
 if (this.cpHost != null) {
   try {
@@ -892,11 +897,6 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
 // Set master as 'initialized'.
 setInitialized(true);
 
-assignmentManager.checkIfShouldMoveSystemRegionAsync();
-
-status.setStatus("Starting quota manager");
-initQuotaManager();
-
 // assign the meta replicas
 Set EMPTY_SET = new HashSet();
 int numReplicas = conf.getInt(HConstants.META_REPLICAS_NUM,
@@ -928,12 +928,6 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
 zombieDetector.interrupt();
   }
 
-  private void initQuotaManager() throws IOException {
-quotaManager = new MasterQuotaManager(this);
-this.assignmentManager.setRegionStateListener((RegionStateListener) 
quotaManager);
-quotaManager.start();
-  }
-
   /**
* Create a {@link ServerManager} instance.
* @param master
@@ -1080,10 +1074,60 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
 

[1/6] hbase git commit: HBASE-18377 Error handling for FileNotFoundException should consider RemoteException in openReader() [Forced Update!]

2017-07-19 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1-HBASE-18147 7a41eaeda -> 4a2c0c38f (forced update)


HBASE-18377 Error handling for FileNotFoundException should consider RemoteException in openReader()


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5a28437a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5a28437a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5a28437a

Branch: refs/heads/branch-1-HBASE-18147
Commit: 5a28437a74f5569715de79061426165151f57e79
Parents: 1b8fb0a
Author: tedyu 
Authored: Tue Jul 18 06:50:02 2017 -0700
Committer: tedyu 
Committed: Tue Jul 18 06:50:02 2017 -0700

--
 .../regionserver/WALEntryStream.java| 22 +---
 1 file changed, 15 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5a28437a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
index c4d552c..4f49955 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
@@ -41,6 +41,7 @@ import 
org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WAL.Reader;
 import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.ipc.RemoteException;
 
 /**
  * Streaming access to WAL entries. This class is given a queue of WAL {@link 
Path}, and continually
@@ -316,6 +317,15 @@ public class WALEntryStream implements Iterator, 
Closeable, Iterable, 
Closeable, Iterable
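
The crux of the fix, shown as a hedged sketch (the helper name and call site are illustrative, not copied from WALEntryStream): a FileNotFoundException raised on the NameNode reaches the client wrapped in org.apache.hadoop.ipc.RemoteException, so an instanceof check on the raw exception misses it; unwrapping first lets the local and remote cases share one recovery path.

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

final class WalErrorHelper {
  // Hypothetical helper illustrating the unwrap-then-check pattern used by the patch.
  static boolean isFileNotFound(IOException ioe) {
    IOException cause = ioe;
    if (ioe instanceof RemoteException) {
      // converts a server-side FileNotFoundException back into the local type
      cause = ((RemoteException) ioe).unwrapRemoteException(FileNotFoundException.class);
    }
    return cause instanceof FileNotFoundException;
  }
}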

[6/6] hbase git commit: HBASE-18147 POC jenkinsfile for nightly checks.

2017-07-19 Thread busbey
HBASE-18147 POC jenkinsfile for nightly checks.

* Jenkinsfile that works for all current branches.
* adds dev-support script for setting parameters for our yetus nightly invocation
* adds ruby tools to dockerfile
* adds rubocop to dockerfile
* adds ruby-lint to dockerfile
* adds perlcritic to dockerfile


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4a2c0c38
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4a2c0c38
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4a2c0c38

Branch: refs/heads/branch-1-HBASE-18147
Commit: 4a2c0c38f7f848dfdcdd20b31dfcdf5d03f2ae28
Parents: cfd5b6b
Author: Sean Busbey 
Authored: Thu Jul 20 00:51:07 2017 -0500
Committer: Sean Busbey 
Committed: Thu Jul 20 00:51:07 2017 -0500

--
 dev-support/Jenkinsfile| 269 
 dev-support/docker/Dockerfile  |  29 
 dev-support/hbase_nightly_yetus.sh |  86 ++
 3 files changed, 384 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4a2c0c38/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
new file mode 100644
index 000..5ff3d82
--- /dev/null
+++ b/dev-support/Jenkinsfile
@@ -0,0 +1,269 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+pipeline {
+  agent {
+node {
+  label 'Hadoop'
+}
+  }
+  triggers {
+cron('@daily')
+  }
+  options {
+buildDiscarder(logRotator(numToKeepStr: '30'))
+timeout (time: 6, unit: 'HOURS')
+timestamps()
+  }
+  environment {
+TOOLS = "${env.WORKSPACE}/tools"
+// where we check out to across stages
+BASEDIR = "${env.WORKSPACE}/component"
+YETUS_RELEASE = '0.5.0'
+// where we'll write everything from different steps.
+OUTPUT_RELATIVE_GENERAL = 'output-general'
+OUTPUTDIR_GENERAL = "${env.WORKSPACE}/output-general"
+OUTPUT_RELATIVE_JDK7 = 'output-jdk7'
+OUTPUTDIR_JDK7 = "${env.WORKSPACE}/output-jdk7"
+OUTPUT_RELATIVE_JDK8 = 'output-jdk8'
+OUTPUTDIR_JDK8 = "${env.WORKSPACE}/output-jdk8"
+PROJECT = 'hbase'
+PROJET_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
+// This section of the docs tells folks not to use the javadoc tag. older branches have our old version of the check for said tag.
+AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc,dev-support/test-patch.sh'
+WHITESPACE_IGNORE_LIST = '.*/generated/.*'
+// output from surefire; sadly the archive function in yetus only works on file names.
+ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*-output.txt,org.apache.h*.txt'
+// These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure.
+TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop'
+BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
+EXCLUDE_TESTS_URL = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
+  }
+  parameters {
+booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release.
+
+Should only be used manually when e.g. there is some non-work-aroundable issue in yetus we are checking a fix for.''')
+booleanParam(name: 'DEBUG', defaultValue: false, description: 'Produce a lot more meta-information.')
+  }
+  stages {
+stage ('yetus install') {
+  steps {
+sh  '''#!/usr/bin/env bash
+echo "Ensure we have a copy of Apache Yetus."
+if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
+  YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
+  echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
+  if [ ! -d "${YETUS_DIR}" ]; then
+echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+rm -rf 

[3/6] hbase git commit: HBASE-18390 Sleep too long when finding region location failed

2017-07-19 Thread busbey
HBASE-18390 Sleep too long when finding region location failed


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/98020957
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/98020957
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/98020957

Branch: refs/heads/branch-1-HBASE-18147
Commit: 980209579ba13cc1fecc1e2ce5403b38877600bf
Parents: af359d0
Author: Phil Yang 
Authored: Wed Jul 19 11:34:57 2017 +0800
Committer: Phil Yang 
Committed: Wed Jul 19 12:05:21 2017 +0800

--
 .../hadoop/hbase/client/ConnectionUtils.java| 14 --
 .../client/RegionAdminServiceCallable.java  |  9 +
 .../hbase/client/RegionServerCallable.java  |  9 +
 .../hbase/client/TestConnectionUtils.java   | 20 
 4 files changed, 2 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/98020957/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 7155659..96e7788 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -74,20 +74,6 @@ public class ConnectionUtils {
 
 
   /**
-   * Adds / subs an up to 50% jitter to a pause time. Minimum is 1.
-   * @param pause the expected pause.
-   * @param jitter the jitter ratio, between 0 and 1, exclusive.
-   */
-  public static long addJitter(final long pause, final float jitter) {
-float lag = pause * (ThreadLocalRandom.current().nextFloat() - 0.5f) * 
jitter;
-long newPause = pause + (long) lag;
-if (newPause <= 0) {
-  return 1;
-}
-return newPause;
-  }
-
-  /**
* @param conn The connection for which to replace the generator.
* @param cnm Replaces the nonce generator used, for testing.
* @return old nonce generator.

http://git-wip-us.apache.org/repos/asf/hbase/blob/98020957/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index 675a2f9..386925e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -50,8 +50,6 @@ public abstract class RegionAdminServiceCallable 
implements RetryingCallable<
   protected final byte[] row;
   protected final int replicaId;
 
-  protected final static int MIN_WAIT_DEAD_SERVER = 1;
-
   public RegionAdminServiceCallable(ClusterConnection connection,
   RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] 
row) {
 this(connection, rpcControllerFactory, null, tableName, row);
@@ -138,12 +136,7 @@ public abstract class RegionAdminServiceCallable 
implements RetryingCallable<
 
   @Override
   public long sleep(long pause, int tries) {
-long sleep = ConnectionUtils.getPauseTime(pause, tries);
-if (sleep < MIN_WAIT_DEAD_SERVER
-&& (location == null || connection.isDeadServer(location.getServerName()))) {
-  sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);
-}
-return sleep;
+return ConnectionUtils.getPauseTime(pause, tries);
   }
 
   public static RegionLocations getRegionLocations(

http://git-wip-us.apache.org/repos/asf/hbase/blob/98020957/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index b446c3f..e0b09f3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -51,8 +51,6 @@ public abstract class RegionServerCallable implements 
RetryingCallable {
   protected HRegionLocation location;
   private ClientService.BlockingInterface stub;
 
-  protected final static int MIN_WAIT_DEAD_SERVER = 1;
-
   /**
* @param connection Connection to use.
* @param tableName Table name to which row belongs.
@@ -134,12 +132,7 @@ public 
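
For context, a rough sketch of the backoff both callables now rely on exclusively; the multipliers are assumed to mirror HConstants.RETRY_BACKOFF and this is not the literal ConnectionUtils source. With the override removed, the retry pause comes straight from this table instead of being swapped for a jittered MIN_WAIT_DEAD_SERVER pause whenever the location is unknown or the server looks dead.

import java.util.concurrent.ThreadLocalRandom;

final class BackoffSketch {
  // assumed multipliers, patterned after HConstants.RETRY_BACKOFF
  static final int[] RETRY_BACKOFF = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};

  static long getPauseTime(long basePause, int tries) {
    int index = Math.min(tries, RETRY_BACKOFF.length - 1);
    long normalPause = basePause * RETRY_BACKOFF[index];
    // small random jitter so many clients retrying together do not line up
    long jitter = (long) (normalPause * ThreadLocalRandom.current().nextFloat() * 0.01f);
    return normalPause + jitter;
  }
}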

[4/6] hbase git commit: HBASE-18308 Eliminate the findbugs warnings for hbase-server

2017-07-19 Thread busbey
HBASE-18308 Eliminate the findbugs warnings for hbase-server


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2da5b432
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2da5b432
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2da5b432

Branch: refs/heads/branch-1-HBASE-18147
Commit: 2da5b432a18fac0438346a2bb0ccea3a0beb90fe
Parents: 9802095
Author: Chia-Ping Tsai 
Authored: Thu Jul 20 00:36:16 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jul 20 00:36:16 2017 +0800

--
 .../apache/hadoop/hbase/LocalHBaseCluster.java  | 10 
 .../hadoop/hbase/constraint/Constraints.java|  4 ++--
 .../hadoop/hbase/mapreduce/JarFinder.java   |  3 +++
 .../apache/hadoop/hbase/master/DeadServer.java  |  5 
 .../hadoop/hbase/master/ServerManager.java  |  5 ++--
 .../hbase/master/balancer/BaseLoadBalancer.java |  2 ++
 .../hadoop/hbase/regionserver/HRegion.java  |  3 ---
 .../hbase/regionserver/HRegionServer.java   |  3 ++-
 .../querymatcher/ExplicitColumnTracker.java | 14 +---
 .../regionserver/ReplicationSource.java |  2 +-
 .../org/apache/hadoop/hbase/tool/Canary.java|  5 ++--
 .../apache/hadoop/hbase/wal/WALSplitter.java| 24 
 12 files changed, 40 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index b98078a..42484e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -313,12 +313,10 @@ public class LocalHBaseCluster {
*/
   public HMaster getActiveMaster() {
 for (JVMClusterUtil.MasterThread mt : masterThreads) {
-  if (mt.getMaster().isActiveMaster()) {
-// Ensure that the current active master is not stopped.
-// We don't want to return a stopping master as an active master.
-if (mt.getMaster().isActiveMaster()  && !mt.getMaster().isStopped()) {
-  return mt.getMaster();
-}
+  // Ensure that the current active master is not stopped.
+  // We don't want to return a stopping master as an active master.
+  if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) {
+return mt.getMaster();
   }
 }
 return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
index 85ef717..c96bf3d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
@@ -616,8 +616,8 @@ public final class Constraints {
 @Override
 public int compare(Constraint c1, Constraint c2) {
   // compare the priorities of the constraints stored in their 
configuration
-  return Long.valueOf(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY))
-  .compareTo(c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
+  return Long.compare(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY),
+c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
 }
   };
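
A small aside on the comparator change above (the snippet is illustrative, not from the patch): the likely findbugs complaint is boxing a primitive just to compare it; Long.compare gives the same ordering without the allocation.

public class CompareExample {
  public static void main(String[] args) {
    long p1 = 10L, p2 = 3L;
    int viaBoxing = Long.valueOf(p1).compareTo(p2); // allocates a Long on every call
    int viaCompare = Long.compare(p1, p2);          // static primitive comparison
    System.out.println(viaBoxing == viaCompare);    // true: identical ordering
  }
}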
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
index dfbe648..e0421d9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
@@ -90,6 +90,9 @@ public class JarFinder {
   private static void zipDir(File dir, String relativePath, ZipOutputStream 
zos,
  boolean start) throws IOException {
 String[] dirList = dir.list();
+if (dirList == null) {
+  return;
+}
 for (String aDirList : dirList) {
   File f = new File(dir, aDirList);
   if (!f.isHidden()) {
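
A standalone illustration of the contract the JarFinder guard above protects against (the path is hypothetical, not from the patch): File.list() returns null rather than throwing when the target is not a readable directory, so iterating the result unguarded is a latent NullPointerException.

import java.io.File;

public class ListDirExample {
  public static void main(String[] args) {
    File dir = new File("/path/that/may/not/exist"); // hypothetical path
    String[] entries = dir.list();
    if (entries == null) {
      // not a directory, or an I/O error: bail out instead of NPE-ing below
      System.out.println("nothing to list");
      return;
    }
    for (String name : entries) {
      System.out.println(name);
    }
  }
}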


[5/6] hbase git commit: HBASE-18330 NPE in ReplicationZKLockCleanerChore

2017-07-19 Thread busbey
HBASE-18330 NPE in ReplicationZKLockCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cfd5b6b5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cfd5b6b5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cfd5b6b5

Branch: refs/heads/branch-1-HBASE-18147
Commit: cfd5b6b59f00eb3cbcb07a2b32fac019436c479f
Parents: 2da5b43
Author: Andrew Purtell 
Authored: Wed Jul 19 15:46:45 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:46:45 2017 -0700

--
 .../apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java  | 3 +++
 .../hbase/master/cleaner/ReplicationZKLockCleanerChore.java  | 4 +++-
 .../hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java| 3 +++
 .../hadoop/hbase/replication/master/ReplicationLogCleaner.java   | 2 +-
 .../hbase/replication/regionserver/DumpReplicationQueues.java| 3 +++
 .../hbase/replication/regionserver/ReplicationSourceManager.java | 2 +-
 6 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index b242ca7..c2999ec 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -528,6 +528,9 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 if (queuesClient == null) return;
 try {
   List replicators = queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator : replicators) {
 List queueIds = queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
index dc5338e..3fa30bf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
@@ -76,7 +76,9 @@ public class ReplicationZKLockCleanerChore extends 
ScheduledChore {
   }
   Set rsSet = new HashSet(regionServers);
   List replicators = queues.getListOfReplicators();
-
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator: replicators) {
 try {
   String lockNode = queues.getLockZNode(replicator);

http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
index 8311b8d..f559510 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
@@ -80,6 +80,9 @@ public class ReplicationZKNodeCleaner {
 Set peerIds = new HashSet<>(this.replicationPeers.getAllPeerIds());
 try {
   List replicators = this.queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return undeletedQueues;
+  }
   for (String replicator : replicators) {
 List queueIds = this.queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
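
A brief note on where the nulls guarded above come from, with a tiny illustrative helper (not the literal client code): the replicator list is read as the children of the replication rs znode, and when that node is absent or empty the ZooKeeper-backed client can surface null, so every caller needs the same normalize-before-iterate treatment.

import java.util.Collections;
import java.util.List;

final class ReplicatorListHelper {
  // Hypothetical helper mirroring the guards the patch adds at each call site.
  static List<String> replicatorsOrEmpty(List<String> fromZk) {
    return (fromZk == null || fromZk.isEmpty()) ? Collections.<String>emptyList() : fromZk;
  }
}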
 

hbase git commit: HBASE-18147 POC jenkinsfile for nightly checks. [Forced Update!]

2017-07-19 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-18147 8f06993df -> 7164c6fef (forced update)


HBASE-18147 POC jenkinsfile for nightly checks.

* Jenkinsfile that works for all current branches.
* adds dev-support script for setting parameters for our yetus nightly invocation
* adds ruby tools to dockerfile
* adds rubocop to dockerfile
* adds ruby-lint to dockerfile
* adds perlcritic to dockerfile


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7164c6fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7164c6fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7164c6fe

Branch: refs/heads/HBASE-18147
Commit: 7164c6fefe0133a52b3bce336d7a4ff2082befa3
Parents: 01db60d
Author: Sean Busbey 
Authored: Tue Jul 4 15:12:38 2017 -0400
Committer: Sean Busbey 
Committed: Thu Jul 20 00:46:16 2017 -0500

--
 dev-support/Jenkinsfile| 269 
 dev-support/docker/Dockerfile  |  29 
 dev-support/hbase_nightly_yetus.sh |  86 ++
 3 files changed, 384 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7164c6fe/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
new file mode 100644
index 000..5ff3d82
--- /dev/null
+++ b/dev-support/Jenkinsfile
@@ -0,0 +1,269 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+pipeline {
+  agent {
+node {
+  label 'Hadoop'
+}
+  }
+  triggers {
+cron('@daily')
+  }
+  options {
+buildDiscarder(logRotator(numToKeepStr: '30'))
+timeout (time: 6, unit: 'HOURS')
+timestamps()
+  }
+  environment {
+TOOLS = "${env.WORKSPACE}/tools"
+// where we check out to across stages
+BASEDIR = "${env.WORKSPACE}/component"
+YETUS_RELEASE = '0.5.0'
+// where we'll write everything from different steps.
+OUTPUT_RELATIVE_GENERAL = 'output-general'
+OUTPUTDIR_GENERAL = "${env.WORKSPACE}/output-general"
+OUTPUT_RELATIVE_JDK7 = 'output-jdk7'
+OUTPUTDIR_JDK7 = "${env.WORKSPACE}/output-jdk7"
+OUTPUT_RELATIVE_JDK8 = 'output-jdk8'
+OUTPUTDIR_JDK8 = "${env.WORKSPACE}/output-jdk8"
+PROJECT = 'hbase'
+PROJET_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
+// This section of the docs tells folks not to use the javadoc tag. older branches have our old version of the check for said tag.
+AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc,dev-support/test-patch.sh'
+WHITESPACE_IGNORE_LIST = '.*/generated/.*'
+// output from surefire; sadly the archive function in yetus only works on file names.
+ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*-output.txt,org.apache.h*.txt'
+// These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure.
+TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop'
+BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
+EXCLUDE_TESTS_URL = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
+  }
+  parameters {
+booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release.
+
+Should only be used manually when e.g. there is some non-work-aroundable issue in yetus we are checking a fix for.''')
+booleanParam(name: 'DEBUG', defaultValue: false, description: 'Produce a lot more meta-information.')
+  }
+  stages {
+stage ('yetus install') {
+  steps {
+sh  '''#!/usr/bin/env bash
+echo "Ensure we have a copy of Apache Yetus."
+if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
+  YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
+  echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
+  if [ ! -d 

[25/26] hbase git commit: HBASE-18330 NPE in ReplicationZKLockCleanerChore

2017-07-19 Thread busbey
HBASE-18330 NPE in ReplicationZKLockCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/01db60d6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/01db60d6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/01db60d6

Branch: refs/heads/HBASE-18147
Commit: 01db60d65b9a2dff0ca001323cb77a6e4e8d6f48
Parents: 5f54e28
Author: Andrew Purtell 
Authored: Wed Jul 19 15:46:08 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:46:08 2017 -0700

--
 .../apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java   | 3 +++
 .../hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java   | 2 +-
 .../hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java | 3 +++
 .../hbase/replication/regionserver/DumpReplicationQueues.java | 3 +++
 4 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/01db60d6/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 02fe2f1..751e454 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -520,6 +520,9 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 if (queuesClient == null) return;
 try {
   List replicators = queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator : replicators) {
 List queueIds = queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/01db60d6/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
index 0504373..0115b6f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
@@ -98,7 +98,7 @@ public class ReplicationQueuesClientZKImpl extends 
ReplicationStateZKBase implem
 for (int retry = 0; ; retry++) {
   int v0 = getQueuesZNodeCversion();
   List rss = getListOfReplicators();
-  if (rss == null) {
+  if (rss == null || rss.isEmpty()) {
 LOG.debug("Didn't find any region server that replicates, won't 
prevent any deletions.");
 return ImmutableSet.of();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/01db60d6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
index dafc4f8..6d8962e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
@@ -77,6 +77,9 @@ public class ReplicationZKNodeCleaner {
 Set peerIds = new HashSet<>(this.replicationPeers.getAllPeerIds());
 try {
   List replicators = this.queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return undeletedQueues;
+  }
   for (String replicator : replicators) {
 List queueIds = this.queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/01db60d6/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 2bedbfd..4bda75b 100644
--- 

[21/26] hbase git commit: HBASE-17738 BucketCache startup is slow (Ram)

2017-07-19 Thread busbey
HBASE-17738 BucketCache startup is slow (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d0e4a643
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d0e4a643
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d0e4a643

Branch: refs/heads/HBASE-18147
Commit: d0e4a643a0a1085c98485d37fb433bc8865bc0ad
Parents: f10f819
Author: Ramkrishna 
Authored: Wed Jul 19 21:51:11 2017 +0530
Committer: Ramkrishna 
Committed: Wed Jul 19 21:51:11 2017 +0530

--
 .../hadoop/hbase/util/ByteBufferArray.java  | 79 ++--
 .../hadoop/hbase/util/TestByteBufferArray.java  | 25 +++
 2 files changed, 97 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d0e4a643/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java
index 2bb820e..60f8c79 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferArray.java
@@ -20,6 +20,13 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -29,6 +36,8 @@ import org.apache.hadoop.hbase.nio.MultiByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * This class manages an array of ByteBuffers with a default size 4MB. These
  * buffers are sequential and could be considered as a large buffer.It supports
@@ -39,7 +48,8 @@ public final class ByteBufferArray {
   private static final Log LOG = LogFactory.getLog(ByteBufferArray.class);
 
   public static final int DEFAULT_BUFFER_SIZE = 4 * 1024 * 1024;
-  private ByteBuffer buffers[];
+  @VisibleForTesting
+  ByteBuffer buffers[];
   private int bufferSize;
   private int bufferCount;
 
@@ -62,13 +72,68 @@ public final class ByteBufferArray {
 + ", sizePerBuffer=" + StringUtils.byteDesc(bufferSize) + ", count="
 + bufferCount + ", direct=" + directByteBuffer);
 buffers = new ByteBuffer[bufferCount + 1];
-for (int i = 0; i <= bufferCount; i++) {
-  if (i < bufferCount) {
-buffers[i] = allocator.allocate(bufferSize, directByteBuffer);
-  } else {
-// always create on heap
-buffers[i] = ByteBuffer.allocate(0);
+createBuffers(directByteBuffer, allocator);
+  }
+
+  private void createBuffers(boolean directByteBuffer, ByteBufferAllocator 
allocator)
+  throws IOException {
+int threadCount = Runtime.getRuntime().availableProcessors();
+ExecutorService service = new ThreadPoolExecutor(threadCount, threadCount, 
0L,
+TimeUnit.MILLISECONDS, new LinkedBlockingQueue());
+int perThreadCount = Math.round((float) (bufferCount) / threadCount);
+int lastThreadCount = bufferCount - (perThreadCount * (threadCount - 1));
+Future[] futures = new Future[threadCount];
+try {
+  for (int i = 0; i < threadCount; i++) {
+// Last thread will have to deal with a different number of buffers
+int buffersToCreate = (i == threadCount - 1) ? lastThreadCount : 
perThreadCount;
+futures[i] = service.submit(
+  new BufferCreatorCallable(bufferSize, directByteBuffer, 
buffersToCreate, allocator));
+  }
+  int bufferIndex = 0;
+  for (Future future : futures) {
+try {
+  ByteBuffer[] buffers = future.get();
+  for (ByteBuffer buffer : buffers) {
+this.buffers[bufferIndex++] = buffer;
+  }
+} catch (InterruptedException | ExecutionException e) {
+  LOG.error("Buffer creation interrupted", e);
+  throw new IOException(e);
+}
+  }
+} finally {
+  service.shutdownNow();
+}
+// always create on heap empty dummy buffer at last
+this.buffers[bufferCount] = ByteBuffer.allocate(0);
+  }
+
+  /**
+   * A callable that creates buffers of the specified length either 
onheap/offheap using the
+   * {@link ByteBufferAllocator}
+   */
+  private static class 
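
A quick worked example of the split computed in createBuffers above; the numbers are illustrative, not from the patch.

public class BufferSplitExample {
  public static void main(String[] args) {
    int bufferCount = 1024;  // e.g. a 4 GB bucket cache carved into default 4 MB buffers
    int threadCount = 16;    // what availableProcessors() might report
    int perThreadCount = Math.round((float) bufferCount / threadCount);        // 64
    int lastThreadCount = bufferCount - (perThreadCount * (threadCount - 1));  // 64
    System.out.println(perThreadCount + " buffers per callable, " + lastThreadCount + " for the last one");
  }
}

Each callable allocates its share concurrently and the main thread stitches the returned ByteBuffer arrays back together in order, which is what makes startup faster than the old one-buffer-at-a-time loop.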

[04/26] hbase git commit: HBASE-18175 Add hbase-spark integration test into hbase-spark-it

2017-07-19 Thread busbey
HBASE-18175 Add hbase-spark integration test into hbase-spark-it

* adds module hbase-spark-it
* adds test IntegrationTestSparkBulkLoad
* adds resultant jar to bin assembly

Signed-off-by: Mike Drob 
Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/79a702d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/79a702d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/79a702d1

Branch: refs/heads/HBASE-18147
Commit: 79a702d1126288a0dd45ba9458265cff8e3619c6
Parents: 81ffd6a
Author: Yi Liang 
Authored: Wed Jul 12 17:12:52 2017 -0700
Committer: Sean Busbey 
Committed: Fri Jul 14 10:53:40 2017 -0500

--
 hbase-assembly/pom.xml  |   6 +
 hbase-assembly/src/main/assembly/src.xml|   1 +
 hbase-spark-it/pom.xml  | 361 ++
 .../spark/IntegrationTestSparkBulkLoad.java | 661 +++
 pom.xml |   1 +
 5 files changed, 1030 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/79a702d1/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index eda54cb..1699ea0 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -269,6 +269,12 @@
   org.apache.httpcomponents
   httpcore
 
+
+  org.apache.hbase
+  hbase-spark-it
+  ${project.version}
+  test-jar
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/79a702d1/hbase-assembly/src/main/assembly/src.xml
--
diff --git a/hbase-assembly/src/main/assembly/src.xml 
b/hbase-assembly/src/main/assembly/src.xml
index befcce0..e5d3faf 100644
--- a/hbase-assembly/src/main/assembly/src.xml
+++ b/hbase-assembly/src/main/assembly/src.xml
@@ -56,6 +56,7 @@
 org.apache.hbase:hbase-shaded
 org.apache.hbase:hbase-shell
 org.apache.hbase:hbase-spark
+org.apache.hbase:hbase-spark-it
 org.apache.hbase:hbase-testing-util
 org.apache.hbase:hbase-thrift
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/79a702d1/hbase-spark-it/pom.xml
--
diff --git a/hbase-spark-it/pom.xml b/hbase-spark-it/pom.xml
new file mode 100644
index 000..94ed27f
--- /dev/null
+++ b/hbase-spark-it/pom.xml
@@ -0,0 +1,361 @@
+
+http://maven.apache.org/POM/4.0.0; 
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance; 
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+
+  4.0.0
+  
+hbase
+org.apache.hbase
+3.0.0-SNAPSHOT
+..
+  
+
+  hbase-spark-it
+  Apache HBase - Spark Integration Tests
+  Integration and System tests for HBase
+
+
+  
+1.6.0
+2.10.4
+2.10
+
+**/Test*.java
+**/IntegrationTest*.java
+
+4g
+
+  
+  
+
+  
+
+  org.apache.maven.plugins
+  maven-site-plugin
+  
+true
+  
+
+
+
+  org.apache.maven.plugins
+  maven-source-plugin
+
+
+  
+  maven-assembly-plugin
+  
+true
+  
+
+
+  org.apache.maven.plugins
+  maven-failsafe-plugin
+  ${surefire.version}
+  
+
+  org.apache.maven.surefire
+  surefire-junit4
+  ${surefire.version}
+
+  
+  
+
+  ${integrationtest.include}
+
+
+  ${unittest.include}
+  **/*$*
+
+
${test.output.tofile}
+false
+false
+  
+  
+
+  integration-test
+  integration-test
+  
+integration-test
+  
+
+
+  verify
+  verify
+  
+verify
+  
+
+  
+
+  
+
+
+
+  
+  
+org.apache.maven.plugins
+maven-failsafe-plugin
+
+  false
+  always
+  
+  1800
+  -enableassertions -Xmx${failsafe.Xmx}
+-Djava.security.egd=file:/dev/./urandom 
-XX:+CMSClassUnloadingEnabled
+-verbose:gc -XX:+PrintCommandLineFlags  
-XX:+PrintFlagsFinal
+
+  
+  
+org.apache.maven.plugins
+maven-enforcer-plugin
+
+  
+  
+banned-jsr305
+
+ 
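
A minimal skeleton (hypothetical class, not the real IntegrationTestSparkBulkLoad) of the naming convention the failsafe configuration above keys on: classes matching **/IntegrationTest*.java are run by the integration-test and verify executions rather than the regular unit-test pass.

import static org.junit.Assert.assertTrue;
import org.junit.Test;

// Picked up by maven-failsafe-plugin through the IntegrationTest* include pattern.
public class IntegrationTestExample {
  @Test
  public void clusterRoundTrip() {
    // a real integration test would exercise a running (mini)cluster here
    assertTrue(true);
  }
}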

[07/26] hbase git commit: HBASE-18384 Add link to refguide schema section on apache blog on hbase application archetypes

2017-07-19 Thread busbey
HBASE-18384 Add link to refguide schema section on apache blog on hbase application archetypes


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c08db673
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c08db673
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c08db673

Branch: refs/heads/HBASE-18147
Commit: c08db6733def157fb8cc7e379307b2d577ae1550
Parents: f3a3989
Author: Michael Stack 
Authored: Fri Jul 14 22:17:13 2017 +0100
Committer: Michael Stack 
Committed: Fri Jul 14 22:17:13 2017 +0100

--
 src/main/asciidoc/_chapters/schema_design.adoc | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c08db673/src/main/asciidoc/_chapters/schema_design.adoc
--
diff --git a/src/main/asciidoc/_chapters/schema_design.adoc 
b/src/main/asciidoc/_chapters/schema_design.adoc
index acfcdda..cef05f2 100644
--- a/src/main/asciidoc/_chapters/schema_design.adoc
+++ b/src/main/asciidoc/_chapters/schema_design.adoc
@@ -40,6 +40,9 @@ any quoted values by ~10 to get what works for HBase: e.g. 
where it says individ
 to go smaller if you can -- and where it says a maximum of 100 column families 
in Cloud Bigtable, think ~10 when
 modeling on HBase.
 
+See also Robert Yokota's link:https://blogs.apache.org/hbase/entry/hbase-application-archetypes-redux[HBase Application Archetypes]
+(an update on work done by other HBasers), for a helpful categorization of use cases that do well on top of the HBase model.
+
 
 [[schema.creation]]
 ==  Schema Creation



[23/26] hbase git commit: HBASE-18393 Fix shell noninteractive launch

2017-07-19 Thread busbey
HBASE-18393 Fix shell noninteractive launch

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/775179bc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/775179bc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/775179bc

Branch: refs/heads/HBASE-18147
Commit: 775179bc4a725ef7f252bcec4c37882db17dc857
Parents: 3574757
Author: Mike Drob 
Authored: Wed Jul 19 12:17:02 2017 -0500
Committer: Sean Busbey 
Committed: Wed Jul 19 13:54:55 2017 -0500

--
 bin/hirb.rb | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/775179bc/bin/hirb.rb
--
diff --git a/bin/hirb.rb b/bin/hirb.rb
index d0295d6..841ab54 100644
--- a/bin/hirb.rb
+++ b/bin/hirb.rb
@@ -215,6 +215,15 @@ else
 require "irb/workspace"
 workspace = IRB::WorkSpace.new(binding())
 scanner = RubyLex.new
+
+# RubyLex claims to take an IO but really wants an InputMethod
+module IOExtensions
+  def encoding
+external_encoding
+  end
+end
+IO.include IOExtensions
+
 scanner.set_input(STDIN)
 scanner.each_top_level_statement do |statement, linenum|
puts(workspace.evaluate(nil, statement, 'stdin', linenum))



[13/26] hbase git commit: HBASE-18377 Error handling for FileNotFoundException should consider RemoteException in openReader()

2017-07-19 Thread busbey
HBASE-18377 Error handling for FileNotFoundException should consider RemoteException in openReader()


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0c2915b4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0c2915b4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0c2915b4

Branch: refs/heads/HBASE-18147
Commit: 0c2915b48e157724cefee9f0dbe069ce3f04d0d4
Parents: c423dc7
Author: tedyu 
Authored: Mon Jul 17 20:24:29 2017 -0700
Committer: tedyu 
Committed: Mon Jul 17 20:24:29 2017 -0700

--
 .../regionserver/WALEntryStream.java| 22 +---
 1 file changed, 15 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0c2915b4/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
index c4d552c..4f49955 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntryStream.java
@@ -41,6 +41,7 @@ import 
org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WAL.Reader;
 import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.ipc.RemoteException;
 
 /**
  * Streaming access to WAL entries. This class is given a queue of WAL {@link 
Path}, and continually
@@ -316,6 +317,15 @@ public class WALEntryStream implements Iterator, 
Closeable, Iterable, 
Closeable, Iterable

[20/26] hbase git commit: HBASE-16312 update jquery version

2017-07-19 Thread busbey
HBASE-16312 update jquery version

Upgrade jquery from 1.8.3 to 3.2.1 in hbase-server and hbase-thrift modules

Change-Id: I92d479e9802d954f607ba409077bc98581e9e5ca

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f10f8198
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f10f8198
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f10f8198

Branch: refs/heads/HBASE-18147
Commit: f10f8198afed36a40faa459fe4aa4646838832af
Parents: 6b7ebc0
Author: Peter Somogyi 
Authored: Mon Jul 10 13:27:12 2017 +0200
Committer: Michael Stack 
Committed: Wed Jul 19 11:44:31 2017 +0100

--
 hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp  | 2 --
 .../org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon | 2 --
 .../apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon   | 2 --
 .../src/main/resources/hbase-webapps/master/procedures.jsp | 2 --
 .../src/main/resources/hbase-webapps/master/processMaster.jsp  | 2 --
 .../src/main/resources/hbase-webapps/master/processRS.jsp  | 2 --
 .../src/main/resources/hbase-webapps/master/snapshot.jsp   | 2 --
 .../src/main/resources/hbase-webapps/master/snapshotsStats.jsp | 2 --
 hbase-server/src/main/resources/hbase-webapps/master/table.jsp | 2 --
 .../src/main/resources/hbase-webapps/master/tablesDetailed.jsp | 2 --
 hbase-server/src/main/resources/hbase-webapps/master/zk.jsp| 2 --
 .../main/resources/hbase-webapps/regionserver/processRS.jsp| 2 --
 .../src/main/resources/hbase-webapps/regionserver/region.jsp   | 2 --
 .../main/resources/hbase-webapps/regionserver/storeFile.jsp| 2 --
 .../src/main/resources/hbase-webapps/static/js/jquery.min.js   | 6 --
 .../src/main/resources/hbase-webapps/thrift/thrift.jsp | 2 --
 .../src/main/resources/hbase-webapps/static/js/jquery.min.js   | 6 --
 .../src/main/resources/hbase-webapps/thrift/thrift.jsp | 2 --
 18 files changed, 8 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f10f8198/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp
--
diff --git a/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp 
b/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp
index e002749..ba3c027 100644
--- a/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp
+++ b/hbase-rest/src/main/resources/hbase-webapps/rest/rest.jsp
@@ -27,9 +27,7 @@ Configuration conf = 
(Configuration)getServletContext().getAttribute("hbase.conf
 long startcode = conf.getLong("startcode", System.currentTimeMillis());
 String listenPort = conf.get("hbase.rest.port", "8080");
 %>
-
 
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/f10f8198/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 708e72d..bf3b623 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -93,9 +93,7 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
   }
 
 
-
 
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/f10f8198/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
index 7219c0a..61795e0 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
@@ -50,9 +50,7 @@ org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 : masterAddressTracker.getMasterAddress();
   int infoPort = masterAddressTracker == null ? 0 : 
masterAddressTracker.getMasterInfoPort();
 
-
 
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/f10f8198/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
--
diff --git 
a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
index 00d8ead..29c3d45 100644
--- 

[14/26] hbase git commit: HBASE-18392 Add default value of --movetimeout to rolling-restart.sh

2017-07-19 Thread busbey
HBASE-18392 Add default value of --movetimeout to rolling-restart.sh

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/56d00f57
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/56d00f57
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/56d00f57

Branch: refs/heads/HBASE-18147
Commit: 56d00f574b391fa58ba5b73718f2a5b29171bd07
Parents: 0c2915b4
Author: Samir Ahmic 
Authored: Mon Jul 17 22:57:02 2017 +0200
Committer: tedyu 
Committed: Tue Jul 18 07:04:29 2017 -0700

--
 bin/rolling-restart.sh | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/56d00f57/bin/rolling-restart.sh
--
diff --git a/bin/rolling-restart.sh b/bin/rolling-restart.sh
index 11c091d..46d5cba 100755
--- a/bin/rolling-restart.sh
+++ b/bin/rolling-restart.sh
@@ -60,6 +60,7 @@ RR_RS=1
 RR_MASTER=1
 RR_GRACEFUL=0
 RR_MAXTHREADS=1
+RR_MOVE_TIMEOUT=2147483647
 START_CMD_NON_DIST_MODE=restart
 START_CMD_DIST_MODE=start
 RESTART_CMD_REGIONSERVER=restart



[03/26] hbase git commit: HBASE-18342 Add coprocessor service support for async admin

2017-07-19 Thread busbey
HBASE-18342 Add coprocessor service support for async admin


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/81ffd6a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/81ffd6a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/81ffd6a1

Branch: refs/heads/HBASE-18147
Commit: 81ffd6a13e866e920de6705ba12aa59a56115c60
Parents: 9e0f450
Author: Guanghao Zhang 
Authored: Mon Jul 10 09:25:47 2017 +0800
Committer: Guanghao Zhang 
Committed: Fri Jul 14 11:53:52 2017 +0800

--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  49 ++
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  16 ++
 .../client/AsyncRpcRetryingCallerFactory.java   |  63 ++-
 .../AsyncServerRequestRpcRetryingCaller.java|  79 +
 .../client/MasterCoprocessorRpcChannelImpl.java |  86 ++
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  52 +-
 .../RegionServerCoprocessorRpcChannelImpl.java  |  86 ++
 .../TestAsyncCoprocessorEndpoint.java   | 167 +++
 8 files changed, 596 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/81ffd6a1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 7d904b3..1adf353 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -23,6 +23,7 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.CompletableFuture;
+import java.util.function.Function;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.hbase.ClusterStatus;
@@ -36,12 +37,15 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
+import org.apache.hadoop.hbase.client.RawAsyncTable.CoprocessorCallable;
 import org.apache.hadoop.hbase.client.replication.TableCFs;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.util.Pair;
 
+import com.google.protobuf.RpcChannel;
+
 /**
  * The asynchronous administrative API for HBase.
  * 
@@ -1060,4 +1064,49 @@ public interface AsyncAdmin {
* {@link CompletableFuture}
*/
   CompletableFuture runCatalogJanitor();
+
+  /**
+   * Execute the given coprocessor call on the master.
+   * 
+   * The {@code stubMaker} is just a delegation to the {@code newStub} call. 
Usually it is only a
+   * one line lambda expression, like:
+   *
+   * 
+   * 
+   * channel -> xxxService.newStub(channel)
+   * 
+   * 
+   * @param stubMaker a delegation to the actual {@code newStub} call.
+   * @param callable a delegation to the actual protobuf rpc call. See the 
comment of
+   *  {@link CoprocessorCallable} for more details.
+   * @param <S> the type of the asynchronous stub
+   * @param <R> the type of the return value
+   * @return the return value of the protobuf rpc call, wrapped by a {@link CompletableFuture}.
+   * @see CoprocessorCallable
+   */
+  <S, R> CompletableFuture<R> coprocessorService(Function<RpcChannel, S> stubMaker,
+      CoprocessorCallable<S, R> callable);
+
+  /**
+   * Execute the given coprocessor call on the given region server.
+   * 
+   * The {@code stubMaker} is just a delegation to the {@code newStub} call. 
Usually it is only a
+   * one line lambda expression, like:
+   *
+   * 
+   * 
+   * channel -> xxxService.newStub(channel)
+   * 
+   * 
+   * @param stubMaker a delegation to the actual {@code newStub} call.
+   * @param callable a delegation to the actual protobuf rpc call. See the 
comment of
+   *  {@link CoprocessorCallable} for more details.
+   * @param serverName the given region server
+   * @param <S> the type of the asynchronous stub
+   * @param <R> the type of the return value
+   * @return the return value of the protobuf rpc call, wrapped by a {@link CompletableFuture}.
+   * @see CoprocessorCallable
+   */
+  <S, R> CompletableFuture<R> coprocessorService(Function<RpcChannel, S> stubMaker,
+      CoprocessorCallable<S, R> callable, ServerName serverName);
 }
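To make the stubMaker/callable split above concrete, here is a toy, self-contained model of the pattern; none of the types below are the real HBase or protobuf classes, they only mirror the shape of the API (one lambda builds a stub from an RPC channel, the other performs the actual call on that stub):

import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

public class CoprocessorServicePattern {

  /** Stand-in for an RPC channel. */
  interface Channel {}

  /** Stand-in for the "actual protobuf rpc call" delegation. */
  interface Callable<S, R> {
    CompletableFuture<R> call(S stub);
  }

  /** Mirrors the two-step delegation used by coprocessorService. */
  static <S, R> CompletableFuture<R> coprocessorService(
      Channel channel, Function<Channel, S> stubMaker, Callable<S, R> callable) {
    S stub = stubMaker.apply(channel);   // delegation to newStub(channel)
    return callable.call(stub);          // delegation to the actual rpc call
  }

  /** Example "generated" service with a newStub factory, as in the javadoc above. */
  static class EchoService {
    static EchoService newStub(Channel c) { return new EchoService(); }
    CompletableFuture<String> echo(String msg) {
      return CompletableFuture.completedFuture(msg);
    }
  }

  public static void main(String[] args) {
    CompletableFuture<String> result = coprocessorService(
        new Channel() {},
        channel -> EchoService.newStub(channel),   // stubMaker
        stub -> stub.echo("hello"));               // the actual call
    System.out.println(result.join());             // prints "hello"
  }
}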

http://git-wip-us.apache.org/repos/asf/hbase/blob/81ffd6a1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java

[10/26] hbase git commit: HBASE-18366 Fix flaky test TestServerCrashProcedure#testRecoveryAndDoubleExecutionOnRsWithMeta (Umesh Agashe) Disabled for now. Will be back here when a more fundamental fix.

2017-07-19 Thread busbey
HBASE-18366 Fix flaky test 
TestServerCrashProcedure#testRecoveryAndDoubleExecutionOnRsWithMeta (Umesh 
Agashe)
Disabled for now. Will be re-enabled when a more fundamental fix is in place.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a9352fe9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a9352fe9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a9352fe9

Branch: refs/heads/HBASE-18147
Commit: a9352fe9564f57658f918ef63ac4a5ebdc17fb45
Parents: 353627b
Author: Michael Stack 
Authored: Fri Jul 14 22:39:53 2017 +0100
Committer: Michael Stack 
Committed: Fri Jul 14 22:41:36 2017 +0100

--
 .../hadoop/hbase/master/procedure/TestServerCrashProcedure.java| 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a9352fe9/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
index e2d894f..b6bf0bb 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestServerCrashProcedure.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
 
 @Category({MasterTests.class, LargeTests.class})
@@ -85,6 +86,7 @@ public class TestServerCrashProcedure {
   public void testCrashTargetRs() throws Exception {
   }
 
+  @Ignore  // HBASE-18366... To be enabled again.
   @Test(timeout=6)
   public void testRecoveryAndDoubleExecutionOnRsWithMeta() throws Exception {
 testRecoveryAndDoubleExecution(true);



[17/26] hbase git commit: HBASE-16312 update jquery version

2017-07-19 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/f10f8198/hbase-thrift/src/main/resources/hbase-webapps/static/js/jquery.min.js
--
diff --git 
a/hbase-thrift/src/main/resources/hbase-webapps/static/js/jquery.min.js 
b/hbase-thrift/src/main/resources/hbase-webapps/static/js/jquery.min.js
index 3883779..644d35e 100644
--- a/hbase-thrift/src/main/resources/hbase-webapps/static/js/jquery.min.js
+++ b/hbase-thrift/src/main/resources/hbase-webapps/static/js/jquery.min.js
@@ -1,2 +1,4 @@
-/*! jQuery v1.8.3 jquery.com | jquery.org/license */
-[minified jQuery 1.8.3 source elided; this commit replaces it with the minified jQuery 3.2.1 distribution]

[02/26] hbase git commit: HBASE-17922 Clean TestRegionServerHostname for hadoop3.

2017-07-19 Thread busbey
HBASE-17922 Clean TestRegionServerHostname for hadoop3.

Change-Id: I6f1514b1bc301be553912539e6a4192c2ccc782b
Signed-off-by: Apekshit Sharma 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e0f450c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e0f450c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e0f450c

Branch: refs/heads/HBASE-18147
Commit: 9e0f450c0ca732a9634e2147c2e0d7b885eca9cc
Parents: 500592d
Author: Mike Drob 
Authored: Thu Jun 29 15:30:11 2017 -0500
Committer: Apekshit Sharma 
Committed: Thu Jul 13 11:44:18 2017 -0700

--
 .../regionserver/TestRegionServerHostname.java  | 64 +---
 1 file changed, 29 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e0f450c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
index 679595a..317a3a2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
-import java.io.IOException;
 import java.net.InetAddress;
 import java.net.NetworkInterface;
 import java.util.Enumeration;
@@ -35,6 +36,8 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -45,36 +48,40 @@ import org.junit.experimental.categories.Category;
 public class TestRegionServerHostname {
   private static final Log LOG = 
LogFactory.getLog(TestRegionServerHostname.class);
 
+  private HBaseTestingUtility TEST_UTIL;
+
+  private static final int NUM_MASTERS = 1;
+  private static final int NUM_RS = 1;
+
+  @Before
+  public void setup() {
+Configuration conf = HBaseConfiguration.create();
+TEST_UTIL = new HBaseTestingUtility(conf);
+  }
+
+  @After
+  public void teardown() throws Exception {
+TEST_UTIL.shutdownMiniCluster();
+  }
+
   @Test (timeout=3)
   public void testInvalidRegionServerHostnameAbortsServer() throws Exception {
-final int NUM_MASTERS = 1;
-final int NUM_RS = 1;
 String invalidHostname = "hostAddr.invalid";
-Configuration conf = HBaseConfiguration.create();
-HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
 TEST_UTIL.getConfiguration().set(HRegionServer.RS_HOSTNAME_KEY, 
invalidHostname);
+HRegionServer hrs = null;
 try {
-  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
-} catch (IOException ioe) {
-  Throwable t1 = ioe.getCause();
-  Throwable t2 = t1.getCause();
-  assertTrue(t1.getMessage() + " - " + t2.getMessage(),
-t2.getMessage().contains("Failed resolve of " + invalidHostname) ||
-t2.getMessage().contains("Problem binding to " + invalidHostname));
-  return;
-} finally {
-  TEST_UTIL.shutdownMiniCluster();
+  hrs = new HRegionServer(TEST_UTIL.getConfiguration(), null);
+} catch (IllegalArgumentException iae) {
+  assertTrue(iae.getMessage(),
+iae.getMessage().contains("Failed resolve of " + invalidHostname) ||
+iae.getMessage().contains("Problem binding to " + invalidHostname));
 }
-assertTrue("Failed to validate against invalid hostname", false);
+assertNull("Failed to validate against invalid hostname", hrs);
   }
 
   @Test(timeout=12)
   public void testRegionServerHostname() throws Exception {
-final int NUM_MASTERS = 1;
-final int NUM_RS = 1;
 Enumeration netInterfaceList = 
NetworkInterface.getNetworkInterfaces();
-Configuration conf = HBaseConfiguration.create();
-HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
 while (netInterfaceList.hasMoreElements()) {
   NetworkInterface ni = netInterfaceList.nextElement();
   Enumeration addrList = ni.getInetAddresses();
@@ -109,11 +116,7 @@ public class TestRegionServerHostname {
 
   @Test(timeout=3)
   public void testConflictRegionServerHostnameConfigurationsAbortServer() 
throws Exception {
-final 

[06/26] hbase git commit: HBASE-18175: (addendum) Add hbase-spark integration test into hbase-spark-it

2017-07-19 Thread busbey
HBASE-18175: (addendum) Add hbase-spark integration test into hbase-spark-it

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f3a39895
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f3a39895
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f3a39895

Branch: refs/heads/HBASE-18147
Commit: f3a398953f9f293d09874622551cce8032dbe4a0
Parents: ea0062f
Author: Yi Liang 
Authored: Fri Jul 14 11:00:40 2017 -0700
Committer: Sean Busbey 
Committed: Fri Jul 14 14:52:23 2017 -0500

--
 .../apache/hadoop/hbase/spark/IntegrationTestSparkBulkLoad.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f3a39895/hbase-spark-it/src/test/java/org/apache/hadoop/hbase/spark/IntegrationTestSparkBulkLoad.java
--
diff --git 
a/hbase-spark-it/src/test/java/org/apache/hadoop/hbase/spark/IntegrationTestSparkBulkLoad.java
 
b/hbase-spark-it/src/test/java/org/apache/hadoop/hbase/spark/IntegrationTestSparkBulkLoad.java
index 2d84914..f45c0b9 100644
--- 
a/hbase-spark-it/src/test/java/org/apache/hadoop/hbase/spark/IntegrationTestSparkBulkLoad.java
+++ 
b/hbase-spark-it/src/test/java/org/apache/hadoop/hbase/spark/IntegrationTestSparkBulkLoad.java
@@ -86,8 +86,10 @@ import java.util.Set;
  * This test mimic {@link IntegrationTestBulkLoad} in mapreduce.
  *
  * Usage on cluster:
+ *   First add hbase related jars and hbase-spark.jar into spark classpath.
+ *
  *   spark-submit --class 
org.apache.hadoop.hbase.spark.IntegrationTestSparkBulkLoad
- *HBASE_HOME/lib/hbase-it-XXX-tests.jar -m slowDeterministic 
-Dhbase.spark.bulkload.chainlength=300
+ *HBASE_HOME/lib/hbase-spark-it-XXX-tests.jar -m 
slowDeterministic -Dhbase.spark.bulkload.chainlength=300
  */
 public class IntegrationTestSparkBulkLoad extends IntegrationTestBase {
 



[12/26] hbase git commit: HBASE-18332 Upgrade asciidoctor-maven-plugin

2017-07-19 Thread busbey
HBASE-18332 Upgrade asciidoctor-maven-plugin

Update asciidoctor-maven-plugin to 1.5.5 and asciidoctorj-pdf to 1.5.0-alpha.15
asciidoctor's pdfmark generation is turned off
Modify title-logo tag to title-logo-image

Signed-off-by: Misty Stanley-Jones 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c423dc79
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c423dc79
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c423dc79

Branch: refs/heads/HBASE-18147
Commit: c423dc7950c4746220498b0e0b8884c51c51e77e
Parents: 2d5a0fb
Author: Peter Somogyi 
Authored: Fri Jul 7 13:54:41 2017 +0200
Committer: Misty Stanley-Jones 
Committed: Mon Jul 17 19:05:53 2017 -0700

--
 pom.xml | 5 ++---
 src/main/asciidoc/book.adoc | 2 +-
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c423dc79/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 329c468..9554d85 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1265,7 +1265,6 @@
 
   
 
-
   
 
 
@@ -1391,8 +1390,8 @@
 2.12.0
 
 0.12
-1.5.2.1
-1.5.0-alpha.6
+1.5.5
+1.5.0-alpha.15
 3.0.0
 1.4
 6.18

http://git-wip-us.apache.org/repos/asf/hbase/blob/c423dc79/src/main/asciidoc/book.adoc
--
diff --git a/src/main/asciidoc/book.adoc b/src/main/asciidoc/book.adoc
index e5898d5..2b9bf26 100644
--- a/src/main/asciidoc/book.adoc
+++ b/src/main/asciidoc/book.adoc
@@ -26,7 +26,7 @@
 :Version: {docVersion}
 :revnumber: {docVersion}
 // Logo for PDF -- doesn't render in HTML
-:title-logo: hbase_logo_with_orca.png
+:title-logo-image: image:hbase_logo_with_orca.png[pdfwidth=4.25in,align=center]
 :numbered:
 :toc: left
 :toclevels: 1



[22/26] hbase git commit: HBASE-18308 Eliminate the findbugs warnings for hbase-server

2017-07-19 Thread busbey
HBASE-18308 Eliminate the findbugs warnings for hbase-server


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3574757f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3574757f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3574757f

Branch: refs/heads/HBASE-18147
Commit: 3574757f74762ba7ba563595d1cda3314312ef8f
Parents: d0e4a64
Author: Chia-Ping Tsai 
Authored: Thu Jul 20 00:35:07 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jul 20 00:35:07 2017 +0800

--
 .../java/org/apache/hadoop/hbase/LocalHBaseCluster.java   | 10 --
 .../org/apache/hadoop/hbase/constraint/Constraints.java   |  4 ++--
 .../hbase/coordination/ZkSplitLogWorkerCoordination.java  |  3 ++-
 .../java/org/apache/hadoop/hbase/mapreduce/JarFinder.java |  3 +++
 .../hadoop/hbase/mapreduce/LoadIncrementalHFiles.java |  6 --
 .../java/org/apache/hadoop/hbase/master/DeadServer.java   |  5 -
 .../org/apache/hadoop/hbase/regionserver/HRegion.java |  4 
 .../hbase/replication/regionserver/ReplicationSource.java |  2 +-
 .../main/java/org/apache/hadoop/hbase/tool/Canary.java|  5 +++--
 9 files changed, 15 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index b04e685..2dad81a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -309,12 +309,10 @@ public class LocalHBaseCluster {
*/
   public HMaster getActiveMaster() {
 for (JVMClusterUtil.MasterThread mt : masterThreads) {
-  if (mt.getMaster().isActiveMaster()) {
-// Ensure that the current active master is not stopped.
-// We don't want to return a stopping master as an active master.
-if (mt.getMaster().isActiveMaster()  && !mt.getMaster().isStopped()) {
-  return mt.getMaster();
-}
+  // Ensure that the current active master is not stopped.
+  // We don't want to return a stopping master as an active master.
+  if (mt.getMaster().isActiveMaster()  && !mt.getMaster().isStopped()) {
+return mt.getMaster();
   }
 }
 return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
index 203442a..c6c3688 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
@@ -614,8 +614,8 @@ public final class Constraints {
 @Override
 public int compare(Constraint c1, Constraint c2) {
   // compare the priorities of the constraints stored in their 
configuration
-  return Long.valueOf(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY))
-  .compareTo(c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
+  return Long.compare(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY),
+  c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
 }
   };
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3574757f/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index 70445bd..e4fffa1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -50,6 +50,7 @@ import 
org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import 

[11/26] hbase git commit: HBASE-18052 Add document for async admin

2017-07-19 Thread busbey
HBASE-18052 Add document for async admin


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2d5a0fbd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2d5a0fbd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2d5a0fbd

Branch: refs/heads/HBASE-18147
Commit: 2d5a0fbd16ddd9d46ab3f72cabd06a853df4916b
Parents: a9352fe
Author: Guanghao Zhang 
Authored: Sun Jul 16 16:00:35 2017 +0800
Committer: Guanghao Zhang 
Committed: Sun Jul 16 16:46:33 2017 +0800

--
 src/main/asciidoc/_chapters/architecture.adoc | 17 -
 1 file changed, 12 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2d5a0fbd/src/main/asciidoc/_chapters/architecture.adoc
--
diff --git a/src/main/asciidoc/_chapters/architecture.adoc 
b/src/main/asciidoc/_chapters/architecture.adoc
index 8d11efb..ebb0677 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -265,11 +265,18 @@ For `AsyncTable`, you need to provide a thread pool when 
getting it. The callbac
 
 For `RawAsyncTable`, all the callbacks are executed inside the framework 
thread so it is not allowed to do time consuming works in the callbacks 
otherwise you may block the framework thread and cause very bad performance 
impact. It is designed for advanced users who want to write high performance 
code. You can see the `org.apache.hadoop.hbase.client.example.HttpProxyExample` 
to see how to write fully asynchronous code with `RawAsyncTable`. And 
coprocessor related methods are only in `RawAsyncTable`.
 
-.On `AsyncAdmin`
-[WARNING]
-
-`AsyncAdmin` is still under development and marked as IA.Private. Use it with 
caution as we may change the API without any announcement.
-
+[[async.admin]]
+=== Asynchronous Admin ===
+
+You can obtain an `AsyncConnection` from `ConnectionFactory`, and then get an `AsyncAdmin` instance from it to access HBase. Note that there are two `getAdmin` methods for getting an `AsyncAdmin` instance. One method takes an extra thread pool parameter which is used to execute callbacks; it is designed for normal users. The other method does not need a thread pool, and all the callbacks are executed inside the framework thread, so it is not allowed to do time-consuming work in the callbacks; it is designed for advanced users.
+
+The default `getAdmin` methods will return an `AsyncAdmin` instance which uses default configs. If you want to customize some configs, you can use the `getAdminBuilder` methods to get an `AsyncAdminBuilder` for creating an `AsyncAdmin` instance. Users are free to set only the configs they care about when creating a new `AsyncAdmin` instance.
+
+For the `AsyncAdmin` interface, most methods have the same meaning as in the old `Admin` interface, except that the return value is usually wrapped in a CompletableFuture.
+
+For most admin operations, when the returned CompletableFuture is done, it means the admin operation has also been done. But for the compact operation, it only means that the compact request was sent to HBase; the compaction itself may need some time to finish. Similarly, for the `rollWALWriter` method it only means that the rollWALWriter request was sent to the region server; the `rollWALWriter` operation may need some time to finish.
+
+For region names, we only accept `byte[]` as the parameter type, and it may be a full region name or an encoded region name. For server names, we only accept `ServerName` as the parameter type. For table names, we only accept `TableName` as the parameter type. For `list*` operations, we only accept `Pattern` as the parameter type if you want to do regex matching.
 
 [[client.external]]
 === External Clients
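Building on the `AsyncAdmin` documentation added above, a minimal usage sketch, assuming the HBase 2.0 async client API (`ConnectionFactory.createAsyncConnection`, `AsyncConnection.getAdmin`, `AsyncAdmin.tableExists`); the table name is illustrative and error handling is omitted:

import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncAdminSketch {
  public static void main(String[] args) throws Exception {
    // Obtain an AsyncConnection, then an AsyncAdmin, as described in the section above.
    CompletableFuture<AsyncConnection> connFuture =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create());
    try (AsyncConnection conn = connFuture.get()) {
      // No-thread-pool variant: callbacks run on framework threads, so keep them cheap.
      AsyncAdmin admin = conn.getAdmin();
      Boolean exists = admin.tableExists(TableName.valueOf("my_table")).get();
      System.out.println("my_table exists: " + exists);
    }
  }
}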



[19/26] hbase git commit: HBASE-16312 update jquery version

2017-07-19 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/f10f8198/hbase-server/src/main/resources/hbase-webapps/static/js/jquery.min.js
--
diff --git 
a/hbase-server/src/main/resources/hbase-webapps/static/js/jquery.min.js 
b/hbase-server/src/main/resources/hbase-webapps/static/js/jquery.min.js
index 3883779..644d35e 100644
--- a/hbase-server/src/main/resources/hbase-webapps/static/js/jquery.min.js
+++ b/hbase-server/src/main/resources/hbase-webapps/static/js/jquery.min.js
@@ -1,2 +1,4 @@
-/*! jQuery v1.8.3 jquery.com | jquery.org/license */
-[minified jQuery 1.8.3 source elided; this commit replaces it with the minified jQuery 3.2.1 distribution]

[16/26] hbase git commit: HBASE-16312 update jquery version

2017-07-19 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/f10f8198/hbase-thrift/src/main/resources/hbase-webapps/thrift/thrift.jsp
--
diff --git a/hbase-thrift/src/main/resources/hbase-webapps/thrift/thrift.jsp 
b/hbase-thrift/src/main/resources/hbase-webapps/thrift/thrift.jsp
index 97b948f..579d0f7 100644
--- a/hbase-thrift/src/main/resources/hbase-webapps/thrift/thrift.jsp
+++ b/hbase-thrift/src/main/resources/hbase-webapps/thrift/thrift.jsp
@@ -33,9 +33,7 @@ String implType = 
conf.get("hbase.regionserver.thrift.server.type", "threadpool"
 String compact = conf.get("hbase.regionserver.thrift.compact", "false");
 String framed = conf.get("hbase.regionserver.thrift.framed", "false");
 %>
-
 
 
   



[15/26] hbase git commit: HBASE-18390 Sleep too long when finding region location failed

2017-07-19 Thread busbey
HBASE-18390 Sleep too long when finding region location failed


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6b7ebc01
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6b7ebc01
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6b7ebc01

Branch: refs/heads/HBASE-18147
Commit: 6b7ebc019c8c64a7e7a00461029f699b0f0e3772
Parents: 56d00f5
Author: Phil Yang 
Authored: Wed Jul 19 11:34:57 2017 +0800
Committer: Phil Yang 
Committed: Wed Jul 19 11:34:57 2017 +0800

--
 .../hadoop/hbase/client/ConnectionUtils.java| 14 --
 .../client/RegionAdminServiceCallable.java  |  8 +---
 .../hbase/client/RegionServerCallable.java  |  8 +---
 .../hbase/client/TestConnectionUtils.java   | 20 
 4 files changed, 2 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6b7ebc01/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 98ac845..1f2fbb5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -95,20 +95,6 @@ public final class ConnectionUtils {
   }
 
   /**
-   * Adds / subs an up to 50% jitter to a pause time. Minimum is 1.
-   * @param pause the expected pause.
-   * @param jitter the jitter ratio, between 0 and 1, exclusive.
-   */
-  public static long addJitter(final long pause, final float jitter) {
-float lag = pause * (ThreadLocalRandom.current().nextFloat() - 0.5f) * 
jitter;
-long newPause = pause + (long) lag;
-if (newPause <= 0) {
-  return 1;
-}
-return newPause;
-  }
-
-  /**
* @param conn The connection for which to replace the generator.
* @param cnm Replaces the nonce generator used, for testing.
* @return old nonce generator.

http://git-wip-us.apache.org/repos/asf/hbase/blob/6b7ebc01/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index 6846562..c9a143c 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -51,7 +51,6 @@ public abstract class RegionAdminServiceCallable 
implements RetryingCallable<
   protected final TableName tableName;
   protected final byte[] row;
   protected final int replicaId;
-  protected final static int MIN_WAIT_DEAD_SERVER = 1;
 
   public RegionAdminServiceCallable(ClusterConnection connection,
   RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] 
row) {
@@ -136,12 +135,7 @@ public abstract class RegionAdminServiceCallable 
implements RetryingCallable<
 
   @Override
   public long sleep(long pause, int tries) {
-long sleep = ConnectionUtils.getPauseTime(pause, tries);
-if (sleep < MIN_WAIT_DEAD_SERVER
-&& (location == null || connection.isDeadServer(location.getServerName()))) {
-  sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);
-}
-return sleep;
+return ConnectionUtils.getPauseTime(pause, tries);
   }
 
   public static RegionLocations getRegionLocations(

http://git-wip-us.apache.org/repos/asf/hbase/blob/6b7ebc01/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index a8e17c6..fb593a3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -58,7 +58,6 @@ public abstract class RegionServerCallable implements 
RetryingCallable
* Some subclasses want to set their own location. Make it protected.
*/
   protected HRegionLocation location;
-  protected final static int MIN_WAIT_DEAD_SERVER = 1;
   protected S stub;
 
   /**
@@ -185,12 +184,7 @@ public abstract class RegionServerCallable 
implements RetryingCallable
   }
 
   

[18/26] hbase git commit: HBASE-16312 update jquery version

2017-07-19 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/f10f8198/hbase-server/src/main/resources/hbase-webapps/thrift/thrift.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/thrift/thrift.jsp 
b/hbase-server/src/main/resources/hbase-webapps/thrift/thrift.jsp
index fa98f05..7f545ad 100644
--- a/hbase-server/src/main/resources/hbase-webapps/thrift/thrift.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/thrift/thrift.jsp
@@ -33,9 +33,7 @@ String implType = 
conf.get("hbase.regionserver.thrift.server.type", "threadpool"
 String compact = conf.get("hbase.regionserver.thrift.compact", "false");
 String framed = conf.get("hbase.regionserver.thrift.framed", "false");
 %>
-
 
 
   



[01/26] hbase git commit: HBASE-18339 Update to hadoop3-alpha4 [Forced Update!]

2017-07-19 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-18147 795e33295 -> 8f06993df (forced update)


HBASE-18339 Update to hadoop3-alpha4

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/500592df
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/500592df
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/500592df

Branch: refs/heads/HBASE-18147
Commit: 500592dfd0fb0446dc501d11ade0f3b3ddc49bd3
Parents: c0725dd
Author: Mike Drob 
Authored: Wed Jul 12 13:36:27 2017 -0500
Committer: Sean Busbey 
Committed: Thu Jul 13 10:03:03 2017 -0500

--
 dev-support/hbase-personality.sh | 4 ++--
 pom.xml  | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/500592df/dev-support/hbase-personality.sh
--
diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 990a49a..ae8b501 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -58,10 +58,10 @@ function personality_globals
 HBASE_HADOOP3_VERSIONS=""
   elif [[ ${PATCH_BRANCH} = branch-2* ]]; then
 HBASE_HADOOP2_VERSIONS="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3"
-HBASE_HADOOP3_VERSIONS="3.0.0-alpha3"
+HBASE_HADOOP3_VERSIONS="3.0.0-alpha4"
   else # master or a feature branch
 HBASE_HADOOP2_VERSIONS="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3"
-HBASE_HADOOP3_VERSIONS="3.0.0-alpha3"
+HBASE_HADOOP3_VERSIONS="3.0.0-alpha4"
   fi
 
   # TODO use PATCH_BRANCH to select jdk versions to use.

http://git-wip-us.apache.org/repos/asf/hbase/blob/500592df/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 708cece..5e9e175 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1330,7 +1330,7 @@
 ${compileSource}
 
 2.7.1
-3.0.0-alpha2
+3.0.0-alpha4
 



[26/26] hbase git commit: HBASE-18147 POC jenkinsfile for nightly checks.

2017-07-19 Thread busbey
HBASE-18147 POC jenkinsfile for nightly checks.

* Jenkinsfile that works for all current branches.
* adds dev-support script for setting parameters for our yetus nightly 
invocation
* adds ruby tools to dockerfile
* adds rubocop to dockerfile
* adds ruby-lint to dockerfile
* adds perlcritic to dockerfile


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8f06993d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8f06993d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8f06993d

Branch: refs/heads/HBASE-18147
Commit: 8f06993df515e928c33fc3a9dc0422e19cc67998
Parents: 01db60d
Author: Sean Busbey 
Authored: Tue Jul 4 15:12:38 2017 -0400
Committer: Sean Busbey 
Committed: Thu Jul 20 00:37:32 2017 -0500

--
 dev-support/Jenkinsfile| 269 
 dev-support/docker/Dockerfile  |  29 
 dev-support/hbase_nightly_yetus.sh |  85 ++
 3 files changed, 383 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8f06993d/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
new file mode 100644
index 000..5ff3d82
--- /dev/null
+++ b/dev-support/Jenkinsfile
@@ -0,0 +1,269 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+pipeline {
+  agent {
+node {
+  label 'Hadoop'
+}
+  }
+  triggers {
+cron('@daily')
+  }
+  options {
+buildDiscarder(logRotator(numToKeepStr: '30'))
+timeout (time: 6, unit: 'HOURS')
+timestamps()
+  }
+  environment {
+TOOLS = "${env.WORKSPACE}/tools"
+// where we check out to across stages
+BASEDIR = "${env.WORKSPACE}/component"
+YETUS_RELEASE = '0.5.0'
+// where we'll write everything from different steps.
+OUTPUT_RELATIVE_GENERAL = 'output-general'
+OUTPUTDIR_GENERAL = "${env.WORKSPACE}/output-general"
+OUTPUT_RELATIVE_JDK7 = 'output-jdk7'
+OUTPUTDIR_JDK7 = "${env.WORKSPACE}/output-jdk7"
+OUTPUT_RELATIVE_JDK8 = 'output-jdk8'
+OUTPUTDIR_JDK8 = "${env.WORKSPACE}/output-jdk8"
+PROJECT = 'hbase'
+PROJET_PERSONALITY = 
'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
+// This section of the docs tells folks not to use the javadoc tag. older 
branches have our old version of the check for said tag.
+AUTHOR_IGNORE_LIST = 
'src/main/asciidoc/_chapters/developer.adoc,dev-support/test-patch.sh'
+WHITESPACE_IGNORE_LIST = '.*/generated/.*'
+// output from surefire; sadly the archive function in yetus only works on 
file names.
+ARCHIVE_PATTERN_LIST = 
'TEST-*.xml,org.apache.h*-output.txt,org.apache.h*.txt'
+// These tests currently have known failures. Once they burn down to 0, 
remove from here so that new problems will cause a failure.
+TESTS_FILTER = 
'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop'
+BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
+EXCLUDE_TESTS_URL = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
+  }
+  parameters {
+booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, 
description: '''Check to use the current HEAD of apache/yetus rather than our 
configured release.
+
+Should only be used manually when e.g. there is some non-work-aroundable 
issue in yetus we are checking a fix for.''')
+booleanParam(name: 'DEBUG', defaultValue: false, description: 'Produce a 
lot more meta-information.')
+  }
+  stages {
+stage ('yetus install') {
+  steps {
+sh  '''#!/usr/bin/env bash
+echo "Ensure we have a copy of Apache Yetus."
+if [[ true !=  "${USE_YETUS_PRERELEASE}" ]]; then
+  YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
+  echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
+  if [ ! -d "${YETUS_DIR}" ]; then
+echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+rm -rf 

[05/26] hbase git commit: HBASE-18376 Add debug logs to hbase-personality.sh to make it possible to debug this issue.

2017-07-19 Thread busbey
HBASE-18376 Add debug logs to hbase-personality.sh to make it possible to debug 
this issue.

Change-Id: I172b95ed6a387ddb507abc6645a0c3aa704321fc


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ea0062fa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ea0062fa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ea0062fa

Branch: refs/heads/HBASE-18147
Commit: ea0062fa54bba3c31a597e94b62f4130624b6f82
Parents: 79a702d
Author: Apekshit Sharma 
Authored: Fri Jul 14 12:33:25 2017 -0700
Committer: Apekshit Sharma 
Committed: Fri Jul 14 12:33:42 2017 -0700

--
 dev-support/hbase-personality.sh | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ea0062fa/dev-support/hbase-personality.sh
--
diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index ae8b501..7a9ce3e 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -116,10 +116,13 @@ function personality_modules
   # tests respectively.
   if [[ ${testtype} = unit ]]; then
 extra="${extra} -PrunAllTests"
+yetus_debug "EXCLUDE_TESTS_URL = ${EXCLUDE_TESTS_URL}"
+yetus_debug "INCLUDE_TESTS_URL = ${INCLUDE_TESTS_URL}"
 if [[ -n "$EXCLUDE_TESTS_URL" ]]; then
 wget "$EXCLUDE_TESTS_URL" -O "excludes"
 if [[ $? -eq 0 ]]; then
   excludes=$(cat excludes)
+  yetus_debug "excludes=${excludes}"
   if [[ -n "${excludes}" ]]; then
 extra="${extra} -Dtest.exclude.pattern=${excludes}"
   fi
@@ -132,6 +135,7 @@ function personality_modules
 wget "$INCLUDE_TESTS_URL" -O "includes"
 if [[ $? -eq 0 ]]; then
   includes=$(cat includes)
+  yetus_debug "includes=${includes}"
   if [[ -n "${includes}" ]]; then
 extra="${extra} -Dtest=${includes}"
   fi



[08/26] hbase git commit: HBASE-18229: create new Async Split API to embrace AM v2

2017-07-19 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/353627b3/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
index 4d2cb0b..4a16808 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
@@ -316,7 +316,16 @@ public class TestTablePermissions {
 table.put(new Put(Bytes.toBytes("row2"))
 .addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2")));
 Admin admin = UTIL.getAdmin();
-admin.split(TEST_TABLE);
+try {
+  admin.split(TEST_TABLE);
+}
+catch (IOException e) {
+  // Although the split may fail, this should not affect the following check.
+  // With the old split API (before AM v2), no exception was thrown when a region's
+  // best split key could not be found; the current API does throw one.
+  LOG.debug("region is not splittable, because " + e);
+}
 
 // wait for split
 Thread.sleep(1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/353627b3/hbase-shell/src/test/ruby/hbase/admin_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb 
b/hbase-shell/src/test/ruby/hbase/admin_test.rb
index 60fc43b..2a20d34 100644
--- a/hbase-shell/src/test/ruby/hbase/admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb
@@ -124,7 +124,11 @@ module Hbase
 
#---
 
 define_test "split should work" do
-  command(:split, 'hbase:meta', nil)
+  begin
+command(:split, 'hbase:meta', nil)
+  rescue org.apache.hadoop.hbase.ipc.RemoteWithExtrasException => e
+puts "can not split hbase:meta"
+  end
 end
 
 
#---



[24/26] hbase git commit: HBASE-18337 Exclude jetty-orbit sigs from shade

2017-07-19 Thread busbey
HBASE-18337 Exclude jetty-orbit sigs from shade

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5f54e285
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5f54e285
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5f54e285

Branch: refs/heads/HBASE-18147
Commit: 5f54e28510fdbdc1a08688168f8df19904bcd975
Parents: 775179b
Author: Mike Drob 
Authored: Fri Jul 7 13:45:05 2017 -0700
Committer: Sean Busbey 
Committed: Wed Jul 19 15:49:38 2017 -0500

--
 hbase-shaded/pom.xml | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5f54e285/hbase-shaded/pom.xml
--
diff --git a/hbase-shaded/pom.xml b/hbase-shaded/pom.xml
index 3f9a317..6bccc7d 100644
--- a/hbase-shaded/pom.xml
+++ b/hbase-shaded/pom.xml
@@ -347,6 +347,16 @@
 ${project.name}
   
 
+            <filters>
+              <filter>
+                <artifact>org.eclipse.jetty.orbit:javax.servlet.jsp.jstl</artifact>
+                <excludes>
+                  <exclude>META-INF/ECLIPSEF.SF</exclude>
+                  <exclude>META-INF/ECLIPSEF.RSA</exclude>
+                </excludes>
+              </filter>
+            </filters>
 
 
 



[09/26] hbase git commit: HBASE-18229: create new Async Split API to embrace AM v2

2017-07-19 Thread busbey
HBASE-18229: create new Async Split API to embrace AM v2

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/353627b3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/353627b3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/353627b3

Branch: refs/heads/HBASE-18147
Commit: 353627b39de73020dd2448b54c0f13f6902b19bf
Parents: c08db67
Author: Yi Liang 
Authored: Wed Jul 12 09:59:29 2017 -0700
Committer: Michael Stack 
Committed: Fri Jul 14 22:25:14 2017 +0100

--
 .../org/apache/hadoop/hbase/client/Admin.java   |  13 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 121 -
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  50 +-
 .../hbase/shaded/protobuf/RequestConverter.java |  30 ++
 .../shaded/protobuf/generated/AdminProtos.java  | 481 +--
 .../shaded/protobuf/generated/ClientProtos.java |   4 +-
 .../shaded/protobuf/generated/MasterProtos.java |  25 +-
 .../src/main/protobuf/Admin.proto   |   3 +
 .../src/main/protobuf/Master.proto  |   2 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   2 +-
 .../master/assignment/AssignmentManager.java|  24 +-
 .../assignment/MergeTableRegionsProcedure.java  |  10 +-
 .../assignment/SplitTableRegionProcedure.java   | 100 ++--
 .../hadoop/hbase/master/assignment/Util.java|  13 +-
 .../hbase/regionserver/RSRpcServices.java   |  17 +
 .../hbase/backup/TestIncrementalBackup.java |  11 +-
 .../apache/hadoop/hbase/client/TestAdmin1.java  |  94 ++--
 .../hbase/client/TestAsyncRegionAdminApi.java   |   9 +-
 .../security/access/TestTablePermissions.java   |  11 +-
 hbase-shell/src/test/ruby/hbase/admin_test.rb   |   6 +-
 20 files changed, 750 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/353627b3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index e428012..9bb5df4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1184,6 +1184,8 @@ public interface Admin extends Abortable, Closeable {
*
* @param regionName region to split
* @throws IOException if a remote or network exception occurs
+   * @deprecated Since 2.0. Will be removed in 3.0. Use
+   * {@link #splitRegionAsync(byte[], byte[])} instead.
*/
   void splitRegion(final byte[] regionName) throws IOException;
 
@@ -1203,11 +1205,22 @@ public interface Admin extends Abortable, Closeable {
* @param regionName region to split
* @param splitPoint the explicit position to split on
* @throws IOException if a remote or network exception occurs
+   * @deprecated Since 2.0. Will be removed in 3.0. Use
+   * {@link #splitRegionAsync(byte[], byte[])} instead.
*/
   void splitRegion(final byte[] regionName, final byte[] splitPoint)
 throws IOException;
 
   /**
+   * Split an individual region. Asynchronous operation.
+   * @param regionName region to split
+   * @param splitPoint the explicit position to split on
+   * @throws IOException if a remote or network exception occurs
+   */
+  Future splitRegionAsync(byte[] regionName, byte[] splitPoint)
+throws IOException;
+
+  /**
* Modify an existing table, more IRB friendly version.
*
* @param tableName name of table.

http://git-wip-us.apache.org/repos/asf/hbase/blob/353627b3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 3b099ef..fb9df62 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -177,6 +177,8 @@ import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormali
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
 import 
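A hedged usage sketch of the new Admin.splitRegionAsync call introduced by this change; the split point is illustrative, and the caller is assumed to already hold a connected Admin and a valid region name:

import java.util.concurrent.Future;

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitRegionAsyncSketch {
  /**
   * Splits the given region at an explicit split point and waits for the master to
   * process the request. get() is used here only to make the example synchronous.
   */
  static void splitAndWait(Admin admin, byte[] regionName) throws Exception {
    Future<Void> splitFuture = admin.splitRegionAsync(regionName, Bytes.toBytes("row-5000"));
    splitFuture.get();
  }
}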

[13/22] hbase git commit: Be robust against movement of the rsgroup table

2017-07-19 Thread apurtell
Be robust against movement of the rsgroup table


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3a98817e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3a98817e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3a98817e

Branch: refs/heads/HBASE-15631-branch-1
Commit: 3a98817ec3112903584694245f9a6b26284e7a4d
Parents: dfcdc65
Author: Andrew Purtell 
Authored: Wed Jul 5 15:19:32 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   | 35 +++-
 1 file changed, 19 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3a98817e/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
--
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 7fcb7c7..6c991bd 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -112,7 +113,6 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
   private volatile Map rsGroupMap;
   private volatile Map tableMap;
   private MasterServices master;
-  private Table rsGroupTable;
   private ClusterConnection conn;
   private ZooKeeperWatcher watcher;
   private RSGroupStartupWorker rsGroupStartupWorker;
@@ -281,10 +281,9 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
 // if online read from GROUP table
 if (forceOnline || isOnline()) {
   LOG.debug("Refreshing in Online mode.");
-  if (rsGroupTable == null) {
-rsGroupTable = conn.getTable(RSGROUP_TABLE_NAME);
+  try (Table rsGroupTable = conn.getTable(RSGROUP_TABLE_NAME)) {
+groupList.addAll(rsGroupSerDe.retrieveGroupList(rsGroupTable));
   }
-  groupList.addAll(rsGroupSerDe.retrieveGroupList(rsGroupTable));
 } else {
   LOG.debug("Refershing in Offline mode.");
   String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, rsGroupZNode);
@@ -724,28 +723,32 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
 
   private void multiMutate(List mutations)
   throws IOException {
-CoprocessorRpcChannel channel = rsGroupTable.coprocessorService(ROW_KEY);
-MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder
-  = MultiRowMutationProtos.MutateRowsRequest.newBuilder();
+MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
 for (Mutation mutation : mutations) {
   if (mutation instanceof Put) {
-mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
+mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(
   ClientProtos.MutationProto.MutationType.PUT, mutation));
   } else if (mutation instanceof Delete) {
-mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
+mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(
   ClientProtos.MutationProto.MutationType.DELETE, mutation));
   } else {
 throw new DoNotRetryIOException("multiMutate doesn't support "
   + mutation.getClass().getName());
   }
 }
-
-MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
-  MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
-try {
-  service.mutateRows(null, mmrBuilder.build());
-} catch (ServiceException ex) {
-  ProtobufUtil.toIOException(ex);
+MutateRowsRequest mrm = mrmBuilder.build();
+// Be robust against movement of the rsgroup table
+// TODO: Why is this necessary sometimes? Should we be using our own connection?
+conn.clearRegionCache(RSGROUP_TABLE_NAME);
+try (Table rsGroupTable = conn.getTable(RSGROUP_TABLE_NAME)) {
+  CoprocessorRpcChannel channel = rsGroupTable.coprocessorService(ROW_KEY);
+  
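
For context, a minimal sketch of the pattern the patch switches to (not from the patch itself; the helper and row key are hypothetical): instead of caching a Table handle for the rsgroup table, clear any cached region locations and open a short-lived handle per operation, so a moved rsgroup region is re-located on the next call.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

class RsGroupTableAccessSketch {
  // rsGroupTableName stands in for RSGROUP_TABLE_NAME.
  static Result readRow(ClusterConnection conn, TableName rsGroupTableName, byte[] row)
      throws IOException {
    // Drop possibly stale region locations before talking to the table.
    conn.clearRegionCache(rsGroupTableName);
    // Short-lived handle, closed after every call instead of being cached in a field.
    try (Table rsGroupTable = conn.getTable(rsGroupTableName)) {
      return rsGroupTable.get(new Get(row));
    }
  }
}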

[16/22] hbase git commit: HBASE-16133 RSGroupBasedLoadBalancer.retainAssignment() might miss a region

2017-07-19 Thread apurtell
HBASE-16133 RSGroupBasedLoadBalancer.retainAssignment() might miss a region


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e7f97663
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e7f97663
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e7f97663

Branch: refs/heads/HBASE-15631-branch-1
Commit: e7f976633d883653e5518196a21d4d7e0a0b936e
Parents: 2d09a56
Author: Andrew Purtell 
Authored: Wed Jul 5 15:43:46 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 .../apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e7f97663/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
--
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index f69f093..c1b3c7d 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -216,9 +216,10 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
 List candidateList = filterOfflineServers(info, servers);
 ServerName server = this.internalBalancer.randomAssignment(region,
 candidateList);
-if (server != null && !assignments.containsKey(server)) {
-  assignments.put(server, new ArrayList());
-} else if (server != null) {
+if (server != null) {
+  if (!assignments.containsKey(server)) {
+assignments.put(server, new ArrayList());
+  }
   assignments.get(server).add(region);
 } else {
   //if not server is available assign to bogus so it ends up in RIT
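
For context, a generic sketch of the corrected pattern (not from the patch): the old code only created the per-server list when the server was new to the map and then skipped adding the region, so the first region assigned to a fresh server was dropped; the fix always records the region once the list exists.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class RetainAssignmentSketch {
  static <S, R> void assign(Map<S, List<R>> assignments, S server, R region, List<R> unassigned) {
    if (server != null) {
      List<R> regions = assignments.get(server);
      if (regions == null) {
        // First region for this server: create the list...
        regions = new ArrayList<R>();
        assignments.put(server, regions);
      }
      // ...and always record the region (previously skipped on the first hit).
      regions.add(region);
    } else {
      // No live server available in the group; leave the region to be handled elsewhere.
      unassigned.add(region);
    }
  }
}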



[04/22] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/74010ec9/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
new file mode 100644
index 000..9225e09
--- /dev/null
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -0,0 +1,643 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HBaseCluster;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.security.SecureRandom;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public abstract class TestRSGroupsBase {
+  protected static final Log LOG = LogFactory.getLog(TestRSGroupsBase.class);
+
+  //shared
+  protected final static String groupPrefix = "Group";
+  protected final static String tablePrefix = "Group";
+  protected final static SecureRandom rand = new SecureRandom();
+
+  //shared, cluster type specific
+  protected static HBaseTestingUtility TEST_UTIL;
+  protected static HBaseAdmin admin;
+  protected static HBaseCluster cluster;
+  protected static RSGroupAdmin rsGroupAdmin;
+
+  public final static long WAIT_TIMEOUT = 6*5;
+  public final static int NUM_SLAVES_BASE = 4; //number of slaves for the smallest cluster
+
+
+
+  protected RSGroupInfo addGroup(RSGroupAdmin gAdmin, String groupName,
+ int serverCount) throws IOException, InterruptedException {
+RSGroupInfo defaultInfo = gAdmin
+.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP);
+assertTrue(defaultInfo != null);
+assertTrue(defaultInfo.getServers().size() >= serverCount);
+gAdmin.addRSGroup(groupName);
+
+Set set = new HashSet();
+for(HostAndPort server: defaultInfo.getServers()) {
+  if(set.size() == serverCount) {
+break;
+  }
+  set.add(server);
+}
+gAdmin.moveServers(set, groupName);
+RSGroupInfo result = gAdmin.getRSGroupInfo(groupName);
+assertTrue(result.getServers().size() >= serverCount);
+return result;
+  }
+
+  static void removeGroup(RSGroupAdminClient groupAdmin, String groupName) throws IOException {
+RSGroupInfo RSGroupInfo = groupAdmin.getRSGroupInfo(groupName);
+groupAdmin.moveTables(RSGroupInfo.getTables(), RSGroupInfo.DEFAULT_GROUP);
+groupAdmin.moveServers(RSGroupInfo.getServers(), RSGroupInfo.DEFAULT_GROUP);
+groupAdmin.removeRSGroup(groupName);
+  }
+
+  protected void deleteTableIfNecessary() throws IOException {
+for (HTableDescriptor desc : TEST_UTIL.getHBaseAdmin().listTables(tablePrefix+".*")) {
+  

[08/22] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/74010ec9/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
--
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
new file mode 100644
index 000..3d1f4bd
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
@@ -0,0 +1,11855 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: RSGroupAdmin.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class RSGroupAdminProtos {
+  private RSGroupAdminProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface ListTablesOfRSGroupRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string r_s_group_name = 1;
+/**
+ * required string r_s_group_name = 1;
+ */
+boolean hasRSGroupName();
+/**
+ * required string r_s_group_name = 1;
+ */
+java.lang.String getRSGroupName();
+/**
+ * required string r_s_group_name = 1;
+ */
+com.google.protobuf.ByteString
+getRSGroupNameBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.ListTablesOfRSGroupRequest}
+   */
+  public static final class ListTablesOfRSGroupRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements ListTablesOfRSGroupRequestOrBuilder {
+// Use ListTablesOfRSGroupRequest.newBuilder() to construct.
+private ListTablesOfRSGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private ListTablesOfRSGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final ListTablesOfRSGroupRequest defaultInstance;
+public static ListTablesOfRSGroupRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public ListTablesOfRSGroupRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private ListTablesOfRSGroupRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  rSGroupName_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupRequest_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.class,
 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.Builder.class);
+}
+
+public static com.google.protobuf.Parser 
PARSER =
+new com.google.protobuf.AbstractParser() {
+  public ListTablesOfRSGroupRequest parsePartialFrom(
+  

[18/22] hbase git commit: HBASE-16430 Fix RegionServer Group's bug when moving multiple tables (Guangxu Cheng)

2017-07-19 Thread apurtell
HBASE-16430 Fix RegionServer Group's bug when moving multiple tables (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8c69283c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8c69283c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8c69283c

Branch: refs/heads/HBASE-15631-branch-1
Commit: 8c69283c5b42e4f12f5d9c1a20585ae0ca4138f4
Parents: e7f9766
Author: Andrew Purtell 
Authored: Wed Jul 5 17:16:50 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |  2 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  | 54 
 2 files changed, 55 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8c69283c/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
--
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 6c991bd..5cb2e71 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -227,7 +227,7 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
 Map newGroupMap = Maps.newHashMap(rsGroupMap);
 for(TableName tableName: tableNames) {
   if (tableMap.containsKey(tableName)) {
-RSGroupInfo src = new RSGroupInfo(rsGroupMap.get(tableMap.get(tableName)));
+RSGroupInfo src = new RSGroupInfo(newGroupMap.get(tableMap.get(tableName)));
 src.removeTable(tableName);
 newGroupMap.put(src.getName(), src);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8c69283c/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 9225e09..5fcdc7c 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -640,4 +640,58 @@ public abstract class TestRSGroupsBase {
   private String getGroupName(String baseName) {
 return groupPrefix+"_"+baseName+"_"+rand.nextInt(Integer.MAX_VALUE);
   }
+
+  @Test
+  public void testMultiTableMove() throws Exception {
+LOG.info("testMultiTableMove");
+
+final TableName tableNameA = TableName.valueOf(tablePrefix + "_testMultiTableMoveA");
+final TableName tableNameB = TableName.valueOf(tablePrefix + "_testMultiTableMoveB");
+final byte[] familyNameBytes = Bytes.toBytes("f");
+String newGroupName = getGroupName("testMultiTableMove");
+final RSGroupInfo newGroup = addGroup(rsGroupAdmin, newGroupName, 1);
+
+TEST_UTIL.createTable(tableNameA, familyNameBytes);
+TEST_UTIL.createTable(tableNameB, familyNameBytes);
+TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() {
+  @Override
+  public boolean evaluate() throws Exception {
+List regionsA = getTableRegionMap().get(tableNameA);
+if (regionsA == null)
+  return false;
+List regionsB = getTableRegionMap().get(tableNameB);
+if (regionsB == null)
+  return false;
+
+return getTableRegionMap().get(tableNameA).size() >= 1
+&& getTableRegionMap().get(tableNameB).size() >= 1;
+  }
+});
+
+RSGroupInfo tableGrpA = rsGroupAdmin.getRSGroupInfoOfTable(tableNameA);
+assertTrue(tableGrpA.getName().equals(RSGroupInfo.DEFAULT_GROUP));
+
+RSGroupInfo tableGrpB = rsGroupAdmin.getRSGroupInfoOfTable(tableNameB);
+assertTrue(tableGrpB.getName().equals(RSGroupInfo.DEFAULT_GROUP));
+//change table's group
+LOG.info("Moving table [" + tableNameA + "," + tableNameB + "] to " + 
newGroup.getName());
+rsGroupAdmin.moveTables(Sets.newHashSet(tableNameA, tableNameB), 
newGroup.getName());
+
+//verify group change
+Assert.assertEquals(newGroup.getName(),
+rsGroupAdmin.getRSGroupInfoOfTable(tableNameA).getName());
+
+Assert.assertEquals(newGroup.getName(),
+rsGroupAdmin.getRSGroupInfoOfTable(tableNameB).getName());
+
+//verify tables' not exist in old group
+Set DefaultTables = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables();
+
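
For context, a simplified sketch of the bug being fixed (not from the patch): when several tables are moved out of the same source group in one call, each iteration must start from the partially updated copy (newGroupMap above), not from the original snapshot, otherwise every iteration rebuilds the group from the old state and only the last removal survives.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class MultiTableMoveSketch {
  // Remove each table from its current group, accumulating changes in 'updated'.
  static Map<String, Set<String>> removeTables(Map<String, Set<String>> current,
      Map<String, String> tableToGroup, Set<String> tablesToMove) {
    Map<String, Set<String>> updated = new HashMap<>(current);
    for (String table : tablesToMove) {
      String group = tableToGroup.get(table);
      if (group == null || !updated.containsKey(group)) {
        continue;
      }
      // Copy from 'updated', not 'current', so removals from earlier iterations are kept.
      Set<String> tables = new HashSet<>(updated.get(group));
      tables.remove(table);
      updated.put(group, tables);
    }
    return updated;
  }
}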

[07/22] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/74010ec9/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
--
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
new file mode 100644
index 000..979f762
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
@@ -0,0 +1,1331 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: RSGroup.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class RSGroupProtos {
+  private RSGroupProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface RSGroupInfoOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string name = 1;
+/**
+ * required string name = 1;
+ */
+boolean hasName();
+/**
+ * required string name = 1;
+ */
+java.lang.String getName();
+/**
+ * required string name = 1;
+ */
+com.google.protobuf.ByteString
+getNameBytes();
+
+// repeated .hbase.pb.ServerName servers = 4;
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+
java.util.List
 
+getServersList();
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName 
getServers(int index);
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+int getServersCount();
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+java.util.List 
+getServersOrBuilderList();
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder 
getServersOrBuilder(
+int index);
+
+// repeated .hbase.pb.TableName tables = 3;
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+
java.util.List
 
+getTablesList();
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTables(int index);
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+int getTablesCount();
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+java.util.List 
+getTablesOrBuilderList();
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTablesOrBuilder(
+int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RSGroupInfo}
+   */
+  public static final class RSGroupInfo extends
+  com.google.protobuf.GeneratedMessage
+  implements RSGroupInfoOrBuilder {
+// Use RSGroupInfo.newBuilder() to construct.
+private RSGroupInfo(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private RSGroupInfo(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final RSGroupInfo defaultInstance;
+public static RSGroupInfo getDefaultInstance() {
+  return defaultInstance;
+}
+
+public RSGroupInfo getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private RSGroupInfo(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  name_ = input.readBytes();
+  break;
+}
+case 26: {
+  if (!((mutable_bitField0_ & 0x0004) == 0x0004)) {
+tables_ = new 
java.util.ArrayList();
+mutable_bitField0_ |= 0x0004;

[21/22] hbase git commit: HBASE-17758 [RSGROUP] Add shell command to move servers and tables at the same time (Guangxu Cheng)

2017-07-19 Thread apurtell
HBASE-17758 [RSGROUP] Add shell command to move servers and tables at the same time (Guangxu Cheng)

HBASE-17806 TestRSGroups#testMoveServersAndTables is flaky in master branch


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a23322ea
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a23322ea
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a23322ea

Branch: refs/heads/HBASE-15631-branch-1
Commit: a23322eae2adf436696d9c3e41a325b2b4f0e50e
Parents: 1259fc8
Author: Andrew Purtell 
Authored: Wed Jul 5 18:29:14 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 .../hadoop/hbase/rsgroup/RSGroupInfo.java   |3 +
 .../protobuf/generated/RSGroupAdminProtos.java  | 1759 +-
 .../src/main/protobuf/RSGroupAdmin.proto|   12 +
 .../hadoop/hbase/rsgroup/RSGroupAdmin.java  |   11 +
 .../hbase/rsgroup/RSGroupAdminClient.java   |   22 +
 .../hbase/rsgroup/RSGroupAdminEndpoint.java |   32 +
 .../hbase/rsgroup/RSGroupAdminServer.java   |   13 +
 .../hbase/rsgroup/RSGroupInfoManager.java   |   10 +
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |   24 +
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |  111 ++
 .../rsgroup/VerifyingRSGroupAdminClient.java|7 +
 .../BaseMasterAndRegionObserver.java|   10 +
 .../hbase/coprocessor/BaseMasterObserver.java   |   10 +
 .../hbase/coprocessor/MasterObserver.java   |   20 +-
 .../hbase/master/MasterCoprocessorHost.java |   26 +
 .../hbase/security/access/AccessController.java |6 +
 .../hbase/coprocessor/TestMasterObserver.java   |   10 +
 .../src/main/ruby/hbase/rsgroup_admin.rb|   14 +
 hbase-shell/src/main/ruby/shell.rb  |1 +
 .../commands/move_servers_tables_rsgroup.rb |   37 +
 20 files changed, 2115 insertions(+), 23 deletions(-)
--
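
For context, a hypothetical client-side sketch of what the combined move looks like through the Java API added here (the exact method signature is an assumption based on the diffstat above; server, table, and group names are made up):

import java.io.IOException;
import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;

class MoveServersAndTablesSketch {
  static void moveBoth(RSGroupAdmin rsGroupAdmin) throws IOException {
    // One call moves the servers and the tables together, instead of
    // move_servers_rsgroup followed by move_tables_rsgroup.
    rsGroupAdmin.moveServersAndTables(
        Sets.newHashSet(HostAndPort.fromString("rs1.example.com:16020")),
        Sets.newHashSet(TableName.valueOf("t1"), TableName.valueOf("t2")),
        "my_group");
  }
}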


http://git-wip-us.apache.org/repos/asf/hbase/blob/a23322ea/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index 7297ff2..74572ac 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -150,6 +150,9 @@ public class RSGroupInfo {
 sb.append(", ");
 sb.append(" Servers:");
 sb.append(this.servers);
+sb.append(", ");
+sb.append(" Tables:");
+sb.append(this.tables);
 return sb.toString();
 
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a23322ea/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
--
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
index 3d1f4bd..ca1db1e 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
@@ -10754,6 +10754,1621 @@ public final class RSGroupAdminProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.GetRSGroupInfoOfServerResponse)
   }
 
+  public interface MoveServersAndTablesRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string target_group = 1;
+/**
+ * required string target_group = 1;
+ */
+boolean hasTargetGroup();
+/**
+ * required string target_group = 1;
+ */
+java.lang.String getTargetGroup();
+/**
+ * required string target_group = 1;
+ */
+com.google.protobuf.ByteString
+getTargetGroupBytes();
+
+// repeated .hbase.pb.ServerName servers = 2;
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+
java.util.List
 
+getServersList();
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName 
getServers(int index);
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+int getServersCount();
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+java.util.List 
+getServersOrBuilderList();
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder 
getServersOrBuilder(
+int index);
+
+// repeated 

[19/22] hbase git commit: HBASE-17350 Fixup of regionserver group-based assignment

2017-07-19 Thread apurtell
HBASE-17350 Fixup of regionserver group-based assignment


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/200afdb7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/200afdb7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/200afdb7

Branch: refs/heads/HBASE-15631-branch-1
Commit: 200afdb78fcd9d11a368c83ef8ba05225ad1919b
Parents: ff737d6
Author: Andrew Purtell 
Authored: Wed Jul 5 18:09:48 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 .../hadoop/hbase/rsgroup/RSGroupInfo.java   |  19 +--
 .../apache/hadoop/hbase/util/Addressing.java|  22 +++
 .../hadoop/hbase/util/TestAddressing.java   |  39 +
 .../hbase/rsgroup/RSGroupAdminServer.java   | 159 ++-
 .../hbase/rsgroup/RSGroupInfoManager.java   |   4 +-
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |  42 +++--
 .../apache/hadoop/hbase/rsgroup/Utility.java|  48 ++
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |   2 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   3 +-
 .../hadoop/hbase/master/RegionStates.java   |   2 +-
 .../hadoop/hbase/master/ServerManager.java  |   1 -
 hbase-shell/src/main/ruby/shell.rb  |   7 +-
 hbase-shell/src/main/ruby/shell/commands.rb |   1 -
 .../src/main/ruby/shell/commands/add_rsgroup.rb |   3 +-
 .../main/ruby/shell/commands/balance_rsgroup.rb |   5 +-
 .../src/main/ruby/shell/commands/get_rsgroup.rb |   5 +-
 .../ruby/shell/commands/get_server_rsgroup.rb   |   5 +-
 .../ruby/shell/commands/get_table_rsgroup.rb|   5 +-
 .../main/ruby/shell/commands/list_procedures.rb |   2 +-
 .../main/ruby/shell/commands/list_rsgroups.rb   |   3 +-
 .../ruby/shell/commands/move_rsgroup_servers.rb |  37 -
 .../ruby/shell/commands/move_rsgroup_tables.rb  |  37 -
 .../ruby/shell/commands/move_servers_rsgroup.rb |  40 +
 .../ruby/shell/commands/move_tables_rsgroup.rb  |  40 +
 .../main/ruby/shell/commands/remove_rsgroup.rb  |   3 +-
 .../src/test/ruby/shell/rsgroup_shell_test.rb   |   4 +-
 26 files changed, 341 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/200afdb7/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index 0fb02d8..7297ff2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -20,16 +20,19 @@
 
 package org.apache.hadoop.hbase.rsgroup;
 
-import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
-
 import java.util.Collection;
 import java.util.NavigableSet;
 import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
 
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Addressing;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
 
 /**
  * Stores the group information of region server groups.
@@ -53,14 +56,13 @@ public class RSGroupInfo {
   Set servers,
   NavigableSet tables) {
 this.name = name;
-this.servers = servers;
-this.tables = tables;
+this.servers = new TreeSet<>(new Addressing.HostAndPortComparable());
+this.servers.addAll(servers);
+this.tables = new TreeSet<>(tables);
   }
 
   public RSGroupInfo(RSGroupInfo src) {
-name = src.getName();
-servers = Sets.newHashSet(src.getServers());
-tables = Sets.newTreeSet(src.getTables());
+this(src.getName(), src.servers, src.tables);
   }
 
   /**
@@ -183,5 +185,4 @@ public class RSGroupInfo {
 result = 31 * result + name.hashCode();
 return result;
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/200afdb7/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
index 31fb1f5..71f6127 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
@@ -24,10 +24,13 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.NetworkInterface;
 import java.net.SocketException;

[02/22] hbase git commit: HBASE-17785 RSGroupBasedLoadBalancer fails to assign new table regions when cloning snapshot

2017-07-19 Thread apurtell
HBASE-17785 RSGroupBasedLoadBalancer fails to assign new table regions when cloning snapshot


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dfcdc651
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dfcdc651
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dfcdc651

Branch: refs/heads/HBASE-15631-branch-1
Commit: dfcdc65167cfcba58076edac97a90395eb0d8790
Parents: 74010ec
Author: Andrew Purtell 
Authored: Wed Apr 5 16:25:56 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:09 2017 -0700

--
 .../hbase/rsgroup/RSGroupAdminEndpoint.java | 28 +---
 .../hadoop/hbase/rsgroup/TestRSGroups.java  | 22 +++
 2 files changed, 47 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dfcdc651/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 00cd6b0..8fa9fdc 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -77,7 +77,6 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGro
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 
-
 public class RSGroupAdminEndpoint extends RSGroupAdminService
 implements CoprocessorService, Coprocessor, MasterObserver {
 
@@ -274,12 +273,36 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
 done.run(builder.build());
   }
 
+  void assignTableToGroup(HTableDescriptor desc) throws IOException {
+String groupName =
+master.getNamespaceDescriptor(desc.getTableName().getNamespaceAsString())
+.getConfigurationValue(RSGroupInfo.NAMESPACEDESC_PROP_GROUP);
+if (groupName == null) {
+  groupName = RSGroupInfo.DEFAULT_GROUP;
+}
+RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
+if (rsGroupInfo == null) {
+  throw new ConstraintException("Default RSGroup (" + groupName + ") for this table's "
+  + "namespace does not exist.");
+}
+if (!rsGroupInfo.containsTable(desc.getTableName())) {
+  groupAdminServer.moveTables(Sets.newHashSet(desc.getTableName()), groupName);
+}
+  }
+
+  /
+  // MasterObserver overrides
+  /
+
+  // Assign table to default RSGroup.
   @Override
   public void preCreateTable(ObserverContext ctx,
   HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
 groupAdminServer.prepareRSGroupForTable(desc);
+assignTableToGroup(desc);
   }
 
+  // Remove table from its RSGroup.
   @Override
   public void postDeleteTable(ObserverContext ctx,
   TableName tableName) throws IOException {
@@ -663,7 +686,7 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
   public void preCloneSnapshot(ObserverContext ctx,
 SnapshotDescription snapshot,
 HTableDescriptor hTableDescriptor) throws IOException {
-
+assignTableToGroup(hTableDescriptor);
   }
 
   @Override
@@ -951,5 +974,4 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
 
   }
 
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/dfcdc651/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
--
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
index 34add63..e5a1f6a 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import 
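
For context, a minimal sketch of the configuration that assignTableToGroup() consults (not from the patch; the namespace and group names are hypothetical): a namespace can carry the rsgroup property so that tables created in it, including tables materialized by clone_snapshot, are placed in that group rather than in the default one.

import java.io.IOException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

class NamespaceRsGroupSketch {
  static void createGroupedNamespace(Admin admin) throws IOException {
    NamespaceDescriptor ns = NamespaceDescriptor.create("analytics")
        // Same property key the endpoint reads in assignTableToGroup().
        .addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, "analytics_group")
        .build();
    admin.createNamespace(ns);
    // Tables created or cloned into 'analytics' now land in 'analytics_group'.
  }
}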

[09/22] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

  Applied https://issues.apache.org/jira/secure/attachment/12799888/HBASE-15631.02.branch-1.patch
  Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/74010ec9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/74010ec9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/74010ec9

Branch: refs/heads/HBASE-15631-branch-1
Commit: 74010ec99111baed1b8cd3dec3688f54b35c6ad0
Parents: cfd5b6b
Author: Andrew Purtell 
Authored: Wed Jul 5 13:39:35 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:09 2017 -0700

--
 .../org/apache/hadoop/hbase/ServerName.java |19 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java |36 +-
 .../hadoop/hbase/rsgroup/RSGroupInfo.java   |   187 +
 hbase-it/pom.xml|10 +
 .../hbase/rsgroup/IntegrationTestRSGroup.java   |99 +
 hbase-protocol/pom.xml  | 2 +
 .../hbase/protobuf/generated/ClientProtos.java  | 2 +-
 .../hbase/protobuf/generated/MasterProtos.java  |30 +-
 .../protobuf/generated/RSGroupAdminProtos.java  | 11855 +
 .../hbase/protobuf/generated/RSGroupProtos.java |  1331 ++
 .../protobuf/generated/SnapshotProtos.java  |24 +-
 hbase-protocol/src/main/protobuf/RSGroup.proto  |34 +
 .../src/main/protobuf/RSGroupAdmin.proto|   136 +
 hbase-rsgroup/pom.xml   |   278 +
 .../hadoop/hbase/rsgroup/RSGroupAdmin.java  |   121 +
 .../hbase/rsgroup/RSGroupAdminClient.java   |   204 +
 .../hbase/rsgroup/RSGroupAdminEndpoint.java |   955 ++
 .../hbase/rsgroup/RSGroupAdminServer.java   |   503 +
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java |   428 +
 .../hbase/rsgroup/RSGroupInfoManager.java   |   132 +
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |   758 ++
 .../hadoop/hbase/rsgroup/RSGroupSerDe.java  |88 +
 .../hbase/rsgroup/RSGroupableBalancer.java  |29 +
 .../balancer/TestRSGroupBasedLoadBalancer.java  |   574 +
 .../hadoop/hbase/rsgroup/TestRSGroups.java  |   287 +
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |   643 +
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java  |   187 +
 .../rsgroup/VerifyingRSGroupAdminClient.java|   149 +
 .../hbase/tmpl/master/MasterStatusTmpl.jamon| 2 +
 .../apache/hadoop/hbase/LocalHBaseCluster.java  | 3 +
 .../BaseMasterAndRegionObserver.java|53 +
 .../hbase/coprocessor/BaseMasterObserver.java   |54 +
 .../hbase/coprocessor/MasterObserver.java   |98 +
 .../hadoop/hbase/master/AssignmentManager.java  |16 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |40 +-
 .../hadoop/hbase/master/LoadBalancer.java   | 3 +
 .../hbase/master/MasterCoprocessorHost.java |   137 +
 .../hadoop/hbase/master/MasterRpcServices.java  | 8 +
 .../hadoop/hbase/master/MasterServices.java | 5 +
 .../hbase/security/access/AccessController.java |32 +
 .../hbase/coprocessor/TestMasterObserver.java   |52 +
 .../hbase/master/MockNoopMasterServices.java| 5 +
 .../master/TestAssignmentManagerOnCluster.java  |   127 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java | 3 +
 .../hbase/master/TestMasterStatusServlet.java   |12 +-
 .../normalizer/TestSimpleRegionNormalizer.java  | 2 +-
 .../security/access/TestAccessController.java   |75 +
 hbase-shell/pom.xml |35 +
 hbase-shell/src/main/ruby/hbase.rb  | 1 +
 hbase-shell/src/main/ruby/hbase/hbase.rb| 4 +
 .../src/main/ruby/hbase/rsgroup_admin.rb|   150 +
 hbase-shell/src/main/ruby/shell.rb  |21 +
 hbase-shell/src/main/ruby/shell/commands.rb | 4 +
 .../src/main/ruby/shell/commands/add_rsgroup.rb |39 +
 .../main/ruby/shell/commands/balance_rsgroup.rb |37 +
 .../src/main/ruby/shell/commands/get_rsgroup.rb |44 +
 .../ruby/shell/commands/get_server_rsgroup.rb   |40 +
 .../ruby/shell/commands/get_table_rsgroup.rb|41 +
 .../main/ruby/shell/commands/list_rsgroups.rb   |50 +
 .../ruby/shell/commands/move_rsgroup_servers.rb |37 +
 .../ruby/shell/commands/move_rsgroup_tables.rb  |37 +
 .../main/ruby/shell/commands/remove_rsgroup.rb  |37 +
 .../apache/hadoop/hbase/client/TestShell.java   | 2 +-
 .../hbase/client/rsgroup/TestShellRSGroups.java |   111 +
 .../src/test/ruby/shell/rsgroup_shell_test.rb   |96 +
 hbase-shell/src/test/ruby/test_helper.rb| 4 +
 pom.xml |23 +
 67 files changed, 20585 insertions(+), 56 deletions(-)
--



[22/22] hbase git commit: HBASE-17496 RSGroup shell commands:get_server_rsgroup don't work and commands display an incorrect result size (Guangxu Cheng)

2017-07-19 Thread apurtell
HBASE-17496 RSGroup shell commands:get_server_rsgroup don't work and commands display an incorrect result size (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5cc50d45
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5cc50d45
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5cc50d45

Branch: refs/heads/HBASE-15631-branch-1
Commit: 5cc50d4511dd2ad34808da01299f894a8f8a4ebf
Parents: 200afdb
Author: Andrew Purtell 
Authored: Wed Jul 5 18:23:19 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb| 5 ++---
 hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb | 5 ++---
 hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb  | 3 +--
 hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb  | 3 +--
 4 files changed, 6 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5cc50d45/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
index 122020a..a5b41af 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
@@ -33,12 +33,11 @@ EOF
   end
 
   def command(group_name)
-now = Time.now
-formatter.header(['RSGROUP '.concat(group_name)])
+formatter.header(['GROUP INFORMATION'])
 rsgroup_admin.get_rsgroup(group_name) do |s|
   formatter.row([s])
 end
-formatter.footer(now)
+formatter.footer()
   end
 end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/5cc50d45/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
index dddf080..fd2ccc7 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
@@ -33,10 +33,9 @@ EOF
   end
 
   def command(server)
-now = Time.now
-group_name = rsgroup_admin.getRSGroupOfServer(server).getName
+group_name = rsgroup_admin.get_rsgroup_of_server(server).getName
 formatter.row([group_name])
-formatter.footer(now, 1)
+formatter.footer(1)
   end
 end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/5cc50d45/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
index 6939c12..9684687 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
@@ -33,11 +33,10 @@ EOF
   end
 
   def command(table)
-now = Time.now
 group_name =
 rsgroup_admin.get_rsgroup_of_table(table).getName
 formatter.row([group_name])
-formatter.footer(now, 1)
+formatter.footer(1)
   end
 end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/5cc50d45/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb b/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
index 5ab923a..393797d 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
@@ -35,7 +35,6 @@ EOF
   end
 
   def command(regex = '.*')
-now = Time.now
 formatter.header(['GROUPS'])
 
 regex = /#{regex}/ unless regex.is_a?(Regexp)
@@ -44,7 +43,7 @@ EOF
   formatter.row([group])
 end
 
-formatter.footer(now, list.size)
+formatter.footer(list.size)
   end
 end
   end



[15/22] hbase git commit: HBASE-15858 Some region server group shell commands don't work

2017-07-19 Thread apurtell
HBASE-15858 Some region server group shell commands don't work


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2d09a560
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2d09a560
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2d09a560

Branch: refs/heads/HBASE-15631-branch-1
Commit: 2d09a56011222ab2c40e063afd41e4fc479a790e
Parents: 9629f31
Author: Andrew Purtell 
Authored: Wed Jul 5 15:37:47 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 .../src/main/ruby/shell/commands/get_server_rsgroup.rb   |  2 +-
 hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb| 11 +++
 2 files changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2d09a560/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
index 322f6bb..a689a7c 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
@@ -31,7 +31,7 @@ EOF
 
   def command(server)
 now = Time.now
-group_name = rsgroup_admin.getGroupOfServer(server).getName
+group_name = rsgroup_admin.getRSGroupOfServer(server).getName
 formatter.row([group_name])
 formatter.footer(now, 1)
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/2d09a560/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
--
diff --git a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
index d892775..1040ed8 100644
--- a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
+++ b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
@@ -49,12 +49,15 @@ module Hbase
   assert_not_nil(group)
   assert_equal(0, group.getServers.count)
 
-  hostport =
-  @rsgroup_admin.getRSGroupInfo('default').getServers.iterator.next.toString
+  hostport = @rsgroup_admin.getRSGroupInfo('default').getServers.iterator.next
+  @shell.command('get_rsgroup', 'default')
+  hostPortStr = hostport.toString
+  @shell.command('get_server_rsgroup', [hostPortStr])
   @shell.command('move_rsgroup_servers',
  group_name,
- [hostport])
+ [hostPortStr])
+  assert_equal(1, @rsgroup_admin.getRSGroupInfo(group_name).getServers.count)
+  assert_equal(group_name, @rsgroup_admin.getRSGroupOfServer(hostport).getName)
 
   @shell.command('move_rsgroup_tables',
  group_name,
@@ -65,7 +68,7 @@ module Hbase
   @hbase.rsgroup_admin(@formatter).get_rsgroup(group_name) do |line|
 case count
 when 1
-  assert_equal(hostport, line)
+  assert_equal(hostPortStr, line)
 when 3
   assert_equal(table_name, line)
 end



[10/22] hbase git commit: HBASE-16456 Fix findbugs warnings in hbase-rsgroup module (Guangxu Cheng)

2017-07-19 Thread apurtell
HBASE-16456 Fix findbugs warnings in hbase-rsgroup module (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4f6d7f0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4f6d7f0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4f6d7f0

Branch: refs/heads/HBASE-15631-branch-1
Commit: d4f6d7f098a01bcdcd7b3a4bff228f6d2a44a4d1
Parents: 8c69283
Author: Andrew Purtell 
Authored: Wed Jul 5 17:19:43 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java  | 16 +++-
 .../hadoop/hbase/rsgroup/RSGroupAdminServer.java|  2 +-
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java | 16 +---
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   | 14 --
 4 files changed, 37 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d4f6d7f0/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 8fa9fdc..e71470e 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -89,7 +89,7 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
   public void start(CoprocessorEnvironment env) throws IOException {
 MasterCoprocessorEnvironment menv = (MasterCoprocessorEnvironment)env;
 master = menv.getMasterServices();
-groupInfoManager = new RSGroupInfoManagerImpl(master);
+setGroupInfoManager(new RSGroupInfoManagerImpl(master));
 groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
 Class clazz =
 master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
@@ -107,6 +107,20 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
 return this;
   }
 
+  private static void setStaticGroupInfoManager(RSGroupInfoManagerImpl groupInfoManager) {
+RSGroupAdminEndpoint.groupInfoManager = groupInfoManager;
+  }
+
+  private void setGroupInfoManager(RSGroupInfoManagerImpl groupInfoManager) throws IOException {
+if (groupInfoManager == null) {
+  groupInfoManager = new RSGroupInfoManagerImpl(master);
+  groupInfoManager.init();
+} else if (!groupInfoManager.isInit()) {
+  groupInfoManager.init();
+}
+setStaticGroupInfoManager(groupInfoManager);
+  }
+
   public RSGroupInfoManager getGroupInfoManager() {
 return groupInfoManager;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d4f6d7f0/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
--
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 43ac3ad..e76e3e7 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -218,7 +218,7 @@ public class RSGroupAdminServer extends RSGroupAdmin {
 }
   }
   try {
-Thread.sleep(1000);
+manager.wait(1000);
   } catch (InterruptedException e) {
 LOG.warn("Sleep interrupted", e);
 Thread.currentThread().interrupt();
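
For context, a small sketch of why the findbugs fix swaps Thread.sleep() for Object.wait() here (not from the patch; the names are hypothetical): sleeping while holding a monitor keeps every other thread blocked for the full pause, whereas wait() releases the monitor while waiting and must be called with it held.

class WaitVsSleepSketch {
  private final Object manager = new Object();
  private boolean done;

  void waitForCompletion() throws InterruptedException {
    synchronized (manager) {
      while (!done) {
        // Releases 'manager' while paused; Thread.sleep(1000) would keep holding it.
        manager.wait(1000);
      }
    }
  }

  void markDone() {
    synchronized (manager) {
      done = true;
      manager.notifyAll();
    }
  }
}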

http://git-wip-us.apache.org/repos/asf/hbase/blob/d4f6d7f0/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
--
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index c1b3c7d..519177c 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -322,18 +322,19 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalanc
   private Set getMisplacedRegions(
   Map regions) throws IOException {
 Set misplacedRegions = new HashSet();
-for (HRegionInfo region : regions.keySet()) {
-  ServerName assignedServer = regions.get(region);
+

[01/22] hbase git commit: HBASE-18330 NPE in ReplicationZKLockCleanerChore [Forced Update!]

2017-07-19 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/HBASE-15631-branch-1 a98bc190d -> d0c72847a (forced update)


HBASE-18330 NPE in ReplicationZKLockCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cfd5b6b5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cfd5b6b5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cfd5b6b5

Branch: refs/heads/HBASE-15631-branch-1
Commit: cfd5b6b59f00eb3cbcb07a2b32fac019436c479f
Parents: 2da5b43
Author: Andrew Purtell 
Authored: Wed Jul 19 15:46:45 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:46:45 2017 -0700

--
 .../apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java  | 3 +++
 .../hbase/master/cleaner/ReplicationZKLockCleanerChore.java  | 4 +++-
 .../hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java| 3 +++
 .../hadoop/hbase/replication/master/ReplicationLogCleaner.java   | 2 +-
 .../hbase/replication/regionserver/DumpReplicationQueues.java| 3 +++
 .../hbase/replication/regionserver/ReplicationSourceManager.java | 2 +-
 6 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index b242ca7..c2999ec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -528,6 +528,9 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
 if (queuesClient == null) return;
 try {
   List replicators = queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator : replicators) {
 List queueIds = queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
index dc5338e..3fa30bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
@@ -76,7 +76,9 @@ public class ReplicationZKLockCleanerChore extends ScheduledChore {
   }
   Set rsSet = new HashSet(regionServers);
   List replicators = queues.getListOfReplicators();
-
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator: replicators) {
 try {
   String lockNode = queues.getLockZNode(replicator);

http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
index 8311b8d..f559510 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
@@ -80,6 +80,9 @@ public class ReplicationZKNodeCleaner {
    Set<String> peerIds = new HashSet<>(this.replicationPeers.getAllPeerIds());
    try {
      List<String> replicators = this.queuesClient.getListOfReplicators();
+      if (replicators == null || replicators.isEmpty()) {
+        return undeletedQueues;
+      }
      for (String replicator : replicators) {
        List<String> queueIds = this.queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
--
diff --git 

[06/22] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/74010ec9/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
new file mode 100644
index 000..00cd6b0
--- /dev/null
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -0,0 +1,955 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rsgroup;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin.MasterSwitchType;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
+import 

[05/22] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/74010ec9/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
new file mode 100644
index 000..7fcb7c7
--- /dev/null
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -0,0 +1,758 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rsgroup;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import com.google.protobuf.ServiceException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableStateManager;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.ServerListener;
+import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
+import org.apache.hadoop.hbase.security.access.AccessControlLists;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * This is an implementation of {@link RSGroupInfoManager}. Which makes
+ * use of an HBase table as the persistence store for the group information.
+ * It also makes use of zookeeper to store group information needed
+ * for bootstrapping during offline mode.
+ */
+public class RSGroupInfoManagerImpl implements RSGroupInfoManager, 
ServerListener {
+  private static final Log LOG = 
LogFactory.getLog(RSGroupInfoManagerImpl.class);
+
+  /** Table descriptor for hbase:rsgroup catalog table */
+  private final static HTableDescriptor RSGROUP_TABLE_DESC;
+  static {
+

[12/22] hbase git commit: HBASE-16462 TestRSGroupsBase#testGroupBalance may hang due to uneven region distribution (Guangxu Cheng)

2017-07-19 Thread apurtell
HBASE-16462 TestRSGroupsBase#testGroupBalance may hang due to uneven region distribution (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ff737d60
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ff737d60
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ff737d60

Branch: refs/heads/HBASE-15631-branch-1
Commit: ff737d60b8b60877b0afdb69daed1ed65d803d54
Parents: d4f6d7f
Author: Andrew Purtell 
Authored: Wed Jul 5 17:57:24 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 .../test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java| 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ff737d60/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
index e5a1f6a..9baaa1a 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
@@ -79,6 +79,8 @@ public class TestRSGroups extends TestRSGroupsBase {
   @BeforeClass
   public static void setUp() throws Exception {
 TEST_UTIL = new HBaseTestingUtility();
+TEST_UTIL.getConfiguration().setFloat(
+"hbase.master.balancer.stochastic.tableSkewCost", 6000);
 TEST_UTIL.getConfiguration().set(
 HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
 RSGroupBasedLoadBalancer.class.getName());
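
The two added lines simply bias the StochasticLoadBalancer so heavily toward per-table evenness that the per-group balance the test waits for is reached reliably. A hedged, stand-alone sketch of the same tuning outside the test class follows; the cluster size and the placeholder comment about table setup are illustrative, not part of the commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class TableSkewTuningSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    // Weight per-table evenness far above its default so the balancer spreads
    // each table's regions across the servers of a group instead of settling
    // for an overall-even but per-table-skewed placement.
    conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 6000);
    util.startMiniCluster(3); // three region servers, purely as an example
    try {
      // ... create a multi-region table and invoke the balancer, as the test does ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}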



[20/22] hbase git commit: HBASE-17758 [RSGROUP] Add shell command to move servers and tables at the same time (Guangxu Cheng)

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/a23322ea/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index e71470e..ee30e15 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -68,6 +68,8 @@ import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupI
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
@@ -206,6 +208,26 @@ public class RSGroupAdminEndpoint extends 
RSGroupAdminService
   }
 
   @Override
+  public void moveServersAndTables(RpcController controller, MoveServersAndTablesRequest request,
+      RpcCallback<MoveServersAndTablesResponse> done) {
+    MoveServersAndTablesResponse.Builder builder = MoveServersAndTablesResponse.newBuilder();
+    try {
+      Set<HostAndPort> hostPorts = Sets.newHashSet();
+      for (HBaseProtos.ServerName el : request.getServersList()) {
+        hostPorts.add(HostAndPort.fromParts(el.getHostName(), el.getPort()));
+      }
+      Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
+      for (HBaseProtos.TableName tableName : request.getTableNameList()) {
+        tables.add(ProtobufUtil.toTableName(tableName));
+      }
+      groupAdminServer.moveServersAndTables(hostPorts, tables, request.getTargetGroup());
+    } catch (IOException e) {
+      ResponseConverter.setControllerException(controller, e);
+    }
+    done.run(builder.build());
+  }
+
+  @Override
   public void addRSGroup(RpcController controller,
AddRSGroupRequest request,
RpcCallback done) {
@@ -953,6 +975,16 @@ public class RSGroupAdminEndpoint extends 
RSGroupAdminService
   }
 
   @Override
+  public void preMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<HostAndPort> servers, Set<TableName> tables, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void postMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<HostAndPort> servers, Set<TableName> tables, String targetGroup) throws IOException {
+  }
+
+  @Override
   public void preAddRSGroup(ObserverContext ctx,
 String name) throws IOException {
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a23322ea/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 1069ac0..863b71e 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -501,6 +501,19 @@ public class RSGroupAdminServer extends RSGroupAdmin {
   }
 
   @Override
+  public void moveServersAndTables(Set<HostAndPort> servers, Set<TableName> tables,
+      String targetGroup) throws IOException {
+    if (servers == null || servers.isEmpty()) {
+      throw new ConstraintException("The list of servers to move cannot be null or empty.");
+    }
+    if (tables == null || tables.isEmpty()) {
+      throw new ConstraintException("The list of tables to move cannot be null or empty.");
+    }
+    moveServers(servers, targetGroup);
+    moveTables(tables, targetGroup);
+  }
+
+  @Override
   public void close() throws IOException {
   }
 }
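
From the caller's side the new operation is a single admin call; as the hunk above shows, the server simply runs moveServers followed by moveTables against the same target group. A hedged sketch of such a caller, assuming an RSGroupAdmin handle obtained elsewhere; the host names, port and group name are placeholders.

import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;

public class MoveServersAndTablesSketch {
  // Moves two region servers and one table into "my_group" with one call,
  // rather than issuing separate move_servers and move_tables requests.
  static void moveIntoGroup(RSGroupAdmin admin) throws IOException {
    Set<HostAndPort> servers = new HashSet<HostAndPort>();
    servers.add(HostAndPort.fromParts("rs1.example.com", 16020));
    servers.add(HostAndPort.fromParts("rs2.example.com", 16020));

    Set<TableName> tables = new HashSet<TableName>();
    tables.add(TableName.valueOf("my_table"));

    admin.moveServersAndTables(servers, tables, "my_group");
  }
}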

http://git-wip-us.apache.org/repos/asf/hbase/blob/a23322ea/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 5b5563e..e11cb57 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ 

[11/22] hbase git commit: HBASE-18235 LoadBalancer.BOGUS_SERVER_NAME should not have a bogus hostname

2017-07-19 Thread apurtell
HBASE-18235 LoadBalancer.BOGUS_SERVER_NAME should not have a bogus hostname


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d0c72847
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d0c72847
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d0c72847

Branch: refs/heads/HBASE-15631-branch-1
Commit: d0c72847a2dbfd75b1ecca0c6f3dbab6a4d9a6bf
Parents: a23322e
Author: Andrew Purtell 
Authored: Mon Jul 3 17:54:36 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/master/LoadBalancer.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d0c72847/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
index 937b32f..a80cdc3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
@@ -52,8 +52,9 @@ import org.apache.hadoop.hbase.TableName;
 @InterfaceAudience.Private
 public interface LoadBalancer extends Configurable, Stoppable, 
ConfigurationObserver {
 
-  //used to signal to the caller that the region(s) cannot be assigned
-  ServerName BOGUS_SERVER_NAME = ServerName.parseServerName("localhost,1,1");
+  // Used to signal to the caller that the region(s) cannot be assigned
+  // We deliberately use 'localhost' so the operation will fail fast
+  ServerName BOGUS_SERVER_NAME = ServerName.valueOf("localhost,1,1");
 
   /**
* Set the current cluster status.  This allows a LoadBalancer to map host 
name to a server
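
For reference, a small sketch of what the sentinel decomposes to, assuming the usual host,port,startcode string form; the point of using 'localhost' is that any accidental attempt to contact the bogus server fails fast locally instead of stalling on an unresolvable hostname.

import org.apache.hadoop.hbase.ServerName;

public class BogusServerNameSketch {
  public static void main(String[] args) {
    // "host,port,startcode" is the standard ServerName string form.
    ServerName bogus = ServerName.valueOf("localhost,1,1");
    System.out.println(bogus.getHostname());  // localhost
    System.out.println(bogus.getPort());      // 1
    System.out.println(bogus.getStartcode()); // 1
    // localhost resolves immediately and port 1 is almost certainly closed,
    // so an RPC against this placeholder errors out right away.
  }
}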



[4/7] hbase git commit: HBASE-18330 NPE in ReplicationZKLockCleanerChore

2017-07-19 Thread apurtell
HBASE-18330 NPE in ReplicationZKLockCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7d2175eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7d2175eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7d2175eb

Branch: refs/heads/branch-1.2
Commit: 7d2175eb3e00ac705adee9dd533c07041bf0f38a
Parents: 7db088b
Author: Andrew Purtell 
Authored: Wed Jul 19 15:46:45 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:47:59 2017 -0700

--
 .../apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java  | 1 -
 .../hbase/master/cleaner/ReplicationZKLockCleanerChore.java  | 4 +++-
 .../hadoop/hbase/replication/master/ReplicationLogCleaner.java   | 2 +-
 .../hbase/replication/regionserver/ReplicationSourceManager.java | 2 +-
 4 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7d2175eb/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 5bceaa5..cd0fcdb 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -560,5 +560,4 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 return ProtobufUtil.prependPBMagic(bytes);
   }
 
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7d2175eb/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
index dc5338e..3fa30bf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
@@ -76,7 +76,9 @@ public class ReplicationZKLockCleanerChore extends 
ScheduledChore {
   }
   Set rsSet = new HashSet(regionServers);
   List replicators = queues.getListOfReplicators();
-
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator: replicators) {
 try {
   String lockNode = queues.getLockZNode(replicator);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7d2175eb/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
index 9ecba11..7731240 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
@@ -98,7 +98,7 @@ public class ReplicationLogCleaner extends 
BaseLogCleanerDelegate {
 for (int retry = 0; ; retry++) {
   int v0 = replicationQueues.getQueuesZNodeCversion();
   List rss = replicationQueues.getListOfReplicators();
-  if (rss == null) {
+  if (rss == null || rss.isEmpty()) {
 LOG.debug("Didn't find any region server that replicates, won't 
prevent any deletions.");
 return ImmutableSet.of();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7d2175eb/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 603557b..90f818d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -230,7 +230,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   addSource(id);
 }
 List currentReplicators = 

[14/22] hbase git commit: HBASE-15848 Fix possible null pointer dereference in RSGroupBasedLoadBalancer#getMisplacedRegions (Stephen Yuan Jiang)

2017-07-19 Thread apurtell
HBASE-15848 Fix possible null pointer dereference in RSGroupBasedLoadBalancer#getMisplacedRegions (Stephen Yuan Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9629f31c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9629f31c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9629f31c

Branch: refs/heads/HBASE-15631-branch-1
Commit: 9629f31c0eb9eab6aa4476e3461d0e5a82b5bbf7
Parents: 3a98817
Author: Andrew Purtell 
Authored: Wed Jul 5 15:37:10 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 .../apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9629f31c/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index fea1275..f69f093 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -331,7 +331,7 @@ public class RSGroupBasedLoadBalancer implements 
RSGroupableBalancer, LoadBalanc
 " on server: " + assignedServer +
 " found in group: " +
 
RSGroupInfoManager.getRSGroupOfServer(assignedServer.getHostPort()) +
-" outside of group: " + info.getName());
+" outside of group: " + (info == null ? "UNKNOWN" : 
info.getName()));
 misplacedRegions.add(region);
   }
 }
@@ -352,7 +352,7 @@ public class RSGroupBasedLoadBalancer implements 
RSGroupableBalancer, LoadBalanc
 try {
   info = RSGroupInfoManager.getRSGroup(
   RSGroupInfoManager.getRSGroupOfTable(region.getTable()));
-}catch(IOException exp){
+} catch (IOException exp) {
   LOG.debug("Group information null for region of table " + 
region.getTable(),
   exp);
 }
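
The substance of the fix is guarding the dereference inside the log message, because the group lookup can return null (or throw, leaving info unset). A minimal stand-alone sketch of the idiom; lookupGroup() is a hypothetical stand-in for RSGroupInfoManager.getRSGroup(...).

public class NullSafeLogSketch {
  // Hypothetical stand-in for RSGroupInfoManager.getRSGroup(...), which may
  // return null when a region's table is not mapped to any group.
  static String lookupGroup(String table) {
    return null;
  }

  public static void main(String[] args) {
    String info = lookupGroup("t1");
    // Guard the dereference inside the message itself, as the diff does.
    System.out.println("Region of t1 found outside of group: "
        + (info == null ? "UNKNOWN" : info));
  }
}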



[17/22] hbase git commit: HBASE-17772 IntegrationTestRSGroup won't run

2017-07-19 Thread apurtell
HBASE-17772 IntegrationTestRSGroup won't run


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1259fc80
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1259fc80
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1259fc80

Branch: refs/heads/HBASE-15631-branch-1
Commit: 1259fc80d8dc013f94b6150e3fbbd537fcd80fc7
Parents: 5cc50d4
Author: Andrew Purtell 
Authored: Wed Jul 5 18:24:47 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 16:21:10 2017 -0700

--
 .../org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java   | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1259fc80/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 5831696..52f576d 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -150,7 +150,14 @@ public abstract class TestRSGroupsBase {
 ClusterStatus status = 
TEST_UTIL.getHBaseClusterInterface().getClusterStatus();
 for(ServerName serverName : status.getServers()) {
   for(RegionLoad rl : 
status.getLoad(serverName).getRegionsLoad().values()) {
-TableName tableName = HRegionInfo.getTable(rl.getName());
+TableName tableName = null;
+try {
+  tableName = HRegionInfo.getTable(rl.getName());
+} catch (IllegalArgumentException e) {
+  LOG.warn("Failed parse a table name from regionname=" +
+  Bytes.toStringBinary(rl.getName()));
+  continue;
+}
 if(!map.containsKey(tableName)) {
   map.put(tableName, new TreeMap());
 }
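
HRegionInfo.getTable(byte[]) throws IllegalArgumentException for region names it cannot parse, which would otherwise abort the whole cluster-status scan. A hedged sketch of the same guard in isolation; the region name used in main() is only an illustrative, well-formed example.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionNameParseSketch {
  // Returns the table for a region name, or null when the name cannot be
  // parsed (mirrors the skip-and-warn behaviour added in the hunk above).
  static TableName tableOf(byte[] regionName) {
    try {
      return HRegionInfo.getTable(regionName);
    } catch (IllegalArgumentException e) {
      System.out.println("Failed to parse a table name from regionname="
          + Bytes.toStringBinary(regionName));
      return null;
    }
  }

  public static void main(String[] args) {
    // Region names follow "<table>,<start key>,<timestamp>.<encoded name>."
    byte[] name = Bytes.toBytes("t1,,1500000000000.0123456789abcdef0123456789abcdef.");
    System.out.println(tableOf(name)); // t1
  }
}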



[03/22] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/74010ec9/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 51aeff8..c409ee9 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2925,4 +2925,79 @@ public class TestAccessController extends SecureTestUtil 
{
 verifyDenied(replicateLogEntriesAction, USER_CREATE, USER_RW, USER_RO, 
USER_NONE, USER_OWNER,
   USER_GROUP_READ, USER_GROUP_ADMIN, USER_GROUP_CREATE);
   }
+
+  @Test
+  public void testMoveServers() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preMoveServers(ObserverContext.createAndPrepare(CP_ENV, null),
+null, null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testMoveTables() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preMoveTables(ObserverContext.createAndPrepare(CP_ENV, null),
+null, null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testAddGroup() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preAddRSGroup(ObserverContext.createAndPrepare(CP_ENV, null),
+null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testRemoveGroup() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preRemoveRSGroup(ObserverContext.createAndPrepare(CP_ENV, 
null),
+null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testBalanceGroup() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preBalanceRSGroup(ObserverContext.createAndPrepare(CP_ENV, 
null),
+null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/74010ec9/hbase-shell/pom.xml
--
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index a2a1d0c..44b6095 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -254,6 +254,41 @@
 
   
   
+    <profile>
+      <id>rsgroup</id>
+      <activation>
+        <property>
+          <name>!skip-rsgroup</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-rsgroup</artifactId>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>build-helper-maven-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>add-test-source</id>
+                <goals>
+                  <goal>add-test-source</goal>
+                </goals>
+                <configuration>
+                  <sources>
+                    <source>src/test/rsgroup</source>
+                  </sources>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
 
 
   skipShellTests

http://git-wip-us.apache.org/repos/asf/hbase/blob/74010ec9/hbase-shell/src/main/ruby/hbase.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase.rb 
b/hbase-shell/src/main/ruby/hbase.rb
index 88a6f04..2c0aecb 100644
--- a/hbase-shell/src/main/ruby/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase.rb
@@ -112,6 +112,7 @@ require 'hbase/quotas'
 require 'hbase/replication_admin'
 require 'hbase/security'
 require 'hbase/visibility_labels'
+require 'hbase/rsgroup_admin'
 
 
 include HBaseQuotasConstants

http://git-wip-us.apache.org/repos/asf/hbase/blob/74010ec9/hbase-shell/src/main/ruby/hbase/hbase.rb
--
diff --git 

[1/7] hbase git commit: HBASE-18330 NPE in ReplicationZKLockCleanerChore

2017-07-19 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 2da5b432a -> cfd5b6b59
  refs/heads/branch-1.1 ff7df565c -> b03a5e743
  refs/heads/branch-1.2 7db088b4b -> 7d2175eb3
  refs/heads/branch-1.3 8aabbdb3e -> 2b60f4ecd
  refs/heads/branch-1.4 68e9f2173 -> ffb702cd4
  refs/heads/branch-2 51067caef -> cbf390422
  refs/heads/master 5f54e2851 -> 01db60d65


HBASE-18330 NPE in ReplicationZKLockCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/01db60d6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/01db60d6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/01db60d6

Branch: refs/heads/master
Commit: 01db60d65b9a2dff0ca001323cb77a6e4e8d6f48
Parents: 5f54e28
Author: Andrew Purtell 
Authored: Wed Jul 19 15:46:08 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:46:08 2017 -0700

--
 .../apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java   | 3 +++
 .../hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java   | 2 +-
 .../hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java | 3 +++
 .../hbase/replication/regionserver/DumpReplicationQueues.java | 3 +++
 4 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/01db60d6/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 02fe2f1..751e454 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -520,6 +520,9 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 if (queuesClient == null) return;
 try {
   List replicators = queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator : replicators) {
 List queueIds = queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/01db60d6/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
index 0504373..0115b6f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
@@ -98,7 +98,7 @@ public class ReplicationQueuesClientZKImpl extends 
ReplicationStateZKBase implem
 for (int retry = 0; ; retry++) {
   int v0 = getQueuesZNodeCversion();
   List rss = getListOfReplicators();
-  if (rss == null) {
+  if (rss == null || rss.isEmpty()) {
 LOG.debug("Didn't find any region server that replicates, won't 
prevent any deletions.");
 return ImmutableSet.of();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/01db60d6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
index dafc4f8..6d8962e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
@@ -77,6 +77,9 @@ public class ReplicationZKNodeCleaner {
 Set peerIds = new HashSet<>(this.replicationPeers.getAllPeerIds());
 try {
   List replicators = this.queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return undeletedQueues;
+  }
   for (String replicator : replicators) {
 List queueIds = this.queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/01db60d6/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java

[5/7] hbase git commit: HBASE-18330 NPE in ReplicationZKLockCleanerChore

2017-07-19 Thread apurtell
HBASE-18330 NPE in ReplicationZKLockCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ffb702cd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ffb702cd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ffb702cd

Branch: refs/heads/branch-1.4
Commit: ffb702cd4a6a9c311bbb4942fd404ea8358c72b1
Parents: 68e9f21
Author: Andrew Purtell 
Authored: Wed Jul 19 15:46:45 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:48:18 2017 -0700

--
 .../apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java  | 3 +++
 .../hbase/master/cleaner/ReplicationZKLockCleanerChore.java  | 4 +++-
 .../hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java| 3 +++
 .../hadoop/hbase/replication/master/ReplicationLogCleaner.java   | 2 +-
 .../hbase/replication/regionserver/DumpReplicationQueues.java| 3 +++
 .../hbase/replication/regionserver/ReplicationSourceManager.java | 2 +-
 6 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ffb702cd/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index b242ca7..c2999ec 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -528,6 +528,9 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 if (queuesClient == null) return;
 try {
   List replicators = queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator : replicators) {
 List queueIds = queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ffb702cd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
index dc5338e..3fa30bf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
@@ -76,7 +76,9 @@ public class ReplicationZKLockCleanerChore extends 
ScheduledChore {
   }
   Set rsSet = new HashSet(regionServers);
   List replicators = queues.getListOfReplicators();
-
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator: replicators) {
 try {
   String lockNode = queues.getLockZNode(replicator);

http://git-wip-us.apache.org/repos/asf/hbase/blob/ffb702cd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
index 8311b8d..f559510 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
@@ -80,6 +80,9 @@ public class ReplicationZKNodeCleaner {
 Set peerIds = new HashSet<>(this.replicationPeers.getAllPeerIds());
 try {
   List replicators = this.queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return undeletedQueues;
+  }
   for (String replicator : replicators) {
 List queueIds = this.queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ffb702cd/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
 

[3/7] hbase git commit: HBASE-18330 NPE in ReplicationZKLockCleanerChore

2017-07-19 Thread apurtell
HBASE-18330 NPE in ReplicationZKLockCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cfd5b6b5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cfd5b6b5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cfd5b6b5

Branch: refs/heads/branch-1
Commit: cfd5b6b59f00eb3cbcb07a2b32fac019436c479f
Parents: 2da5b43
Author: Andrew Purtell 
Authored: Wed Jul 19 15:46:45 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:46:45 2017 -0700

--
 .../apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java  | 3 +++
 .../hbase/master/cleaner/ReplicationZKLockCleanerChore.java  | 4 +++-
 .../hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java| 3 +++
 .../hadoop/hbase/replication/master/ReplicationLogCleaner.java   | 2 +-
 .../hbase/replication/regionserver/DumpReplicationQueues.java| 3 +++
 .../hbase/replication/regionserver/ReplicationSourceManager.java | 2 +-
 6 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index b242ca7..c2999ec 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -528,6 +528,9 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 if (queuesClient == null) return;
 try {
   List replicators = queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator : replicators) {
 List queueIds = queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
index dc5338e..3fa30bf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
@@ -76,7 +76,9 @@ public class ReplicationZKLockCleanerChore extends 
ScheduledChore {
   }
   Set rsSet = new HashSet(regionServers);
   List replicators = queues.getListOfReplicators();
-
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator: replicators) {
 try {
   String lockNode = queues.getLockZNode(replicator);

http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
index 8311b8d..f559510 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
@@ -80,6 +80,9 @@ public class ReplicationZKNodeCleaner {
 Set peerIds = new HashSet<>(this.replicationPeers.getAllPeerIds());
 try {
   List replicators = this.queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return undeletedQueues;
+  }
   for (String replicator : replicators) {
 List queueIds = this.queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cfd5b6b5/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
 

[7/7] hbase git commit: HBASE-18330 NPE in ReplicationZKLockCleanerChore

2017-07-19 Thread apurtell
HBASE-18330 NPE in ReplicationZKLockCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b03a5e74
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b03a5e74
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b03a5e74

Branch: refs/heads/branch-1.1
Commit: b03a5e743952495e97c1c180e272a08c4b2eb56f
Parents: ff7df56
Author: Andrew Purtell 
Authored: Wed Jul 19 15:46:45 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:59:40 2017 -0700

--
 .../apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java  | 1 -
 .../hbase/master/cleaner/ReplicationZKLockCleanerChore.java  | 4 +++-
 .../hadoop/hbase/replication/master/ReplicationLogCleaner.java   | 2 +-
 .../hbase/replication/regionserver/ReplicationSourceManager.java | 2 +-
 4 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b03a5e74/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index da9ae15..048a623 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -561,5 +561,4 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 return ProtobufUtil.prependPBMagic(bytes);
   }
 
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b03a5e74/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
index dc5338e..3fa30bf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
@@ -76,7 +76,9 @@ public class ReplicationZKLockCleanerChore extends 
ScheduledChore {
   }
   Set rsSet = new HashSet(regionServers);
   List replicators = queues.getListOfReplicators();
-
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator: replicators) {
 try {
   String lockNode = queues.getLockZNode(replicator);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b03a5e74/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
index 9ecba11..7731240 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
@@ -98,7 +98,7 @@ public class ReplicationLogCleaner extends 
BaseLogCleanerDelegate {
 for (int retry = 0; ; retry++) {
   int v0 = replicationQueues.getQueuesZNodeCversion();
   List rss = replicationQueues.getListOfReplicators();
-  if (rss == null) {
+  if (rss == null || rss.isEmpty()) {
 LOG.debug("Didn't find any region server that replicates, won't 
prevent any deletions.");
 return ImmutableSet.of();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b03a5e74/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 81f06a3..51c3c31 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -221,7 +221,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   addSource(id);
 }
 List currentReplicators = 

[6/7] hbase git commit: HBASE-18330 NPE in ReplicationZKLockCleanerChore

2017-07-19 Thread apurtell
HBASE-18330 NPE in ReplicationZKLockCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2b60f4ec
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2b60f4ec
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2b60f4ec

Branch: refs/heads/branch-1.3
Commit: 2b60f4ecd98da8b4c74f044cc1ec0d221d960399
Parents: 8aabbdb
Author: Andrew Purtell 
Authored: Wed Jul 19 15:46:45 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:55:56 2017 -0700

--
 .../apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java  | 3 +++
 .../hbase/master/cleaner/ReplicationZKLockCleanerChore.java  | 4 +++-
 .../hadoop/hbase/replication/master/ReplicationLogCleaner.java   | 2 +-
 .../hbase/replication/regionserver/DumpReplicationQueues.java| 3 +++
 .../hbase/replication/regionserver/ReplicationSourceManager.java | 2 +-
 .../org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java| 3 +++
 6 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2b60f4ec/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 89a531a..636aebd 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -570,6 +570,9 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 if (queuesClient == null) return;
 try {
   List replicators = queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator : replicators) {
 List queueIds = queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/2b60f4ec/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
index dc5338e..3fa30bf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKLockCleanerChore.java
@@ -76,7 +76,9 @@ public class ReplicationZKLockCleanerChore extends 
ScheduledChore {
   }
   Set rsSet = new HashSet(regionServers);
   List replicators = queues.getListOfReplicators();
-
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator: replicators) {
 try {
   String lockNode = queues.getLockZNode(replicator);

http://git-wip-us.apache.org/repos/asf/hbase/blob/2b60f4ec/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
index 9ecba11..7731240 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
@@ -98,7 +98,7 @@ public class ReplicationLogCleaner extends 
BaseLogCleanerDelegate {
 for (int retry = 0; ; retry++) {
   int v0 = replicationQueues.getQueuesZNodeCversion();
   List rss = replicationQueues.getListOfReplicators();
-  if (rss == null) {
+  if (rss == null || rss.isEmpty()) {
 LOG.debug("Didn't find any region server that replicates, won't 
prevent any deletions.");
 return ImmutableSet.of();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2b60f4ec/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
 

[2/7] hbase git commit: HBASE-18330 NPE in ReplicationZKLockCleanerChore

2017-07-19 Thread apurtell
HBASE-18330 NPE in ReplicationZKLockCleanerChore


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cbf39042
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cbf39042
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cbf39042

Branch: refs/heads/branch-2
Commit: cbf390422b988b0fd4466c36bc8e4c767903e63c
Parents: 51067ca
Author: Andrew Purtell 
Authored: Wed Jul 19 15:46:08 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:46:22 2017 -0700

--
 .../apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java   | 3 +++
 .../hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java   | 2 +-
 .../hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java | 3 +++
 .../hbase/replication/regionserver/DumpReplicationQueues.java | 3 +++
 4 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cbf39042/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 02fe2f1..751e454 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -520,6 +520,9 @@ public class ReplicationPeersZKImpl extends 
ReplicationStateZKBase implements Re
 if (queuesClient == null) return;
 try {
   List replicators = queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return;
+  }
   for (String replicator : replicators) {
 List queueIds = queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/cbf39042/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
index 0504373..0115b6f 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
@@ -98,7 +98,7 @@ public class ReplicationQueuesClientZKImpl extends 
ReplicationStateZKBase implem
 for (int retry = 0; ; retry++) {
   int v0 = getQueuesZNodeCversion();
   List<String> rss = getListOfReplicators();
-  if (rss == null) {
+  if (rss == null || rss.isEmpty()) {
 LOG.debug("Didn't find any region server that replicates, won't 
prevent any deletions.");
 return ImmutableSet.of();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/cbf39042/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
index dafc4f8..6d8962e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.java
@@ -77,6 +77,9 @@ public class ReplicationZKNodeCleaner {
 Set<String> peerIds = new HashSet<>(this.replicationPeers.getAllPeerIds());
 try {
   List<String> replicators = this.queuesClient.getListOfReplicators();
+  if (replicators == null || replicators.isEmpty()) {
+return undeletedQueues;
+  }
   for (String replicator : replicators) {
 List<String> queueIds = this.queuesClient.getAllQueues(replicator);
 for (String queueId : queueIds) {
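
The hunks in this commit all apply one pattern: HBASE-18330 guards every consumer of getListOfReplicators() against a null or empty result before iterating or touching the per-replicator ZK nodes, since a cluster with no replicating region servers previously triggered the NPE named in the subject. A minimal, self-contained sketch of that guard; ReplicatorSource and the method names are illustrative stand-ins, not the real HBase interfaces:

  import java.util.Collections;
  import java.util.List;

  public class ReplicatorGuardSketch {
    /** Illustrative stand-in for the ZK-backed queues client; not the real HBase API. */
    interface ReplicatorSource {
      List<String> getListOfReplicators();
    }

    /** Returns the replicators to process, never null, so callers can iterate unconditionally. */
    static List<String> replicatorsToProcess(ReplicatorSource source) {
      List<String> replicators = source.getListOfReplicators();
      if (replicators == null || replicators.isEmpty()) {
        // Nothing is replicating: skip lock/queue cleaning instead of dereferencing null.
        return Collections.<String>emptyList();
      }
      return replicators;
    }
  }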

http://git-wip-us.apache.org/repos/asf/hbase/blob/cbf39042/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 2bedbfd..4bda75b 100644
--- 

[07/23] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/a860e48a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
new file mode 100644
index 000..7fcb7c7
--- /dev/null
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -0,0 +1,758 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rsgroup;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import com.google.protobuf.ServiceException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableStateManager;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.ServerListener;
+import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
+import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
+import org.apache.hadoop.hbase.security.access.AccessControlLists;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * This is an implementation of {@link RSGroupInfoManager}. Which makes
+ * use of an HBase table as the persistence store for the group information.
+ * It also makes use of zookeeper to store group information needed
+ * for bootstrapping during offline mode.
+ */
+public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListener {
+  private static final Log LOG = LogFactory.getLog(RSGroupInfoManagerImpl.class);
+
+  /** Table descriptor for hbase:rsgroup catalog table */
+  private final static HTableDescriptor RSGROUP_TABLE_DESC;
+  static {
+

[17/23] hbase git commit: HBASE-16133 RSGroupBasedLoadBalancer.retainAssignment() might miss a region

2017-07-19 Thread apurtell
HBASE-16133 RSGroupBasedLoadBalancer.retainAssignment() might miss a region


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/81087059
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/81087059
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/81087059

Branch: refs/heads/HBASE-15631-branch-1
Commit: 8108705923b7d287276917e2ca8943d4a710e543
Parents: 1705cb0
Author: Andrew Purtell 
Authored: Wed Jul 5 15:43:46 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/81087059/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index f69f093..c1b3c7d 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -216,9 +216,10 @@ public class RSGroupBasedLoadBalancer implements 
RSGroupableBalancer, LoadBalanc
 List<ServerName> candidateList = filterOfflineServers(info, servers);
 ServerName server = this.internalBalancer.randomAssignment(region,
 candidateList);
-if (server != null && !assignments.containsKey(server)) {
-  assignments.put(server, new ArrayList());
-} else if (server != null) {
+if (server != null) {
+  if (!assignments.containsKey(server)) {
+assignments.put(server, new ArrayList());
+  }
   assignments.get(server).add(region);
 } else {
   //if not server is available assign to bogus so it ends up in RIT
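
The subtle part of the hunk above is why a region could be "missed": in the old branching, the first region routed to a not-yet-seen server only created the empty list and the add never ran, so that region silently vanished from the retained assignment. A minimal sketch of the corrected get-or-create pattern, using plain String/Integer placeholders instead of ServerName/HRegionInfo purely for brevity:

  import java.util.ArrayList;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  public class GetOrCreateAssignmentSketch {
    /** Adds region to the server's list, creating the list on first use so no region is dropped. */
    static void assign(Map<String, List<Integer>> assignments, String server, Integer region) {
      if (server == null) {
        return; // the real balancer routes this case to the bogus server so the region ends up in RIT
      }
      List<Integer> regions = assignments.get(server);
      if (regions == null) {
        regions = new ArrayList<Integer>();
        assignments.put(server, regions);
      }
      regions.add(region); // runs on the first assignment too, unlike the pre-fix else-if
    }
  }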



[14/23] hbase git commit: Be robust against movement of the rsgroup table

2017-07-19 Thread apurtell
Be robust against movement of the rsgroup table


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cd84de6f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cd84de6f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cd84de6f

Branch: refs/heads/HBASE-15631-branch-1
Commit: cd84de6f563d4cc1a620b35265cbde9b97375b6c
Parents: 8d0cd98
Author: Andrew Purtell 
Authored: Wed Jul 5 15:19:32 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   | 35 +++-
 1 file changed, 19 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cd84de6f/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 7fcb7c7..6c991bd 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -77,6 +77,7 @@ import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
 import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -112,7 +113,6 @@ public class RSGroupInfoManagerImpl implements 
RSGroupInfoManager, ServerListene
   private volatile Map rsGroupMap;
   private volatile Map tableMap;
   private MasterServices master;
-  private Table rsGroupTable;
   private ClusterConnection conn;
   private ZooKeeperWatcher watcher;
   private RSGroupStartupWorker rsGroupStartupWorker;
@@ -281,10 +281,9 @@ public class RSGroupInfoManagerImpl implements 
RSGroupInfoManager, ServerListene
 // if online read from GROUP table
 if (forceOnline || isOnline()) {
   LOG.debug("Refreshing in Online mode.");
-  if (rsGroupTable == null) {
-rsGroupTable = conn.getTable(RSGROUP_TABLE_NAME);
+  try (Table rsGroupTable = conn.getTable(RSGROUP_TABLE_NAME)) {
+groupList.addAll(rsGroupSerDe.retrieveGroupList(rsGroupTable));
   }
-  groupList.addAll(rsGroupSerDe.retrieveGroupList(rsGroupTable));
 } else {
   LOG.debug("Refershing in Offline mode.");
   String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, rsGroupZNode);
@@ -724,28 +723,32 @@ public class RSGroupInfoManagerImpl implements 
RSGroupInfoManager, ServerListene
 
   private void multiMutate(List<Mutation> mutations)
   throws IOException {
-CoprocessorRpcChannel channel = rsGroupTable.coprocessorService(ROW_KEY);
-MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder
-  = MultiRowMutationProtos.MutateRowsRequest.newBuilder();
+MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
 for (Mutation mutation : mutations) {
   if (mutation instanceof Put) {
-mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
+mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(
   ClientProtos.MutationProto.MutationType.PUT, mutation));
   } else if (mutation instanceof Delete) {
-mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
+mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(
   ClientProtos.MutationProto.MutationType.DELETE, mutation));
   } else {
 throw new DoNotRetryIOException("multiMutate doesn't support "
   + mutation.getClass().getName());
   }
 }
-
-MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
-  MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
-try {
-  service.mutateRows(null, mmrBuilder.build());
-} catch (ServiceException ex) {
-  ProtobufUtil.toIOException(ex);
+MutateRowsRequest mrm = mrmBuilder.build();
+// Be robust against movement of the rsgroup table
+// TODO: Why is this necessary sometimes? Should we be using our own 
connection?
+conn.clearRegionCache(RSGROUP_TABLE_NAME);
+try (Table rsGroupTable = conn.getTable(RSGROUP_TABLE_NAME)) {
+  CoprocessorRpcChannel channel = rsGroupTable.coprocessorService(ROW_KEY);
+  
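
The net effect of this hunk is that the manager stops caching a Table handle for hbase:rsgroup; instead it clears the client's region-location cache and opens a short-lived Table for each read or multi-row mutation, so a move of the rsgroup region cannot leave the manager pointed at a stale location. A rough sketch of that access pattern, assuming an HBase 1.x client on the classpath (RSGROUP_TABLE below is a placeholder TableName and the scan body is elided):

  import java.io.IOException;

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ClusterConnection;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.Table;

  public class FreshTableAccessSketch {
    private static final TableName RSGROUP_TABLE = TableName.valueOf("hbase", "rsgroup");

    /** Re-resolves the table's region location on every call instead of holding a cached Table. */
    static void scanGroups(ClusterConnection conn) throws IOException {
      conn.clearRegionCache(RSGROUP_TABLE);            // forget possibly stale locations first
      try (Table table = conn.getTable(RSGROUP_TABLE); // short-lived handle, closed on exit
           ResultScanner scanner = table.getScanner(new Scan())) {
        for (Result row : scanner) {
          // deserialize a group entry here (see RSGroupSerDe in the backport)
        }
      }
    }
  }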

[18/23] hbase git commit: HBASE-16462 TestRSGroupsBas#testGroupBalance may hang due to uneven region distribution (Guangxu Cheng)

2017-07-19 Thread apurtell
HBASE-16462 TestRSGroupsBas#testGroupBalance may hang due to uneven region 
distribution (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f0f39cda
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f0f39cda
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f0f39cda

Branch: refs/heads/HBASE-15631-branch-1
Commit: f0f39cda56aee1b10de0646c16388e8acec78746
Parents: 4db0d14
Author: Andrew Purtell 
Authored: Wed Jul 5 17:57:24 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java| 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f0f39cda/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
index e5a1f6a..9baaa1a 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
@@ -79,6 +79,8 @@ public class TestRSGroups extends TestRSGroupsBase {
   @BeforeClass
   public static void setUp() throws Exception {
 TEST_UTIL = new HBaseTestingUtility();
+TEST_UTIL.getConfiguration().setFloat(
+"hbase.master.balancer.stochastic.tableSkewCost", 6000);
 TEST_UTIL.getConfiguration().set(
 HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
 RSGroupBasedLoadBalancer.class.getName());
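
The two added lines raise the stochastic balancer's table-skew cost, presumably so that each table's regions get spread across the group's servers quickly enough for testGroupBalance's wait loop to ever see a balanced state; with the default weight the balancer can leave the small test table skewed and the test hangs. A sketch of the same knob applied in a test, assuming HBaseTestingUtility from hbase-testing-util (the 6000 value simply mirrors the patch):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseTestingUtility;

  public class TableSkewCostSketch {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtility util = new HBaseTestingUtility();
      Configuration conf = util.getConfiguration();
      // Weight table skew heavily so the stochastic balancer prefers spreading each table.
      conf.setFloat("hbase.master.balancer.stochastic.tableSkewCost", 6000);
      util.startMiniCluster(3);   // a few region servers, as in the rsgroup tests
      try {
        // create tables, move them between groups, call balance here
      } finally {
        util.shutdownMiniCluster();
      }
    }
  }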



[02/23] hbase git commit: HBASE-18308 Eliminate the findbugs warnings for hbase-server

2017-07-19 Thread apurtell
HBASE-18308 Eliminate the findbugs warnings for hbase-server


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2da5b432
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2da5b432
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2da5b432

Branch: refs/heads/HBASE-15631-branch-1
Commit: 2da5b432a18fac0438346a2bb0ccea3a0beb90fe
Parents: 9802095
Author: Chia-Ping Tsai 
Authored: Thu Jul 20 00:36:16 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Jul 20 00:36:16 2017 +0800

--
 .../apache/hadoop/hbase/LocalHBaseCluster.java  | 10 
 .../hadoop/hbase/constraint/Constraints.java|  4 ++--
 .../hadoop/hbase/mapreduce/JarFinder.java   |  3 +++
 .../apache/hadoop/hbase/master/DeadServer.java  |  5 
 .../hadoop/hbase/master/ServerManager.java  |  5 ++--
 .../hbase/master/balancer/BaseLoadBalancer.java |  2 ++
 .../hadoop/hbase/regionserver/HRegion.java  |  3 ---
 .../hbase/regionserver/HRegionServer.java   |  3 ++-
 .../querymatcher/ExplicitColumnTracker.java | 14 +---
 .../regionserver/ReplicationSource.java |  2 +-
 .../org/apache/hadoop/hbase/tool/Canary.java|  5 ++--
 .../apache/hadoop/hbase/wal/WALSplitter.java| 24 
 12 files changed, 40 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index b98078a..42484e7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -313,12 +313,10 @@ public class LocalHBaseCluster {
*/
   public HMaster getActiveMaster() {
 for (JVMClusterUtil.MasterThread mt : masterThreads) {
-  if (mt.getMaster().isActiveMaster()) {
-// Ensure that the current active master is not stopped.
-// We don't want to return a stopping master as an active master.
-if (mt.getMaster().isActiveMaster()  && !mt.getMaster().isStopped()) {
-  return mt.getMaster();
-}
+  // Ensure that the current active master is not stopped.
+  // We don't want to return a stopping master as an active master.
+  if (mt.getMaster().isActiveMaster() && !mt.getMaster().isStopped()) {
+return mt.getMaster();
   }
 }
 return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
index 85ef717..c96bf3d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
@@ -616,8 +616,8 @@ public final class Constraints {
 @Override
 public int compare(Constraint c1, Constraint c2) {
   // compare the priorities of the constraints stored in their 
configuration
-  return Long.valueOf(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY))
-  .compareTo(c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
+  return Long.compare(c1.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY),
+c2.getConf().getLong(PRIORITY_KEY, DEFAULT_PRIORITY));
 }
   };
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2da5b432/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
index dfbe648..e0421d9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java
@@ -90,6 +90,9 @@ public class JarFinder {
   private static void zipDir(File dir, String relativePath, ZipOutputStream 
zos,
  boolean start) throws IOException {
 String[] dirList = dir.list();
+if (dirList == null) {
+  return;
+}
 for (String aDirList : dirList) {
   File f = new File(dir, aDirList);
   if (!f.isHidden()) {
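
The JarFinder change is the classic File.list() pitfall: it returns null (not an empty array) when the path is not a directory or cannot be read, so the unguarded for-each was both a findbugs NP warning and a latent NPE. A small self-contained illustration of the guard:

  import java.io.File;

  public class ListFilesSketch {
    /** Prints directory entries, tolerating File.list() returning null for unreadable paths. */
    static void printEntries(File dir) {
      String[] entries = dir.list();
      if (entries == null) {
        // Not a directory, I/O error, or permission problem: nothing to iterate.
        return;
      }
      for (String entry : entries) {
        System.out.println(entry);
      }
    }

    public static void main(String[] args) {
      printEntries(new File(args.length > 0 ? args[0] : "."));
    }
  }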


[12/23] hbase git commit: HBASE-16456 Fix findbugs warnings in hbase-rsgroup module (Guangxu Cheng)

2017-07-19 Thread apurtell
HBASE-16456 Fix findbugs warnings in hbase-rsgroup module (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4db0d142
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4db0d142
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4db0d142

Branch: refs/heads/HBASE-15631-branch-1
Commit: 4db0d142265f223a9f6f428e81dd5f187570e692
Parents: e9ab2b1
Author: Andrew Purtell 
Authored: Wed Jul 5 17:19:43 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java  | 16 +++-
 .../hadoop/hbase/rsgroup/RSGroupAdminServer.java|  2 +-
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java | 16 +---
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   | 14 --
 4 files changed, 37 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4db0d142/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 8fa9fdc..e71470e 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -89,7 +89,7 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
   public void start(CoprocessorEnvironment env) throws IOException {
 MasterCoprocessorEnvironment menv = (MasterCoprocessorEnvironment)env;
 master = menv.getMasterServices();
-groupInfoManager = new RSGroupInfoManagerImpl(master);
+setGroupInfoManager(new RSGroupInfoManagerImpl(master));
 groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
+Class<?> clazz =
 
master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, 
null);
@@ -107,6 +107,20 @@ public class RSGroupAdminEndpoint extends 
RSGroupAdminService
 return this;
   }
 
+  private static void setStaticGroupInfoManager(RSGroupInfoManagerImpl groupInfoManager) {
+RSGroupAdminEndpoint.groupInfoManager = groupInfoManager;
+  }
+
+  private void setGroupInfoManager(RSGroupInfoManagerImpl groupInfoManager) 
throws IOException {
+if (groupInfoManager == null) {
+  groupInfoManager = new RSGroupInfoManagerImpl(master);
+  groupInfoManager.init();
+} else if (!groupInfoManager.isInit()) {
+  groupInfoManager.init();
+}
+setStaticGroupInfoManager(groupInfoManager);
+  }
+
   public RSGroupInfoManager getGroupInfoManager() {
 return groupInfoManager;
   }
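
Routing the assignment through a dedicated private static setter is the usual way to quiet FindBugs' "write to static field from instance method" warning (ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD), while the instance-level setGroupInfoManager() also makes sure init() runs exactly once. A generic sketch of the shape, with Manager as a stand-in type rather than the real RSGroupInfoManagerImpl:

  public class StaticSetterSketch {
    /** Stand-in for RSGroupInfoManagerImpl; only what the sketch needs. */
    static class Manager {
      private boolean initialized;
      boolean isInit() { return initialized; }
      void init() { initialized = true; }
    }

    // Shared across coprocessor instances in the real endpoint.
    private static Manager manager;

    /** FindBugs is satisfied when the static write happens inside a static method. */
    private static void setStaticManager(Manager m) {
      StaticSetterSketch.manager = m;
    }

    /** Instance entry point: make sure the manager is initialized, then publish it. */
    void install(Manager m) {
      if (!m.isInit()) {
        m.init();
      }
      setStaticManager(m);
    }
  }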

http://git-wip-us.apache.org/repos/asf/hbase/blob/4db0d142/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 43ac3ad..e76e3e7 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -218,7 +218,7 @@ public class RSGroupAdminServer extends RSGroupAdmin {
 }
   }
   try {
-Thread.sleep(1000);
+manager.wait(1000);
   } catch (InterruptedException e) {
 LOG.warn("Sleep interrupted", e);
 Thread.currentThread().interrupt();
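
Swapping Thread.sleep(1000) for manager.wait(1000) only compiles into working code because the enclosing block in RSGroupAdminServer must already hold the manager's monitor (wait() would otherwise throw IllegalMonitorStateException); the payoff is that the monitor is released while waiting and the waiter can be woken early, instead of pinning the lock for the full second as sleep() does. A self-contained sketch of waiting on a monitor with a timeout, nothing HBase-specific assumed:

  public class MonitorWaitSketch {
    private final Object lock = new Object();
    private boolean done;

    /** Waits up to timeoutMs for 'done', releasing the lock while parked. */
    void awaitDone(long timeoutMs) throws InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      synchronized (lock) {                      // wait() requires holding the monitor
        while (!done) {
          long remaining = deadline - System.currentTimeMillis();
          if (remaining <= 0) {
            return;                              // timed out; caller re-checks state
          }
          lock.wait(remaining);                  // releases 'lock' while waiting
        }
      }
    }

    void markDone() {
      synchronized (lock) {
        done = true;
        lock.notifyAll();                        // wake any waiter promptly
      }
    }
  }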

http://git-wip-us.apache.org/repos/asf/hbase/blob/4db0d142/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index c1b3c7d..519177c 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -322,18 +322,19 @@ public class RSGroupBasedLoadBalancer implements 
RSGroupableBalancer, LoadBalanc
   private Set<HRegionInfo> getMisplacedRegions(
   Map<HRegionInfo, ServerName> regions) throws IOException {
 Set<HRegionInfo> misplacedRegions = new HashSet<HRegionInfo>();
-for (HRegionInfo region : regions.keySet()) {
-  ServerName assignedServer = regions.get(region);
+

[08/23] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/a860e48a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
new file mode 100644
index 000..00cd6b0
--- /dev/null
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -0,0 +1,955 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.rsgroup;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin.MasterSwitchType;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
+import 

[09/23] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/a860e48a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
new file mode 100644
index 000..979f762
--- /dev/null
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java
@@ -0,0 +1,1331 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: RSGroup.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class RSGroupProtos {
+  private RSGroupProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface RSGroupInfoOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string name = 1;
+/**
+ * required string name = 1;
+ */
+boolean hasName();
+/**
+ * required string name = 1;
+ */
+java.lang.String getName();
+/**
+ * required string name = 1;
+ */
+com.google.protobuf.ByteString
+getNameBytes();
+
+// repeated .hbase.pb.ServerName servers = 4;
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+
java.util.List
 
+getServersList();
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName 
getServers(int index);
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+int getServersCount();
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+java.util.List 
+getServersOrBuilderList();
+/**
+ * repeated .hbase.pb.ServerName servers = 4;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder 
getServersOrBuilder(
+int index);
+
+// repeated .hbase.pb.TableName tables = 3;
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+
java.util.List
 
+getTablesList();
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName 
getTables(int index);
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+int getTablesCount();
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+java.util.List 
+getTablesOrBuilderList();
+/**
+ * repeated .hbase.pb.TableName tables = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder 
getTablesOrBuilder(
+int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RSGroupInfo}
+   */
+  public static final class RSGroupInfo extends
+  com.google.protobuf.GeneratedMessage
+  implements RSGroupInfoOrBuilder {
+// Use RSGroupInfo.newBuilder() to construct.
+private RSGroupInfo(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private RSGroupInfo(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final RSGroupInfo defaultInstance;
+public static RSGroupInfo getDefaultInstance() {
+  return defaultInstance;
+}
+
+public RSGroupInfo getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private RSGroupInfo(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  name_ = input.readBytes();
+  break;
+}
+case 26: {
+  if (!((mutable_bitField0_ & 0x0004) == 0x0004)) {
+tables_ = new 
java.util.ArrayList();
+mutable_bitField0_ |= 0x0004;

[15/23] hbase git commit: HBASE-16430 Fix RegionServer Group's bug when moving multiple tables (Guangxu Cheng)

2017-07-19 Thread apurtell
HBASE-16430 Fix RegionServer Group's bug when moving multiple tables (Guangxu 
Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e9ab2b19
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e9ab2b19
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e9ab2b19

Branch: refs/heads/HBASE-15631-branch-1
Commit: e9ab2b194bf66076fb5c818710c112cca1e76451
Parents: 8108705
Author: Andrew Purtell 
Authored: Wed Jul 5 17:16:50 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |  2 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  | 54 
 2 files changed, 55 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e9ab2b19/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 6c991bd..5cb2e71 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -227,7 +227,7 @@ public class RSGroupInfoManagerImpl implements 
RSGroupInfoManager, ServerListene
 Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
 for(TableName tableName: tableNames) {
   if (tableMap.containsKey(tableName)) {
-RSGroupInfo src = new RSGroupInfo(rsGroupMap.get(tableMap.get(tableName)));
+RSGroupInfo src = new RSGroupInfo(newGroupMap.get(tableMap.get(tableName)));
 src.removeTable(tableName);
 newGroupMap.put(src.getName(), src);
   }
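
The one-word fix above matters when several of the tables being moved share a source group: each iteration has to read the group from newGroupMap, the working copy that already reflects earlier removals, rather than from the stale rsGroupMap snapshot, or every removal but the last gets overwritten. A minimal pure-Java reproduction of the copy-then-mutate rule (group names and tables are plain Strings here by assumption):

  import java.util.HashMap;
  import java.util.HashSet;
  import java.util.Map;
  import java.util.Set;

  public class CopyThenMutateSketch {
    /** Removes each table from its group, accumulating all changes in one working copy. */
    static Map<String, Set<String>> moveTablesOut(Map<String, Set<String>> groups,
        Map<String, String> tableToGroup, Iterable<String> tables) {
      Map<String, Set<String>> working = new HashMap<String, Set<String>>(groups);
      for (String table : tables) {
        String group = tableToGroup.get(table);
        if (group == null) {
          continue;
        }
        // Read from 'working', not 'groups': earlier removals for this group must be kept.
        Set<String> members = new HashSet<String>(working.get(group));
        members.remove(table);
        working.put(group, members);
      }
      return working;
    }
  }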

http://git-wip-us.apache.org/repos/asf/hbase/blob/e9ab2b19/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 9225e09..5fcdc7c 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -640,4 +640,58 @@ public abstract class TestRSGroupsBase {
   private String getGroupName(String baseName) {
 return groupPrefix+"_"+baseName+"_"+rand.nextInt(Integer.MAX_VALUE);
   }
+
+  @Test
+  public void testMultiTableMove() throws Exception {
+LOG.info("testMultiTableMove");
+
+final TableName tableNameA = TableName.valueOf(tablePrefix + 
"_testMultiTableMoveA");
+final TableName tableNameB = TableName.valueOf(tablePrefix + 
"_testMultiTableMoveB");
+final byte[] familyNameBytes = Bytes.toBytes("f");
+String newGroupName = getGroupName("testMultiTableMove");
+final RSGroupInfo newGroup = addGroup(rsGroupAdmin, newGroupName, 1);
+
+TEST_UTIL.createTable(tableNameA, familyNameBytes);
+TEST_UTIL.createTable(tableNameB, familyNameBytes);
+TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() {
+  @Override
+  public boolean evaluate() throws Exception {
+List regionsA = getTableRegionMap().get(tableNameA);
+if (regionsA == null)
+  return false;
+List regionsB = getTableRegionMap().get(tableNameB);
+if (regionsB == null)
+  return false;
+
+return getTableRegionMap().get(tableNameA).size() >= 1
+&& getTableRegionMap().get(tableNameB).size() >= 1;
+  }
+});
+
+RSGroupInfo tableGrpA = rsGroupAdmin.getRSGroupInfoOfTable(tableNameA);
+assertTrue(tableGrpA.getName().equals(RSGroupInfo.DEFAULT_GROUP));
+
+RSGroupInfo tableGrpB = rsGroupAdmin.getRSGroupInfoOfTable(tableNameB);
+assertTrue(tableGrpB.getName().equals(RSGroupInfo.DEFAULT_GROUP));
+//change table's group
+LOG.info("Moving table [" + tableNameA + "," + tableNameB + "] to " + 
newGroup.getName());
+rsGroupAdmin.moveTables(Sets.newHashSet(tableNameA, tableNameB), 
newGroup.getName());
+
+//verify group change
+Assert.assertEquals(newGroup.getName(),
+rsGroupAdmin.getRSGroupInfoOfTable(tableNameA).getName());
+
+Assert.assertEquals(newGroup.getName(),
+rsGroupAdmin.getRSGroupInfoOfTable(tableNameB).getName());
+
+//verify tables' not exist in old group
+Set<TableName> DefaultTables =
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables();
+

[13/23] hbase git commit: HBASE-17785 RSGroupBasedLoadBalancer fails to assign new table regions when cloning snapshot

2017-07-19 Thread apurtell
HBASE-17785 RSGroupBasedLoadBalancer fails to assign new table regions when 
cloning snapshot


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8d0cd98d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8d0cd98d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8d0cd98d

Branch: refs/heads/HBASE-15631-branch-1
Commit: 8d0cd98dd04b1ca2dbcbb6f5f040643c73b23262
Parents: a860e48
Author: Andrew Purtell 
Authored: Wed Apr 5 16:25:56 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../hbase/rsgroup/RSGroupAdminEndpoint.java | 28 +---
 .../hadoop/hbase/rsgroup/TestRSGroups.java  | 22 +++
 2 files changed, 47 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8d0cd98d/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 00cd6b0..8fa9fdc 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -77,7 +77,6 @@ import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGro
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 
-
 public class RSGroupAdminEndpoint extends RSGroupAdminService
 implements CoprocessorService, Coprocessor, MasterObserver {
 
@@ -274,12 +273,36 @@ public class RSGroupAdminEndpoint extends 
RSGroupAdminService
 done.run(builder.build());
   }
 
+  void assignTableToGroup(HTableDescriptor desc) throws IOException {
+String groupName =
+master.getNamespaceDescriptor(desc.getTableName().getNamespaceAsString())
+.getConfigurationValue(RSGroupInfo.NAMESPACEDESC_PROP_GROUP);
+if (groupName == null) {
+  groupName = RSGroupInfo.DEFAULT_GROUP;
+}
+RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
+if (rsGroupInfo == null) {
+  throw new ConstraintException("Default RSGroup (" + groupName + ") for this table's "
+  + "namespace does not exist.");
+}
+if (!rsGroupInfo.containsTable(desc.getTableName())) {
+  groupAdminServer.moveTables(Sets.newHashSet(desc.getTableName()), groupName);
+}
+  }
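
assignTableToGroup() reads the RSGroupInfo.NAMESPACEDESC_PROP_GROUP property off the table's namespace and falls back to the default group, so with this patch both preCreateTable and preCloneSnapshot drop new tables into the namespace's group. A hedged sketch of wiring a namespace to a group so created or cloned tables follow it; the Admin/NamespaceDescriptor calls are standard HBase 1.x client API, the namespace and group names are made up, and the exact property plumbing is an assumption based on the hunk above:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.NamespaceDescriptor;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

  public class NamespaceGroupSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        // Tables created or clone-snapshotted into 'payments' should land in rsgroup 'fast_ssd'.
        NamespaceDescriptor ns = NamespaceDescriptor.create("payments")
            .addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, "fast_ssd")
            .build();
        admin.createNamespace(ns);
      }
    }
  }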
+
+  /
+  // MasterObserver overrides
+  /
+
+  // Assign table to default RSGroup.
   @Override
   public void preCreateTable(ObserverContext ctx,
   HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
 groupAdminServer.prepareRSGroupForTable(desc);
+assignTableToGroup(desc);
   }
 
+  // Remove table from its RSGroup.
   @Override
   public void postDeleteTable(ObserverContext 
ctx,
   TableName tableName) throws IOException {
@@ -663,7 +686,7 @@ public class RSGroupAdminEndpoint extends 
RSGroupAdminService
   public void preCloneSnapshot(ObserverContext 
ctx,
SnapshotDescription snapshot,
HTableDescriptor hTableDescriptor) throws 
IOException {
-
+assignTableToGroup(hTableDescriptor);
   }
 
   @Override
@@ -951,5 +974,4 @@ public class RSGroupAdminEndpoint extends 
RSGroupAdminService
 
   }
 
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8d0cd98d/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
index 34add63..e5a1f6a 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import 

[21/23] hbase git commit: HBASE-17350 Fixup of regionserver group-based assignment

2017-07-19 Thread apurtell
HBASE-17350 Fixup of regionserver group-based assignment


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/582977f2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/582977f2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/582977f2

Branch: refs/heads/HBASE-15631-branch-1
Commit: 582977f2f0f44d63ee6d3ef3fc41282657628445
Parents: f0f39cd
Author: Andrew Purtell 
Authored: Wed Jul 5 18:09:48 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../hadoop/hbase/rsgroup/RSGroupInfo.java   |  19 +--
 .../apache/hadoop/hbase/util/Addressing.java|  22 +++
 .../hadoop/hbase/util/TestAddressing.java   |  39 +
 .../hbase/rsgroup/RSGroupAdminServer.java   | 159 ++-
 .../hbase/rsgroup/RSGroupInfoManager.java   |   4 +-
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |  42 +++--
 .../apache/hadoop/hbase/rsgroup/Utility.java|  48 ++
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |   2 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   3 +-
 .../hadoop/hbase/master/RegionStates.java   |   2 +-
 .../hadoop/hbase/master/ServerManager.java  |   1 -
 hbase-shell/src/main/ruby/shell.rb  |   7 +-
 hbase-shell/src/main/ruby/shell/commands.rb |   1 -
 .../src/main/ruby/shell/commands/add_rsgroup.rb |   3 +-
 .../main/ruby/shell/commands/balance_rsgroup.rb |   5 +-
 .../src/main/ruby/shell/commands/get_rsgroup.rb |   5 +-
 .../ruby/shell/commands/get_server_rsgroup.rb   |   5 +-
 .../ruby/shell/commands/get_table_rsgroup.rb|   5 +-
 .../main/ruby/shell/commands/list_procedures.rb |   2 +-
 .../main/ruby/shell/commands/list_rsgroups.rb   |   3 +-
 .../ruby/shell/commands/move_rsgroup_servers.rb |  37 -
 .../ruby/shell/commands/move_rsgroup_tables.rb  |  37 -
 .../ruby/shell/commands/move_servers_rsgroup.rb |  40 +
 .../ruby/shell/commands/move_tables_rsgroup.rb  |  40 +
 .../main/ruby/shell/commands/remove_rsgroup.rb  |   3 +-
 .../src/test/ruby/shell/rsgroup_shell_test.rb   |   4 +-
 26 files changed, 341 insertions(+), 197 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/582977f2/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index 0fb02d8..7297ff2 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -20,16 +20,19 @@
 
 package org.apache.hadoop.hbase.rsgroup;
 
-import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
-
 import java.util.Collection;
 import java.util.NavigableSet;
 import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
 
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Addressing;
+
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
 
 /**
  * Stores the group information of region server groups.
@@ -53,14 +56,13 @@ public class RSGroupInfo {
   Set servers,
   NavigableSet tables) {
 this.name = name;
-this.servers = servers;
-this.tables = tables;
+this.servers = new TreeSet<>(new Addressing.HostAndPortComparable());
+this.servers.addAll(servers);
+this.tables = new TreeSet<>(tables);
   }
 
   public RSGroupInfo(RSGroupInfo src) {
-name = src.getName();
-servers = Sets.newHashSet(src.getServers());
-tables = Sets.newTreeSet(src.getTables());
+this(src.getName(), src.servers, src.tables);
   }
 
   /**
@@ -183,5 +185,4 @@ public class RSGroupInfo {
 result = 31 * result + name.hashCode();
 return result;
   }
-
 }
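
The constructor now copies the servers into a TreeSet ordered by Addressing.HostAndPortComparable because Guava's HostAndPort does not implement Comparable, and a deterministic ordering keeps group membership stable for display and comparison. The added class itself is not reproduced in this mail, so the comparator below is only a plausible sketch of host-then-port ordering, not the committed code:

  import java.io.Serializable;
  import java.util.Comparator;

  import com.google.common.net.HostAndPort;

  public class HostAndPortOrderingSketch {
    /** Hypothetical stand-in for Addressing.HostAndPortComparable: order by host, then port. */
    static class HostThenPort implements Comparator<HostAndPort>, Serializable {
      @Override
      public int compare(HostAndPort a, HostAndPort b) {
        int byHost = a.getHostText().compareTo(b.getHostText());
        if (byHost != 0) {
          return byHost;
        }
        return Integer.compare(a.getPortOrDefault(0), b.getPortOrDefault(0));
      }
    }
  }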

http://git-wip-us.apache.org/repos/asf/hbase/blob/582977f2/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
index 31fb1f5..71f6127 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java
@@ -24,10 +24,13 @@ import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.NetworkInterface;
 import java.net.SocketException;

[20/23] hbase git commit: HBASE-15848 Fix possible null point dereference in RSGroupBasedLoadBalancer#getMisplacedRegions (Stephen Yuan Jiang)

2017-07-19 Thread apurtell
HBASE-15848 Fix possible null point dereference in 
RSGroupBasedLoadBalancer#getMisplacedRegions (Stephen Yuan Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5a12a16b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5a12a16b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5a12a16b

Branch: refs/heads/HBASE-15631-branch-1
Commit: 5a12a16b6e2fe8ef5e08af3b8a18f8eacc639d0e
Parents: cd84de6
Author: Andrew Purtell 
Authored: Wed Jul 5 15:37:10 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5a12a16b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index fea1275..f69f093 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -331,7 +331,7 @@ public class RSGroupBasedLoadBalancer implements 
RSGroupableBalancer, LoadBalanc
 " on server: " + assignedServer +
 " found in group: " +
 
RSGroupInfoManager.getRSGroupOfServer(assignedServer.getHostPort()) +
-" outside of group: " + info.getName());
+" outside of group: " + (info == null ? "UNKNOWN" : 
info.getName()));
 misplacedRegions.add(region);
   }
 }
@@ -352,7 +352,7 @@ public class RSGroupBasedLoadBalancer implements 
RSGroupableBalancer, LoadBalanc
 try {
   info = RSGroupInfoManager.getRSGroup(
   RSGroupInfoManager.getRSGroupOfTable(region.getTable()));
-}catch(IOException exp){
+} catch (IOException exp) {
   LOG.debug("Group information null for region of table " + 
region.getTable(),
   exp);
 }



[05/23] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/a860e48a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 51aeff8..c409ee9 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2925,4 +2925,79 @@ public class TestAccessController extends SecureTestUtil 
{
 verifyDenied(replicateLogEntriesAction, USER_CREATE, USER_RW, USER_RO, 
USER_NONE, USER_OWNER,
   USER_GROUP_READ, USER_GROUP_ADMIN, USER_GROUP_CREATE);
   }
+
+  @Test
+  public void testMoveServers() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preMoveServers(ObserverContext.createAndPrepare(CP_ENV, null),
+null, null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testMoveTables() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preMoveTables(ObserverContext.createAndPrepare(CP_ENV, null),
+null, null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testAddGroup() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preAddRSGroup(ObserverContext.createAndPrepare(CP_ENV, null),
+null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testRemoveGroup() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preRemoveRSGroup(ObserverContext.createAndPrepare(CP_ENV, 
null),
+null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
+
+  @Test
+  public void testBalanceGroup() throws Exception {
+AccessTestAction action1 = new AccessTestAction() {
+  @Override
+  public Object run() throws Exception {
+
ACCESS_CONTROLLER.preBalanceRSGroup(ObserverContext.createAndPrepare(CP_ENV, 
null),
+null);
+return null;
+  }
+};
+
+verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a860e48a/hbase-shell/pom.xml
--
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index a2a1d0c..44b6095 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -254,6 +254,41 @@
 
   
   
+
+  rsgroup
+  
+
+!skip-rsgroup
+
+  
+  
+
+  org.apache.hbase
+  hbase-rsgroup
+
+  
+  
+
+  
+org.codehaus.mojo
+build-helper-maven-plugin
+
+  
+add-test-source
+
+  add-test-source
+
+
+  
+src/test/rsgroup
+  
+
+  
+
+  
+
+  
+
 
 
   skipShellTests

http://git-wip-us.apache.org/repos/asf/hbase/blob/a860e48a/hbase-shell/src/main/ruby/hbase.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase.rb 
b/hbase-shell/src/main/ruby/hbase.rb
index 88a6f04..2c0aecb 100644
--- a/hbase-shell/src/main/ruby/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase.rb
@@ -112,6 +112,7 @@ require 'hbase/quotas'
 require 'hbase/replication_admin'
 require 'hbase/security'
 require 'hbase/visibility_labels'
+require 'hbase/rsgroup_admin'
 
 
 include HBaseQuotasConstants

http://git-wip-us.apache.org/repos/asf/hbase/blob/a860e48a/hbase-shell/src/main/ruby/hbase/hbase.rb
--
diff --git 

[03/23] hbase git commit: HBASE-18235 LoadBalancer.BOGUS_SERVER_NAME should not have a bogus hostname

2017-07-19 Thread apurtell
HBASE-18235 LoadBalancer.BOGUS_SERVER_NAME should not have a bogus hostname


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a98bc190
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a98bc190
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a98bc190

Branch: refs/heads/HBASE-15631-branch-1
Commit: a98bc190d6126dc68abc89153d17778432b3c7d1
Parents: a1cf043
Author: Andrew Purtell 
Authored: Mon Jul 3 17:54:36 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../main/java/org/apache/hadoop/hbase/master/LoadBalancer.java  | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a98bc190/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
index 937b32f..a80cdc3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
@@ -52,8 +52,9 @@ import org.apache.hadoop.hbase.TableName;
 @InterfaceAudience.Private
 public interface LoadBalancer extends Configurable, Stoppable, 
ConfigurationObserver {
 
-  //used to signal to the caller that the region(s) cannot be assigned
-  ServerName BOGUS_SERVER_NAME = ServerName.parseServerName("localhost,1,1");
+  // Used to signal to the caller that the region(s) cannot be assigned
+  // We deliberately use 'localhost' so the operation will fail fast
+  ServerName BOGUS_SERVER_NAME = ServerName.valueOf("localhost,1,1");
 
   /**
* Set the current cluster status.  This allows a LoadBalancer to map host 
name to a server



[11/23] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

  Applied 
https://issues.apache.org/jira/secure/attachment/12799888/HBASE-15631.02.branch-1.patch
  Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a860e48a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a860e48a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a860e48a

Branch: refs/heads/HBASE-15631-branch-1
Commit: a860e48a3a959e2e8ea6f4d3d6aa85b7c6108e2d
Parents: 2da5b43
Author: Andrew Purtell 
Authored: Wed Jul 5 13:39:35 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../org/apache/hadoop/hbase/ServerName.java |19 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java |36 +-
 .../hadoop/hbase/rsgroup/RSGroupInfo.java   |   187 +
 hbase-it/pom.xml|10 +
 .../hbase/rsgroup/IntegrationTestRSGroup.java   |99 +
 hbase-protocol/pom.xml  | 2 +
 .../hbase/protobuf/generated/ClientProtos.java  | 2 +-
 .../hbase/protobuf/generated/MasterProtos.java  |30 +-
 .../protobuf/generated/RSGroupAdminProtos.java  | 11855 +
 .../hbase/protobuf/generated/RSGroupProtos.java |  1331 ++
 .../protobuf/generated/SnapshotProtos.java  |24 +-
 hbase-protocol/src/main/protobuf/RSGroup.proto  |34 +
 .../src/main/protobuf/RSGroupAdmin.proto|   136 +
 hbase-rsgroup/pom.xml   |   278 +
 .../hadoop/hbase/rsgroup/RSGroupAdmin.java  |   121 +
 .../hbase/rsgroup/RSGroupAdminClient.java   |   204 +
 .../hbase/rsgroup/RSGroupAdminEndpoint.java |   955 ++
 .../hbase/rsgroup/RSGroupAdminServer.java   |   503 +
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java |   428 +
 .../hbase/rsgroup/RSGroupInfoManager.java   |   132 +
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |   758 ++
 .../hadoop/hbase/rsgroup/RSGroupSerDe.java  |88 +
 .../hbase/rsgroup/RSGroupableBalancer.java  |29 +
 .../balancer/TestRSGroupBasedLoadBalancer.java  |   574 +
 .../hadoop/hbase/rsgroup/TestRSGroups.java  |   287 +
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |   643 +
 .../hbase/rsgroup/TestRSGroupsOfflineMode.java  |   187 +
 .../rsgroup/VerifyingRSGroupAdminClient.java|   149 +
 .../hbase/tmpl/master/MasterStatusTmpl.jamon| 2 +
 .../apache/hadoop/hbase/LocalHBaseCluster.java  | 3 +
 .../BaseMasterAndRegionObserver.java|53 +
 .../hbase/coprocessor/BaseMasterObserver.java   |54 +
 .../hbase/coprocessor/MasterObserver.java   |98 +
 .../hadoop/hbase/master/AssignmentManager.java  |16 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |40 +-
 .../hadoop/hbase/master/LoadBalancer.java   | 3 +
 .../hbase/master/MasterCoprocessorHost.java |   137 +
 .../hadoop/hbase/master/MasterRpcServices.java  | 8 +
 .../hadoop/hbase/master/MasterServices.java | 5 +
 .../hbase/security/access/AccessController.java |32 +
 .../hbase/coprocessor/TestMasterObserver.java   |52 +
 .../hbase/master/MockNoopMasterServices.java| 5 +
 .../master/TestAssignmentManagerOnCluster.java  |   127 +-
 .../hadoop/hbase/master/TestCatalogJanitor.java | 3 +
 .../hbase/master/TestMasterStatusServlet.java   |12 +-
 .../normalizer/TestSimpleRegionNormalizer.java  | 2 +-
 .../security/access/TestAccessController.java   |75 +
 hbase-shell/pom.xml |35 +
 hbase-shell/src/main/ruby/hbase.rb  | 1 +
 hbase-shell/src/main/ruby/hbase/hbase.rb| 4 +
 .../src/main/ruby/hbase/rsgroup_admin.rb|   150 +
 hbase-shell/src/main/ruby/shell.rb  |21 +
 hbase-shell/src/main/ruby/shell/commands.rb | 4 +
 .../src/main/ruby/shell/commands/add_rsgroup.rb |39 +
 .../main/ruby/shell/commands/balance_rsgroup.rb |37 +
 .../src/main/ruby/shell/commands/get_rsgroup.rb |44 +
 .../ruby/shell/commands/get_server_rsgroup.rb   |40 +
 .../ruby/shell/commands/get_table_rsgroup.rb|41 +
 .../main/ruby/shell/commands/list_rsgroups.rb   |50 +
 .../ruby/shell/commands/move_rsgroup_servers.rb |37 +
 .../ruby/shell/commands/move_rsgroup_tables.rb  |37 +
 .../main/ruby/shell/commands/remove_rsgroup.rb  |37 +
 .../apache/hadoop/hbase/client/TestShell.java   | 2 +-
 .../hbase/client/rsgroup/TestShellRSGroups.java |   111 +
 .../src/test/ruby/shell/rsgroup_shell_test.rb   |96 +
 hbase-shell/src/test/ruby/test_helper.rb| 4 +
 pom.xml |23 +
 67 files changed, 20585 insertions(+), 56 deletions(-)
--



[01/23] hbase git commit: HBASE-18390 Sleep too long when finding region location failed [Forced Update!]

2017-07-19 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/HBASE-15631-branch-1 a0783e326 -> a98bc190d (forced update)


HBASE-18390 Sleep too long when finding region location failed


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/98020957
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/98020957
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/98020957

Branch: refs/heads/HBASE-15631-branch-1
Commit: 980209579ba13cc1fecc1e2ce5403b38877600bf
Parents: af359d0
Author: Phil Yang 
Authored: Wed Jul 19 11:34:57 2017 +0800
Committer: Phil Yang 
Committed: Wed Jul 19 12:05:21 2017 +0800

--
 .../hadoop/hbase/client/ConnectionUtils.java| 14 --
 .../client/RegionAdminServiceCallable.java  |  9 +
 .../hbase/client/RegionServerCallable.java  |  9 +
 .../hbase/client/TestConnectionUtils.java   | 20 
 4 files changed, 2 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/98020957/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 7155659..96e7788 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -74,20 +74,6 @@ public class ConnectionUtils {
 
 
   /**
-   * Adds / subs an up to 50% jitter to a pause time. Minimum is 1.
-   * @param pause the expected pause.
-   * @param jitter the jitter ratio, between 0 and 1, exclusive.
-   */
-  public static long addJitter(final long pause, final float jitter) {
-float lag = pause * (ThreadLocalRandom.current().nextFloat() - 0.5f) * 
jitter;
-long newPause = pause + (long) lag;
-if (newPause <= 0) {
-  return 1;
-}
-return newPause;
-  }
-
-  /**
* @param conn The connection for which to replace the generator.
* @param cnm Replaces the nonce generator used, for testing.
* @return old nonce generator.

http://git-wip-us.apache.org/repos/asf/hbase/blob/98020957/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index 675a2f9..386925e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -50,8 +50,6 @@ public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<T> {
   protected final byte[] row;
   protected final int replicaId;
 
-  protected final static int MIN_WAIT_DEAD_SERVER = 1;
-
   public RegionAdminServiceCallable(ClusterConnection connection,
   RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] 
row) {
 this(connection, rpcControllerFactory, null, tableName, row);
@@ -138,12 +136,7 @@ public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<T> {
 
   @Override
   public long sleep(long pause, int tries) {
-long sleep = ConnectionUtils.getPauseTime(pause, tries);
-if (sleep < MIN_WAIT_DEAD_SERVER
-&& (location == null || connection.isDeadServer(location.getServerName()))) {
-  sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);
-}
-return sleep;
+return ConnectionUtils.getPauseTime(pause, tries);
   }
 
   public static RegionLocations getRegionLocations(

http://git-wip-us.apache.org/repos/asf/hbase/blob/98020957/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index b446c3f..e0b09f3 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -51,8 +51,6 @@ public abstract class RegionServerCallable<T> implements RetryingCallable<T> {
   protected HRegionLocation location;
   private ClientService.BlockingInterface stub;
 
-  protected final static int MIN_WAIT_DEAD_SERVER = 1;
-
   /**
* @param 
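For context on the HBASE-18390 change above: the special case that clamped the retry sleep to roughly one jittered millisecond (addJitter(MIN_WAIT_DEAD_SERVER, 0.10f)) when the location was unknown or the server was dead is removed, so every retry now sleeps according to the standard backoff from ConnectionUtils.getPauseTime(pause, tries). The sketch below only approximates that backoff shape and is not the HBase implementation; the multiplier table is an assumption for illustration.

public class BackoffSketch {
  // Illustrative per-retry multipliers, loosely modeled on HConstants.RETRY_BACKOFF.
  private static final int[] MULTIPLIERS = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};

  // Rough stand-in for ConnectionUtils.getPauseTime: base pause scaled by a per-try multiplier.
  static long pauseTime(long basePauseMs, int tries) {
    int idx = Math.min(tries, MULTIPLIERS.length - 1);
    return basePauseMs * MULTIPLIERS[idx];
  }

  public static void main(String[] args) {
    for (int t = 0; t < 6; t++) {
      System.out.println("try " + t + " -> sleep ~" + pauseTime(100, t) + " ms");
    }
  }
}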

[22/23] hbase git commit: HBASE-17758 [RSGROUP] Add shell command to move servers and tables at the same time (Guangxu Cheng)

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/a1cf043a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index e71470e..ee30e15 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -68,6 +68,8 @@ import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupI
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
@@ -206,6 +208,26 @@ public class RSGroupAdminEndpoint extends 
RSGroupAdminService
   }
 
   @Override
+  public void moveServersAndTables(RpcController controller, MoveServersAndTablesRequest request,
+      RpcCallback<MoveServersAndTablesResponse> done) {
+MoveServersAndTablesResponse.Builder builder = 
MoveServersAndTablesResponse.newBuilder();
+try {
+  Set<HostAndPort> hostPorts = Sets.newHashSet();
+  for (HBaseProtos.ServerName el : request.getServersList()) {
+hostPorts.add(HostAndPort.fromParts(el.getHostName(), el.getPort()));
+  }
+  Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
+  for (HBaseProtos.TableName tableName : request.getTableNameList()) {
+tables.add(ProtobufUtil.toTableName(tableName));
+  }
+  groupAdminServer.moveServersAndTables(hostPorts, tables, 
request.getTargetGroup());
+} catch (IOException e) {
+  ResponseConverter.setControllerException(controller, e);
+}
+done.run(builder.build());
+  }
+
+  @Override
   public void addRSGroup(RpcController controller,
AddRSGroupRequest request,
RpcCallback<AddRSGroupResponse> done) {
@@ -953,6 +975,16 @@ public class RSGroupAdminEndpoint extends 
RSGroupAdminService
   }
 
   @Override
+  public void preMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<HostAndPort> servers, Set<TableName> tables, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void postMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<HostAndPort> servers, Set<TableName> tables, String targetGroup) throws IOException {
+  }
+
+  @Override
+  public void preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
 String name) throws IOException {
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a1cf043a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 1069ac0..863b71e 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -501,6 +501,19 @@ public class RSGroupAdminServer extends RSGroupAdmin {
   }
 
   @Override
+  public void moveServersAndTables(Set<HostAndPort> servers, Set<TableName> tables,
+  String targetGroup) throws IOException {
+if (servers == null || servers.isEmpty() ) {
+  throw new ConstraintException("The list of servers to move cannot be 
null or empty.");
+}
+if (tables == null || tables.isEmpty()) {
+  throw new ConstraintException("The list of tables to move cannot be null 
or empty.");
+}
+moveServers(servers, targetGroup);
+moveTables(tables, targetGroup);
+  }
+
+  @Override
   public void close() throws IOException {
   }
 }
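A hypothetical caller-side sketch of the combined move introduced above (the server name, port, table name and wrapper class are illustrative; only the moveServersAndTables signature comes from the patch):

import java.io.IOException;
import java.util.Set;

import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;

public class MoveServersAndTablesSketch {
  // Moves one region server and one table to targetGroup in a single call,
  // instead of separate moveServers / moveTables invocations.
  static void moveBoth(RSGroupAdmin admin, String targetGroup) throws IOException {
    Set<HostAndPort> servers = Sets.newHashSet(HostAndPort.fromParts("rs1.example.com", 16020));
    Set<TableName> tables = Sets.newHashSet(TableName.valueOf("t1"));
    admin.moveServersAndTables(servers, tables, targetGroup);
  }
}

The shell-level equivalent added by this change is the move_servers_tables_rsgroup command (see the diffstat in the following message).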

http://git-wip-us.apache.org/repos/asf/hbase/blob/a1cf043a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
--
diff --git 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
 
b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 5b5563e..e11cb57 100644
--- 
a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ 

[23/23] hbase git commit: HBASE-17758 [RSGROUP] Add shell command to move servers and tables at the same time (Guangxu Cheng)

2017-07-19 Thread apurtell
HBASE-17758 [RSGROUP] Add shell command to move servers and tables at the same 
time (Guangxu Cheng)

HBASE-17806 TestRSGroups#testMoveServersAndTables is flaky in master branch


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a1cf043a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a1cf043a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a1cf043a

Branch: refs/heads/HBASE-15631-branch-1
Commit: a1cf043a9f537c84f94943b9007da6a9c0217ebf
Parents: f4e6ae6
Author: Andrew Purtell 
Authored: Wed Jul 5 18:29:14 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../hadoop/hbase/rsgroup/RSGroupInfo.java   |3 +
 .../protobuf/generated/RSGroupAdminProtos.java  | 1759 +-
 .../src/main/protobuf/RSGroupAdmin.proto|   12 +
 .../hadoop/hbase/rsgroup/RSGroupAdmin.java  |   11 +
 .../hbase/rsgroup/RSGroupAdminClient.java   |   22 +
 .../hbase/rsgroup/RSGroupAdminEndpoint.java |   32 +
 .../hbase/rsgroup/RSGroupAdminServer.java   |   13 +
 .../hbase/rsgroup/RSGroupInfoManager.java   |   10 +
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |   24 +
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |  111 ++
 .../rsgroup/VerifyingRSGroupAdminClient.java|7 +
 .../BaseMasterAndRegionObserver.java|   10 +
 .../hbase/coprocessor/BaseMasterObserver.java   |   10 +
 .../hbase/coprocessor/MasterObserver.java   |   20 +-
 .../hbase/master/MasterCoprocessorHost.java |   26 +
 .../hbase/security/access/AccessController.java |6 +
 .../hbase/coprocessor/TestMasterObserver.java   |   10 +
 .../src/main/ruby/hbase/rsgroup_admin.rb|   14 +
 hbase-shell/src/main/ruby/shell.rb  |1 +
 .../commands/move_servers_tables_rsgroup.rb |   37 +
 20 files changed, 2115 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a1cf043a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
index 7297ff2..74572ac 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java
@@ -150,6 +150,9 @@ public class RSGroupInfo {
 sb.append(", ");
 sb.append(" Servers:");
 sb.append(this.servers);
+sb.append(", ");
+sb.append(" Tables:");
+sb.append(this.tables);
 return sb.toString();
 
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a1cf043a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
index 3d1f4bd..ca1db1e 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
@@ -10754,6 +10754,1621 @@ public final class RSGroupAdminProtos {
 // 
@@protoc_insertion_point(class_scope:hbase.pb.GetRSGroupInfoOfServerResponse)
   }
 
+  public interface MoveServersAndTablesRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string target_group = 1;
+/**
+ * required string target_group = 1;
+ */
+boolean hasTargetGroup();
+/**
+ * required string target_group = 1;
+ */
+java.lang.String getTargetGroup();
+/**
+ * required string target_group = 1;
+ */
+com.google.protobuf.ByteString
+getTargetGroupBytes();
+
+// repeated .hbase.pb.ServerName servers = 2;
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+
java.util.List
 
+getServersList();
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName 
getServers(int index);
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+int getServersCount();
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+java.util.List 
+getServersOrBuilderList();
+/**
+ * repeated .hbase.pb.ServerName servers = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder 
getServersOrBuilder(
+int index);
+
+// repeated 

[06/23] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/a860e48a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
new file mode 100644
index 000..9225e09
--- /dev/null
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -0,0 +1,643 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HBaseCluster;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.security.SecureRandom;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public abstract class TestRSGroupsBase {
+  protected static final Log LOG = LogFactory.getLog(TestRSGroupsBase.class);
+
+  //shared
+  protected final static String groupPrefix = "Group";
+  protected final static String tablePrefix = "Group";
+  protected final static SecureRandom rand = new SecureRandom();
+
+  //shared, cluster type specific
+  protected static HBaseTestingUtility TEST_UTIL;
+  protected static HBaseAdmin admin;
+  protected static HBaseCluster cluster;
+  protected static RSGroupAdmin rsGroupAdmin;
+
+  public final static long WAIT_TIMEOUT = 6*5;
+  public final static int NUM_SLAVES_BASE = 4; //number of slaves for the 
smallest cluster
+
+
+
+  protected RSGroupInfo addGroup(RSGroupAdmin gAdmin, String groupName,
+ int serverCount) throws IOException, 
InterruptedException {
+RSGroupInfo defaultInfo = gAdmin
+.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP);
+assertTrue(defaultInfo != null);
+assertTrue(defaultInfo.getServers().size() >= serverCount);
+gAdmin.addRSGroup(groupName);
+
+Set<HostAndPort> set = new HashSet<>();
+for(HostAndPort server: defaultInfo.getServers()) {
+  if(set.size() == serverCount) {
+break;
+  }
+  set.add(server);
+}
+gAdmin.moveServers(set, groupName);
+RSGroupInfo result = gAdmin.getRSGroupInfo(groupName);
+assertTrue(result.getServers().size() >= serverCount);
+return result;
+  }
+
+  static void removeGroup(RSGroupAdminClient groupAdmin, String groupName) 
throws IOException {
+RSGroupInfo RSGroupInfo = groupAdmin.getRSGroupInfo(groupName);
+groupAdmin.moveTables(RSGroupInfo.getTables(), RSGroupInfo.DEFAULT_GROUP);
+groupAdmin.moveServers(RSGroupInfo.getServers(), 
RSGroupInfo.DEFAULT_GROUP);
+groupAdmin.removeRSGroup(groupName);
+  }
+
+  protected void deleteTableIfNecessary() throws IOException {
+for (HTableDescriptor desc : 
TEST_UTIL.getHBaseAdmin().listTables(tablePrefix+".*")) {
+  

[10/23] hbase git commit: HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1

2017-07-19 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/a860e48a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
new file mode 100644
index 000..3d1f4bd
--- /dev/null
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
@@ -0,0 +1,11855 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: RSGroupAdmin.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class RSGroupAdminProtos {
+  private RSGroupAdminProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface ListTablesOfRSGroupRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string r_s_group_name = 1;
+/**
+ * required string r_s_group_name = 1;
+ */
+boolean hasRSGroupName();
+/**
+ * required string r_s_group_name = 1;
+ */
+java.lang.String getRSGroupName();
+/**
+ * required string r_s_group_name = 1;
+ */
+com.google.protobuf.ByteString
+getRSGroupNameBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.ListTablesOfRSGroupRequest}
+   */
+  public static final class ListTablesOfRSGroupRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements ListTablesOfRSGroupRequestOrBuilder {
+// Use ListTablesOfRSGroupRequest.newBuilder() to construct.
+private 
ListTablesOfRSGroupRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private ListTablesOfRSGroupRequest(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final ListTablesOfRSGroupRequest defaultInstance;
+public static ListTablesOfRSGroupRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public ListTablesOfRSGroupRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private ListTablesOfRSGroupRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  rSGroupName_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupRequest_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.class,
 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.Builder.class);
+}
+
+public static com.google.protobuf.Parser 
PARSER =
+new com.google.protobuf.AbstractParser() {
+  public ListTablesOfRSGroupRequest parsePartialFrom(
+  

[19/23] hbase git commit: HBASE-17496 RSGroup shell commands:get_server_rsgroup don't work and commands display an incorrect result size (Guangxu Cheng)

2017-07-19 Thread apurtell
HBASE-17496 RSGroup shell commands:get_server_rsgroup don't work and commands 
display an incorrect result size (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7bd973c5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7bd973c5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7bd973c5

Branch: refs/heads/HBASE-15631-branch-1
Commit: 7bd973c5c3c8a3a0e729b2c545fc78c67894a478
Parents: 582977f
Author: Andrew Purtell 
Authored: Wed Jul 5 18:23:19 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb| 5 ++---
 hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb | 5 ++---
 hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb  | 3 +--
 hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb  | 3 +--
 4 files changed, 6 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7bd973c5/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb 
b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
index 122020a..a5b41af 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
@@ -33,12 +33,11 @@ EOF
   end
 
   def command(group_name)
-now = Time.now
-formatter.header(['RSGROUP '.concat(group_name)])
+formatter.header(['GROUP INFORMATION'])
 rsgroup_admin.get_rsgroup(group_name) do |s|
   formatter.row([s])
 end
-formatter.footer(now)
+formatter.footer()
   end
 end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bd973c5/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb 
b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
index dddf080..fd2ccc7 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
@@ -33,10 +33,9 @@ EOF
   end
 
   def command(server)
-now = Time.now
-group_name = rsgroup_admin.getRSGroupOfServer(server).getName
+group_name = rsgroup_admin.get_rsgroup_of_server(server).getName
 formatter.row([group_name])
-formatter.footer(now, 1)
+formatter.footer(1)
   end
 end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bd973c5/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb 
b/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
index 6939c12..9684687 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
@@ -33,11 +33,10 @@ EOF
   end
 
   def command(table)
-now = Time.now
 group_name =
 rsgroup_admin.get_rsgroup_of_table(table).getName
 formatter.row([group_name])
-formatter.footer(now, 1)
+formatter.footer(1)
   end
 end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bd973c5/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb 
b/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
index 5ab923a..393797d 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
@@ -35,7 +35,6 @@ EOF
   end
 
   def command(regex = '.*')
-now = Time.now
 formatter.header(['GROUPS'])
 
 regex = /#{regex}/ unless regex.is_a?(Regexp)
@@ -44,7 +43,7 @@ EOF
   formatter.row([group])
 end
 
-formatter.footer(now, list.size)
+formatter.footer(list.size)
   end
 end
   end



[16/23] hbase git commit: HBASE-15858 Some region server group shell commands don't work

2017-07-19 Thread apurtell
HBASE-15858 Some region server group shell commands don't work


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1705cb0f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1705cb0f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1705cb0f

Branch: refs/heads/HBASE-15631-branch-1
Commit: 1705cb0f0a05d7b79021cf18e16c5bd74b3cd427
Parents: 5a12a16
Author: Andrew Purtell 
Authored: Wed Jul 5 15:37:47 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../src/main/ruby/shell/commands/get_server_rsgroup.rb   |  2 +-
 hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb| 11 +++
 2 files changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1705cb0f/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb 
b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
index 322f6bb..a689a7c 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
@@ -31,7 +31,7 @@ EOF
 
   def command(server)
 now = Time.now
-group_name = rsgroup_admin.getGroupOfServer(server).getName
+group_name = rsgroup_admin.getRSGroupOfServer(server).getName
 formatter.row([group_name])
 formatter.footer(now, 1)
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/1705cb0f/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
--
diff --git a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb 
b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
index d892775..1040ed8 100644
--- a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
+++ b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
@@ -49,12 +49,15 @@ module Hbase
   assert_not_nil(group)
   assert_equal(0, group.getServers.count)
 
-  hostport =
-  
@rsgroup_admin.getRSGroupInfo('default').getServers.iterator.next.toString
+  hostport = 
@rsgroup_admin.getRSGroupInfo('default').getServers.iterator.next
+  @shell.command('get_rsgroup', 'default')
+  hostPortStr = hostport.toString
+  @shell.command('get_server_rsgroup', [hostPortStr])
   @shell.command('move_rsgroup_servers',
  group_name,
- [hostport])
+ [hostPortStr])
   assert_equal(1, 
@rsgroup_admin.getRSGroupInfo(group_name).getServers.count)
+  assert_equal(group_name, 
@rsgroup_admin.getRSGroupOfServer(hostport).getName)
 
   @shell.command('move_rsgroup_tables',
  group_name,
@@ -65,7 +68,7 @@ module Hbase
   @hbase.rsgroup_admin(@formatter).get_rsgroup(group_name) do |line|
 case count
 when 1
-  assert_equal(hostport, line)
+  assert_equal(hostPortStr, line)
 when 3
   assert_equal(table_name, line)
 end
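The rename above (getGroupOfServer to getRSGroupOfServer) is the Java admin call the shell command delegates to; a minimal hypothetical usage sketch (host and port are illustrative):

import java.io.IOException;

import com.google.common.net.HostAndPort;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

public class GetServerRSGroupSketch {
  // Looks up which rsgroup owns the region server running at host:port.
  static String groupOf(RSGroupAdmin admin, String host, int port) throws IOException {
    RSGroupInfo info = admin.getRSGroupOfServer(HostAndPort.fromParts(host, port));
    return info == null ? null : info.getName();
  }
}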



[04/23] hbase git commit: HBASE-17772 IntegrationTestRSGroup won't run

2017-07-19 Thread apurtell
HBASE-17772 IntegrationTestRSGroup won't run


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f4e6ae6e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f4e6ae6e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f4e6ae6e

Branch: refs/heads/HBASE-15631-branch-1
Commit: f4e6ae6e878c95343a4c31aeab01fd0f9dca92d9
Parents: 7bd973c
Author: Andrew Purtell 
Authored: Wed Jul 5 18:24:47 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Jul 19 15:32:37 2017 -0700

--
 .../org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java   | 9 -
 1 file changed, 8 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4e6ae6e/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 5831696..52f576d 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -150,7 +150,14 @@ public abstract class TestRSGroupsBase {
 ClusterStatus status = 
TEST_UTIL.getHBaseClusterInterface().getClusterStatus();
 for(ServerName serverName : status.getServers()) {
   for(RegionLoad rl : 
status.getLoad(serverName).getRegionsLoad().values()) {
-TableName tableName = HRegionInfo.getTable(rl.getName());
+TableName tableName = null;
+try {
+  tableName = HRegionInfo.getTable(rl.getName());
+} catch (IllegalArgumentException e) {
+  LOG.warn("Failed parse a table name from regionname=" +
+  Bytes.toStringBinary(rl.getName()));
+  continue;
+}
 if(!map.containsKey(tableName)) {
   map.put(tableName, new TreeMap());
 }



hbase-site git commit: INFRA-10751 Empty commit

2017-07-19 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 9eba7fcf3 -> 95b5168b3


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/95b5168b
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/95b5168b
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/95b5168b

Branch: refs/heads/asf-site
Commit: 95b5168b34a105290846048f8433cccf045d20ba
Parents: 9eba7fc
Author: jenkins 
Authored: Wed Jul 19 22:07:20 2017 +
Committer: jenkins 
Committed: Wed Jul 19 22:07:20 2017 +

--

--




[26/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 504e470..38667c0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -2866,5375 +2866,5371 @@
 2858checkResources();
 2859
startRegionOperation(Operation.DELETE);
 2860try {
-2861  delete.getRow();
-2862  // All edits for the given row 
(across all column families) must happen atomically.
-2863  doBatchMutate(delete);
-2864} finally {
-2865  
closeRegionOperation(Operation.DELETE);
-2866}
-2867  }
-2868
-2869  /**
-2870   * Row needed by below method.
-2871   */
-2872  private static final byte [] 
FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
-2873
-2874  /**
-2875   * This is used only by unit tests. 
Not required to be a public API.
-2876   * @param familyMap map of family to 
edits for the given family.
-2877   * @throws IOException
-2878   */
-2879  void delete(NavigableMapbyte[], 
ListCell familyMap,
-2880  Durability durability) throws 
IOException {
-2881Delete delete = new 
Delete(FOR_UNIT_TESTS_ONLY);
-2882
delete.setFamilyCellMap(familyMap);
-2883delete.setDurability(durability);
-2884doBatchMutate(delete);
-2885  }
-2886
-2887  @Override
-2888  public void 
prepareDeleteTimestamps(Mutation mutation, Mapbyte[], ListCell 
familyMap,
-2889  byte[] byteNow) throws IOException 
{
-2890for (Map.Entrybyte[], 
ListCell e : familyMap.entrySet()) {
-2891
-2892  byte[] family = e.getKey();
-2893  ListCell cells = 
e.getValue();
-2894  assert cells instanceof 
RandomAccess;
-2895
-2896  Mapbyte[], Integer kvCount 
= new TreeMap(Bytes.BYTES_COMPARATOR);
-2897  int listSize = cells.size();
-2898  for (int i=0; i  listSize; 
i++) {
-2899Cell cell = cells.get(i);
-2900//  Check if time is LATEST, 
change to time of most recent addition if so
-2901//  This is expensive.
-2902if (cell.getTimestamp() == 
HConstants.LATEST_TIMESTAMP  CellUtil.isDeleteType(cell)) {
-2903  byte[] qual = 
CellUtil.cloneQualifier(cell);
-2904  if (qual == null) qual = 
HConstants.EMPTY_BYTE_ARRAY;
-2905
-2906  Integer count = 
kvCount.get(qual);
-2907  if (count == null) {
-2908kvCount.put(qual, 1);
-2909  } else {
-2910kvCount.put(qual, count + 
1);
-2911  }
-2912  count = kvCount.get(qual);
-2913
-2914  Get get = new 
Get(CellUtil.cloneRow(cell));
-2915  get.setMaxVersions(count);
-2916  get.addColumn(family, qual);
-2917  if (coprocessorHost != null) 
{
-2918if 
(!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell,
-2919byteNow, get)) {
-2920  
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2921}
-2922  } else {
-2923
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2924  }
-2925} else {
-2926  
CellUtil.updateLatestStamp(cell, byteNow, 0);
-2927}
-2928  }
-2929}
-2930  }
-2931
-2932  void 
updateDeleteLatestVersionTimeStamp(Cell cell, Get get, int count, byte[] 
byteNow)
-2933  throws IOException {
-2934ListCell result = get(get, 
false);
-2935
-2936if (result.size()  count) {
-2937  // Nothing to delete
-2938  CellUtil.updateLatestStamp(cell, 
byteNow, 0);
-2939  return;
-2940}
-2941if (result.size()  count) {
-2942  throw new 
RuntimeException("Unexpected size: " + result.size());
-2943}
-2944Cell getCell = result.get(count - 
1);
-2945CellUtil.setTimestamp(cell, 
getCell.getTimestamp());
-2946  }
-2947
-2948  @Override
-2949  public void put(Put put) throws 
IOException {
-2950checkReadOnly();
-2951
-2952// Do a rough check that we have 
resources to accept a write.  The check is
-2953// 'rough' in that between the 
resource check and the call to obtain a
-2954// read lock, resources may run out. 
 For now, the thought is that this
-2955// will be extremely rare; we'll 
deal with it when it happens.
-2956checkResources();
-2957
startRegionOperation(Operation.PUT);
-2958try {
-2959  // All edits for the given row 
(across all column families) must happen atomically.
-2960  doBatchMutate(put);
-2961} finally {
-2962  
closeRegionOperation(Operation.PUT);
-2963}
-2964  }
-2965
-2966  /**
-2967   * Struct-like class that tracks the 
progress of a batch operation,
-2968   * 

[38/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
index c7ef9c6..b9ac9a6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
@@ -103,394 +103,380 @@
 095  }
 096
 097  /**
-098   * Adds / subs an up to 50% jitter to a 
pause time. Minimum is 1.
-099   * @param pause the expected pause.
-100   * @param jitter the jitter ratio, 
between 0 and 1, exclusive.
+098   * @param conn The connection for which 
to replace the generator.
+099   * @param cnm Replaces the nonce 
generator used, for testing.
+100   * @return old nonce generator.
 101   */
-102  public static long addJitter(final long 
pause, final float jitter) {
-103float lag = pause * 
(ThreadLocalRandom.current().nextFloat() - 0.5f) * jitter;
-104long newPause = pause + (long) lag;
-105if (newPause = 0) {
-106  return 1;
-107}
-108return newPause;
-109  }
-110
-111  /**
-112   * @param conn The connection for which 
to replace the generator.
-113   * @param cnm Replaces the nonce 
generator used, for testing.
-114   * @return old nonce generator.
-115   */
-116  public static NonceGenerator 
injectNonceGeneratorForTesting(ClusterConnection conn,
-117  NonceGenerator cnm) {
-118return 
ConnectionImplementation.injectNonceGeneratorForTesting(conn, cnm);
-119  }
-120
-121  /**
-122   * Changes the configuration to set the 
number of retries needed when using Connection internally,
-123   * e.g. for updating catalog tables, 
etc. Call this method before we create any Connections.
-124   * @param c The Configuration instance 
to set the retries into.
-125   * @param log Used to log what we set 
in here.
-126   */
-127  public static void 
setServerSideHConnectionRetriesConfig(final Configuration c, final String sn,
-128  final Log log) {
-129// TODO: Fix this. Not all 
connections from server side should have 10 times the retries.
-130int hcRetries = 
c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-131  
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-132// Go big. Multiply by 10. If we 
can't get to meta after this many retries
-133// then something seriously wrong.
-134int serversideMultiplier = 
c.getInt("hbase.client.serverside.retries.multiplier", 10);
-135int retries = hcRetries * 
serversideMultiplier;
-136
c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
-137log.info(sn + " server-side 
Connection retries=" + retries);
-138  }
-139
-140  /**
-141   * Creates a short-circuit connection 
that can bypass the RPC layer (serialization,
-142   * deserialization, networking, etc..) 
when talking to a local server.
-143   * @param conf the current 
configuration
-144   * @param pool the thread pool to use 
for batch operations
-145   * @param user the user the connection 
is for
-146   * @param serverName the local server 
name
-147   * @param admin the admin interface of 
the local server
-148   * @param client the client interface 
of the local server
-149   * @return an short-circuit 
connection.
-150   * @throws IOException if IO failure 
occurred
-151   */
-152  public static ClusterConnection 
createShortCircuitConnection(final Configuration conf,
-153  ExecutorService pool, User user, 
final ServerName serverName,
-154  final 
AdminService.BlockingInterface admin, final ClientService.BlockingInterface 
client)
-155  throws IOException {
-156if (user == null) {
-157  user = 
UserProvider.instantiate(conf).getCurrent();
-158}
-159return new 
ConnectionImplementation(conf, pool, user) {
-160  @Override
-161  public 
AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException {
-162return serverName.equals(sn) ? 
admin : super.getAdmin(sn);
-163  }
-164
-165  @Override
-166  public 
ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
-167return serverName.equals(sn) ? 
client : super.getClient(sn);
-168  }
-169
-170  @Override
-171  public MasterKeepAliveConnection 
getKeepAliveMasterService()
-172  throws 
MasterNotRunningException {
-173if (!(client instanceof 
MasterService.BlockingInterface)) {
-174  return 
super.getKeepAliveMasterService();
-175} else {
-176  return new 
ShortCircuitMasterConnection((MasterService.BlockingInterface) client);
-177}
-178  }
-179};
-180  }
-181
-182  /**
-183   * Setup the connection class, so that 
it will not depend on master being online. Used for testing
-184   * @param conf configuration to set
-185   */
-186  @VisibleForTesting
-187  

[07/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
index e1fbce4..873e17f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
@@ -1089,497 +1089,498 @@
 1081}
 1082  }
 1083  MapString, AtomicLong 
actualReadTableLatency = regionSink.getReadLatencyMap();
-1084  for (String tableName : 
this.configuredReadTableTimeouts.keySet()) {
-1085if 
(actualReadTableLatency.containsKey(tableName)) {
-1086  Long actual = 
actualReadTableLatency.get(tableName).longValue();
-1087  Long configured = 
this.configuredReadTableTimeouts.get(tableName);
-1088  LOG.info("Read operation 
for " + tableName + " took " + actual +
-1089" ms. The configured 
read timeout was " + configured + " ms.");
-1090  if (actual  
configured) {
-1091LOG.error("Read 
operation for " + tableName + " exceeded the configured read timeout.");
-1092  }
-1093} else {
-1094  LOG.error("Read operation 
for " + tableName + " failed!");
-1095}
-1096  }
-1097  if (this.writeSniffing) {
-1098String writeTableStringName 
= this.writeTableName.getNameAsString();
-1099long actualWriteLatency = 
regionSink.getWriteLatency().longValue();
-1100LOG.info("Write operation 
for " + writeTableStringName + " took " + actualWriteLatency + " ms. The 
configured write timeout was " +
-1101  
this.configuredWriteTableTimeout + " ms.");
-1102// Check that the writeTable 
write operation latency does not exceed the configured timeout.
-1103if (actualWriteLatency  
this.configuredWriteTableTimeout) {
-1104  LOG.error("Write operation 
for " + writeTableStringName + " exceeded the configured write timeout.");
-1105}
-1106  }
-1107} catch (Exception e) {
-1108  LOG.error("Run regionMonitor 
failed", e);
-1109  this.errorCode = 
ERROR_EXIT_CODE;
-1110}
-  }
-1112  this.done = true;
-1113}
-1114
-1115private String[] 
generateMonitorTables(String[] monitorTargets) throws IOException {
-1116  String[] returnTables = null;
-1117
-1118  if (this.useRegExp) {
-1119Pattern pattern = null;
-1120HTableDescriptor[] tds = null;
-1121SetString tmpTables = 
new TreeSet();
-1122try {
-1123  if (LOG.isDebugEnabled()) {
-1124
LOG.debug(String.format("reading list of tables"));
-1125  }
-1126  tds = 
this.admin.listTables(pattern);
-1127  if (tds == null) {
-1128tds = new 
HTableDescriptor[0];
-1129  }
-1130  for (String monitorTarget : 
monitorTargets) {
-1131pattern = 
Pattern.compile(monitorTarget);
-1132for (HTableDescriptor td : 
tds) {
-1133  if 
(pattern.matcher(td.getNameAsString()).matches()) {
-1134
tmpTables.add(td.getNameAsString());
-1135  }
-1136}
-1137  }
-1138} catch (IOException e) {
-1139  LOG.error("Communicate with 
admin failed", e);
-1140  throw e;
-1141}
-1142
-1143if (tmpTables.size()  0) {
-1144  returnTables = 
tmpTables.toArray(new String[tmpTables.size()]);
-1145} else {
-1146  String msg = "No HTable found, 
tablePattern:" + Arrays.toString(monitorTargets);
-1147  LOG.error(msg);
-1148  this.errorCode = 
INIT_ERROR_EXIT_CODE;
-1149  throw new 
TableNotFoundException(msg);
-1150}
-1151  } else {
-1152returnTables = monitorTargets;
-1153  }
-1154
-1155  return returnTables;
-1156}
-1157
-1158/*
-1159 * canary entry point to monitor all 
the tables.
-1160 */
-1161private 
ListFutureVoid sniff(TaskType taskType, RegionStdOutSink 
regionSink) throws Exception {
-1162  if (LOG.isDebugEnabled()) {
-1163LOG.debug(String.format("reading 
list of tables"));
-1164  }
-1165  ListFutureVoid 
taskFutures = new LinkedList();
-1166  for (HTableDescriptor table : 
admin.listTables()) {
-1167if 
(admin.isTableEnabled(table.getTableName())
-1168 
(!table.getTableName().equals(writeTableName))) {
-1169  AtomicLong readLatency = 
regionSink.initializeAndGetReadLatencyForTable(table.getNameAsString());
-1170  
taskFutures.addAll(Canary.sniff(admin, sink, table, executor, taskType, 
this.rawScanEnabled, readLatency));
-1171}
-1172  }
-1173  return 

[31/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
index 53cae9a..64d0880 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.html
@@ -1034,289 +1034,283 @@
 1026  protected ListLoadQueueItem 
tryAtomicRegionLoad(ClientServiceCallablebyte[] serviceCallable,
 1027  final TableName tableName, final 
byte[] first, final CollectionLoadQueueItem lqis)
 1028  throws IOException {
-1029final ListPairbyte[], 
String famPaths = new ArrayList(lqis.size());
-1030for (LoadQueueItem lqi : lqis) {
-1031  if 
(!unmatchedFamilies.contains(Bytes.toString(lqi.family))) {
-1032
famPaths.add(Pair.newPair(lqi.family, lqi.hfilePath.toString()));
-1033  }
-1034}
-1035try {
-1036  ListLoadQueueItem toRetry 
= new ArrayList();
-1037  Configuration conf = getConf();
-1038  byte[] region = 
RpcRetryingCallerFactory.instantiate(conf,
-1039  null).byte[] 
newCaller()
-1040  
.callWithRetries(serviceCallable, Integer.MAX_VALUE);
-1041  if (region == null) {
-1042LOG.warn("Attempt to bulk load 
region containing "
-1043+ 
Bytes.toStringBinary(first) + " into table "
-1044+ tableName  + " with files 
" + lqis
-1045+ " failed.  This is 
recoverable and they will be retried.");
-1046toRetry.addAll(lqis); // return 
lqi's to retry
-1047  }
-1048  // success
-1049  return toRetry;
-1050} catch (IOException e) {
-1051  LOG.error("Encountered 
unrecoverable error from region server, additional details: "
-1052  + 
serviceCallable.getExceptionMessageAdditionalDetail(), e);
-1053  throw e;
-1054}
-1055  }
-1056
-1057  private final String 
toString(ListPairbyte[], String list) {
-1058StringBuffer sb = new 
StringBuffer();
-1059sb.append("[");
-1060if(list != null){
-1061  for(Pairbyte[], String 
pair: list) {
-1062sb.append("{");
-1063
sb.append(Bytes.toStringBinary(pair.getFirst()));
-1064sb.append(",");
-1065sb.append(pair.getSecond());
-1066sb.append("}");
-1067  }
-1068}
-1069sb.append("]");
-1070return sb.toString();
-1071  }
-1072  private boolean 
isSecureBulkLoadEndpointAvailable() {
-1073String classes = 
getConf().get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, "");
-1074return 
classes.contains("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
-1075  }
-1076
-1077  /**
-1078   * Split a storefile into a top and 
bottom half, maintaining
-1079   * the metadata, recreating bloom 
filters, etc.
-1080   */
-1081  static void splitStoreFile(
-1082  Configuration conf, Path inFile,
-1083  HColumnDescriptor familyDesc, 
byte[] splitKey,
-1084  Path bottomOut, Path topOut) 
throws IOException {
-1085// Open reader with no block cache, 
and not in-memory
-1086Reference topReference = 
Reference.createTopReference(splitKey);
-1087Reference bottomReference = 
Reference.createBottomReference(splitKey);
-1088
-1089copyHFileHalf(conf, inFile, topOut, 
topReference, familyDesc);
-1090copyHFileHalf(conf, inFile, 
bottomOut, bottomReference, familyDesc);
-1091  }
-1092
-1093  /**
-1094   * Copy half of an HFile into a new 
HFile.
-1095   */
-1096  private static void copyHFileHalf(
-1097  Configuration conf, Path inFile, 
Path outFile, Reference reference,
-1098  HColumnDescriptor 
familyDescriptor)
-1099  throws IOException {
-1100FileSystem fs = 
inFile.getFileSystem(conf);
-1101CacheConfig cacheConf = new 
CacheConfig(conf);
-1102HalfStoreFileReader halfReader = 
null;
-1103StoreFileWriter halfWriter = null;
-1104try {
-1105  halfReader = new 
HalfStoreFileReader(fs, inFile, cacheConf, reference, true,
-1106  new AtomicInteger(0), true, 
conf);
-1107  Mapbyte[], byte[] fileInfo 
= halfReader.loadFileInfo();
-1108
-1109  int blocksize = 
familyDescriptor.getBlocksize();
-1110  Algorithm compression = 
familyDescriptor.getCompressionType();
-  BloomType bloomFilterType = 
familyDescriptor.getBloomFilterType();
-1112  HFileContext hFileContext = new 
HFileContextBuilder()
-1113  
.withCompression(compression)
-1114  
.withChecksumType(HStore.getChecksumType(conf))
-1115  
.withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
-1116  
.withBlockSize(blocksize)
-1117  

[39/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
index c7ef9c6..b9ac9a6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.MasterlessConnection.html
@@ -103,394 +103,380 @@
 095  }
 096
 097  /**
-098   * Adds / subs an up to 50% jitter to a 
pause time. Minimum is 1.
-099   * @param pause the expected pause.
-100   * @param jitter the jitter ratio, 
between 0 and 1, exclusive.
+098   * @param conn The connection for which 
to replace the generator.
+099   * @param cnm Replaces the nonce 
generator used, for testing.
+100   * @return old nonce generator.
 101   */
-102  public static long addJitter(final long 
pause, final float jitter) {
-103float lag = pause * 
(ThreadLocalRandom.current().nextFloat() - 0.5f) * jitter;
-104long newPause = pause + (long) lag;
-105if (newPause = 0) {
-106  return 1;
-107}
-108return newPause;
-109  }
-110
-111  /**
-112   * @param conn The connection for which 
to replace the generator.
-113   * @param cnm Replaces the nonce 
generator used, for testing.
-114   * @return old nonce generator.
-115   */
-116  public static NonceGenerator 
injectNonceGeneratorForTesting(ClusterConnection conn,
-117  NonceGenerator cnm) {
-118return 
ConnectionImplementation.injectNonceGeneratorForTesting(conn, cnm);
-119  }
-120
-121  /**
-122   * Changes the configuration to set the 
number of retries needed when using Connection internally,
-123   * e.g. for updating catalog tables, 
etc. Call this method before we create any Connections.
-124   * @param c The Configuration instance 
to set the retries into.
-125   * @param log Used to log what we set 
in here.
-126   */
-127  public static void 
setServerSideHConnectionRetriesConfig(final Configuration c, final String sn,
-128  final Log log) {
-129// TODO: Fix this. Not all 
connections from server side should have 10 times the retries.
-130int hcRetries = 
c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-131  
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-132// Go big. Multiply by 10. If we 
can't get to meta after this many retries
-133// then something seriously wrong.
-134int serversideMultiplier = 
c.getInt("hbase.client.serverside.retries.multiplier", 10);
-135int retries = hcRetries * 
serversideMultiplier;
-136
c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
-137log.info(sn + " server-side 
Connection retries=" + retries);
-138  }
-139
-140  /**
-141   * Creates a short-circuit connection 
that can bypass the RPC layer (serialization,
-142   * deserialization, networking, etc..) 
when talking to a local server.
-143   * @param conf the current 
configuration
-144   * @param pool the thread pool to use 
for batch operations
-145   * @param user the user the connection 
is for
-146   * @param serverName the local server 
name
-147   * @param admin the admin interface of 
the local server
-148   * @param client the client interface 
of the local server
-149   * @return an short-circuit 
connection.
-150   * @throws IOException if IO failure 
occurred
-151   */
-152  public static ClusterConnection 
createShortCircuitConnection(final Configuration conf,
-153  ExecutorService pool, User user, 
final ServerName serverName,
-154  final 
AdminService.BlockingInterface admin, final ClientService.BlockingInterface 
client)
-155  throws IOException {
-156if (user == null) {
-157  user = 
UserProvider.instantiate(conf).getCurrent();
-158}
-159return new 
ConnectionImplementation(conf, pool, user) {
-160  @Override
-161  public 
AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException {
-162return serverName.equals(sn) ? 
admin : super.getAdmin(sn);
-163  }
-164
-165  @Override
-166  public 
ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
-167return serverName.equals(sn) ? 
client : super.getClient(sn);
-168  }
-169
-170  @Override
-171  public MasterKeepAliveConnection 
getKeepAliveMasterService()
-172  throws 
MasterNotRunningException {
-173if (!(client instanceof 
MasterService.BlockingInterface)) {
-174  return 
super.getKeepAliveMasterService();
-175} else {
-176  return new 
ShortCircuitMasterConnection((MasterService.BlockingInterface) client);
-177}
-178  }
-179};
-180  }
-181
-182  /**
-183   * Setup the connection class, so that 
it will not depend on master being 

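For context on the hunk above: the removed addJitter() helper keeps a retry pause within roughly +/-50% (scaled by the jitter ratio) and never lets it drop below 1. A JDK-only sketch that mirrors the removed lines follows; the class name JitterSketch is illustrative.

import java.util.concurrent.ThreadLocalRandom;

public class JitterSketch {
  /** Adds or subtracts up to (jitter * 100)% of the pause; the result is at least 1. */
  static long addJitter(long pause, float jitter) {
    float lag = pause * (ThreadLocalRandom.current().nextFloat() - 0.5f) * jitter;
    long newPause = pause + (long) lag;
    return newPause <= 0 ? 1 : newPause;
  }

  public static void main(String[] args) {
    // A 100 ms base pause with a 0.5 jitter ratio lands somewhere around 75-125 ms.
    System.out.println(addJitter(100, 0.5f));
  }
}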
[16/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
index e1fbce4..873e17f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
@@ -1089,497 +1089,498 @@
 1081}
 1082  }
 1083  Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-1084  for (String tableName : 
this.configuredReadTableTimeouts.keySet()) {
-1085if 
(actualReadTableLatency.containsKey(tableName)) {
-1086  Long actual = 
actualReadTableLatency.get(tableName).longValue();
-1087  Long configured = 
this.configuredReadTableTimeouts.get(tableName);
-1088  LOG.info("Read operation 
for " + tableName + " took " + actual +
-1089" ms. The configured 
read timeout was " + configured + " ms.");
-1090  if (actual > configured) {
-1091LOG.error("Read 
operation for " + tableName + " exceeded the configured read timeout.");
-1092  }
-1093} else {
-1094  LOG.error("Read operation 
for " + tableName + " failed!");
-1095}
-1096  }
-1097  if (this.writeSniffing) {
-1098String writeTableStringName 
= this.writeTableName.getNameAsString();
-1099long actualWriteLatency = 
regionSink.getWriteLatency().longValue();
-1100LOG.info("Write operation 
for " + writeTableStringName + " took " + actualWriteLatency + " ms. The 
configured write timeout was " +
-1101  
this.configuredWriteTableTimeout + " ms.");
-1102// Check that the writeTable 
write operation latency does not exceed the configured timeout.
-1103if (actualWriteLatency > this.configuredWriteTableTimeout) {
-1104  LOG.error("Write operation 
for " + writeTableStringName + " exceeded the configured write timeout.");
-1105}
-1106  }
-1107} catch (Exception e) {
-1108  LOG.error("Run regionMonitor 
failed", e);
-1109  this.errorCode = 
ERROR_EXIT_CODE;
-1110}
-1111  }
-1112  this.done = true;
-1113}
-1114
-1115private String[] 
generateMonitorTables(String[] monitorTargets) throws IOException {
-1116  String[] returnTables = null;
-1117
-1118  if (this.useRegExp) {
-1119Pattern pattern = null;
-1120HTableDescriptor[] tds = null;
-1121Set<String> tmpTables = new TreeSet<>();
-1122try {
-1123  if (LOG.isDebugEnabled()) {
-1124
LOG.debug(String.format("reading list of tables"));
-1125  }
-1126  tds = 
this.admin.listTables(pattern);
-1127  if (tds == null) {
-1128tds = new 
HTableDescriptor[0];
-1129  }
-1130  for (String monitorTarget : 
monitorTargets) {
-1131pattern = 
Pattern.compile(monitorTarget);
-1132for (HTableDescriptor td : 
tds) {
-1133  if 
(pattern.matcher(td.getNameAsString()).matches()) {
-1134
tmpTables.add(td.getNameAsString());
-1135  }
-1136}
-1137  }
-1138} catch (IOException e) {
-1139  LOG.error("Communicate with 
admin failed", e);
-1140  throw e;
-1141}
-1142
-1143if (tmpTables.size() > 0) {
-1144  returnTables = 
tmpTables.toArray(new String[tmpTables.size()]);
-1145} else {
-1146  String msg = "No HTable found, 
tablePattern:" + Arrays.toString(monitorTargets);
-1147  LOG.error(msg);
-1148  this.errorCode = 
INIT_ERROR_EXIT_CODE;
-1149  throw new 
TableNotFoundException(msg);
-1150}
-1151  } else {
-1152returnTables = monitorTargets;
-1153  }
-1154
-1155  return returnTables;
-1156}
-1157
-1158/*
-1159 * canary entry point to monitor all 
the tables.
-1160 */
-1161private List<Future<Void>> sniff(TaskType taskType, RegionStdOutSink regionSink) throws Exception {
-1162  if (LOG.isDebugEnabled()) {
-1163LOG.debug(String.format("reading 
list of tables"));
-1164  }
-1165  List<Future<Void>> taskFutures = new LinkedList<>();
-1166  for (HTableDescriptor table : 
admin.listTables()) {
-1167if (admin.isTableEnabled(table.getTableName())
-1168    && (!table.getTableName().equals(writeTableName))) {
-1169  AtomicLong readLatency = 
regionSink.initializeAndGetReadLatencyForTable(table.getNameAsString());
-1170  
taskFutures.addAll(Canary.sniff(admin, sink, table, executor, taskType, 
this.rawScanEnabled, readLatency));
-1171}

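The RegionMonitor hunk above compares each table's observed read latency against its configured timeout and logs an error when a table is over budget or missing from the results. A simplified, self-contained version of that check follows, with plain longs and java.util.logging in place of the AtomicLong sink and commons-logging; the names are illustrative.

import java.util.HashMap;
import java.util.Map;
import java.util.logging.Logger;

public class ReadTimeoutCheckSketch {
  private static final Logger LOG = Logger.getLogger("canary-sketch");

  /** Mirrors the configuredReadTableTimeouts loop: flag missing or slow tables. */
  static void checkReadLatencies(Map<String, Long> configuredTimeoutsMs,
                                 Map<String, Long> actualLatenciesMs) {
    for (Map.Entry<String, Long> e : configuredTimeoutsMs.entrySet()) {
      Long actual = actualLatenciesMs.get(e.getKey());
      if (actual == null) {
        LOG.severe("Read operation for " + e.getKey() + " failed!");
      } else if (actual > e.getValue()) {
        LOG.severe("Read operation for " + e.getKey() + " exceeded the configured read timeout ("
            + actual + " > " + e.getValue() + " ms).");
      }
    }
  }

  public static void main(String[] args) {
    Map<String, Long> configured = new HashMap<>();
    configured.put("usertable", 500L);
    Map<String, Long> actual = new HashMap<>();
    actual.put("usertable", 750L);
    checkReadLatencies(configured, actual); // logs the timeout violation for "usertable"
  }
}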
[37/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.html
index 7b499bf..f9cda22 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.html
@@ -59,154 +59,148 @@
 051  protected final TableName tableName;
 052  protected final byte[] row;
 053  protected final int replicaId;
-054  protected final static int 
MIN_WAIT_DEAD_SERVER = 1;
-055
-056  public 
RegionAdminServiceCallable(ClusterConnection connection,
-057  RpcControllerFactory 
rpcControllerFactory, TableName tableName, byte[] row) {
-058this(connection, 
rpcControllerFactory, null, tableName, row);
-059  }
-060
-061  public 
RegionAdminServiceCallable(ClusterConnection connection,
-062  RpcControllerFactory 
rpcControllerFactory, HRegionLocation location,
-063  TableName tableName, byte[] row) 
{
-064this(connection, 
rpcControllerFactory, location,
-065  tableName, row, 
RegionReplicaUtil.DEFAULT_REPLICA_ID);
-066  }
-067
-068  public 
RegionAdminServiceCallable(ClusterConnection connection,
-069  RpcControllerFactory 
rpcControllerFactory, HRegionLocation location,
-070  TableName tableName, byte[] row, 
int replicaId) {
-071this.connection = connection;
-072this.rpcControllerFactory = 
rpcControllerFactory;
-073this.location = location;
-074this.tableName = tableName;
-075this.row = row;
-076this.replicaId = replicaId;
-077  }
-078
-079  @Override
-080  public void prepare(boolean reload) 
throws IOException {
-081if (Thread.interrupted()) {
-082  throw new 
InterruptedIOException();
-083}
-084if (reload || location == null) {
-085  location = getLocation(!reload);
-086}
-087if (location == null) {
-088  // With this exception, there will 
be a retry.
-089  throw new 
HBaseIOException(getExceptionMessage());
-090}
-091
this.setStub(connection.getAdmin(location.getServerName()));
-092  }
-093
-094  protected void 
setStub(AdminService.BlockingInterface stub) {
-095this.stub = stub;
-096  }
-097
-098  public HRegionLocation 
getLocation(boolean useCache) throws IOException {
-099RegionLocations rl = 
getRegionLocations(connection, tableName, row, useCache, replicaId);
-100if (rl == null) {
-101  throw new 
HBaseIOException(getExceptionMessage());
-102}
-103HRegionLocation location = 
rl.getRegionLocation(replicaId);
-104if (location == null) {
-105  throw new 
HBaseIOException(getExceptionMessage());
-106}
-107
-108return location;
-109  }
-110
-111  @Override
-112  public void throwable(Throwable t, 
boolean retrying) {
-113if (location != null) {
-114  
connection.updateCachedLocations(tableName, 
location.getRegionInfo().getRegionName(), row,
-115  t, location.getServerName());
-116}
-117  }
-118
-119  /**
-120   * @return {@link Connection} instance 
used by this Callable.
-121   */
-122  Connection getConnection() {
-123return this.connection;
-124  }
-125
-126  //subclasses can override this.
-127  protected String getExceptionMessage() 
{
-128return "There is no location" + " 
table=" + tableName
-129+ " ,replica=" + replicaId + ", 
row=" + Bytes.toStringBinary(row);
-130  }
-131
-132  @Override
-133  public String 
getExceptionMessageAdditionalDetail() {
-134return null;
-135  }
-136
-137  @Override
-138  public long sleep(long pause, int 
tries) {
-139long sleep = 
ConnectionUtils.getPauseTime(pause, tries);
-140if (sleep < MIN_WAIT_DEAD_SERVER
-141    && (location == null || connection.isDeadServer(location.getServerName()))) {
-142  sleep = 
ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);
-143}
-144return sleep;
-145  }
-146
-147  public static RegionLocations 
getRegionLocations(
-148  ClusterConnection connection, 
TableName tableName, byte[] row,
-149  boolean useCache, int replicaId)
-150  throws RetriesExhaustedException, 
DoNotRetryIOException, InterruptedIOException {
-151RegionLocations rl;
-152try {
-153  rl = 
connection.locateRegion(tableName, row, useCache, true, replicaId);
-154} catch (DoNotRetryIOException e) {
-155  throw e;
-156} catch (RetriesExhaustedException e) 
{
-157  throw e;
-158} catch (InterruptedIOException e) 
{
-159  throw e;
-160} catch (IOException e) {
-161  throw new 
RetriesExhaustedException("Can't get the location", e);
-162}
-163if (rl == null) {
-164  throw new 
RetriesExhaustedException("Can't get the locations");
-165}
-166return rl;
-167  }
-168

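The sleep() override above never retries a dead or unknown server faster than MIN_WAIT_DEAD_SERVER, and jitters that floor by 10% so callers do not retry in lockstep. A JDK-only sketch of the same idea follows; pauseTime() and addJitter() are simplified placeholders for ConnectionUtils.getPauseTime()/addJitter(), and the constant value is illustrative rather than HBase's actual setting.

import java.util.concurrent.ThreadLocalRandom;

public class DeadServerBackoffSketch {
  static final long MIN_WAIT_DEAD_SERVER_MS = 10_000L; // illustrative floor

  /** Simplified stand-in for ConnectionUtils.getPauseTime(pause, tries). */
  static long pauseTime(long basePause, int tries) {
    return basePause * (1L << Math.min(tries, 10));
  }

  /** +/-(jitter*100)% noise with a minimum of 1, like ConnectionUtils.addJitter. */
  static long addJitter(long pause, float jitter) {
    long withLag = pause + (long) (pause * (ThreadLocalRandom.current().nextFloat() - 0.5f) * jitter);
    return withLag <= 0 ? 1 : withLag;
  }

  /** Mirrors RegionAdminServiceCallable.sleep(): enforce a jittered floor when the server looks dead. */
  static long sleepFor(long basePause, int tries, boolean locationUnknownOrDead) {
    long sleep = pauseTime(basePause, tries);
    if (sleep < MIN_WAIT_DEAD_SERVER_MS && locationUnknownOrDead) {
      sleep = addJitter(MIN_WAIT_DEAD_SERVER_MS, 0.10f);
    }
    return sleep;
  }

  public static void main(String[] args) {
    // Early retry against a dead server: the jittered floor wins over the small backoff.
    System.out.println(sleepFor(100, 1, true));
  }
}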
[36/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.GetDataAsyncCallback.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.GetDataAsyncCallback.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.GetDataAsyncCallback.html
index 5eefe01..59063f5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.GetDataAsyncCallback.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.GetDataAsyncCallback.html
@@ -58,606 +58,607 @@
 050import 
org.apache.hadoop.hbase.util.CancelableProgressable;
 051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 052import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-053import 
org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
-054import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-055import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
-056import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-057import 
org.apache.hadoop.util.StringUtils;
-058import 
org.apache.zookeeper.AsyncCallback;
-059import 
org.apache.zookeeper.KeeperException;
-060import org.apache.zookeeper.data.Stat;
-061
-062/**
-063 * ZooKeeper based implementation of 
{@link SplitLogWorkerCoordination}
-064 * It listen for changes in ZooKeeper 
and
-065 *
-066 */
-067@InterfaceAudience.Private
-068public class ZkSplitLogWorkerCoordination 
extends ZooKeeperListener implements
-069SplitLogWorkerCoordination {
-070
-071  private static final Log LOG = 
LogFactory.getLog(ZkSplitLogWorkerCoordination.class);
-072
-073  private static final int checkInterval 
= 5000; // 5 seconds
-074  private static final int 
FAILED_TO_OWN_TASK = -1;
-075
-076  private  SplitLogWorker worker;
-077
-078  private TaskExecutor 
splitTaskExecutor;
-079
-080  private final Object taskReadyLock = 
new Object();
-081  private AtomicInteger taskReadySeq = 
new AtomicInteger(0);
-082  private volatile String currentTask = 
null;
-083  private int currentVersion;
-084  private volatile boolean shouldStop = 
false;
-085  private final Object grabTaskLock = new 
Object();
-086  private boolean workerInGrabTask = 
false;
-087  private int reportPeriod;
-088  private RegionServerServices server = 
null;
-089  protected final AtomicInteger 
tasksInProgress = new AtomicInteger(0);
-090  private int maxConcurrentTasks = 0;
-091
-092  private final ZkCoordinatedStateManager 
manager;
-093
-094  public 
ZkSplitLogWorkerCoordination(ZkCoordinatedStateManager 
zkCoordinatedStateManager,
-095  ZooKeeperWatcher watcher) {
-096super(watcher);
-097manager = 
zkCoordinatedStateManager;
-098
-099  }
-100
-101  /**
-102   * Override handler from {@link 
ZooKeeperListener}
-103   */
-104  @Override
-105  public void nodeChildrenChanged(String 
path) {
-106if 
(path.equals(watcher.znodePaths.splitLogZNode)) {
-107  if (LOG.isTraceEnabled()) 
LOG.trace("tasks arrived or departed on " + path);
-108  synchronized (taskReadyLock) {
-109
this.taskReadySeq.incrementAndGet();
-110taskReadyLock.notify();
-111  }
-112}
-113  }
-114
-115  /**
-116   * Override handler from {@link 
ZooKeeperListener}
-117   */
-118  @Override
-119  public void nodeDataChanged(String 
path) {
-120// there will be a self generated 
dataChanged event every time attemptToOwnTask()
-121// heartbeats the task znode by 
upping its version
-122synchronized (grabTaskLock) {
-123  if (workerInGrabTask) {
-124// currentTask can change
-125String taskpath = currentTask;
-126if (taskpath != null && taskpath.equals(path)) {
-127  getDataSetWatchAsync();
-128}
-129  }
-130}
-131  }
-132
-133  /**
-134   * Override setter from {@link 
SplitLogWorkerCoordination}
-135   */
-136  @Override
-137  public void init(RegionServerServices 
server, Configuration conf,
-138  TaskExecutor splitExecutor, 
SplitLogWorker worker) {
-139this.server = server;
-140this.worker = worker;
-141this.splitTaskExecutor = 
splitExecutor;
-142maxConcurrentTasks = 
conf.getInt("hbase.regionserver.wal.max.splitters", DEFAULT_MAX_SPLITTERS);
-143reportPeriod =
-144
conf.getInt("hbase.splitlog.report.period",
-145  
conf.getInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT,
-146
ZKSplitLogManagerCoordination.DEFAULT_TIMEOUT) / 3);
-147  }
-148
-149  /* Support functions for ZooKeeper 
async callback */
-150
-151  void getDataSetWatchFailure(String 
path) {
-152synchronized (grabTaskLock) {
-153  if (workerInGrabTask) {
-154// currentTask can change but 
that's ok
-155String taskpath = currentTask;
-156if (taskpath != null && taskpath.equals(path)) {
-157  

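nodeChildrenChanged() above only bumps a sequence counter and notifies a lock; the worker thread sleeps on that lock and re-scans the split-log znode whenever the sequence moves (or every checkInterval as a fallback). The producer/consumer shape of that handshake, reduced to plain JDK primitives with the ZooKeeper watching omitted (class and method names are illustrative):

import java.util.concurrent.atomic.AtomicInteger;

public class TaskReadySketch {
  private final Object taskReadyLock = new Object();
  private final AtomicInteger taskReadySeq = new AtomicInteger(0);

  /** Watcher side, like nodeChildrenChanged(): record the change and wake the worker. */
  void onTasksChanged() {
    synchronized (taskReadyLock) {
      taskReadySeq.incrementAndGet();
      taskReadyLock.notify();
    }
  }

  /** Worker side: block until the sequence moves past what was last seen, then rescan. */
  int waitForNewTasks(int lastSeenSeq) throws InterruptedException {
    synchronized (taskReadyLock) {
      while (taskReadySeq.get() == lastSeenSeq) {
        taskReadyLock.wait(5000); // periodic wake-up, like the coordination's 5 s checkInterval
      }
      return taskReadySeq.get();
    }
  }
}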
[14/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
index e1fbce4..873e17f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
@@ -1089,497 +1089,498 @@
 1081}
 1082  }
 1083  Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap();
-1084  for (String tableName : 
this.configuredReadTableTimeouts.keySet()) {
-1085if 
(actualReadTableLatency.containsKey(tableName)) {
-1086  Long actual = 
actualReadTableLatency.get(tableName).longValue();
-1087  Long configured = 
this.configuredReadTableTimeouts.get(tableName);
-1088  LOG.info("Read operation 
for " + tableName + " took " + actual +
-1089" ms. The configured 
read timeout was " + configured + " ms.");
-1090  if (actual > configured) {
-1091LOG.error("Read 
operation for " + tableName + " exceeded the configured read timeout.");
-1092  }
-1093} else {
-1094  LOG.error("Read operation 
for " + tableName + " failed!");
-1095}
-1096  }
-1097  if (this.writeSniffing) {
-1098String writeTableStringName 
= this.writeTableName.getNameAsString();
-1099long actualWriteLatency = 
regionSink.getWriteLatency().longValue();
-1100LOG.info("Write operation 
for " + writeTableStringName + " took " + actualWriteLatency + " ms. The 
configured write timeout was " +
-1101  
this.configuredWriteTableTimeout + " ms.");
-1102// Check that the writeTable 
write operation latency does not exceed the configured timeout.
-1103if (actualWriteLatency > this.configuredWriteTableTimeout) {
-1104  LOG.error("Write operation 
for " + writeTableStringName + " exceeded the configured write timeout.");
-1105}
-1106  }
-1107} catch (Exception e) {
-1108  LOG.error("Run regionMonitor 
failed", e);
-1109  this.errorCode = 
ERROR_EXIT_CODE;
-1110}
-1111  }
-1112  this.done = true;
-1113}
-1114
-1115private String[] 
generateMonitorTables(String[] monitorTargets) throws IOException {
-1116  String[] returnTables = null;
-1117
-1118  if (this.useRegExp) {
-1119Pattern pattern = null;
-1120HTableDescriptor[] tds = null;
-1121Set<String> tmpTables = new TreeSet<>();
-1122try {
-1123  if (LOG.isDebugEnabled()) {
-1124
LOG.debug(String.format("reading list of tables"));
-1125  }
-1126  tds = 
this.admin.listTables(pattern);
-1127  if (tds == null) {
-1128tds = new 
HTableDescriptor[0];
-1129  }
-1130  for (String monitorTarget : 
monitorTargets) {
-1131pattern = 
Pattern.compile(monitorTarget);
-1132for (HTableDescriptor td : 
tds) {
-1133  if 
(pattern.matcher(td.getNameAsString()).matches()) {
-1134
tmpTables.add(td.getNameAsString());
-1135  }
-1136}
-1137  }
-1138} catch (IOException e) {
-1139  LOG.error("Communicate with 
admin failed", e);
-1140  throw e;
-1141}
-1142
-1143if (tmpTables.size() > 0) {
-1144  returnTables = 
tmpTables.toArray(new String[tmpTables.size()]);
-1145} else {
-1146  String msg = "No HTable found, 
tablePattern:" + Arrays.toString(monitorTargets);
-1147  LOG.error(msg);
-1148  this.errorCode = 
INIT_ERROR_EXIT_CODE;
-1149  throw new 
TableNotFoundException(msg);
-1150}
-1151  } else {
-1152returnTables = monitorTargets;
-1153  }
-1154
-1155  return returnTables;
-1156}
-1157
-1158/*
-1159 * canary entry point to monitor all 
the tables.
-1160 */
-1161private List<Future<Void>> sniff(TaskType taskType, RegionStdOutSink regionSink) throws Exception {
-1162  if (LOG.isDebugEnabled()) {
-1163LOG.debug(String.format("reading 
list of tables"));
-1164  }
-1165  List<Future<Void>> taskFutures = new LinkedList<>();
-1166  for (HTableDescriptor table : 
admin.listTables()) {
-1167if (admin.isTableEnabled(table.getTableName())
-1168    && (!table.getTableName().equals(writeTableName))) {
-1169  AtomicLong readLatency = 
regionSink.initializeAndGetReadLatencyForTable(table.getNameAsString());
-1170  
taskFutures.addAll(Canary.sniff(admin, sink, table, executor, taskType, 
this.rawScanEnabled, readLatency));
-1171 

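generateMonitorTables() in the hunk above (the same code also appears in the Canary.ExtendedSink page earlier) treats each monitor target as a regular expression, collects every table whose name matches at least one of them, and fails when nothing matches. The same filtering, decoupled from Admin/HTableDescriptor so it runs standalone (names are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Pattern;

public class MonitorTableFilterSketch {
  /** Returns the table names matching any of the regex targets; throws if none match. */
  static String[] filterTables(String[] monitorTargets, List<String> allTableNames) {
    Set<String> matched = new TreeSet<>();
    for (String target : monitorTargets) {
      Pattern pattern = Pattern.compile(target);
      for (String name : allTableNames) {
        if (pattern.matcher(name).matches()) {
          matched.add(name);
        }
      }
    }
    if (matched.isEmpty()) {
      throw new IllegalArgumentException(
          "No HTable found, tablePattern:" + Arrays.toString(monitorTargets));
    }
    return matched.toArray(new String[0]);
  }

  public static void main(String[] args) {
    String[] targets = {"user.*"};
    // Prints [userprofile, usertable]; "meta" does not match the pattern.
    System.out.println(Arrays.toString(
        filterTables(targets, Arrays.asList("usertable", "userprofile", "meta"))));
  }
}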
[48/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 3c69db4..55440a4 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2017 The Apache Software Foundation
 
   File: 2247,
- Errors: 14813,
+ Errors: 14814,
  Warnings: 0,
  Infos: 0
   
@@ -13831,7 +13831,7 @@ under the License.
   0
 
 
-  6
+  7
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/coc.html
--
diff --git a/coc.html b/coc.html
index 6c19e1f..9afc2e2 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-18
+  Last Published: 
2017-07-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index c6ac414..de21f6e 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-18
+  Last Published: 
2017-07-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 62c9ec0..163eef0 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -527,7 +527,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-18
+  Last Published: 
2017-07-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 9f85aa0..7294e72 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -703,7 +703,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-18
+  Last Published: 
2017-07-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 4214e70..f2f904b 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Dependency Information
 
@@ -318,7 +318,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-18
+  Last Published: 
2017-07-19
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 8ccd2d5..fc48ae6 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependency Management
 
@@ -900,7 +900,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-07-18
+  Last Published: 
2017-07-19
 
 
 


[21/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 504e470..38667c0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -2866,5375 +2866,5371 @@
 2858checkResources();
 2859
startRegionOperation(Operation.DELETE);
 2860try {
-2861  delete.getRow();
-2862  // All edits for the given row 
(across all column families) must happen atomically.
-2863  doBatchMutate(delete);
-2864} finally {
-2865  
closeRegionOperation(Operation.DELETE);
-2866}
-2867  }
-2868
-2869  /**
-2870   * Row needed by below method.
-2871   */
-2872  private static final byte [] 
FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
-2873
-2874  /**
-2875   * This is used only by unit tests. 
Not required to be a public API.
-2876   * @param familyMap map of family to 
edits for the given family.
-2877   * @throws IOException
-2878   */
-2879  void delete(NavigableMap<byte[], List<Cell>> familyMap,
-2880  Durability durability) throws 
IOException {
-2881Delete delete = new 
Delete(FOR_UNIT_TESTS_ONLY);
-2882
delete.setFamilyCellMap(familyMap);
-2883delete.setDurability(durability);
-2884doBatchMutate(delete);
-2885  }
-2886
-2887  @Override
-2888  public void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
-2889  byte[] byteNow) throws IOException 
{
-2890for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
-2891
-2892  byte[] family = e.getKey();
-2893  List<Cell> cells = e.getValue();
-2894  assert cells instanceof 
RandomAccess;
-2895
-2896  Map<byte[], Integer> kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-2897  int listSize = cells.size();
-2898  for (int i=0; i < listSize; i++) {
-2899Cell cell = cells.get(i);
-2900//  Check if time is LATEST, 
change to time of most recent addition if so
-2901//  This is expensive.
-2902if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) {
-2903  byte[] qual = 
CellUtil.cloneQualifier(cell);
-2904  if (qual == null) qual = 
HConstants.EMPTY_BYTE_ARRAY;
-2905
-2906  Integer count = 
kvCount.get(qual);
-2907  if (count == null) {
-2908kvCount.put(qual, 1);
-2909  } else {
-2910kvCount.put(qual, count + 
1);
-2911  }
-2912  count = kvCount.get(qual);
-2913
-2914  Get get = new 
Get(CellUtil.cloneRow(cell));
-2915  get.setMaxVersions(count);
-2916  get.addColumn(family, qual);
-2917  if (coprocessorHost != null) 
{
-2918if 
(!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell,
-2919byteNow, get)) {
-2920  
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2921}
-2922  } else {
-2923
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2924  }
-2925} else {
-2926  
CellUtil.updateLatestStamp(cell, byteNow, 0);
-2927}
-2928  }
-2929}
-2930  }
-2931
-2932  void 
updateDeleteLatestVersionTimeStamp(Cell cell, Get get, int count, byte[] 
byteNow)
-2933  throws IOException {
-2934List<Cell> result = get(get, false);
-2935
-2936if (result.size() < count) {
-2937  // Nothing to delete
-2938  CellUtil.updateLatestStamp(cell, 
byteNow, 0);
-2939  return;
-2940}
-2941if (result.size() > count) {
-2942  throw new 
RuntimeException("Unexpected size: " + result.size());
-2943}
-2944Cell getCell = result.get(count - 
1);
-2945CellUtil.setTimestamp(cell, 
getCell.getTimestamp());
-2946  }
-2947
-2948  @Override
-2949  public void put(Put put) throws 
IOException {
-2950checkReadOnly();
-2951
-2952// Do a rough check that we have 
resources to accept a write.  The check is
-2953// 'rough' in that between the 
resource check and the call to obtain a
-2954// read lock, resources may run out. 
 For now, the thought is that this
-2955// will be extremely rare; we'll 
deal with it when it happens.
-2956checkResources();
-2957
startRegionOperation(Operation.PUT);
-2958try {
-2959  // All edits for the given row 
(across all column families) must happen atomically.
-2960  doBatchMutate(put);
-2961} finally {
-2962  
closeRegionOperation(Operation.PUT);
-2963}
-2964  }
-2965
-2966  /**
-2967   * Struct-like class that tracks the 
progress of a batch operation,
-2968   * accumulating status codes and 
tracking 

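prepareDeleteTimestamps() above resolves HConstants.LATEST_TIMESTAMP on delete markers by counting how many deletes it has already seen per qualifier and then issuing a Get bounded to that many versions. The counting step needs a map keyed by byte[] with an explicit comparator, since arrays have no useful equals()/hashCode(); a stripped-down illustration follows, using the JDK's Arrays.compareUnsigned (Java 9+) in place of Bytes.BYTES_COMPARATOR, with an illustrative class name.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Map;
import java.util.TreeMap;

public class QualifierCountSketch {
  public static void main(String[] args) {
    // byte[] keys need an explicit comparator; Arrays::compareUnsigned is the closest
    // JDK equivalent of HBase's Bytes.BYTES_COMPARATOR (unsigned lexicographic order).
    Map<byte[], Integer> kvCount = new TreeMap<byte[], Integer>(Arrays::compareUnsigned);

    byte[][] qualifiers = {
        "q1".getBytes(StandardCharsets.UTF_8),
        "q2".getBytes(StandardCharsets.UTF_8),
        "q1".getBytes(StandardCharsets.UTF_8), // same qualifier, different array instance
    };
    for (byte[] qual : qualifiers) {
      kvCount.merge(qual, 1, Integer::sum); // the same counting the hunk does with get()/put()
    }

    // Prints q1 -> 2 and q2 -> 1; in HRegion the count then bounds get.setMaxVersions(count).
    kvCount.forEach((k, v) ->
        System.out.println(new String(k, StandardCharsets.UTF_8) + " -> " + v));
  }
}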
[27/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 504e470..38667c0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -2866,5375 +2866,5371 @@
 2858checkResources();
 2859
startRegionOperation(Operation.DELETE);
 2860try {
-2861  delete.getRow();
-2862  // All edits for the given row 
(across all column families) must happen atomically.
-2863  doBatchMutate(delete);
-2864} finally {
-2865  
closeRegionOperation(Operation.DELETE);
-2866}
-2867  }
-2868
-2869  /**
-2870   * Row needed by below method.
-2871   */
-2872  private static final byte [] 
FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
-2873
-2874  /**
-2875   * This is used only by unit tests. 
Not required to be a public API.
-2876   * @param familyMap map of family to 
edits for the given family.
-2877   * @throws IOException
-2878   */
-2879  void delete(NavigableMap<byte[], List<Cell>> familyMap,
-2880  Durability durability) throws 
IOException {
-2881Delete delete = new 
Delete(FOR_UNIT_TESTS_ONLY);
-2882
delete.setFamilyCellMap(familyMap);
-2883delete.setDurability(durability);
-2884doBatchMutate(delete);
-2885  }
-2886
-2887  @Override
-2888  public void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyMap,
-2889  byte[] byteNow) throws IOException 
{
-2890for (Map.Entry<byte[], List<Cell>> e : familyMap.entrySet()) {
-2891
-2892  byte[] family = e.getKey();
-2893  List<Cell> cells = e.getValue();
-2894  assert cells instanceof 
RandomAccess;
-2895
-2896  Map<byte[], Integer> kvCount = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-2897  int listSize = cells.size();
-2898  for (int i=0; i < listSize; i++) {
-2899Cell cell = cells.get(i);
-2900//  Check if time is LATEST, 
change to time of most recent addition if so
-2901//  This is expensive.
-2902if (cell.getTimestamp() == HConstants.LATEST_TIMESTAMP && CellUtil.isDeleteType(cell)) {
-2903  byte[] qual = 
CellUtil.cloneQualifier(cell);
-2904  if (qual == null) qual = 
HConstants.EMPTY_BYTE_ARRAY;
-2905
-2906  Integer count = 
kvCount.get(qual);
-2907  if (count == null) {
-2908kvCount.put(qual, 1);
-2909  } else {
-2910kvCount.put(qual, count + 
1);
-2911  }
-2912  count = kvCount.get(qual);
-2913
-2914  Get get = new 
Get(CellUtil.cloneRow(cell));
-2915  get.setMaxVersions(count);
-2916  get.addColumn(family, qual);
-2917  if (coprocessorHost != null) 
{
-2918if 
(!coprocessorHost.prePrepareTimeStampForDeleteVersion(mutation, cell,
-2919byteNow, get)) {
-2920  
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2921}
-2922  } else {
-2923
updateDeleteLatestVersionTimeStamp(cell, get, count, byteNow);
-2924  }
-2925} else {
-2926  
CellUtil.updateLatestStamp(cell, byteNow, 0);
-2927}
-2928  }
-2929}
-2930  }
-2931
-2932  void 
updateDeleteLatestVersionTimeStamp(Cell cell, Get get, int count, byte[] 
byteNow)
-2933  throws IOException {
-2934List<Cell> result = get(get, false);
-2935
-2936if (result.size() < count) {
-2937  // Nothing to delete
-2938  CellUtil.updateLatestStamp(cell, 
byteNow, 0);
-2939  return;
-2940}
-2941if (result.size() > count) {
-2942  throw new 
RuntimeException("Unexpected size: " + result.size());
-2943}
-2944Cell getCell = result.get(count - 
1);
-2945CellUtil.setTimestamp(cell, 
getCell.getTimestamp());
-2946  }
-2947
-2948  @Override
-2949  public void put(Put put) throws 
IOException {
-2950checkReadOnly();
-2951
-2952// Do a rough check that we have 
resources to accept a write.  The check is
-2953// 'rough' in that between the 
resource check and the call to obtain a
-2954// read lock, resources may run out. 
 For now, the thought is that this
-2955// will be extremely rare; we'll 
deal with it when it happens.
-2956checkResources();
-2957
startRegionOperation(Operation.PUT);
-2958try {
-2959  // All edits for the given row 
(across all column families) must happen atomically.
-2960  doBatchMutate(put);
-2961} finally {
-2962  
closeRegionOperation(Operation.PUT);
-2963}
-2964  }
-2965
-2966  /**
-2967   * Struct-like class that tracks the 

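updateDeleteLatestVersionTimeStamp() in the two HRegion hunks above retimes a delete marker to the timestamp of the count-th most recent existing version, falling back to "now" when fewer versions exist. The selection logic on its own, with plain longs standing in for Cells and for the Get round-trip (names are illustrative):

import java.util.Arrays;
import java.util.List;

public class DeleteVersionTimestampSketch {
  /**
   * resultTimestamps is expected newest-first with at most 'count' entries,
   * like the bounded Get in the hunk; returns the timestamp the delete marker should carry.
   */
  static long resolveDeleteTimestamp(List<Long> resultTimestamps, int count, long now) {
    if (resultTimestamps.size() < count) {
      return now; // nothing old enough to delete: stamp with byteNow, as the hunk does
    }
    if (resultTimestamps.size() > count) {
      throw new IllegalStateException("Unexpected size: " + resultTimestamps.size());
    }
    return resultTimestamps.get(count - 1); // timestamp of the count-th newest version
  }

  public static void main(String[] args) {
    // Two existing versions, deleting up to the 2nd newest: the marker takes that version's timestamp.
    System.out.println(resolveDeleteTimestamp(Arrays.asList(200L, 100L), 2, 999L)); // prints 100
  }
}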