[3/3] hbase git commit: HBASE-20388 nightly tests running on a feature branch should only comment on that feature branch's jira

2018-04-12 Thread busbey
HBASE-20388 nightly tests running on a feature branch should only comment on that feature branch's jira


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0deb8cd8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0deb8cd8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0deb8cd8

Branch: refs/heads/HBASE-20388
Commit: 0deb8cd8a8c7545486e0808f1be71baa98fd9c04
Parents: d59a6c8
Author: Sean Busbey 
Authored: Thu Apr 12 21:10:53 2018 -0500
Committer: Sean Busbey 
Committed: Fri Apr 13 00:17:09 2018 -0500

--
 dev-support/Jenkinsfile | 35 +++
 1 file changed, 23 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0deb8cd8/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 3f3066b..4e249ce 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -497,8 +497,14 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
echo "[INFO] Comment:"
echo comment
echo ""
-   echo "[INFO] There are ${currentBuild.changeSets.size()} change 
sets."
-   getJirasToComment(currentBuild).each { currentIssue ->
+   echo "[DEBUG] checking to see if feature branch"
+   def jiras = getJirasToComment(env.BRANCH_NAME, [])
+   if (jiras.isEmpty()) {
+ echo "[DEBUG] non-feature branch, checking change messages for 
jira keys."
+ echo "[INFO] There are ${currentBuild.changeSets.size()} change 
sets."
+ jiras = getJirasToCommentFromChangesets(currentBuild)
+   }
+   jiras.each { currentIssue ->
  jiraComment issueKey: currentIssue, body: comment
}
 } catch (Exception exception) {
@@ -511,7 +517,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 }
 import org.jenkinsci.plugins.workflow.support.steps.build.RunWrapper
 @NonCPS
-List getJirasToComment(RunWrapper thisBuild) {
+List getJirasToCommentFromChangesets(RunWrapper thisBuild) {
   def seenJiras = []
   thisBuild.changeSets.each { cs ->
 cs.getItems().each { change ->
@@ -521,16 +527,21 @@ List getJirasToComment(RunWrapper thisBuild) {
   echo " ${change.commitId}"
   echo " ${change.author}"
   echo ""
-  msg.eachMatch("HBASE-[0-9]+") { currentIssue ->
-echo "[DEBUG] found jira key: ${currentIssue}"
-if (currentIssue in seenJiras) {
-  echo "[DEBUG] already commented on ${currentIssue}."
-} else {
-  echo "[INFO] commenting on ${currentIssue}."
-  seenJiras << currentIssue
-}
-  }
+  seenJiras = getJirasToComment(msg, seenJiras)
 }
   }
   return seenJiras
 }
+@NonCPS
+List getJirasToComment(CharSequence source, CharSequence[] seen) {
+  source.eachMatch("HBASE-[0-9]+") { currentIssue ->
+echo "[DEBUG] found jira key: ${currentIssue}"
+if (currentIssue in seen) {
+  echo "[DEBUG] already commented on ${currentIssue}."
+} else {
+  echo "[INFO] commenting on ${currentIssue}."
+  seen << currentIssue
+}
+  }
+  return seen
+}
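
For readers following the Jenkinsfile change: the refactored helper simply scans a string for JIRA keys and de-duplicates them while preserving order, so a feature branch named after its JIRA yields exactly one key. A minimal Java sketch of the same idea (illustrative only, not part of the commit):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class JiraKeyScan {
      private static final Pattern JIRA_KEY = Pattern.compile("HBASE-[0-9]+");

      /** Append any keys found in source to seen, skipping duplicates. */
      static List<String> jirasToComment(CharSequence source, List<String> seen) {
        Matcher m = JIRA_KEY.matcher(source);
        while (m.find()) {
          String issue = m.group();
          if (!seen.contains(issue)) {
            seen.add(issue);
          }
        }
        return seen;
      }

      public static void main(String[] args) {
        // On a feature branch the branch name itself carries the key, e.g. "HBASE-20388",
        // so the result is non-empty and the changeset scan is skipped.
        List<String> jiras = jirasToComment("HBASE-20388", new ArrayList<>());
        System.out.println(jiras); // [HBASE-20388]
      }
    }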



[1/3] hbase git commit: HBASE-20338 WALProcedureStore#recoverLease() should have fixed sleeps for retrying rollWriter() [Forced Update!]

2018-04-12 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-20388 855df58b1 -> 0deb8cd8a (forced update)


HBASE-20338 WALProcedureStore#recoverLease() should have fixed sleeps for retrying rollWriter()

Signed-off-by: Mike Drob 
Signed-off-by: Umesh Agashe 
Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/17a29ac2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/17a29ac2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/17a29ac2

Branch: refs/heads/HBASE-20388
Commit: 17a29ac2313774850f0cce116ee33ef79d007a34
Parents: 70d2321
Author: Wei-Chiu Chuang 
Authored: Thu Apr 5 14:11:29 2018 -0700
Committer: Mike Drob 
Committed: Thu Apr 12 16:33:55 2018 -0500

--
 .../hbase/procedure2/store/wal/WALProcedureStore.java   | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/17a29ac2/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index c5680cf..f2931fc 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -359,7 +359,16 @@ public class WALProcedureStore extends ProcedureStoreBase {
 lock.lock();
 try {
   LOG.trace("Starting WAL Procedure Store lease recovery");
+  boolean afterFirstAttempt = false;
   while (isRunning()) {
+// Don't sleep before first attempt
+if (afterFirstAttempt) {
+  LOG.trace("Sleep {} ms after first lease recovery attempt.",
+  waitBeforeRoll);
+  Threads.sleepWithoutInterrupt(waitBeforeRoll);
+} else {
+  afterFirstAttempt = true;
+}
 FileStatus[] oldLogs = getLogFiles();
 // Get Log-MaxID and recover lease on old logs
 try {
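
The retry pattern in this patch — a fixed sleep between attempts, but none before the first — in a standalone Java sketch; waitBeforeRoll follows the patch, while attemptRecovery() is a hypothetical stand-in for the rollWriter()/lease-recovery call:

    public class FixedSleepRetry {
      // Fixed delay between retries, mirroring waitBeforeRoll in the patch (value is illustrative).
      private static final long WAIT_BEFORE_ROLL_MS = 500;

      static void recoverLease() throws InterruptedException {
        boolean afterFirstAttempt = false;
        while (true) {
          // Don't sleep before the first attempt; back off before every retry.
          if (afterFirstAttempt) {
            Thread.sleep(WAIT_BEFORE_ROLL_MS);
          } else {
            afterFirstAttempt = true;
          }
          if (attemptRecovery()) {
            return; // recovered; stop retrying
          }
        }
      }

      /** Hypothetical stand-in for the lease-recovery attempt. */
      static boolean attemptRecovery() {
        return Math.random() < 0.3;
      }

      public static void main(String[] args) throws InterruptedException {
        recoverLease();
        System.out.println("lease recovered");
      }
    }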



[2/3] hbase git commit: HBASE-20329 Add note for operators to refguide on AsyncFSWAL; ADDENDUM

2018-04-12 Thread busbey
HBASE-20329 Add note for operators to refguide on AsyncFSWAL; ADDENDUM

Add small note on edits being immediately visible when Durability == ASYNC_WAL.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d59a6c81
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d59a6c81
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d59a6c81

Branch: refs/heads/HBASE-20388
Commit: d59a6c8166cf398ee62089cc35ffeddfe5824134
Parents: 17a29ac
Author: Michael Stack 
Authored: Thu Apr 12 15:59:00 2018 -0700
Committer: Michael Stack 
Committed: Thu Apr 12 16:00:11 2018 -0700

--
 src/main/asciidoc/_chapters/architecture.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d59a6c81/src/main/asciidoc/_chapters/architecture.adoc
--
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index bc29d4b..1d6fc60 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -1248,7 +1248,7 @@ dictionary because of an abrupt termination, a read of this last block may not b
 It is possible to set _durability_ on each Mutation or on a Table basis. Options include:
 
  * _SKIP_WAL_: Do not write Mutations to the WAL (See the next section, <>).
- * _ASYNC_WAL_: Write the WAL asynchronously; do not hold-up clients waiting on the sync of their write to the filesystem but return immediately; the Mutation will be flushed to the WAL at a later time. This option currently may lose data. See HBASE-16689.
+ * _ASYNC_WAL_: Write the WAL asynchronously; do not hold-up clients waiting on the sync of their write to the filesystem but return immediately. The edit becomes visible. Meanwhile, in the background, the Mutation will be flushed to the WAL at some time later. This option currently may lose data. See HBASE-16689.
  * _SYNC_WAL_: The *default*. Each edit is sync'd to HDFS before we return success to the client.
  * _FSYNC_WAL_: Each edit is fsync'd to HDFS and the filesystem before we return success to the client.
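
For operators who want to try this: durability is set per-Mutation through the standard client API. A minimal sketch, assuming an existing cluster; the table name "t1" and column family "f" are illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AsyncWalPut {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("t1"))) {
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          // Returns to the client before the WAL sync completes; per the note above,
          // the edit is immediately visible to reads but may be lost on an abrupt crash.
          put.setDurability(Durability.ASYNC_WAL);
          table.put(put);
        }
      }
    }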
 



hbase git commit: HBASE-20112 register nightly junit over hadoop3 results with jenkins.

2018-04-12 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-20112 [created] 3a1291299


HBASE-20112 register nightly junit over hadoop3 results with jenkins.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3a129129
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3a129129
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3a129129

Branch: refs/heads/HBASE-20112
Commit: 3a129129971d3544506a9381c3d460f42eb65a98
Parents: d59a6c8
Author: Sean Busbey 
Authored: Fri Apr 13 00:08:39 2018 -0500
Committer: Sean Busbey 
Committed: Fri Apr 13 00:08:39 2018 -0500

--
 dev-support/Jenkinsfile | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3a129129/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 3f3066b..b289eaf 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -381,8 +381,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   post {
 always {
   stash name: 'hadoop3-result', includes: "${OUTPUT_DIR_RELATIVE}/commentfile"
-  // Not sure how two junit test reports will work. Disabling this for now.
-  // junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
+  junit testResults: "${env.OUTPUT_DIR_RELATIVE}/**/target/**/TEST-*.xml", allowEmptyResults: true
   // zip surefire reports.
   sh '''#!/bin/bash -e
 if [ -d "${OUTPUT_DIR}/archiver" ]; then



[1/2] hbase git commit: HBASE-20364 ensure jira comment from nightly reflects results of run comment comes from.

2018-04-12 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-20364 [created] da8ede838


HBASE-20364 ensure jira comment from nightly reflects results of run comment comes from.

* in a post-step, build status can either be "null" or "SUCCESS" to indicate success.
* before we do an scm checkout for stages that post to the comment, set a default "we failed ¯\_(ツ)_/¯" comment.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7d04d374
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7d04d374
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7d04d374

Branch: refs/heads/HBASE-20364
Commit: 7d04d3740d842a12ddd275f14f8305ac3e6b800b
Parents: d59a6c8
Author: Sean Busbey 
Authored: Thu Apr 12 23:38:27 2018 -0500
Committer: Sean Busbey 
Committed: Thu Apr 12 23:38:35 2018 -0500

--
 dev-support/Jenkinsfile | 55 ++--
 1 file changed, 38 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7d04d374/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 3f3066b..cab5027 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -141,25 +141,30 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_GENERAL}"
   }
   steps {
+// Must do prior to anything else, since if one of them times out we'll stash the commentfile
+sh '''#!/usr/bin/env bash
+  set -e
+  rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+  echo '(x) {color:red}-1 general checks{color}' >"${OUTPUT_DIR}/commentfile"
+  echo "-- Something went wrong running this stage, please [check relevant console output|${BUILD_URL}/console]." >> "${OUTPUT_DIR}/commentfile"
+'''
 unstash 'yetus'
 dir('component') {
   checkout scm
 }
 sh '''#!/usr/bin/env bash
   set -e
-  rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
   rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
   "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
 '''
 // TODO roll this into the hbase_nightly_yetus script
 sh '''#!/usr/bin/env bash
   set -e
-  rm -rf "${OUTPUT_DIR}/commentfile}"
   declare -i status=0
   if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
-echo '(/) {color:green}+1 general checks{color}' >> "${OUTPUT_DIR}/commentfile"
+echo '(/) {color:green}+1 general checks{color}' > "${OUTPUT_DIR}/commentfile"
   else
-echo '(x) {color:red}-1 general checks{color}' >> "${OUTPUT_DIR}/commentfile"
+echo '(x) {color:red}-1 general checks{color}' > "${OUTPUT_DIR}/commentfile"
 status=1
   fi
   echo "-- For more information [see general report|${BUILD_URL}/General_Nightly_Build_Report/]" >> "${OUTPUT_DIR}/commentfile"
@@ -201,24 +206,29 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 // On branches where we do jdk7 checks, jdk7 will be JAVA_HOME 
already.
   }
   steps {
+// Must do prior to anything else, since if one of them times out we'll stash the commentfile
+sh '''#!/usr/bin/env bash
+  set -e
+  rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+  echo '(x) {color:red}-1 jdk7 checks{color}' >"${OUTPUT_DIR}/commentfile"
+  echo "-- Something went wrong running this stage, please [check relevant console output|${BUILD_URL}/console]." >> "${OUTPUT_DIR}/commentfile"
+'''
 unstash 'yetus'
 dir('component') {
   checkout scm
 }
 sh '''#!/usr/bin/env bash
   set -e
-  rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
   rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
   "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
 '''
 sh '''#!/usr/bin/env bash
   set -e
-  rm -rf "${OUTPUT_DIR}/commentfile}"
   declare -i status=0
   if "${BASEDIR}/dev-support/hbase_nightly_yetus.sh" ; then
-echo '(/) {color:green}+1 jdk7 checks{color}' >> "${OUTPUT_DIR}/commentfile"
+echo '(/) {color:green}+1 jdk7 checks{color}' > "${OUTPUT_DIR}/commentfile"
   else
-
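
The idea behind this patch — write a pessimistic failure comment before doing any work, then overwrite it with the real result — in a generic Java sketch (file name and messages are illustrative, not the Jenkins implementation):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class CommentFile {
      public static void main(String[] args) throws IOException {
        Path commentfile = Paths.get("commentfile");
        // Pessimistic default: if the stage is killed (e.g. by a timeout) before
        // finishing, whatever gets stashed already reports a failure.
        Files.write(commentfile, "(x) -1 general checks\n".getBytes());
        boolean passed = runChecks();
        // Overwrite (not append) with the actual result once it is known.
        Files.write(commentfile, (passed
            ? "(/) +1 general checks\n"
            : "(x) -1 general checks\n").getBytes());
      }

      /** Hypothetical stand-in for the yetus run. */
      static boolean runChecks() {
        return true;
      }
    }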

[2/2] hbase git commit: WIP fail stages that do an scm checkout.

2018-04-12 Thread busbey
WIP fail stages that do an scm checkout.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/da8ede83
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/da8ede83
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/da8ede83

Branch: refs/heads/HBASE-20364
Commit: da8ede8386f9d46d9ef0098ca2ae7382a7cec5b8
Parents: 7d04d37
Author: Sean Busbey 
Authored: Thu Apr 12 23:49:17 2018 -0500
Committer: Sean Busbey 
Committed: Thu Apr 12 23:49:17 2018 -0500

--
 dev-support/Jenkinsfile | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/da8ede83/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index cab5027..e0b2f9c 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -149,6 +149,9 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   echo "-- Something went wrong running this stage, please [check 
relevant console output|${BUILD_URL}/console]." >> "${OUTPUT_DIR}/commentfile"
 '''
 unstash 'yetus'
+sh '''#!/usr/bin/env bash
+  exit 1
+'''
 dir('component') {
   checkout scm
 }
@@ -214,6 +217,9 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   echo "-- Something went wrong running this stage, please [check 
relevant console output|${BUILD_URL}/console]." >> "${OUTPUT_DIR}/commentfile"
 '''
 unstash 'yetus'
+sh '''#!/usr/bin/env bash
+  exit 1
+'''
 dir('component') {
   checkout scm
 }
@@ -292,6 +298,9 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   echo "-- Something went wrong running this stage, please [check 
relevant console output|${BUILD_URL}/console]." >> "${OUTPUT_DIR}/commentfile"
 '''
 unstash 'yetus'
+sh '''#!/usr/bin/env bash
+  exit 1
+'''
 dir('component') {
   checkout scm
 }
@@ -377,6 +386,9 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   echo "-- Something went wrong running this stage, please [check 
relevant console output|${BUILD_URL}/console]." >> "${OUTPUT_DIR}/commentfile"
 '''
 unstash 'yetus'
+sh '''#!/usr/bin/env bash
+  exit 1
+'''
 dir('component') {
   checkout scm
 }
@@ -452,6 +464,9 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   rm -rf ".m2-for-src" && mkdir ".m2-for-src"
   echo '(x) {color:red}-1 source release artifact{color}\n-- Something went wrong with this stage, [check relevant console output|${BUILD_URL}/console].' >output-srctarball/commentfile
 '''
+sh '''#!/usr/bin/env bash
+  exit 1
+'''
 dir('component') {
   checkout scm
 }



hbase git commit: HBASE-20163 Forbid major compaction when standby cluster replay the remote wals

2018-04-12 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/HBASE-19064 1e7c341cb -> 9850f3984


HBASE-20163 Forbid major compaction when standby cluster replay the remote wals


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9850f398
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9850f398
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9850f398

Branch: refs/heads/HBASE-19064
Commit: 9850f3984cdf7990c2a4e466c952510b5dc5bd63
Parents: 1e7c341
Author: Guanghao Zhang 
Authored: Thu Apr 12 14:44:25 2018 +0800
Committer: Guanghao Zhang 
Committed: Fri Apr 13 10:05:07 2018 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 18 
 .../hbase/regionserver/HRegionServer.java   |  2 +-
 .../regionserver/RegionServerServices.java  |  5 +++
 .../ForbidMajorCompactionChecker.java   | 44 
 .../hadoop/hbase/MockRegionServerServices.java  |  6 +++
 .../hadoop/hbase/master/MockRegionServer.java   |  6 +++
 6 files changed, 80 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9850f398/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 9f9911d..e263fc1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -145,6 +145,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.compactions.ForbidMajorCompactionChecker;
 import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
 import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector;
@@ -1977,6 +1978,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 return compact(compaction, store, throughputController, null);
   }
 
+  private boolean shouldForbidMajorCompaction() {
+if (rsServices != null && rsServices.getReplicationSourceService() != null) {
+  return rsServices.getReplicationSourceService().getSyncReplicationPeerInfoProvider()
+  .checkState(getRegionInfo(), ForbidMajorCompactionChecker.get());
+}
+return false;
+  }
+
   public boolean compact(CompactionContext compaction, HStore store,
   ThroughputController throughputController, User user) throws IOException {
 assert compaction != null && compaction.hasSelection();
@@ -1986,6 +1995,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   store.cancelRequestedCompaction(compaction);
   return false;
 }
+
+if (compaction.getRequest().isAllFiles() && shouldForbidMajorCompaction()) {
+  LOG.warn("Skipping major compaction on " + this
+  + " because this cluster is transiting sync replication state"
+  + " from STANDBY to DOWNGRADE_ACTIVE");
+  store.cancelRequestedCompaction(compaction);
+  return false;
+}
+
 MonitoredTask status = null;
 boolean requestNeedsCancellation = true;
 /*

http://git-wip-us.apache.org/repos/asf/hbase/blob/9850f398/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 25bc6be..4dd8f09 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2472,7 +2472,7 @@ public class HRegionServer extends HasThread implements
* @return Return the object that implements the replication
* source executorService.
*/
-  @VisibleForTesting
+  @Override
   public ReplicationSourceService getReplicationSourceService() {
 return replicationSourceHandler;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9850f398/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
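
The shape of the guard this patch adds: an all-files ("major") compaction is cancelled up front when the sync-replication state forbids it. A simplified Java sketch with hypothetical stand-ins for the patch's checks:

    public class CompactionGuard {
      /** Hypothetical stand-in for the sync-replication state check in the patch. */
      static boolean shouldForbidMajorCompaction() {
        return true; // e.g. cluster transiting from STANDBY to DOWNGRADE_ACTIVE
      }

      /** Mirrors the early-out: cancel instead of running a forbidden major compaction. */
      static boolean compact(boolean isAllFiles) {
        if (isAllFiles && shouldForbidMajorCompaction()) {
          System.out.println("Skipping major compaction: sync replication state forbids it");
          return false; // the real code also cancels the requested compaction on the store
        }
        System.out.println("compacting...");
        return true;
      }

      public static void main(String[] args) {
        compact(true);  // skipped
        compact(false); // non-major compaction proceeds
      }
    }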

hbase git commit: HBASE-20388 nightly tests running on a feature branch should only comment on that feature branch's jira

2018-04-12 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-20388 [created] 855df58b1


HBASE-20388 nightly tests running on a feature branch should only comment on that feature branch's jira


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/855df58b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/855df58b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/855df58b

Branch: refs/heads/HBASE-20388
Commit: 855df58b184736b4fef7663f16cda438b50771e1
Parents: 70d2321
Author: Sean Busbey 
Authored: Thu Apr 12 21:10:53 2018 -0500
Committer: Sean Busbey 
Committed: Thu Apr 12 21:10:53 2018 -0500

--
 dev-support/Jenkinsfile | 35 +++
 1 file changed, 23 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/855df58b/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 3f3066b..d3432e0 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -497,8 +497,14 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
echo "[INFO] Comment:"
echo comment
echo ""
-   echo "[INFO] There are ${currentBuild.changeSets.size()} change 
sets."
-   getJirasToComment(currentBuild).each { currentIssue ->
+   echo "[DEBUG] checking to see if feature branch"
+   def jiras = getJirasToComment(env.BRANCH_NAME, [])
+   if (jiras.isEmpty()) {
+ echo "[DEBUG] non-feature branch, checking change messages for 
jira keys."
+ echo "[INFO] There are ${currentBuild.changeSets.size()} change 
sets."
+ jiras = getJirasToCommentFromChangesets(currentBuild)
+   }
+   jiras.each { currentIssue ->
  jiraComment issueKey: currentIssue, body: comment
}
 } catch (Exception exception) {
@@ -511,7 +517,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 }
 import org.jenkinsci.plugins.workflow.support.steps.build.RunWrapper
 @NonCPS
-List getJirasToComment(RunWrapper thisBuild) {
+List getJirasToCommentFromChangesets(RunWrapper thisBuild) {
   def seenJiras = []
   thisBuild.changeSets.each { cs ->
 cs.getItems().each { change ->
@@ -521,16 +527,21 @@ List getJirasToComment(RunWrapper thisBuild) {
   echo " ${change.commitId}"
   echo " ${change.author}"
   echo ""
-  msg.eachMatch("HBASE-[0-9]+") { currentIssue ->
-echo "[DEBUG] found jira key: ${currentIssue}"
-if (currentIssue in seenJiras) {
-  echo "[DEBUG] already commented on ${currentIssue}."
-} else {
-  echo "[INFO] commenting on ${currentIssue}."
-  seenJiras << currentIssue
-}
-  }
+  seenJiras = getJirasToComment(msg, seenJiras)
 }
   }
   return seenJiras
 }
+@NonCPS
+List getJirasToComment(CharSequence source, CharSequence[] seen) {
+  source.eachMatch("HBASE-[0-9]+") { currentIssue ->
+echo "[DEBUG] found jira key: ${currentIssue}"
+if (currentIssue in seenJiras) {
+  echo "[DEBUG] already commented on ${currentIssue}."
+} else {
+  echo "[INFO] commenting on ${currentIssue}."
+  seenJiras << currentIssue
+}
+  }
+  return seen
+}



hbase git commit: HBASE-20329 Add note for operators to refguide on AsyncFSWAL; ADDENDUM

2018-04-12 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 17a29ac23 -> d59a6c816


HBASE-20329 Add note for operators to refguide on AsyncFSWAL; ADDENDUM

Add small note on edits being immediately visible when Durability == ASYNC_WAL.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d59a6c81
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d59a6c81
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d59a6c81

Branch: refs/heads/master
Commit: d59a6c8166cf398ee62089cc35ffeddfe5824134
Parents: 17a29ac
Author: Michael Stack 
Authored: Thu Apr 12 15:59:00 2018 -0700
Committer: Michael Stack 
Committed: Thu Apr 12 16:00:11 2018 -0700

--
 src/main/asciidoc/_chapters/architecture.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d59a6c81/src/main/asciidoc/_chapters/architecture.adoc
--
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index bc29d4b..1d6fc60 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -1248,7 +1248,7 @@ dictionary because of an abrupt termination, a read of this last block may not b
 It is possible to set _durability_ on each Mutation or on a Table basis. Options include:
 
  * _SKIP_WAL_: Do not write Mutations to the WAL (See the next section, <>).
- * _ASYNC_WAL_: Write the WAL asynchronously; do not hold-up clients waiting on the sync of their write to the filesystem but return immediately; the Mutation will be flushed to the WAL at a later time. This option currently may lose data. See HBASE-16689.
+ * _ASYNC_WAL_: Write the WAL asynchronously; do not hold-up clients waiting on the sync of their write to the filesystem but return immediately. The edit becomes visible. Meanwhile, in the background, the Mutation will be flushed to the WAL at some time later. This option currently may lose data. See HBASE-16689.
  * _SYNC_WAL_: The *default*. Each edit is sync'd to HDFS before we return success to the client.
  * _FSYNC_WAL_: Each edit is fsync'd to HDFS and the filesystem before we return success to the client.
 



svn commit: r26313 [2/2] - /dev/hbase/hbase-2.0.0RC0/compat-check-v1.2.6-v2.0.0.report.html

2018-04-12 Thread stack

Added: dev/hbase/hbase-2.0.0RC0/compat-check-v1.2.6-v2.0.0.report.html
==
--- dev/hbase/hbase-2.0.0RC0/compat-check-v1.2.6-v2.0.0.report.html (added)
+++ dev/hbase/hbase-2.0.0RC0/compat-check-v1.2.6-v2.0.0.report.html Thu Apr 12 22:01:19 2018
@@ -0,0 +1,11876 @@
+[11,876 lines of generated HTML/CSS elided: "hbase: rel/1.2.6 to 2.0.0RC0 compatibility report"]
svn commit: r26313 [1/2] - /dev/hbase/hbase-2.0.0RC0/compat-check-v1.2.6-v2.0.0.report.html

2018-04-12 Thread stack
Author: stack
Date: Thu Apr 12 22:01:19 2018
New Revision: 26313

Log:
Add compat report

Added:
dev/hbase/hbase-2.0.0RC0/compat-check-v1.2.6-v2.0.0.report.html



[2/3] hbase git commit: HBASE-20338 WALProcedureStore#recoverLease() should have fixed sleeps for retrying rollWriter()

2018-04-12 Thread mdrob
HBASE-20338 WALProcedureStore#recoverLease() should have fixed sleeps for retrying rollWriter()

Signed-off-by: Mike Drob 
Signed-off-by: Umesh Agashe 
Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b1901c9a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b1901c9a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b1901c9a

Branch: refs/heads/branch-2
Commit: b1901c9a15fccba94f1750021d7d7c84dd60cc8b
Parents: a50d9f4
Author: Wei-Chiu Chuang 
Authored: Thu Apr 5 14:11:29 2018 -0700
Committer: Mike Drob 
Committed: Thu Apr 12 16:35:11 2018 -0500

--
 .../hbase/procedure2/store/wal/WALProcedureStore.java   | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b1901c9a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index d14e1bf..0c8cfd2 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -359,7 +359,16 @@ public class WALProcedureStore extends ProcedureStoreBase {
 lock.lock();
 try {
   LOG.trace("Starting WAL Procedure Store lease recovery");
+  boolean afterFirstAttempt = false;
   while (isRunning()) {
+// Don't sleep before first attempt
+if (afterFirstAttempt) {
+  LOG.trace("Sleep {} ms after first lease recovery attempt.",
+  waitBeforeRoll);
+  Threads.sleepWithoutInterrupt(waitBeforeRoll);
+} else {
+  afterFirstAttempt = true;
+}
 FileStatus[] oldLogs = getLogFiles();
 // Get Log-MaxID and recover lease on old logs
 try {



[3/3] hbase git commit: HBASE-20338 WALProcedureStore#recoverLease() should have fixed sleeps for retrying rollWriter()

2018-04-12 Thread mdrob
HBASE-20338 WALProcedureStore#recoverLease() should have fixed sleeps for retrying rollWriter()

Signed-off-by: Mike Drob 
Signed-off-by: Umesh Agashe 
Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0eacb3ea
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0eacb3ea
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0eacb3ea

Branch: refs/heads/branch-2.0
Commit: 0eacb3ea0a202b09265030ddba5b408da597dc9a
Parents: 263cc8d
Author: Wei-Chiu Chuang 
Authored: Thu Apr 5 14:11:29 2018 -0700
Committer: Mike Drob 
Committed: Thu Apr 12 16:35:30 2018 -0500

--
 .../hbase/procedure2/store/wal/WALProcedureStore.java   | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0eacb3ea/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index d14e1bf..0c8cfd2 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -359,7 +359,16 @@ public class WALProcedureStore extends ProcedureStoreBase {
 lock.lock();
 try {
   LOG.trace("Starting WAL Procedure Store lease recovery");
+  boolean afterFirstAttempt = false;
   while (isRunning()) {
+// Don't sleep before first attempt
+if (afterFirstAttempt) {
+  LOG.trace("Sleep {} ms after first lease recovery attempt.",
+  waitBeforeRoll);
+  Threads.sleepWithoutInterrupt(waitBeforeRoll);
+} else {
+  afterFirstAttempt = true;
+}
 FileStatus[] oldLogs = getLogFiles();
 // Get Log-MaxID and recover lease on old logs
 try {



[1/3] hbase git commit: HBASE-20338 WALProcedureStore#recoverLease() should have fixed sleeps for retrying rollWriter()

2018-04-12 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2 a50d9f435 -> b1901c9a1
  refs/heads/branch-2.0 263cc8d14 -> 0eacb3ea0
  refs/heads/master 70d23214f -> 17a29ac23


HBASE-20338 WALProcedureStore#recoverLease() should have fixed sleeps for retrying rollWriter()

Signed-off-by: Mike Drob 
Signed-off-by: Umesh Agashe 
Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/17a29ac2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/17a29ac2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/17a29ac2

Branch: refs/heads/master
Commit: 17a29ac2313774850f0cce116ee33ef79d007a34
Parents: 70d2321
Author: Wei-Chiu Chuang 
Authored: Thu Apr 5 14:11:29 2018 -0700
Committer: Mike Drob 
Committed: Thu Apr 12 16:33:55 2018 -0500

--
 .../hbase/procedure2/store/wal/WALProcedureStore.java   | 9 +
 1 file changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/17a29ac2/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
--
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index c5680cf..f2931fc 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -359,7 +359,16 @@ public class WALProcedureStore extends ProcedureStoreBase {
 lock.lock();
 try {
   LOG.trace("Starting WAL Procedure Store lease recovery");
+  boolean afterFirstAttempt = false;
   while (isRunning()) {
+// Don't sleep before first attempt
+if (afterFirstAttempt) {
+  LOG.trace("Sleep {} ms after first lease recovery attempt.",
+  waitBeforeRoll);
+  Threads.sleepWithoutInterrupt(waitBeforeRoll);
+} else {
+  afterFirstAttempt = true;
+}
 FileStatus[] oldLogs = getLogFiles();
 // Get Log-MaxID and recover lease on old logs
 try {



[2/2] hbase git commit: HBASE-20356 Make skipping protoc possible

2018-04-12 Thread mdrob
HBASE-20356 Make skipping protoc possible


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a50d9f43
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a50d9f43
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a50d9f43

Branch: refs/heads/branch-2
Commit: a50d9f43512a5321a00e141d929072300cb41490
Parents: 0c751da
Author: Mike Drob 
Authored: Wed Apr 11 21:25:04 2018 -0500
Committer: Mike Drob 
Committed: Thu Apr 12 14:54:03 2018 -0500

--
 dev-support/hbase-personality.sh   |  2 +-
 hbase-protocol-shaded/pom.xml  |  5 -
 src/main/asciidoc/_chapters/developer.adoc | 28 -
 3 files changed, 28 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a50d9f43/dev-support/hbase-personality.sh
--
diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 90786f2..3507a1d 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -554,7 +554,7 @@ function hbaseprotoc_rebuild
   # Need to run 'install' instead of 'compile' because shading plugin
   # is hooked-up to 'install'; else hbase-protocol-shaded is left with
   # half of its process done.
-  modules_workers patch hbaseprotoc install -DskipTests -Pcompile-protobuf -X -DHBasePatchProcess
+  modules_workers patch hbaseprotoc install -DskipTests -X -DHBasePatchProcess
 
   # shellcheck disable=SC2153
   until [[ $i -eq "${#MODULE[@]}" ]]; do

http://git-wip-us.apache.org/repos/asf/hbase/blob/a50d9f43/hbase-protocol-shaded/pom.xml
--
diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml
index c409012..ee7d54b 100644
--- a/hbase-protocol-shaded/pom.xml
+++ b/hbase-protocol-shaded/pom.xml
@@ -106,7 +106,7 @@
 1.5.3
 
   
-generate-sources
+process-sources
 
   replace
 
@@ -117,6 +117,9 @@
 
 **/*.java
 
+
+true
 
   
 ([^\.])com.google.protobuf

http://git-wip-us.apache.org/repos/asf/hbase/blob/a50d9f43/src/main/asciidoc/_chapters/developer.adoc
--
diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc
index 6d959c2..40701e9 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -415,22 +415,40 @@ use so we can freely change versions without upsetting any downstream project us
 
 The protobuf files are located in _hbase-protocol/src/main/protobuf_.
 For the change to be effective, you will need to regenerate the classes.
-You can use maven profile `compile-protobuf` to do this.
 
 [source,bourne]
 
-mvn compile -Pcompile-protobuf
+mvn package -pl hbase-protocol -am
 
 
-You may also want to define `protoc.path` for the protoc binary, using the following command:
+Similarly, protobuf definitions for internal use are located in the _hbase-protocol-shaded_ module.
 
 [source,bourne]
 
+mvn package -pl hbase-protocol-shaded -am
+
+
+Typically, protobuf code generation is done using the native `protoc` binary. In our build we use a maven plugin for
+convenience; however, the plugin may not be able to retrieve appropriate binaries for all platforms. If you find yourself
+on a platform where protoc fails, you will have to compile protoc from source, and run it independent of our maven build.
+You can disable the inline code generation by specifying `-Dprotoc.skip` in your maven arguments, allowing your build to proceed further.
 
-mvn compile -Pcompile-protobuf -Dprotoc.path=/opt/local/bin/protoc
+A similar failure relates to the stock CentOS 6 docker image providing a too old version of glibc for the version of protoc that we use.
+In this case, you would have to install glibc 2.14 and protoc 3.5.1 manually, then execute something like:
+
+[source,bourne]
 
+cd hbase-protocol-shaded
+LD_LIBRARY_PATH=/opt/glibc-2.14/lib protoc \
+  --proto_path=src/main/protobuf \
+  --java_out=target/generated-sources/protobuf/java \
+  src/main/protobuf/*.proto
+
+
+[NOTE]
+If you need to manually generate your protobuf files, you should not use `clean` in subsequent maven calls, as that will delete the newly generated files.
 
-Read the _hbase-protocol/README.txt_ for more details.
+Read the _hbase-protocol/README.txt_ for more details
 
 [[build.thrift]]
  Build Thrift



[1/2] hbase git commit: HBASE-20356 Make skipping protoc possible

2018-04-12 Thread mdrob
Repository: hbase
Updated Branches:
  refs/heads/branch-2 0c751dadf -> a50d9f435
  refs/heads/master f3ec23093 -> 70d23214f


HBASE-20356 Make skipping protoc possible


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/70d23214
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/70d23214
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/70d23214

Branch: refs/heads/master
Commit: 70d23214fbaacad90eaf0c0764405bb455e2ae85
Parents: f3ec230
Author: Mike Drob 
Authored: Wed Apr 11 21:25:04 2018 -0500
Committer: Mike Drob 
Committed: Thu Apr 12 13:31:54 2018 -0500

--
 dev-support/hbase-personality.sh   |  2 +-
 hbase-protocol-shaded/pom.xml  |  5 -
 hbase-protocol/pom.xml |  5 -
 src/main/asciidoc/_chapters/developer.adoc | 28 -
 4 files changed, 32 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/70d23214/dev-support/hbase-personality.sh
--
diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 90786f2..3507a1d 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -554,7 +554,7 @@ function hbaseprotoc_rebuild
   # Need to run 'install' instead of 'compile' because shading plugin
   # is hooked-up to 'install'; else hbase-protocol-shaded is left with
   # half of its process done.
-  modules_workers patch hbaseprotoc install -DskipTests -Pcompile-protobuf -X -DHBasePatchProcess
+  modules_workers patch hbaseprotoc install -DskipTests -X -DHBasePatchProcess
 
   # shellcheck disable=SC2153
   until [[ $i -eq "${#MODULE[@]}" ]]; do

http://git-wip-us.apache.org/repos/asf/hbase/blob/70d23214/hbase-protocol-shaded/pom.xml
--
diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml
index 021e424..25443e1 100644
--- a/hbase-protocol-shaded/pom.xml
+++ b/hbase-protocol-shaded/pom.xml
@@ -106,7 +106,7 @@
 1.5.3
 
   
-generate-sources
+process-sources
 
   replace
 
@@ -117,6 +117,9 @@
 
 **/*.java
 
+
+true
 
   
 ([^\.])com.google.protobuf

http://git-wip-us.apache.org/repos/asf/hbase/blob/70d23214/hbase-protocol/pom.xml
--
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index e6d546e..bfe2588 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -81,7 +81,7 @@
 1.5.3
 
   
-generate-sources
+process-sources
 
   replace
 
@@ -92,6 +92,9 @@
 
 **/*.java
 
+
+true
 
   
 (public)(\W+static)?(\W+final)?(\W+class)

http://git-wip-us.apache.org/repos/asf/hbase/blob/70d23214/src/main/asciidoc/_chapters/developer.adoc
--
diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc
index 48dc79e..9d9f564 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -415,22 +415,40 @@ use so we can freely change versions without upsetting any downstream project us
 
 The protobuf files are located in _hbase-protocol/src/main/protobuf_.
 For the change to be effective, you will need to regenerate the classes.
-You can use maven profile `compile-protobuf` to do this.
 
 [source,bourne]
 
-mvn compile -Pcompile-protobuf
+mvn package -pl hbase-protocol -am
 
 
-You may also want to define `protoc.path` for the protoc binary, using the following command:
+Similarly, protobuf definitions for internal use are located in the _hbase-protocol-shaded_ module.
 
 [source,bourne]
 
+mvn package -pl hbase-protocol-shaded -am
+
+
+Typically, protobuf code generation is done using the native `protoc` binary. In our build we use a maven plugin for
+convenience; however, the plugin may not be able to retrieve appropriate binaries for all platforms. If you find yourself
+on a platform where protoc fails, you will have to compile protoc from source, and run it independent of our maven build.
+You can disable the inline code generation by specifying `-Dprotoc.skip` in your maven arguments, allowing your build to proceed further.
 
-mvn compile -Pcompile-protobuf -Dprotoc.path=/opt/local/bin/protoc
+A similar failure relates to the stock CentOS 6 docker image providing a too old version of glibc for the version of protoc that we use.

hbase git commit: HBASE-20394 HBase over rides the value of HBASE_OPTS (if any) set by client

2018-04-12 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-2 e51ced4f1 -> 0c751dadf


HBASE-20394 HBase over rides the value of HBASE_OPTS (if any) set by client

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0c751dad
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0c751dad
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0c751dad

Branch: refs/heads/branch-2
Commit: 0c751dadf83df6f4b0f1815d78c26e13a708d659
Parents: e51ced4
Author: Nihal Jain 
Authored: Thu Apr 12 12:38:45 2018 +0530
Committer: Josh Elser 
Committed: Thu Apr 12 14:47:32 2018 -0400

--
 conf/hbase-env.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0c751dad/conf/hbase-env.sh
--
diff --git a/conf/hbase-env.sh b/conf/hbase-env.sh
index d9879c6..1ac93cc 100644
--- a/conf/hbase-env.sh
+++ b/conf/hbase-env.sh
@@ -41,7 +41,7 @@
 # Below are what we set by default.  May only work with SUN JVM.
 # For more on why as well as other possible settings,
 # see http://hbase.apache.org/book.html#performance
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC"
 
 # Uncomment one of the below three options to enable java garbage collection logging for the server-side processes.
 



[2/2] hbase git commit: HBASE-20335 only comment on the troubleshooting jira.

2018-04-12 Thread busbey
HBASE-20335 only comment on the troubleshooting jira.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0aab6035
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0aab6035
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0aab6035

Branch: refs/heads/HBASE-20335
Commit: 0aab60352a973387038810fbcd032906bdca589e
Parents: f3ec230
Author: Sean Busbey 
Authored: Wed Apr 11 10:39:51 2018 -0500
Committer: Sean Busbey 
Committed: Thu Apr 12 12:19:41 2018 -0500

--
 dev-support/Jenkinsfile | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0aab6035/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 3f3066b..4f34eab 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -498,9 +498,10 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
echo comment
echo ""
echo "[INFO] There are ${currentBuild.changeSets.size()} change 
sets."
-   getJirasToComment(currentBuild).each { currentIssue ->
- jiraComment issueKey: currentIssue, body: comment
-   }
+   //getJirasToComment(currentBuild).each { currentIssue ->
+   //  jiraComment issueKey: currentIssue, body: comment
+   //}
+   jiraComment issueKey: 'HBASE-20335', body: comment
 } catch (Exception exception) {
   echo "Got exception: ${exception}"
   echo "${exception.getStackTrace()}"



[1/2] hbase git commit: HBASE-20335 ensure each stage of the nightly job gathers machine information.

2018-04-12 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-20335 [created] 215809392
  refs/heads/HBASE-20355 [deleted] 4d9250684


HBASE-20335 ensure each stage of the nightly job gathers machine information.

* fix archiving for src tarball stage's machine info
* stop nightly wrapper destroying the output dir.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/21580939
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/21580939
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/21580939

Branch: refs/heads/HBASE-20335
Commit: 215809392eef9e0310b8ceaef9de49ddd7718492
Parents: 0aab603
Author: Sean Busbey 
Authored: Wed Apr 11 10:38:12 2018 -0500
Committer: Sean Busbey 
Committed: Thu Apr 12 12:19:41 2018 -0500

--
 dev-support/Jenkinsfile| 11 +++
 dev-support/hbase_nightly_yetus.sh |  7 +--
 2 files changed, 16 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/21580939/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 4f34eab..f05523a 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -150,6 +150,8 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
   rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
   "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
+  echo "got the following saved stats in 
'${OUTPUT_DIR_RELATIVE}/machine'"
+  ls -lh "${OUTPUT_DIR_RELATIVE}/machine"
 '''
 // TODO roll this into the hbase_nightly_yetus script
 sh '''#!/usr/bin/env bash
@@ -210,6 +212,8 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
   rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
   "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
+  echo "got the following saved stats in 
'${OUTPUT_DIR_RELATIVE}/machine'"
+  ls -lh "${OUTPUT_DIR_RELATIVE}/machine"
 '''
 sh '''#!/usr/bin/env bash
   set -e
@@ -283,6 +287,8 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
   rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
   "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
+  echo "got the following saved stats in 
'${OUTPUT_DIR_RELATIVE}/machine'"
+  ls -lh "${OUTPUT_DIR_RELATIVE}/machine"
 '''
 sh '''#!/usr/bin/env bash
   set -e
@@ -363,6 +369,8 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
   rm -rf "${OUTPUT_DIR}/machine" && mkdir "${OUTPUT_DIR}/machine"
   "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"${OUTPUT_DIR_RELATIVE}/machine"
+  echo "got the following saved stats in 
'${OUTPUT_DIR_RELATIVE}/machine'"
+  ls -lh "${OUTPUT_DIR_RELATIVE}/machine"
 '''
 sh '''#!/usr/bin/env bash
   set -e
@@ -438,6 +446,8 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
   set -e
   rm -rf "output-srctarball/machine" && mkdir 
"output-srctarball/machine"
   "${BASEDIR}/dev-support/gather_machine_environment.sh" 
"output-srctarball/machine"
+  echo "got the following saved stats in 
'output-srctarball/machine'"
+  ls -lh "output-srctarball/machine"
 '''
 sh """#!/bin/bash -e
   if "${env.BASEDIR}/dev-support/hbase_nightly_source-artifact.sh" 
\
@@ -457,6 +467,7 @@ curl -L  -o personality.sh "${env.PROJECT_PERSONALITY}"
 always {
   stash name: 'srctarball-result', includes: "output-srctarball/commentfile"
   archive 'output-srctarball/*'
+  archive 'output-srctarball/**/*'
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/21580939/dev-support/hbase_nightly_yetus.sh
--
diff --git a/dev-support/hbase_nightly_yetus.sh b/dev-support/hbase_nightly_yetus.sh
index 4e0200d..bba5f4d 100755
--- a/dev-support/hbase_nightly_yetus.sh
+++ b/dev-support/hbase_nightly_yetus.sh
@@ -91,8 +91,11 @@ if [[ true == "${DEBUG}" ]]; then
   YETUS_ARGS=("--debug" "${YETUS_ARGS[@]}")
 fi
 
-rm -rf "${OUTPUT_DIR}"
-mkdir -p "${OUTPUT_DIR}"
+if [[ 

hbase-site git commit: Add some contact-info for the registration

2018-04-12 Thread elserj
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site f5cbe4dd1 -> 93f2e3cc2


Add some contact-info for the registration


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/93f2e3cc
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/93f2e3cc
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/93f2e3cc

Branch: refs/heads/asf-site
Commit: 93f2e3cc263a91f457c327c892ae55aa5200ef8d
Parents: f5cbe4d
Author: Josh Elser 
Authored: Thu Apr 12 13:05:42 2018 -0400
Committer: Josh Elser 
Committed: Thu Apr 12 13:05:42 2018 -0400

--
 hbasecon-2018/feed.xml   |  2 +-
 hbasecon-2018/index.html | 15 +--
 2 files changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/93f2e3cc/hbasecon-2018/feed.xml
--
diff --git a/hbasecon-2018/feed.xml b/hbasecon-2018/feed.xml
index 605fd42..90c240c 100644
--- a/hbasecon-2018/feed.xml
+++ b/hbasecon-2018/feed.xml
@@ -4,7 +4,7 @@
   https://jekyllrb.com/; version="3.7.3">Jekyll
   https://hbase.apache.org/hbasecon-2018//hbasecon-2018/feed.xml; rel="self" type="application/atom+xml" />
   https://hbase.apache.org/hbasecon-2018//hbasecon-2018/; rel="alternate" type="text/html" />
-  2018-04-10T18:02:48-04:00
+  2018-04-12T13:04:46-04:00
   https://hbase.apache.org/hbasecon-2018//hbasecon-2018/
 
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/93f2e3cc/hbasecon-2018/index.html
--
diff --git a/hbasecon-2018/index.html b/hbasecon-2018/index.html
index 834011f..289ad28 100644
--- a/hbasecon-2018/index.html
+++ b/hbasecon-2018/index.html
@@ -161,8 +161,19 @@



- Registration for HBaseCon 2018 is now open! Please use the following page to register: https://hbasecon2018.hortonworksevents.com/;>Register Here
- Registration for HBaseCon includes registration to PhoenixCon which is running at concurrently at the same venue.
+ Registration for HBaseCon 2018 is now open! Please use the following page to register: https://hbasecon2018.hortonworksevents.com/;>Register Here.
+   Registration for HBaseCon includes registration to PhoenixCon which is running at concurrently at the same venue.
+   
+   
+   
+   
+ For any issues around registration, please use the following contact information, Monday-Friday, 8:00am - 5:00pm PST:
+   
+ Telephone: (800) 380-3544 toll free
+ Telephone: (415) 446-7709 international
+ Email: mailto:hbasecon2...@hortonworksevents.com;>hbasecon2...@hortonworksevents.com
+   
+ 






hbase git commit: HBASE-20394 HBase over rides the value of HBASE_OPTS (if any) set by client

2018-04-12 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/master 2912c9535 -> f3ec23093


HBASE-20394 HBase over rides the value of HBASE_OPTS (if any) set by client

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f3ec2309
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f3ec2309
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f3ec2309

Branch: refs/heads/master
Commit: f3ec2309365bc9be32e7200a52485a73e24b4059
Parents: 2912c95
Author: Nihal Jain 
Authored: Thu Apr 12 12:38:45 2018 +0530
Committer: Josh Elser 
Committed: Thu Apr 12 11:43:56 2018 -0400

--
 conf/hbase-env.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f3ec2309/conf/hbase-env.sh
--
diff --git a/conf/hbase-env.sh b/conf/hbase-env.sh
index d9879c6..1ac93cc 100644
--- a/conf/hbase-env.sh
+++ b/conf/hbase-env.sh
@@ -41,7 +41,7 @@
 # Below are what we set by default.  May only work with SUN JVM.
 # For more on why as well as other possible settings,
 # see http://hbase.apache.org/book.html#performance
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
+export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC"
 
 # Uncomment one of the below three options to enable java garbage collection 
logging for the server-side processes.
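The fix is additive: hbase-env.sh now appends its defaults to whatever the client already exported. A minimal sketch of the resulting behaviour (hypothetical operator session; the -Xmx4g value is illustrative, not from the commit):

    export HBASE_OPTS="-Xmx4g"   # options supplied by the client before startup
    . conf/hbase-env.sh          # the fixed line appends instead of clobbering
    echo "$HBASE_OPTS"           # -Xmx4g -XX:+UseConcMarkSweepGC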
 



[30/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
index e63cd50..d8c0d2b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
@@ -422,7 +422,7 @@
   }

   /**
-   * {@link #listTables(boolean)}
+   * {@link #listTableDescriptors(boolean)}
    */
   @Override
   public CompletableFuture<List<TableDescriptor>> listTableDescriptors(Pattern pattern,
@@ -3476,16 +3476,79 @@
     return future;
   }

-  private CompletableFuture<CacheEvictionStats> clearBlockCache(ServerName serverName,
-      List<RegionInfo> hris) {
-    return this.<CacheEvictionStats> newAdminCaller().action((controller, stub) -> this
-      .<ClearRegionBlockCacheRequest, ClearRegionBlockCacheResponse, CacheEvictionStats> adminCall(
-        controller, stub, RequestConverter.buildClearRegionBlockCacheRequest(hris),
-        (s, c, req, done) -> s.clearRegionBlockCache(controller, req, done),
-        resp -> ProtobufUtil.toCacheEvictionStats(resp.getStats())))
-      .serverName(serverName).call();
-  }
-}
+  @Override
+  public CompletableFuture<Void> cloneTableSchema(TableName tableName, TableName newTableName,
+      boolean preserveSplits) {
+    CompletableFuture<Void> future = new CompletableFuture<>();
+    tableExists(tableName).whenComplete(
+      (exist, err) -> {
+        if (err != null) {
+          future.completeExceptionally(err);
+          return;
+        }
+        if (!exist) {
+          future.completeExceptionally(new TableNotFoundException(tableName));
+          return;
+        }
+        tableExists(newTableName).whenComplete(
+          (exist1, err1) -> {
+            if (err1 != null) {
+              future.completeExceptionally(err1);
+              return;
+            }
+            if (exist1) {
+              future.completeExceptionally(new TableExistsException(newTableName));
+              return;
+            }
+            getDescriptor(tableName).whenComplete(
+              (tableDesc, err2) -> {
+                if (err2 != null) {
+                  future.completeExceptionally(err2);
+                  return;
+                }
+                TableDescriptor newTableDesc
+                    = TableDescriptorBuilder.copy(newTableName, tableDesc);
+                if (preserveSplits) {
+                  getTableSplits(tableName).whenComplete((splits, err3) -> {
+                    if (err3 != null) {
+                      future.completeExceptionally(err3);
+                    } else {
+                      createTable(newTableDesc, splits).whenComplete(
+                        (result, err4) -> {
+                          if (err4 != null) {
+                            future.completeExceptionally(err4);
+                          } else {
+                            future.complete(result);
+                          }
+                        });
+                    }
+                  });
+                } else {
+                  createTable(newTableDesc).whenComplete(
+                    (result, err5) -> {
+                      if (err5 != null) {
+                        future.completeExceptionally(err5);
+                      } else {
+                        future.complete(result);
+                      }
+                    });
+                }
+              });
+          });
+      });
+    return future;
+  }
+
+  private CompletableFuture<CacheEvictionStats> clearBlockCache(ServerName serverName,
+      List<RegionInfo> hris) {
+    return this.<CacheEvictionStats> newAdminCaller().action((controller, stub) -> this
+      .<ClearRegionBlockCacheRequest, ClearRegionBlockCacheResponse, CacheEvictionStats> adminCall(
+        controller, stub, RequestConverter.buildClearRegionBlockCacheRequest(hris),
+        (s, c, req, done) -> s.clearRegionBlockCache(controller, req, done),
+        resp -> ProtobufUtil.toCacheEvictionStats(resp.getStats())))
+      .serverName(serverName).call();
+  }
+}
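Since the published page above only shows the raw implementation, here is a minimal, hypothetical usage sketch of the new asynchronous method (connection setup and table names are illustrative, not part of the commit):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CloneSchemaAsyncExample {
      public static void main(String[] args) throws Exception {
        // Assumes an HBase client configuration on the classpath.
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection().get()) {
          AsyncAdmin admin = conn.getAdmin();
          // Creates the empty table "copy" with the same descriptor (and, since
          // preserveSplits is true, the same region boundaries) as "source".
          admin.cloneTableSchema(TableName.valueOf("source"),
              TableName.valueOf("copy"), true).get();
        }
      }
    }

Note how the implementation funnels every failure (missing source table, pre-existing target, descriptor or split lookup errors) into the single returned CompletableFuture instead of throwing.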
 
 
 


[50/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index e532323..7563962 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,16 +5,16 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.15, based on Prawn 2.2.2)
 /Producer (Apache HBase Team)
-/ModDate (D:20180411144616+00'00')
-/CreationDate (D:20180411144616+00'00')
+/ModDate (D:20180412144701+00'00')
+/CreationDate (D:20180412144701+00'00')
 >>
 endobj
 2 0 obj
 << /Type /Catalog
 /Pages 3 0 R
 /Names 26 0 R
-/Outlines 4595 0 R
-/PageLabels 4821 0 R
+/Outlines 4610 0 R
+/PageLabels 4836 0 R
 /PageMode /UseOutlines
 /OpenAction [7 0 R /FitH 842.89]
 /ViewerPreferences << /DisplayDocTitle true
@@ -23,8 +23,8 @@ endobj
 endobj
 3 0 obj
 << /Type /Pages
[PDF page-tree object diff elided: the /Count (718 pages before) and the long /Kids reference arrays change only because the document was re-laid-out; the archive truncates the object mid-array]

[38/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index 4e06fa8..37f4bd5 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -18,7 +18,7 @@
[generated javadoc script diff elided: the method-index array grows by one entry (i0..i174 becomes i0..i175) to make room for the added method]
@@ -459,6 +459,14 @@
+CompletableFuture<Void>
+cloneTableSchema(TableName tableName,
+                 TableName newTableName,
+                 boolean preserveSplits)
+Create a new table by cloning the existent table schema.
+
 private CompletableFuture<Void>
 compact(ServerName sn,
         RegionInfo hri,
@@ -467,7 +475,7 @@
 Compact the region at specific

[42/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Admin.html 
b/devapidocs/org/apache/hadoop/hbase/client/Admin.html
index f428b7c..cd7e1f4 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Admin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Admin.html
@@ -18,7 +18,7 @@
[generated javadoc script diff elided: the method-index array grows by one entry (i0..i228 becomes i0..i229) for the added method, and several abstract/deprecated flags shift accordingly]
@@ -331,10 +331,18 @@
 void
-close()
+cloneTableSchema(TableName tableName,
+                 TableName newTableName,
+                 boolean preserveSplits)
+Create a new table by
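For comparison, a hypothetical sketch of the blocking Admin variant documented above (table names illustrative, not part of the commit):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CloneSchemaSyncExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Blocks until "copy" exists with the same schema as "source";
          // passing false instead would drop the source table's split points.
          admin.cloneTableSchema(TableName.valueOf("source"),
              TableName.valueOf("copy"), true);
        }
      }
    }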

[29/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
index e63cd50..d8c0d2b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MergeTableRegionProcedureBiConsumer.html
[identical source diff to the one shown under [30/51] above: the {@link #listTables(boolean)} javadoc reference becomes {@link #listTableDescriptors(boolean)}, and the new cloneTableSchema implementation is inserted ahead of clearBlockCache]
 
 
 


[34/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
index a461eb2..621297c 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
@@ -117,7 +117,7 @@
 public static class HBaseFsck.HbckInfo
 extends Object
 implements KeyRange
 Maintain information about a particular region. It gathers information

[remainder of this generated-javadoc diff elided: every member on the page (metaEntry, hdfsEntry, deployedEntries, deployedOn, skipChecks, isMerged, deployedReplicaId, primaryHRIForDeployedReplica, the HbckInfo constructor, getReplicaId, addServer, toString, getStartKey, getEndKey, getTableName, getRegionNameAsString, getRegionName, getPrimaryHRIForDeployedReplica, getHdfsRegionDir, containsOnlyHdfsEdits, isHdfsRegioninfoPresent, getModTime) is unchanged apart from the embedded source line-number anchors]

[39/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
index 4521542..d57ab81 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html
@@ -18,7 +18,7 @@
[generated javadoc script diff elided: HBaseAdmin's method-index array is renumbered to accommodate one added method entry; the archive truncates the replacement array mid-line]
 

[51/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/d220bc5e
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/d220bc5e
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/d220bc5e

Branch: refs/heads/asf-site
Commit: d220bc5e774bf03a5f59b9efba2c1f73a53753f7
Parents: f50447c
Author: jenkins 
Authored: Thu Apr 12 14:48:53 2018 +
Committer: jenkins 
Committed: Thu Apr 12 14:48:53 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 37773 +
 apidocs/index-all.html  | 8 +
 .../hadoop/hbase/class-use/TableName.html   |16 +
 .../org/apache/hadoop/hbase/client/Admin.html   |   437 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |   288 +-
 .../hadoop/hbase/mapreduce/CellCounter.html | 2 +-
 .../hadoop/hbase/mapreduce/WALPlayer.html   | 4 +-
 .../org/apache/hadoop/hbase/client/Admin.html   |13 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |12 +-
 .../hadoop/hbase/mapreduce/CellCounter.html |81 +-
 .../hadoop/hbase/mapreduce/RowCounter.html  | 6 +-
 .../hadoop/hbase/mapreduce/WALPlayer.html   |73 +-
 book.html   |   165 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 31892 +++---
 checkstyle.rss  |16 +-
 coc.html| 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html | 6 +-
 devapidocs/index-all.html   |22 +-
 .../hadoop/hbase/RSGroupTableAccessor.html  |28 +-
 .../hadoop/hbase/backup/package-tree.html   | 2 +-
 .../hadoop/hbase/class-use/TableName.html   |   600 +-
 .../org/apache/hadoop/hbase/client/Admin.html   |   437 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |   288 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.html|   265 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.html  |   448 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.html |   347 +-
 .../hbase/client/class-use/Connection.html  |42 +-
 .../hbase/client/class-use/TableDescriptor.html | 2 +-
 .../hadoop/hbase/client/package-tree.html   |20 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 8 +-
 .../hadoop/hbase/mapreduce/CellCounter.html |17 +-
 .../hadoop/hbase/mapreduce/WALPlayer.html   | 4 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../master/assignment/RegionStateStore.html |34 +-
 .../hadoop/hbase/master/package-tree.html   | 4 +-
 .../hbase/master/procedure/package-tree.html| 4 +-
 .../hadoop/hbase/monitoring/package-tree.html   | 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |16 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 4 +-
 .../store/wal/WALProcedureStore.PushType.html   | 8 +-
 .../procedure2/store/wal/WALProcedureStore.html |92 +-
 .../hadoop/hbase/quotas/package-tree.html   | 6 +-
 .../regionserver/ReversedRegionScannerImpl.html | 4 +-
 .../hadoop/hbase/regionserver/package-tree.html |20 +-
 .../regionserver/querymatcher/package-tree.html | 4 +-
 .../hbase/regionserver/wal/package-tree.html| 2 +-
 .../replication/regionserver/package-tree.html  | 2 +-
 .../hadoop/hbase/rest/model/package-tree.html   | 2 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../hadoop/hbase/security/package-tree.html | 4 +-
 .../hadoop/hbase/thrift/ThriftServer.html   |46 +-
 .../hadoop/hbase/thrift2/ThriftServer.html  |83 +-
 .../HBaseFsck.ErrorReporter.ERROR_CODE.html |76 +-
 .../hbase/util/HBaseFsck.ErrorReporter.html |30 +-
 .../hbase/util/HBaseFsck.HBaseFsckTool.html | 6 +-
 .../hadoop/hbase/util/HBaseFsck.HbckInfo.html   |56 +-
 .../hadoop/hbase/util/HBaseFsck.HdfsEntry.html  |14 +-
 .../hadoop/hbase/util/HBaseFsck.MetaEntry.html  |18 +-
 .../hbase/util/HBaseFsck.OnlineEntry.html   |10 +-
 .../util/HBaseFsck.PrintingErrorReporter.html   |42 +-
 .../HBaseFsck.TableInfo.HDFSIntegrityFixer.html |22 +-
 ...aseFsck.TableInfo.IntegrityFixSuggester.html |20 +-
 .../hadoop/hbase/util/HBaseFsck.TableInfo.html  |38 +-
 .../hbase/util/HBaseFsck.WorkItemHdfsDir.html   

[35/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftServer.html 
b/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftServer.html
index 6efebf0..a91ac62 100644
--- a/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftServer.html
@@ -18,7 +18,7 @@
[generated javadoc script diff elided: the method-index array shrinks by one entry (i0..i20 becomes i0..i19), matching the method removed below]
@@ -119,7 +119,7 @@
 @InterfaceAudience.LimitedPrivate(value="Tools")
 public class ThriftServer
 extends org.apache.hadoop.conf.Configured
 implements org.apache.hadoop.util.Tool
 ThriftServer - this class starts up a Thrift server which implements the HBase API specified in
@@ -213,41 +213,37 @@
-private boolean
-checkArguments(org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine cmd)

[remaining method-summary rows elided: createExecutor, getBindAddress, getImplType, getListenPort, getOptions, getReadTimeout, getServer, getTHsHaServer, getTNonBlockingServer, getTProtocolFactory, getTThreadedSelectorServer and getTThreadPoolServer keep their signatures; only their row anchors shift up after the removal of checkArguments, and the archive truncates the table mid-row]
[45/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 7001ee7..215cadc 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -284,7 +284,7 @@
 3601
 0
 0
-15876
+15869
 
 Files
 
@@ -4157,7 +4157,7 @@
 org/apache/hadoop/hbase/mapreduce/CellCounter.java
 0
 0
-9
+6
 
 org/apache/hadoop/hbase/mapreduce/CellCreator.java
 0
@@ -4627,7 +4627,7 @@
 org/apache/hadoop/hbase/mapreduce/WALPlayer.java
 0
 0
-1
+2
 
 org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
 0
@@ -5007,7 +5007,7 @@
 org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
 0
 0
-6
+5
 
 org/apache/hadoop/hbase/master/assignment/RegionStates.java
 0
@@ -5982,7 +5982,7 @@
 org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 0
 0
-10
+9
 
 org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java
 0
@@ -6832,7 +6832,7 @@
 org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java
 0
 0
-6
+4
 
 org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java
 0
@@ -7182,7 +7182,7 @@
 org/apache/hadoop/hbase/regionserver/TestJoinedScanners.java
 0
 0
-3
+1
 
 org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java
 0
@@ -9159,1041 +9159,1046 @@
 0
 3
 
+org/apache/hadoop/hbase/thrift2/ThriftServer.java
+0
+0
+1
+
[remaining checkstyle rows elided: the per-file entries from tool/Canary.java (44) through util/DynamicClassLoader.java (26) are unchanged; only their row markup shifts after the inserted thrift2/ThriftServer.java entry, and the archive truncates the table]
[33/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index b274a3c..60a8d07 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -113,7 +113,7 @@
 public class HBaseFsck.TableInfo
 extends Object
 Maintain information about a particular table.

[remainder of this generated-javadoc diff elided: the page's fields and methods (tableName, deployedOn, backwards, sidelinedRegions, sc, htds, overlapGroups, regionsFromMeta, the TableInfo constructor, getHTD, addRegionInfo, addServer, getName, getNumRegions, getRegionsFromMeta) are unchanged apart from the embedded source line-number anchors]

[21/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
index 3cafa7c..05ee7d2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
@@ -232,8 +232,8 @@
       this.current.close();
     }
     if (this.heap != null) {
-      KeyValueScanner scanner;
-      while ((scanner = this.heap.poll()) != null) {
+      // Order of closing the scanners shouldn't matter here, so simply iterate and close them.
+      for (KeyValueScanner scanner : heap) {
         scanner.close();
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
index 3cafa7c..05ee7d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
[identical hunk to the KVScannerComparator page above: poll()-based draining replaced by a simple for-each over the heap]
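The new comment is the whole story; as a standalone illustration, here is a minimal sketch of the same cleanup pattern on a plain java.util.PriorityQueue (class and method names hypothetical):

    import java.util.PriorityQueue;

    class HeapCloseSketch {
      // When close order is irrelevant, plain iteration is simpler (and cheaper)
      // than draining with poll(), which re-heapifies after every removal.
      static void closeAll(PriorityQueue<? extends AutoCloseable> heap) throws Exception {
        for (AutoCloseable scanner : heap) { // iteration order is unspecified
          scanner.close();
        }
        heap.clear();
      }
    }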

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.html
index 47c27f1..d7d35b7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.html
@@ -30,13 +30,13 @@
 022import java.util.List;
 023
 024import org.apache.hadoop.hbase.Cell;
-025import org.apache.hadoop.hbase.CellUtil;
+025import org.apache.hadoop.hbase.DoNotRetryIOException;
 026import org.apache.hadoop.hbase.HConstants;
 027import org.apache.hadoop.hbase.PrivateCellUtil;
-028import org.apache.yetus.audience.InterfaceAudience;
-029import org.apache.hadoop.hbase.client.Scan;
-030import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-031import org.apache.hadoop.hbase.util.Bytes;
+028import org.apache.hadoop.hbase.client.Scan;
+029import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
+030import org.apache.hadoop.hbase.util.Bytes;
+031import org.apache.yetus.audience.InterfaceAudience;
 032
 033/**
 034 * ReversibleRegionScannerImpl extends from RegionScannerImpl, and is used to
@@ -61,37 +61,36 @@
 053      List<KeyValueScanner> joinedScanners, HRegion region) throws IOException {
 054    this.storeHeap = new ReversedKeyValueHeap(scanners, comparator);
 055    if (!joinedScanners.isEmpty()) {
-056      this.joinedHeap = new ReversedKeyValueHeap(joinedScanners,
-057          comparator);
-058    }
-059  }
-060
-061  @Override
-062  protected boolean shouldStop(Cell currentRowCell) {
-063    if (currentRowCell == null) {
-064      return true;
-065    }
-066    if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_START_ROW)) {
-067      return false;
-068    }
-069    int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length);
-070    return c < 0 || (c == 0 && !includeStopRow);
-071  }
-072
-073  @Override
-074  protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell)
-075      throws IOException {
-076    assert super.joinedContinuationRow == null : "Trying to go to next row during joinedHeap read.";
-077    this.storeHeap.seekToPreviousRow(PrivateCellUtil.createFirstOnRow(curRowCell));
-078    resetFilters();
-079    // Calling the hook in CP which allows it to do a fast forward
-080    if (this.region.getCoprocessorHost() != null) {
-081      return this.region.getCoprocessorHost().postScannerFilterRow(this, curRowCell);
-082    }
-083    return true;
-084  }
-085
-086}
+056      throw new DoNotRetryIOException("Reverse scan with loading CFs on demand is not supported");
+057
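
A self-contained sketch of the guard this hunk introduces (the exception type is stubbed locally; in HBase it is org.apache.hadoop.hbase.DoNotRetryIOException, as the new import above shows):

import java.io.IOException;
import java.util.List;

class ReversedScanGuardSketch {
  // Local stand-in for org.apache.hadoop.hbase.DoNotRetryIOException.
  static class DoNotRetryIOException extends IOException {
    DoNotRetryIOException(String msg) { super(msg); }
  }

  // Mirrors the constructor change above: instead of building a joinedHeap for
  // on-demand column family loading, a reverse scan now fails fast.
  static void checkJoinedScanners(List<?> joinedScanners) throws IOException {
    if (!joinedScanners.isEmpty()) {
      throw new DoNotRetryIOException("Reverse scan with loading CFs on demand is not supported");
    }
  }
}

Failing with a DoNotRetryIOException tells the client the combination is unsupported rather than transiently broken, so it will not retry.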

[16/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -2113,3031 +2113,3033 @@
 2105            errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106                tableName + " unable to delete dangling table state " + tableState);
 2107          }
-2108        } else {
-2109          errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110              tableName + " has dangling table state " + tableState);
-2111        }
-2112      }
-2113    }
-2114    // check that all tables have states
-2115    for (TableName tableName : tablesInfo.keySet()) {
-2116      if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-2117        if (fixMeta) {
-2118          MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED);
-2119          TableState newState = MetaTableAccessor.getTableState(connection, tableName);
-2120          if (newState == null) {
-2121            errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122                "Unable to change state for table " + tableName + " in meta ");
-2123          }
-2124        } else {
-2125          errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126              tableName + " has no state in meta ");
-2127        }
-2128      }
-2129    }
-2130  }
-2131
-2132  private void preCheckPermission() throws IOException, AccessDeniedException {
-2133    if (shouldIgnorePreCheckPermission()) {
-2134      return;
-2135    }
-2136
-2137    Path hbaseDir = FSUtils.getRootDir(getConf());
-2138    FileSystem fs = hbaseDir.getFileSystem(getConf());
-2139    UserProvider userProvider = UserProvider.instantiate(getConf());
-2140    UserGroupInformation ugi = userProvider.getCurrent().getUGI();
-2141    FileStatus[] files = fs.listStatus(hbaseDir);
-2142    for (FileStatus file : files) {
-2143      try {
-2144        FSUtils.checkAccess(ugi, file, FsAction.WRITE);
-2145      } catch (AccessDeniedException ace) {
-2146        LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
-2147        errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
-2148          + " does not have write perms to " + file.getPath()
-2149          + ". Please rerun hbck as hdfs user " + file.getOwner());
-2150        throw ace;
-2151      }
-2152    }
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo hi) throws IOException {
-2159    deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] metaKey) throws IOException {
-2166    Delete d = new Delete(metaKey);
-2167    meta.delete(d);
-2168    LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo hi) throws IOException {
-2175    RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
-2176    Delete d = new Delete(hi.metaEntry.getRegionName());
-2177    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179    mutations.add(d);
-2180
-2181    RegionInfo hri = RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182        .setOffline(false)
-2183        .setSplit(false)
-2184        .build();
-2185    Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
-2186    mutations.add(p);
-2187
-2188    meta.mutateRow(mutations);
-2189    LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This backwards-compatibility wrapper for permanently offlining a region
-2194   * that should not be alive.  If the region server does not support the
-2195   * "offline" method, it will use the closest unassign method instead.  This
-2196   * will basically work until one attempts to disable or delete the affected
-2197   * table.  The problem has to do with in-memory only master state, so
-2198   * restarting the HMaster or failing over to another should fix this.
-2199   */
-2200  private void offline(byte[] regionName) throws IOException {
-2201    String regionString = Bytes.toStringBinary(regionName);
-2202    if (!rsSupportsOffline) {
-2203
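
The hunk above shows (on its removed side) hbck's resetSplitParent pattern: one atomic RowMutations that drops the SPLITA/SPLITB daughter pointers and rewrites the region's info cell. A hedged sketch using only the public client API; the table, row key, and prepared Put are passed in, and building the Put from a RegionInfo is elided:

import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;

class ResetSplitParentSketch {
  static void resetSplitParent(Table meta, byte[] regionName, Put regionInfoPut)
      throws IOException {
    RowMutations mutations = new RowMutations(regionName);
    Delete d = new Delete(regionName);
    // Drop the pointers to the split daughters.
    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
    mutations.add(d);
    // Rewrite the region info cell (split=false, offline=false in the original).
    mutations.add(regionInfoPut);
    // Both mutations are applied atomically on the single meta row.
    meta.mutateRow(mutations);
  }
}

Using one RowMutations rather than separate delete and put calls matters here: a crash between the two operations would leave the meta row half-repaired.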

[15/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html

[19/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html

[17/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html

[04/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html

[37/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
index 02d7b34..14bfa72 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
@@ -564,6 +564,10 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
+static boolean
+RSGroupTableAccessor.isRSGroupsEnabled(Connection connection)
+
 static void
 MetaTableAccessor.mergeRegions(Connection connection,
                               RegionInfo mergedRegion,
@@ -576,7 +580,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 Merge the two regions into one in an atomic operation.
 
 private static void
 MetaTableAccessor.multiMutate(Connection connection,
                              Table table,
@@ -585,21 +589,21 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 Performs an atomic multi-mutate operation against the given table.
 
 private static void
 MetaTableAccessor.multiMutate(Connection connection,
                              Table table,
                              byte[] row,
                              Mutation... mutations)
 
 static void
 MetaTableAccessor.mutateMetaTable(Connection connection,
                                  List<Mutation> mutations)
 Execute the passed mutations against hbase:meta table.
 
 static void
 MetaTableAccessor.overwriteRegions(Connection connection,
                                   List<RegionInfo> regionInfos,
@@ -607,21 +611,21 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 Overwrites the specified regions from hbase:meta.
 
 static void
 MetaTableAccessor.putsToMetaTable(Connection connection,
                                  List<Put> ps)
 Put the passed ps to the hbase:meta table.
 
 private static void
 MetaTableAccessor.putToMetaTable(Connection connection,
                                 Put p)
 Put the passed p to the hbase:meta table.
 
 static void
 MetaTableAccessor.removeRegionReplicasFromMeta(Set<byte[]> metaRows,
                                               int replicaIndexToDeleteFrom,
@@ -630,7 +634,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 Deletes some replica columns corresponding to replicas for the passed rows
 
 private static void
 MetaTableAccessor.scanMeta(Connection connection,
                           byte[] startRow,
@@ -640,7 +644,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
                           int maxRows,
                           MetaTableAccessor.Visitor visitor)
 
 static void
 MetaTableAccessor.scanMeta(Connection connection,
                           byte[] startRow,
@@ -651,7 +655,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 Performs a scan of META table.
 
 static void
 MetaTableAccessor.scanMeta(Connection connection,
                           byte[] startRow,
                           MetaTableAccessor.QueryType type,
                           MetaTableAccessor.Visitor visitor)
 
 static void
 MetaTableAccessor.scanMeta(Connection connection,
                           MetaTableAccessor.Visitor visitor,
@@ -670,7 +674,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
  given row.
 
 static void
 MetaTableAccessor.scanMeta(Connection connection,
                           TableName table,
@@ -678,13 +682,13 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
                           int maxRows,
                           MetaTableAccessor.Visitor visitor)
 
 static void
 MetaTableAccessor.scanMetaForTableRegions(Connection connection,
                                          MetaTableAccessor.Visitor visitor,
                                          TableName tableName)
 
 static void
 MetaTableAccessor.splitRegion(Connection connection,
                              RegionInfo parent,
@@ -696,14 +700,14 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 Splits the region into two in an atomic operation.
 
 static boolean
 MetaTableAccessor.tableExists(Connection connection,
                              TableName tableName)
 Checks if the specified table exists.
 
 private static void
 MetaTableAccessor.updateLocation(Connection connection,
                                 RegionInfo regionInfo,
@@ -713,7 +717,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
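
The one functional addition in this index is RSGroupTableAccessor.isRSGroupsEnabled(Connection). A hedged usage sketch; only the method name and parameter come from the diff, while the import path and the connection boilerplate are assumptions:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.rsgroup.RSGroupTableAccessor; // assumed package

public class RsGroupCheck {
  public static void main(String[] args) throws Exception {
    try (Connection connection =
        ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Presumably reports whether the rsgroup feature's metadata is present
      // on this cluster, so callers can skip rsgroup-specific work otherwise.
      System.out.println("rsgroups enabled: "
          + RSGroupTableAccessor.isRSGroupsEnabled(connection));
    }
  }
}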
 

[03/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html

[18/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html

[11/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html

[27/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.html
index 56ec3ac..8f4cacb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.html
@@ -229,9 +229,9 @@
 221   * Note that we don't document --expected-count, because it's intended for test.
 222   */
 223  private static void printUsage() {
-224    System.err.println("Usage: RowCounter [options] <tablename> " +
-225        "[--starttime=[start] --endtime=[end] " +
-226        "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [<column1> <column2>...]");
+224    System.err.println("Usage: hbase rowcounter [options] <tablename> "
+225        + "[--starttime=<start> --endtime=<end>] "
+226        + "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [<column1> <column2>...]");
 227    System.err.println("For performance consider the following options:\n"
 228        + "-Dhbase.client.scanner.caching=100\n"
 229        + "-Dmapreduce.map.speculative=false");

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALKeyValueMapper.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALKeyValueMapper.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALKeyValueMapper.html
index a5477ac..9501e97 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALKeyValueMapper.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALKeyValueMapper.html
@@ -357,46 +357,51 @@
 349      System.err.println("ERROR: " + errorMsg);
 350    }
 351    System.err.println("Usage: " + NAME + " [options] <wal inputdir> <tables> [<tableMappings>]");
-352    System.err.println("Read all WAL entries for <tables>.");
-353    System.err.println("If no tables (\"\") are specific, all tables are imported.");
-354    System.err.println("(Careful, even hbase:meta entries will be imported" + " in that case.)");
-355    System.err.println("Otherwise <tables> is a comma separated list of tables.\n");
-356    System.err.println("The WAL entries can be mapped to new set of tables via <tableMapping>.");
-357    System.err.println("<tableMapping> is a command separated list of targettables.");
+352    System.err.println("Replay all WAL files into HBase.");
+353    System.err.println("<tables> is a comma separated list of tables.");
+354    System.err.println("If no tables (\"\") are specified, all tables are imported.");
+355    System.err.println("(Be careful, hbase:meta entries will be imported in this case.)\n");
+356    System.err.println("WAL entries can be mapped to new set of tables via <tableMappings>.");
+357    System.err.println("<tableMappings> is a comma separated list of target tables.");
 358    System.err.println("If specified, each table in <tables> must have a mapping.\n");
 359    System.err.println("By default " + NAME + " will load data directly into HBase.");
-360    System.err.println("To generate HFiles for a bulk data load instead, pass the option:");
+360    System.err.println("To generate HFiles for a bulk data load instead, pass the following option:");
 361    System.err.println("  -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output");
 362    System.err.println("  (Only one table can be specified, and no mapping is allowed!)");
-363    System.err.println("Other options: (specify time range to WAL edit to consider)");
+363    System.err.println("Time range options:");
 364    System.err.println("  -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]");
 365    System.err.println("  -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]");
-366    System.err.println("   -D " + JOB_NAME_CONF_KEY
-367        + "=jobName - use the specified mapreduce job name for the wal player");
-368    System.err.println("For performance also consider the following options:\n"
-369        + "  -Dmapreduce.map.speculative=false\n" + "  -Dmapreduce.reduce.speculative=false");
-370  }
-371
-372  /**
-373   * Main entry point.
-374   * @param args The command line parameters.
-375   * @throws Exception When running the job fails.
-376   */
-377  public static void main(String[] args) throws Exception {
-378    int ret = ToolRunner.run(new WALPlayer(HBaseConfiguration.create()), args);
-379    System.exit(ret);
-380  }
-381
-382  @Override
-383  public int run(String[] args) throws Exception {
-384    if (args.length < 2) {
-385      usage("Wrong number of arguments: " + args.length);
-386      System.exit(-1);
-387    }
-388    Job job =
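
The context lines above include WALPlayer's entry point verbatim; made self-contained, it is the standard ToolRunner launch:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.WALPlayer;
import org.apache.hadoop.util.ToolRunner;

public class WALPlayerMain {
  public static void main(String[] args) throws Exception {
    // ToolRunner consumes generic -D options (e.g. the time range keys above)
    // before handing the remaining arguments to WALPlayer.run(args).
    int ret = ToolRunner.run(new WALPlayer(HBaseConfiguration.create()), args);
    System.exit(ret);
  }
}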

[09/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html

[41/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index f481b56..8618b6c 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":18,"i16":18,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":18,"i49":6,"i50":6,"i51":18,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":6,"i58":18,"i59":18,"i60":18,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":18,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":6,"i84":18,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":6,"i91":6,"i92":18,"i93":6,"i94":6,"i95":6,"i96":18,"i97":6,"i98":6,"i99":6,"i100":6,"i101":6,"i102":18,"i103":18,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119":6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":18,"i131":18,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":6,"i144":6};
+var methods = {"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":18,"i17":18,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":18,"i50":6,"i51":6,"i52":18,"i53":6,"i54":6,"i55":6,"i56":6,"i57":6,"i58":6,"i59":18,"i60":18,"i61":18,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":18,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":6,"i84":6,"i85":18,"i86":6,"i87":6,"i88":6,"i89":6,"i90":6,"i91":6,"i92":6,"i93":18,"i94":6,"i95":6,"i96":6,"i97":18,"i98":6,"i99":6,"i100":6,"i101":6,"i102":6,"i103":18,"i104":18,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119":6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":6,"i131":18,"i132":18,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":6,"i144":6,"i145":6};
var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -227,19 +227,27 @@ public interface AsyncAdmin
+CompletableFuture<Void>
+cloneTableSchema(TableName tableName,
+                 TableName newTableName,
+                 boolean preserveSplits)
+Create a new table by cloning the existent table schema.
+
 default CompletableFuture<Void>
 compact(TableName tableName)
 Compact a table.
 
 default CompletableFuture<Void>
 compact(TableName tableName,
         byte[] columnFamily)
 Compact a column family within a table.
 
 CompletableFuture<Void>
 compact(TableName tableName,
         byte[] columnFamily,
@@ -247,40 +255,40 @@ public interface AsyncAdmin
 Compact a column family within a table.
 
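
A hedged usage sketch for the new cloneTableSchema method; the signature comes from the diff, while the async connection boilerplate assumes the HBase 2.x client API:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CloneSchemaExample {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      AsyncAdmin admin = conn.getAdmin();
      // Copies the column families and table attributes of "source" into a new,
      // empty table "copy"; preserveSplits=true also copies the region boundaries.
      admin.cloneTableSchema(TableName.valueOf("source"),
          TableName.valueOf("copy"), true).join();
    }
  }
}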
 

[32/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
index 9573c5f..392306b 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
@@ -2068,7 +2068,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 
 
 cmp
-static final Comparator<HBaseFsck.HbckInfo> cmp
+static final Comparator<HBaseFsck.HbckInfo> cmp
 
 
 
@@ -2979,7 +2979,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 
 
 preCheckPermission
-private void preCheckPermission()
+private void preCheckPermission()
                          throws IOException,
                                 AccessDeniedException
 
@@ -2995,7 +2995,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 
 
 deleteMetaRegion
-private void deleteMetaRegion(HBaseFsck.HbckInfo hi)
+private void deleteMetaRegion(HBaseFsck.HbckInfo hi)
                       throws IOException
 Deletes region from meta table
 
@@ -3010,7 +3010,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 
 
 deleteMetaRegion
-private void deleteMetaRegion(byte[] metaKey)
+private void deleteMetaRegion(byte[] metaKey)
                       throws IOException
 Deletes region from meta table
 
@@ -3025,7 +3025,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 
 
 resetSplitParent
-private void resetSplitParent(HBaseFsck.HbckInfo hi)
+private void resetSplitParent(HBaseFsck.HbckInfo hi)
                       throws IOException
 Reset the split parent region info in meta table
 
@@ -3040,7 +3040,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 
 
 offline
-private void offline(byte[] regionName)
+private void offline(byte[] regionName)
              throws IOException
 This backwards-compatibility wrapper for permanently 
offlining a region
  that should not be alive.  If the region server does not support the
@@ -3060,7 +3060,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 
 
 undeployRegions
-private void undeployRegions(HBaseFsck.HbckInfo hi)
+private void undeployRegions(HBaseFsck.HbckInfo hi)
                      throws IOException,
                             InterruptedException
 
@@ -3076,7 +3076,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 
 
 undeployRegionsForHbi
-private void undeployRegionsForHbi(HBaseFsck.HbckInfo hi)
+private void undeployRegionsForHbi(HBaseFsck.HbckInfo hi)
                            throws IOException,
                                   InterruptedException
 
@@ -3092,7 +3092,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 
 
 closeRegion
-private void closeRegion(HBaseFsck.HbckInfo hi)
+private void closeRegion(HBaseFsck.HbckInfo hi)
                  throws IOException,
                         InterruptedException
 Attempts to undeploy a region from a region server based in 
information in
@@ -3118,7 +3118,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 
 
 

[23/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
index e6e43ee..a8b77ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
@@ -367,650 +367,650 @@
 359lock.lock();
 360try {
 361  LOG.trace("Starting WAL Procedure 
Store lease recovery");
-362  FileStatus[] oldLogs = 
getLogFiles();
-363  while (isRunning()) {
+362  while (isRunning()) {
+363FileStatus[] oldLogs = 
getLogFiles();
 364// Get Log-MaxID and recover 
lease on old logs
 365try {
 366  flushLogId = 
initOldLogs(oldLogs);
 367} catch (FileNotFoundException e) 
{
 368  LOG.warn("Someone else is 
active and deleted logs. retrying.", e);
-369  oldLogs = getLogFiles();
-370  continue;
-371}
-372
-373// Create new state-log
-374if (!rollWriter(flushLogId + 1)) 
{
-375  // someone else has already 
created this log
-376  LOG.debug("Someone else has 
already created log " + flushLogId);
-377  continue;
-378}
-379
-380// We have the lease on the log
-381oldLogs = getLogFiles();
-382if (getMaxLogId(oldLogs) > flushLogId) {
-383  if (LOG.isDebugEnabled()) {
-384LOG.debug("Someone else 
created new logs. Expected maxLogId  " + flushLogId);
-385  }
-386  
logs.getLast().removeFile(this.walArchiveDir);
-387  continue;
-388}
-389
-390LOG.trace("Lease acquired for 
flushLogId={}", flushLogId);
-391break;
-392  }
-393} finally {
-394  lock.unlock();
-395}
-396  }
-397
-398  @Override
-399  public void load(final ProcedureLoader 
loader) throws IOException {
-400lock.lock();
-401try {
-402  if (logs.isEmpty()) {
-403throw new 
RuntimeException("recoverLease() must be called before loading data");
-404  }
-405
-406  // Nothing to do, If we have only 
the current log.
-407  if (logs.size() == 1) {
-408LOG.trace("No state logs to 
replay.");
-409loader.setMaxProcId(0);
-410return;
-411  }
-412
-413  // Load the old logs
-414  final Iterator<ProcedureWALFile> it = logs.descendingIterator();
-415  it.next(); // Skip the current 
log
-416
-417  ProcedureWALFormat.load(it, 
storeTracker, new ProcedureWALFormat.Loader() {
-418@Override
-419public void setMaxProcId(long 
maxProcId) {
-420  
loader.setMaxProcId(maxProcId);
-421}
-422
-423@Override
-424public void 
load(ProcedureIterator procIter) throws IOException {
-425  loader.load(procIter);
-426}
-427
-428@Override
-429public void 
handleCorrupted(ProcedureIterator procIter) throws IOException {
-430  
loader.handleCorrupted(procIter);
-431}
-432
-433@Override
-434public void 
markCorruptedWAL(ProcedureWALFile log, IOException e) {
-435  if (corruptedLogs == null) {
-436corruptedLogs = new HashSet<>();
-437  }
-438  corruptedLogs.add(log);
-439  // TODO: sideline corrupted 
log
-440}
-441  });
-442} finally {
-443  try {
-444// try to cleanup inactive wals 
and complete the operation
-445buildHoldingCleanupTracker();
-446tryCleanupLogsOnLoad();
-447loading.set(false);
-448  } finally {
-449lock.unlock();
-450  }
-451}
-452  }
-453
-454  private void tryCleanupLogsOnLoad() {
-455// nothing to cleanup.
-456if (logs.size() <= 1) return;
-457
-458// the config says to not cleanup 
wals on load.
-459if 
(!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY,
-460  
DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY)) {
-461  LOG.debug("WALs cleanup on load is 
not enabled: " + getActiveLogs());
-462  return;
-463}
-464
-465try {
-466  periodicRoll();
-467} catch (IOException e) {
-468  LOG.warn("Unable to cleanup logs on 
load: " + e.getMessage(), e);
-469}
-470  }
-471
-472  @Override
-473  public void insert(final Procedure 
proc, final Procedure[] subprocs) {
-474if (LOG.isTraceEnabled()) {
-475  LOG.trace("Insert " + proc + ", 
subproc=" + Arrays.toString(subprocs));
-476}
-477
-478ByteSlot slot = acquireSlot();
-479try {
-480  // Serialize the insert
-481  long[] subProcIds = null;
-482  if (subprocs != null) {
-483

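The hunk above is the HBASE-20338 change itself: getLogFiles() moves inside the while loop, so each retry of lease recovery re-reads the WAL directory instead of reusing a listing taken once before the loop. A self-contained sketch of that retry shape with generic names; the fixed sleep is what the JIRA title calls for, not code from the listing:

    import java.util.concurrent.TimeUnit;
    import java.util.function.Predicate;
    import java.util.function.Supplier;

    public final class RetryWithRefresh {
      // Re-read shared state on every attempt (the point of the fix) and
      // back off with a fixed sleep between attempts.
      public static <S> S acquire(Supplier<S> readState, Predicate<S> tryAcquire,
          long sleepMillis, int maxAttempts) throws InterruptedException {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
          S state = readState.get();                // refreshed each iteration
          if (tryAcquire.test(state)) {
            return state;                           // acquired the lease
          }
          TimeUnit.MILLISECONDS.sleep(sleepMillis); // fixed sleep between retries
        }
        throw new IllegalStateException("not acquired after " + maxAttempts + " attempts");
      }
    }
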
[20/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
index 1c73421..783dc34 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
@@ -31,115 +31,115 @@
 023import java.net.InetSocketAddress;
 024import java.net.UnknownHostException;
 025import java.security.PrivilegedAction;
-026import java.util.List;
-027import java.util.Map;
-028import 
java.util.concurrent.ExecutorService;
-029import 
java.util.concurrent.LinkedBlockingQueue;
-030import 
java.util.concurrent.SynchronousQueue;
-031import 
java.util.concurrent.ThreadPoolExecutor;
-032import java.util.concurrent.TimeUnit;
-033
-034import 
javax.security.auth.callback.Callback;
-035import 
javax.security.auth.callback.UnsupportedCallbackException;
-036import 
javax.security.sasl.AuthorizeCallback;
-037import javax.security.sasl.SaslServer;
-038
-039import 
org.apache.hadoop.conf.Configuration;
-040import 
org.apache.hadoop.conf.Configured;
-041import 
org.apache.hadoop.hbase.HBaseConfiguration;
-042import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-043import 
org.apache.hadoop.hbase.filter.ParseFilter;
-044import 
org.apache.hadoop.hbase.http.InfoServer;
-045import 
org.apache.hadoop.hbase.security.SaslUtil;
-046import 
org.apache.hadoop.hbase.security.SecurityUtil;
-047import 
org.apache.hadoop.hbase.security.UserProvider;
-048import 
org.apache.hadoop.hbase.thrift.CallQueue;
-049import 
org.apache.hadoop.hbase.thrift.THBaseThreadPoolExecutor;
-050import 
org.apache.hadoop.hbase.thrift.ThriftMetrics;
-051import 
org.apache.hadoop.hbase.thrift2.generated.THBaseService;
-052import 
org.apache.hadoop.hbase.util.DNS;
-053import 
org.apache.hadoop.hbase.util.JvmPauseMonitor;
-054import 
org.apache.hadoop.hbase.util.Strings;
-055import 
org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
-056import 
org.apache.hadoop.security.UserGroupInformation;
-057import org.apache.hadoop.util.Tool;
-058import 
org.apache.hadoop.util.ToolRunner;
-059import org.apache.thrift.TException;
-060import org.apache.thrift.TProcessor;
-061import 
org.apache.thrift.protocol.TBinaryProtocol;
-062import 
org.apache.thrift.protocol.TCompactProtocol;
-063import 
org.apache.thrift.protocol.TProtocol;
-064import 
org.apache.thrift.protocol.TProtocolFactory;
-065import 
org.apache.thrift.server.THsHaServer;
-066import 
org.apache.thrift.server.TNonblockingServer;
-067import 
org.apache.thrift.server.TServer;
-068import 
org.apache.thrift.server.TThreadPoolServer;
-069import 
org.apache.thrift.server.TThreadedSelectorServer;
-070import 
org.apache.thrift.transport.TFramedTransport;
-071import 
org.apache.thrift.transport.TNonblockingServerSocket;
-072import 
org.apache.thrift.transport.TNonblockingServerTransport;
-073import 
org.apache.thrift.transport.TSaslServerTransport;
-074import 
org.apache.thrift.transport.TServerSocket;
-075import 
org.apache.thrift.transport.TServerTransport;
-076import 
org.apache.thrift.transport.TTransportException;
-077import 
org.apache.thrift.transport.TTransportFactory;
-078import 
org.apache.yetus.audience.InterfaceAudience;
-079import org.slf4j.Logger;
-080import org.slf4j.LoggerFactory;
-081import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-082import 
org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
-083import 
org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser;
+026import java.util.Map;
+027import 
java.util.concurrent.ExecutorService;
+028import 
java.util.concurrent.LinkedBlockingQueue;
+029import 
java.util.concurrent.SynchronousQueue;
+030import 
java.util.concurrent.ThreadPoolExecutor;
+031import java.util.concurrent.TimeUnit;
+032
+033import 
javax.security.auth.callback.Callback;
+034import 
javax.security.auth.callback.UnsupportedCallbackException;
+035import 
javax.security.sasl.AuthorizeCallback;
+036import javax.security.sasl.SaslServer;
+037
+038import 
org.apache.hadoop.conf.Configuration;
+039import 
org.apache.hadoop.conf.Configured;
+040import 
org.apache.hadoop.hbase.HBaseConfiguration;
+041import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
+042import 
org.apache.hadoop.hbase.filter.ParseFilter;
+043import 
org.apache.hadoop.hbase.http.InfoServer;
+044import 
org.apache.hadoop.hbase.security.SaslUtil;
+045import 
org.apache.hadoop.hbase.security.SecurityUtil;
+046import 
org.apache.hadoop.hbase.security.UserProvider;
+047import 
org.apache.hadoop.hbase.thrift.CallQueue;
+048import 
org.apache.hadoop.hbase.thrift.THBaseThreadPoolExecutor;
+049import 
org.apache.hadoop.hbase.thrift.ThriftMetrics;
+050import 

[48/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Admin.html 
b/apidocs/org/apache/hadoop/hbase/client/Admin.html
index 39252f9..1eb1565 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Admin.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Admin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":50,"i4":6,"i5":6,"i6":18,"i7":6,"i8":18,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":50,"i15":50,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":38,"i28":38,"i29":38,"i30":38,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":50,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":38,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":38,"i57":6,"i58":6,"i59":38,"i60":38,"i61":6,"i62":38,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":38,"i69":38,"i70":50,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":38,"i77":38,"i78":6,"i79":50,"i80":6,"i81":6,"i82":6,"i83":6,"i84":38,"i85":38,"i86":18,"i87":18,"i88":6,"i89":50,"i90":6,"i91":6,"i92":6,"i93":6,"i94":6,"i95":6,"i96":6,"i97":6,"i98":6,"i99":18,"i100":18,"i101":50,"i102":18,"i103":6,"i104":38,"i105":6,"i106":6,"i107":6,"i108":38,"i109":18,"i110":6,"i111":6,"i112":6,"i113":18,"i114":6,"i115":6,"i116":38,"i11
 
7":38,"i118":38,"i119":38,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":50,"i131":6,"i132":38,"i133":6,"i134":6,"i135":18,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":38,"i144":6,"i145":6,"i146":6,"i147":6,"i148":6,"i149":38,"i150":6,"i151":6,"i152":6,"i153":38,"i154":38,"i155":6,"i156":38,"i157":38,"i158":38,"i159":38,"i160":38,"i161":6,"i162":38,"i163":6,"i164":6,"i165":6,"i166":6,"i167":6,"i168":6,"i169":6,"i170":38,"i171":6,"i172":6,"i173":6,"i174":50,"i175":6,"i176":6,"i177":6,"i178":6,"i179":6,"i180":38,"i181":6,"i182":38,"i183":6,"i184":6,"i185":6,"i186":6,"i187":6,"i188":6,"i189":6,"i190":6,"i191":6,"i192":6,"i193":6,"i194":6,"i195":6,"i196":6,"i197":6,"i198":6,"i199":50,"i200":6,"i201":50,"i202":50,"i203":50,"i204":6,"i205":50,"i206":6,"i207":6,"i208":6,"i209":6,"i210":6,"i211":6,"i212":6,"i213":6,"i214":38,"i215":38,"i216":6,"i217":6,"i218":6,"i219":6,"i220":6,"i221":50,"i222":6,"i223":6,"i224":6,"
 i225":6,"i226":6,"i227":6,"i228":6};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":50,"i4":6,"i5":6,"i6":18,"i7":6,"i8":18,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":50,"i15":50,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":38,"i29":38,"i30":38,"i31":38,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":50,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":38,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":38,"i58":6,"i59":6,"i60":38,"i61":38,"i62":6,"i63":38,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":38,"i70":38,"i71":50,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":38,"i78":38,"i79":6,"i80":50,"i81":6,"i82":6,"i83":6,"i84":6,"i85":38,"i86":38,"i87":18,"i88":18,"i89":6,"i90":50,"i91":6,"i92":6,"i93":6,"i94":6,"i95":6,"i96":6,"i97":6,"i98":6,"i99":6,"i100":18,"i101":18,"i102":50,"i103":18,"i104":6,"i105":38,"i106":6,"i107":6,"i108":6,"i109":38,"i110":18,"i111":6,"i112":6,"i113":6,"i114":18,"i115":6,"i116":6,"i117
 
":38,"i118":38,"i119":38,"i120":38,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":6,"i131":50,"i132":6,"i133":38,"i134":6,"i135":6,"i136":18,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":6,"i144":38,"i145":6,"i146":6,"i147":6,"i148":6,"i149":6,"i150":38,"i151":6,"i152":6,"i153":6,"i154":38,"i155":38,"i156":6,"i157":38,"i158":38,"i159":38,"i160":38,"i161":38,"i162":6,"i163":38,"i164":6,"i165":6,"i166":6,"i167":6,"i168":6,"i169":6,"i170":6,"i171":38,"i172":6,"i173":6,"i174":6,"i175":50,"i176":6,"i177":6,"i178":6,"i179":6,"i180":6,"i181":38,"i182":6,"i183":38,"i184":6,"i185":6,"i186":6,"i187":6,"i188":6,"i189":6,"i190":6,"i191":6,"i192":6,"i193":6,"i194":6,"i195":6,"i196":6,"i197":6,"i198":6,"i199":6,"i200":50,"i201":6,"i202":50,"i203":50,"i204":50,"i205":6,"i206":50,"i207":6,"i208":6,"i209":6,"i210":6,"i211":6,"i212":6,"i213":6,"i214":6,"i215":38,"i216":38,"i217":6,"i218":6,"i219":6,"i220":6,"i221":6,"i222":50,"i223":6,"i224":6,"
 i225":6,"i226":6,"i227":6,"i228":6,"i229":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -327,10 +327,18 @@ extends org.apache.hadoop.hbase.Abortable, https://docs.oracle.com/java
 
 
 void
-close()
+cloneTableSchema(TableName tableName,
+                 TableName newTableName,
+                 boolean preserveSplits)

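The regenerated Admin summary above picks up the new blocking cloneTableSchema(TableName, TableName, boolean). A hedged usage sketch; the connection bootstrap and table names are illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CloneSchemaSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Copies the source table's descriptor (and here its split points)
          // into a new, empty table; no data is copied.
          admin.cloneTableSchema(TableName.valueOf("orders"),
                                 TableName.valueOf("orders_staging"),
                                 true /* preserveSplits */);
        }
      }
    }
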
[25/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
index e6e43ee..a8b77ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
@@ -367,650 +367,650 @@
 359lock.lock();
 360try {
 361  LOG.trace("Starting WAL Procedure 
Store lease recovery");
-362  FileStatus[] oldLogs = 
getLogFiles();
-363  while (isRunning()) {
+362  while (isRunning()) {
+363FileStatus[] oldLogs = 
getLogFiles();
 364// Get Log-MaxID and recover 
lease on old logs
 365try {
 366  flushLogId = 
initOldLogs(oldLogs);
 367} catch (FileNotFoundException e) 
{
 368  LOG.warn("Someone else is 
active and deleted logs. retrying.", e);
-369  oldLogs = getLogFiles();
-370  continue;
-371}
-372
-373// Create new state-log
-374if (!rollWriter(flushLogId + 1)) 
{
-375  // someone else has already 
created this log
-376  LOG.debug("Someone else has 
already created log " + flushLogId);
-377  continue;
-378}
-379
-380// We have the lease on the log
-381oldLogs = getLogFiles();
-382if (getMaxLogId(oldLogs) > flushLogId) {
-383  if (LOG.isDebugEnabled()) {
-384LOG.debug("Someone else 
created new logs. Expected maxLogId  " + flushLogId);
-385  }
-386  
logs.getLast().removeFile(this.walArchiveDir);
-387  continue;
-388}
-389
-390LOG.trace("Lease acquired for 
flushLogId={}", flushLogId);
-391break;
-392  }
-393} finally {
-394  lock.unlock();
-395}
-396  }
-397
-398  @Override
-399  public void load(final ProcedureLoader 
loader) throws IOException {
-400lock.lock();
-401try {
-402  if (logs.isEmpty()) {
-403throw new 
RuntimeException("recoverLease() must be called before loading data");
-404  }
-405
-406  // Nothing to do, If we have only 
the current log.
-407  if (logs.size() == 1) {
-408LOG.trace("No state logs to 
replay.");
-409loader.setMaxProcId(0);
-410return;
-411  }
-412
-413  // Load the old logs
-414  final Iterator<ProcedureWALFile> it = logs.descendingIterator();
-415  it.next(); // Skip the current 
log
-416
-417  ProcedureWALFormat.load(it, 
storeTracker, new ProcedureWALFormat.Loader() {
-418@Override
-419public void setMaxProcId(long 
maxProcId) {
-420  
loader.setMaxProcId(maxProcId);
-421}
-422
-423@Override
-424public void 
load(ProcedureIterator procIter) throws IOException {
-425  loader.load(procIter);
-426}
-427
-428@Override
-429public void 
handleCorrupted(ProcedureIterator procIter) throws IOException {
-430  
loader.handleCorrupted(procIter);
-431}
-432
-433@Override
-434public void 
markCorruptedWAL(ProcedureWALFile log, IOException e) {
-435  if (corruptedLogs == null) {
-436corruptedLogs = new HashSet<>();
-437  }
-438  corruptedLogs.add(log);
-439  // TODO: sideline corrupted 
log
-440}
-441  });
-442} finally {
-443  try {
-444// try to cleanup inactive wals 
and complete the operation
-445buildHoldingCleanupTracker();
-446tryCleanupLogsOnLoad();
-447loading.set(false);
-448  } finally {
-449lock.unlock();
-450  }
-451}
-452  }
-453
-454  private void tryCleanupLogsOnLoad() {
-455// nothing to cleanup.
-456if (logs.size() <= 1) return;
-457
-458// the config says to not cleanup 
wals on load.
-459if 
(!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY,
-460  
DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY)) {
-461  LOG.debug("WALs cleanup on load is 
not enabled: " + getActiveLogs());
-462  return;
-463}
-464
-465try {
-466  periodicRoll();
-467} catch (IOException e) {
-468  LOG.warn("Unable to cleanup logs on 
load: " + e.getMessage(), e);
-469}
-470  }
-471
-472  @Override
-473  public void insert(final Procedure 
proc, final Procedure[] subprocs) {
-474if (LOG.isTraceEnabled()) {
-475  LOG.trace("Insert " + proc + ", 
subproc=" + Arrays.toString(subprocs));
-476}
-477
-478ByteSlot slot = acquireSlot();
-479try {
-480  // Serialize the insert
-481  long[] subProcIds = null;
-482  if (subprocs != null) {
-483

[43/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index db470db..ee6c19d 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -2761,44 +2761,78 @@ service.
 
 
 void
+Admin.cloneTableSchema(TableName tableName,
+                       TableName newTableName,
+                       boolean preserveSplits)
+Create a new table by cloning the existent table schema.
+
+
+
+CompletableFuture<Void>
+RawAsyncHBaseAdmin.cloneTableSchema(TableName tableName,
+                                    TableName newTableName,
+                                    boolean preserveSplits)
+
+
+CompletableFuture<Void>
+AsyncHBaseAdmin.cloneTableSchema(TableName tableName,
+                                 TableName newTableName,
+                                 boolean preserveSplits)
+
+
+void
+HBaseAdmin.cloneTableSchema(TableName tableName,
+                            TableName newTableName,
+                            boolean preserveSplits)
+
+
+CompletableFuture<Void>
+AsyncAdmin.cloneTableSchema(TableName tableName,
+                            TableName newTableName,
+                            boolean preserveSplits)
+Create a new table by cloning the existent table schema.
+
+
+
+void
 Admin.compact(TableName tableName)
 Compact a table.
 
 
-
+
 void
 HBaseAdmin.compact(TableName tableName)
 Compact a table.
 
 
-
+
 default CompletableFuture<Void>
 AsyncAdmin.compact(TableName tableName)
 Compact a table.
 
 
-
+
 void
 Admin.compact(TableName tableName,
         byte[] columnFamily)
 Compact a column family within a table.
 
 
-
+
 void
 HBaseAdmin.compact(TableName tableName,
         byte[] columnFamily)
 Compact a column family within a table.
 
 
-
+
 default CompletableFuture<Void>
 AsyncAdmin.compact(TableName tableName,
         byte[] columnFamily)
 Compact a column family within a table.
 
 
-
+
 private CompletableFuture<Void>
 RawAsyncHBaseAdmin.compact(TableName tableName,
         byte[] columnFamily,
@@ -2807,7 +2841,7 @@ service.
 Compact column family of a table, Asynchronous operation 
even if CompletableFuture.get()
 
 
-
+
 private void
 HBaseAdmin.compact(TableName tableName,
         byte[] columnFamily,
@@ -2816,7 +2850,7 @@ service.
 Compact a table.
 
 
-
+
 void
 Admin.compact(TableName tableName,
         byte[] columnFamily,
@@ -2824,19 +2858,19 @@ service.
 Compact a column family within a table.
 
 
-
+
 CompletableFuture<Void>
 RawAsyncHBaseAdmin.compact(TableName tableName,
         byte[] columnFamily,
         CompactType compactType)
 
-
+
 CompletableFuture<Void>
 AsyncHBaseAdmin.compact(TableName tableName,

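The class-use rows above enumerate the compact overloads keyed by TableName. A short sketch of the blocking Admin variants shown; the table and family names are illustrative, and CompactType selects the store type (MOB alongside the default):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactSketch {
      // Ask the servers to compact a whole table, one family, and the
      // MOB files of that family.
      static void requestCompactions(Admin admin) throws IOException {
        TableName table = TableName.valueOf("orders");
        admin.compact(table);
        admin.compact(table, Bytes.toBytes("cf"));
        admin.compact(table, Bytes.toBytes("cf"), CompactType.MOB);
      }
    }
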
hbase-site git commit: INFRA-10751 Empty commit

2018-04-12 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site d220bc5e7 -> f5cbe4dd1


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/f5cbe4dd
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/f5cbe4dd
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/f5cbe4dd

Branch: refs/heads/asf-site
Commit: f5cbe4dd12bb2131231ca9303059bf2f82ad3ead
Parents: d220bc5
Author: jenkins 
Authored: Thu Apr 12 14:49:20 2018 +
Committer: jenkins 
Committed: Thu Apr 12 14:49:20 2018 +

--

--




[40/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index 16f1a0e..6b3b8d1 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -273,46 +273,54 @@ implements 
 CompletableFuture<Void>
+cloneTableSchema(TableName tableName,
+                 TableName newTableName,
+                 boolean preserveSplits)
+Create a new table by cloning the existent table schema.
+
+
+
+CompletableFuture<Void>
 compact(TableName tableName,
         byte[] columnFamily,
         CompactType compactType)
 Compact a column family within a table.
 
 
-
+
 CompletableFuture<Void>
 compact(TableName tableName,
         CompactType compactType)
 Compact a table.
 
 
-
+
 CompletableFuture<Void>
 compactRegion(byte[] regionName)
 Compact an individual region.
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 

[46/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html 
b/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
index 2fd2324..354e2d3 100644
--- a/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
+++ b/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
@@ -286,7 +286,7 @@ implements org.apache.hadoop.util.Tool
 
 
 main
-public static void main(String[] args)
+public static void main(String[] args)
                  throws Exception
 Main entry point.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/mapreduce/WALPlayer.html 
b/apidocs/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
index 9a603e5..0b077d2 100644
--- a/apidocs/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
+++ b/apidocs/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
@@ -416,7 +416,7 @@ implements org.apache.hadoop.util.Tool
 
 
 main
-public static void main(String[] args)
+public static void main(String[] args)
                  throws Exception
 Main entry point.
 
@@ -433,7 +433,7 @@ implements org.apache.hadoop.util.Tool
 
 
 run
-public int run(String[] args)
+public int run(String[] args)
        throws Exception
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
index 4bdaf23..863532f 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/Admin.html
@@ -2735,7 +2735,18 @@
 2727   * @return List of servers that are 
not cleared
 2728   */
 2729  List<ServerName> clearDeadServers(final List<ServerName> servers) throws IOException;
-2730}
+2730
+2731  /**
+2732   * Create a new table by cloning the 
existent table schema.
+2733   *
+2734   * @param tableName name of the table 
to be cloned
+2735   * @param newTableName name of the new 
table where the table will be created
+2736   * @param preserveSplits True if the 
splits should be preserved
+2737   * @throws IOException if a remote or 
network exception occurs
+2738   */
+2739  void cloneTableSchema(final TableName 
tableName, final TableName newTableName,
+2740  final boolean preserveSplits) 
throws IOException;
+2741}
 
 
 
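Per the javadoc just added, preserveSplits decides whether the clone keeps the source table's region boundaries. The effect is the same as creating a table with explicit split keys, sketched here under assumed names (the descriptor and boundaries are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PreSplitSketch {
      // Creating a table with explicit split points -- what
      // preserveSplits=true arranges for the cloned table.
      static void createPreSplit(Admin admin) throws IOException {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("orders_staging"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
        byte[][] splits = { Bytes.toBytes("m"), Bytes.toBytes("t") };
        admin.createTable(desc, splits);
      }
    }
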

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 5c18b82..27eb0b0 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -1238,7 +1238,17 @@
 1230   * @return CacheEvictionStats related 
to the eviction wrapped by a {@link CompletableFuture}.
 1231   */
 1232  CompletableFuture<CacheEvictionStats> clearBlockCache(final TableName tableName);
-1233}
+1233
+1234  /**
+1235   * Create a new table by cloning the 
existent table schema.
+1236   *
+1237   * @param tableName name of the table 
to be cloned
+1238   * @param newTableName name of the new 
table where the table will be created
+1239   * @param preserveSplits True if the 
splits should be preserved
+1240   */
+1241  CompletableFuture<Void>  

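The AsyncAdmin variant above returns a CompletableFuture<Void> instead of throwing IOException directly, so failures such as a missing source table surface through the future. A sketch of consuming it; the admin handle and table names are illustrative:

    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;

    public class AsyncCloneSketch {
      static CompletableFuture<Void> cloneSchema(AsyncAdmin admin) {
        return admin.cloneTableSchema(TableName.valueOf("orders"),
                                      TableName.valueOf("orders_staging"),
                                      true)
            .whenComplete((v, err) -> {
              if (err != null) {
                System.err.println("clone failed: " + err);
              }
            });
      }
    }
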
[28/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
index e63cd50..d8c0d2b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
@@ -422,7 +422,7 @@
 414  }
 415
 416  /**
-417   * {@link #listTables(boolean)}
+417   * {@link #listTableDescriptors(boolean)}
 418   */
 419  @Override
 420  public CompletableFuture<List<TableDescriptor>> listTableDescriptors(Pattern pattern,
@@ -3476,16 +3476,79 @@
 3468return future;
 3469  }
 3470
-3471  private CompletableFuture<CacheEvictionStats> clearBlockCache(ServerName serverName,
-3472  List<RegionInfo> hris) {
-3473return this.<CacheEvictionStats> newAdminCaller().action((controller, stub) -> this
-3474  .<ClearRegionBlockCacheRequest, ClearRegionBlockCacheResponse, CacheEvictionStats> adminCall(
-3475controller, stub, RequestConverter.buildClearRegionBlockCacheRequest(hris),
-3476(s, c, req, done) -> s.clearRegionBlockCache(controller, req, done),
-3477resp -> ProtobufUtil.toCacheEvictionStats(resp.getStats())))
-3478  .serverName(serverName).call();
-3479  }
-3480}
+3471  @Override
+3472  public CompletableFuture<Void> cloneTableSchema(TableName tableName, TableName newTableName,
+3473  boolean preserveSplits) {
+3474CompletableFuture<Void> future = new CompletableFuture<>();
+3475tableExists(tableName).whenComplete(
+3476  (exist, err) -> {
+3477if (err != null) {
+3478  future.completeExceptionally(err);
+3479  return;
+3480}
+3481if (!exist) {
+3482  future.completeExceptionally(new TableNotFoundException(tableName));
+3483  return;
+3484}
+3485tableExists(newTableName).whenComplete(
+3486  (exist1, err1) -> {
+3487if (err1 != null) {
+3488  future.completeExceptionally(err1);
+3489  return;
+3490}
+3491if (exist1) {
+3492  future.completeExceptionally(new TableExistsException(newTableName));
+3493  return;
+3494}
+3495getDescriptor(tableName).whenComplete(
+3496  (tableDesc, err2) -> {
+3497if (err2 != null) {
+3498  future.completeExceptionally(err2);
+3499  return;
+3500}
+3501TableDescriptor newTableDesc
+3502= TableDescriptorBuilder.copy(newTableName, tableDesc);
+3503if (preserveSplits) {
+3504  getTableSplits(tableName).whenComplete((splits, err3) -> {
+3505if (err3 != null) {
+3506  future.completeExceptionally(err3);
+3507} else {
+3508  createTable(newTableDesc, splits).whenComplete(
+3509(result, err4) -> {
+3510  if (err4 != null) {
+3511future.completeExceptionally(err4);
+3512  } else {
+3513future.complete(result);
+3514  }
+3515});
+3516}
+3517  });
+3518} else {
+3519  createTable(newTableDesc).whenComplete(
+3520(result, err5) -> {
+3521  if (err5 != null) {
+3522future.completeExceptionally(err5);
+3523  } else {
+3524future.complete(result);
+3525  }
+3526});
+3527}
+3528  });
+3529  });
+3530  });
+3531return future;
+3532  }
+3533
+3534  private CompletableFuture<CacheEvictionStats> clearBlockCache(ServerName serverName,
+3535  List<RegionInfo> hris) {
+3536return this.<CacheEvictionStats> newAdminCaller().action((controller, stub) -> this
+3537  .<ClearRegionBlockCacheRequest, ClearRegionBlockCacheResponse, CacheEvictionStats> adminCall(
+3538controller, stub, RequestConverter.buildClearRegionBlockCacheRequest(hris),
+3539(s, c, req, done) -> s.clearRegionBlockCache(controller, req, done),
+3540resp -> ProtobufUtil.toCacheEvictionStats(resp.getStats())))
+3541  .serverName(serverName).call();
+3542  }
+3543}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
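
RawAsyncHBaseAdmin above implements the clone as nested whenComplete callbacks completing one shared future: both existence checks run first, then the descriptor is copied with TableDescriptorBuilder.copy and createTable runs with or without the source splits. The same control flow can be written flat with thenCompose; a hedged sketch of that alternative shape, not the committed code (the suppliers and function stand in for the tableExists/getDescriptor/createTable calls):

    import java.util.concurrent.CompletableFuture;
    import java.util.function.Function;
    import java.util.function.Supplier;

    final class ComposeSketch<D> {
      CompletableFuture<Void> cloneSchema(Supplier<CompletableFuture<Boolean>> sourceExists,
          Supplier<CompletableFuture<Boolean>> targetExists,
          Supplier<CompletableFuture<D>> fetchDescriptor,
          Function<D, CompletableFuture<Void>> create) {
        return sourceExists.get().thenCompose(src -> {
          if (!src) {
            throw new IllegalStateException("source table not found");
          }
          return targetExists.get();
        }).thenCompose(dst -> {
          if (dst) {
            throw new IllegalStateException("target table already exists");
          }
          return fetchDescriptor.get();
        }).thenCompose(create); // build the new table from the copied descriptor
      }
    }

Throwing inside a thenCompose stage completes the resulting future exceptionally, which is what the explicit completeExceptionally calls achieve in the committed version.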

[07/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
index 8302e28..c370eb9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
@@ -2113,3031 +2113,3033 @@
 2105
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106tableName + " unable to 
delete dangling table state " + tableState);
 2107  }
-2108} else {
-2109  
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110  tableName + " has dangling 
table state " + tableState);
-2111}
-2112  }
-2113}
-2114// check that all tables have 
states
-2115for (TableName tableName : 
tablesInfo.keySet()) {
-2116  if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-2117if (fixMeta) {
-2118  
MetaTableAccessor.updateTableState(connection, tableName, 
TableState.State.ENABLED);
-2119  TableState newState = 
MetaTableAccessor.getTableState(connection, tableName);
-2120  if (newState == null) {
-2121
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122"Unable to change state 
for table " + tableName + " in meta ");
-2123  }
-2124} else {
-2125  
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126  tableName + " has no state 
in meta ");
-2127}
-2128  }
-2129}
-2130  }
-2131
-2132  private void preCheckPermission() 
throws IOException, AccessDeniedException {
-2133if 
(shouldIgnorePreCheckPermission()) {
-2134  return;
-2135}
-2136
-2137Path hbaseDir = 
FSUtils.getRootDir(getConf());
-2138FileSystem fs = 
hbaseDir.getFileSystem(getConf());
-2139UserProvider userProvider = 
UserProvider.instantiate(getConf());
-2140UserGroupInformation ugi = 
userProvider.getCurrent().getUGI();
-2141FileStatus[] files = 
fs.listStatus(hbaseDir);
-2142for (FileStatus file : files) {
-2143  try {
-2144FSUtils.checkAccess(ugi, file, 
FsAction.WRITE);
-2145  } catch (AccessDeniedException 
ace) {
-2146LOG.warn("Got 
AccessDeniedException when preCheckPermission ", ace);
-2147
errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + 
ugi.getUserName()
-2148  + " does not have write perms 
to " + file.getPath()
-2149  + ". Please rerun hbck as hdfs 
user " + file.getOwner());
-2150throw ace;
-2151  }
-2152}
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo 
hi) throws IOException {
-2159
deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] 
metaKey) throws IOException {
-2166Delete d = new Delete(metaKey);
-2167meta.delete(d);
-2168LOG.info("Deleted " + 
Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info 
in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo 
hi) throws IOException {
-2175RowMutations mutations = new 
RowMutations(hi.metaEntry.getRegionName());
-2176Delete d = new 
Delete(hi.metaEntry.getRegionName());
-2177
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179mutations.add(d);
-2180
-2181RegionInfo hri = 
RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182.setOffline(false)
-2183.setSplit(false)
-2184.build();
-2185Put p = 
MetaTableAccessor.makePutFromRegionInfo(hri, 
EnvironmentEdgeManager.currentTime());
-2186mutations.add(p);
-2187
-2188meta.mutateRow(mutations);
-2189LOG.info("Reset split parent " + 
hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This backwards-compatibility 
wrapper for permanently offlining a region
-2194   * that should not be alive.  If the 
region server does not support the
-2195   * "offline" method, it will use the 
closest unassign method instead.  This
-2196   * will basically work until one 
attempts to disable or delete the affected
-2197   * table.  The problem has to do with 
in-memory only master state, so
-2198   * restarting the HMaster or failing 
over to another should fix this.
-2199   */
-2200  private void offline(byte[] 
regionName) throws IOException {
-2201String regionString = 

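The hbck listing above repairs hbase:meta with ordinary client mutations: deleteMetaRegion issues a Delete for the region's row, and resetSplitParent clears the SPLITA/SPLITB pointers and rewrites the region info in one RowMutations batch. A self-contained sketch of that second pattern; the row key is illustrative, real hbck derives it from the region's meta entry:

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.client.Table;

    public class MetaEditSketch {
      // Remove the split daughter pointers from a region's row in hbase:meta,
      // the same mutation resetSplitParent() builds in the listing above.
      static void clearSplitPointers(Connection conn, byte[] regionRowKey) throws Exception {
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          Delete d = new Delete(regionRowKey);
          d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
          d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
          RowMutations m = new RowMutations(regionRowKey);
          m.add(d);
          meta.mutateRow(m);
        }
      }
    }
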
[10/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index 8302e28..c370eb9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -2113,3031 +2113,3033 @@
 2105
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106tableName + " unable to 
delete dangling table state " + tableState);
 2107  }
-2108} else {
-2109  
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110  tableName + " has dangling 
table state " + tableState);
-2111}
-2112  }
-2113}
-2114// check that all tables have 
states
-2115for (TableName tableName : 
tablesInfo.keySet()) {
-2116  if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-2117if (fixMeta) {
-2118  
MetaTableAccessor.updateTableState(connection, tableName, 
TableState.State.ENABLED);
-2119  TableState newState = 
MetaTableAccessor.getTableState(connection, tableName);
-2120  if (newState == null) {
-2121
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122"Unable to change state 
for table " + tableName + " in meta ");
-2123  }
-2124} else {
-2125  
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126  tableName + " has no state 
in meta ");
-2127}
-2128  }
-2129}
-2130  }
-2131
-2132  private void preCheckPermission() 
throws IOException, AccessDeniedException {
-2133if 
(shouldIgnorePreCheckPermission()) {
-2134  return;
-2135}
-2136
-2137Path hbaseDir = 
FSUtils.getRootDir(getConf());
-2138FileSystem fs = 
hbaseDir.getFileSystem(getConf());
-2139UserProvider userProvider = 
UserProvider.instantiate(getConf());
-2140UserGroupInformation ugi = 
userProvider.getCurrent().getUGI();
-2141FileStatus[] files = 
fs.listStatus(hbaseDir);
-2142for (FileStatus file : files) {
-2143  try {
-2144FSUtils.checkAccess(ugi, file, 
FsAction.WRITE);
-2145  } catch (AccessDeniedException 
ace) {
-2146LOG.warn("Got 
AccessDeniedException when preCheckPermission ", ace);
-2147
errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + 
ugi.getUserName()
-2148  + " does not have write perms 
to " + file.getPath()
-2149  + ". Please rerun hbck as hdfs 
user " + file.getOwner());
-2150throw ace;
-2151  }
-2152}
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo 
hi) throws IOException {
-2159
deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] 
metaKey) throws IOException {
-2166Delete d = new Delete(metaKey);
-2167meta.delete(d);
-2168LOG.info("Deleted " + 
Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info 
in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo 
hi) throws IOException {
-2175RowMutations mutations = new 
RowMutations(hi.metaEntry.getRegionName());
-2176Delete d = new 
Delete(hi.metaEntry.getRegionName());
-2177
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179mutations.add(d);
-2180
-2181RegionInfo hri = 
RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182.setOffline(false)
-2183.setSplit(false)
-2184.build();
-2185Put p = 
MetaTableAccessor.makePutFromRegionInfo(hri, 
EnvironmentEdgeManager.currentTime());
-2186mutations.add(p);
-2187
-2188meta.mutateRow(mutations);
-2189LOG.info("Reset split parent " + 
hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This backwards-compatibility 
wrapper for permanently offlining a region
-2194   * that should not be alive.  If the 
region server does not support the
-2195   * "offline" method, it will use the 
closest unassign method instead.  This
-2196   * will basically work until one 
attempts to disable or delete the affected
-2197   * table.  The problem has to do with 
in-memory only master state, so
-2198   * restarting the HMaster or failing 
over to another should fix this.
-2199   */
-2200  private void offline(byte[] 
regionName) throws IOException {
-2201String regionString = 
Bytes.toStringBinary(regionName);
-2202if 

[36/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
index 544df85..8862e06 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
@@ -656,7 +656,7 @@ extends rollWriter()
 
 
-private boolean
+(package private) boolean
 rollWriter(long logId)
 
 
@@ -1428,7 +1428,7 @@ extends 
 
 WALS_PATH_FILTER
-private static finalorg.apache.hadoop.fs.PathFilter WALS_PATH_FILTER
-private static final org.apache.hadoop.fs.PathFilter WALS_PATH_FILTER
+private static final org.apache.hadoop.fs.PathFilter WALS_PATH_FILTER
 
 
@@ -1437,7 +1437,7 @@ extends 
 
 FILE_STATUS_ID_COMPARATOR
-private static finalhttps://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in 
java.util">Comparatororg.apache.hadoop.fs.FileStatus FILE_STATUS_ID_COMPARATOR
+private static finalhttps://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
 title="class or interface in 
java.util">Comparatororg.apache.hadoop.fs.FileStatus FILE_STATUS_ID_COMPARATOR
 
 
 
@@ -1606,7 +1606,7 @@ extends 
 
 load
-publicvoidload(ProcedureStore.ProcedureLoaderloader)
+publicvoidload(ProcedureStore.ProcedureLoaderloader)
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
interface:ProcedureStore
 Load the Procedures in the store.
@@ -1624,7 +1624,7 @@ extends 
 
 tryCleanupLogsOnLoad
-privatevoidtryCleanupLogsOnLoad()
+privatevoidtryCleanupLogsOnLoad()
 
 
 
@@ -1633,7 +1633,7 @@ extends 
 
 insert
-publicvoidinsert(Procedureproc,
+publicvoidinsert(Procedureproc,
Procedure[]subprocs)
 Description copied from 
interface:ProcedureStore
 When a procedure is submitted to the executor insert(proc, 
null) will be called.
@@ -1655,7 +1655,7 @@ extends 
 
 insert
-publicvoidinsert(Procedure[]procs)
+publicvoidinsert(Procedure[]procs)
 Description copied from 
interface:ProcedureStore
 Serialize a set of new procedures.
  These procedures are freshly submitted to the executor and each procedure
@@ -1672,7 +1672,7 @@ extends 
 
 update
-publicvoidupdate(Procedureproc)
+publicvoidupdate(Procedureproc)
 Description copied from 
interface:ProcedureStore
 The specified procedure was executed,
  and the new state should be written to the store.
@@ -1688,7 +1688,7 @@ extends 
 
 delete
-publicvoiddelete(longprocId)
+publicvoiddelete(longprocId)
 Description copied from 
interface:ProcedureStore
 The specified procId was removed from the executor,
  due to completion, abort or failure.
@@ -1705,7 +1705,7 @@ extends 
 
 delete
-publicvoiddelete(Procedureproc,
+publicvoiddelete(Procedureproc,
long[]subProcIds)
 Description copied from 
interface:ProcedureStore
 The parent procedure completed.
@@ -1723,7 +1723,7 @@ extends 
 
 delete
-publicvoiddelete(long[]procIds,
+publicvoiddelete(long[]procIds,
intoffset,
intcount)
 Description copied from 
interface:ProcedureStore
@@ -1744,7 +1744,7 @@ extends 
 
 delete
-privatevoiddelete(long[]procIds)
+privatevoiddelete(long[]procIds)
 
 
 
@@ -1753,7 +1753,7 @@ extends 
 
 acquireSlot
-privateByteSlotacquireSlot()
+privateByteSlotacquireSlot()
 
 
 
@@ -1762,7 +1762,7 @@ extends 
 
 releaseSlot
-privatevoidreleaseSlot(ByteSlotslot)
+privatevoidreleaseSlot(ByteSlotslot)
 
 
 
@@ -1771,7 +1771,7 @@ extends 
 
 pushData
-privatelongpushData(WALProcedureStore.PushTypetype,
+privatelongpushData(WALProcedureStore.PushTypetype,
   ByteSlotslot,
   longprocId,
   long[]subProcIds)
@@ -1783,7 +1783,7 @@ extends 
 
 updateStoreTracker
-privatevoidupdateStoreTracker(WALProcedureStore.PushTypetype,
+privatevoidupdateStoreTracker(WALProcedureStore.PushTypetype,
 longprocId,
 long[]subProcIds)
 
@@ -1794,7 +1794,7 @@ extends 
 
 isSyncAborted
-privatebooleanisSyncAborted()
+privatebooleanisSyncAborted()
 
 
 
@@ -1803,7 +1803,7 @@ extends 
 
 syncLoop
-privatevoidsyncLoop()
+privatevoidsyncLoop()
throws https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwable
 
 Throws:
@@ -1817,7 +1817,7 @@ extends 
 
 getSyncMetrics
-publichttps://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
 title="class or interface in java.util">ArrayListWALProcedureStore.SyncMetricsgetSyncMetrics()

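The regenerated summary above is WALProcedureStore's side of the ProcedureStore contract: recoverLease() takes ownership of the WAL set, load(ProcedureLoader) replays the old logs, and insert/update/delete record procedure state as execution proceeds. A minimal loader showing the three callbacks load() drives; a real loader, such as the executor's, rebuilds procedures from them:

    import java.io.IOException;
    import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
    import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;

    public class NoopLoader implements ProcedureStore.ProcedureLoader {
      @Override public void setMaxProcId(long maxProcId) { }
      @Override public void load(ProcedureIterator procIter) throws IOException { }
      @Override public void handleCorrupted(ProcedureIterator procIter) throws IOException { }
    }
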
[12/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
@@ -2113,3031 +2113,3033 @@
             errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
                 tableName + " unable to delete dangling table state " + tableState);
           }
-        } else {
-          errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-              tableName + " has dangling table state " + tableState);
-        }
-      }
-    }
-    // check that all tables have states
-    for (TableName tableName : tablesInfo.keySet()) {
-      if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-        if (fixMeta) {
-          MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED);
-          TableState newState = MetaTableAccessor.getTableState(connection, tableName);
-          if (newState == null) {
-            errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-                "Unable to change state for table " + tableName + " in meta ");
-          }
-        } else {
-          errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-              tableName + " has no state in meta ");
-        }
-      }
-    }
-  }
-
-  private void preCheckPermission() throws IOException, AccessDeniedException {
-    if (shouldIgnorePreCheckPermission()) {
-      return;
-    }
-
-    Path hbaseDir = FSUtils.getRootDir(getConf());
-    FileSystem fs = hbaseDir.getFileSystem(getConf());
-    UserProvider userProvider = UserProvider.instantiate(getConf());
-    UserGroupInformation ugi = userProvider.getCurrent().getUGI();
-    FileStatus[] files = fs.listStatus(hbaseDir);
-    for (FileStatus file : files) {
-      try {
-        FSUtils.checkAccess(ugi, file, FsAction.WRITE);
-      } catch (AccessDeniedException ace) {
-        LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
-        errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
-          + " does not have write perms to " + file.getPath()
-          + ". Please rerun hbck as hdfs user " + file.getOwner());
-        throw ace;
-      }
-    }
-  }
-
-  /**
-   * Deletes region from meta table
-   */
-  private void deleteMetaRegion(HbckInfo hi) throws IOException {
-    deleteMetaRegion(hi.metaEntry.getRegionName());
-  }
-
-  /**
-   * Deletes region from meta table
-   */
-  private void deleteMetaRegion(byte[] metaKey) throws IOException {
-    Delete d = new Delete(metaKey);
-    meta.delete(d);
-    LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" );
-  }
-
-  /**
-   * Reset the split parent region info in meta table
-   */
-  private void resetSplitParent(HbckInfo hi) throws IOException {
-    RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
-    Delete d = new Delete(hi.metaEntry.getRegionName());
-    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-    mutations.add(d);
-
-    RegionInfo hri = RegionInfoBuilder.newBuilder(hi.metaEntry)
-        .setOffline(false)
-        .setSplit(false)
-        .build();
-    Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
-    mutations.add(p);
-
-    meta.mutateRow(mutations);
-    LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
-  }
-
-  /**
-   * This backwards-compatibility wrapper for permanently offlining a region
-   * that should not be alive.  If the region server does not support the
-   * "offline" method, it will use the closest unassign method instead.  This
-   * will basically work until one attempts to disable or delete the affected
-   * table.  The problem has to do with in-memory only master state, so
-   * restarting the HMaster or failing over to another should fix this.
-   */
-  private void offline(byte[] regionName) throws IOException {
-    String regionString = Bytes.toStringBinary(regionName);
-    if (!rsSupportsOffline) {
-      LOG.warn("Using unassign region " +
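
The resetSplitParent() block above is worth pulling out: it packs a Delete of
the SPLITA/SPLITB daughter pointers and a Put of the rewritten RegionInfo into
a single RowMutations, so the hbase:meta row is updated atomically. A minimal
sketch of that pattern, assuming "meta" is a Table handle on hbase:meta and
"regionName"/"hri" come from the caller:

  RowMutations mutations = new RowMutations(regionName);
  Delete d = new Delete(regionName);
  d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);
  Put p = MetaTableAccessor.makePutFromRegionInfo(hri,
      EnvironmentEdgeManager.currentTime());
  mutations.add(p);
  meta.mutateRow(mutations);  // delete + put applied atomically on the row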

[14/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html

[47/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 2d899da..275a600 100644
--- a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":18,"i16":18,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":18,"i49":6,"i50":6,"i51":18,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":6,"i58":18,"i59":18,"i60":18,"i61":6,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":18,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":6,"i84":18,"i85":6,"i86":6,"i87":6,"i88":6,"i89":6,"i90":6,"i91":6,"i92":18,"i93":6,"i94":6,"i95":6,"i96":18,"i97":6,"i98":6,"i99":6,"i100":6,"i101":6,"i102":18,"i103":18,"i104":6,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119"
 
:6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":18,"i131":18,"i132":6,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":6,"i144":6};
+var methods = 
{"i0":6,"i1":6,"i2":18,"i3":6,"i4":6,"i5":6,"i6":18,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":18,"i17":18,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":18,"i50":6,"i51":6,"i52":18,"i53":6,"i54":6,"i55":6,"i56":6,"i57":6,"i58":6,"i59":18,"i60":18,"i61":18,"i62":6,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":18,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":6,"i84":6,"i85":18,"i86":6,"i87":6,"i88":6,"i89":6,"i90":6,"i91":6,"i92":6,"i93":18,"i94":6,"i95":6,"i96":6,"i97":18,"i98":6,"i99":6,"i100":6,"i101":6,"i102":6,"i103":18,"i104":18,"i105":6,"i106":6,"i107":6,"i108":6,"i109":6,"i110":6,"i111":6,"i112":6,"i113":6,"i114":6,"i115":6,"i116":6,"i117":6,"i118":6,"i119"
 
:6,"i120":6,"i121":6,"i122":6,"i123":6,"i124":6,"i125":6,"i126":6,"i127":6,"i128":6,"i129":6,"i130":6,"i131":18,"i132":18,"i133":6,"i134":6,"i135":6,"i136":6,"i137":6,"i138":6,"i139":6,"i140":6,"i141":6,"i142":6,"i143":6,"i144":6,"i145":6};
var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
var altColor = "altColor";
var rowColor = "rowColor";
@@ -223,19 +223,27 @@ public interface
+CompletableFuture<Void>
+cloneTableSchema(TableName tableName,
+                 TableName newTableName,
+                 boolean preserveSplits)
+Create a new table by cloning the existent table schema.
+
 default CompletableFuture<Void>
 compact(TableName tableName)
 Compact a table.

 default CompletableFuture<Void>
 compact(TableName tableName,
         byte[] columnFamily)
 Compact a column family within a table.

 CompletableFuture<Void>
 compact(TableName tableName,
         byte[] columnFamily,
@@ -243,40 +251,40 @@ public interface ... Compact a column family within a table.
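
A hedged usage sketch of the cloneTableSchema() entry added to this method
summary; the asyncAdmin handle, the table names and the completion handling
are illustrative assumptions:

  CompletableFuture<Void> f = asyncAdmin.cloneTableSchema(
      TableName.valueOf("source_table"),
      TableName.valueOf("source_table_copy"),
      true /* preserveSplits */);
  f.whenComplete((ignored, err) -> {
    if (err != null) {
      // e.g. source table missing, or target already present
      err.printStackTrace();
    }
  });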
 

[13/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html

[22/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
index e6e43ee..a8b77ae 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
@@ -367,650 +367,650 @@
     lock.lock();
     try {
       LOG.trace("Starting WAL Procedure Store lease recovery");
-      FileStatus[] oldLogs = getLogFiles();
-      while (isRunning()) {
+      while (isRunning()) {
+        FileStatus[] oldLogs = getLogFiles();
         // Get Log-MaxID and recover lease on old logs
         try {
           flushLogId = initOldLogs(oldLogs);
         } catch (FileNotFoundException e) {
           LOG.warn("Someone else is active and deleted logs. retrying.", e);
-          oldLogs = getLogFiles();
-          continue;
-        }
-
-        // Create new state-log
-        if (!rollWriter(flushLogId + 1)) {
-          // someone else has already created this log
-          LOG.debug("Someone else has already created log " + flushLogId);
-          continue;
-        }
-
-        // We have the lease on the log
-        oldLogs = getLogFiles();
-        if (getMaxLogId(oldLogs) > flushLogId) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Someone else created new logs. Expected maxLogId < " + flushLogId);
-          }
-          logs.getLast().removeFile(this.walArchiveDir);
-          continue;
-        }
-
-        LOG.trace("Lease acquired for flushLogId={}", flushLogId);
-        break;
-      }
-    } finally {
-      lock.unlock();
-    }
-  }
-
-  @Override
-  public void load(final ProcedureLoader loader) throws IOException {
-    lock.lock();
-    try {
-      if (logs.isEmpty()) {
-        throw new RuntimeException("recoverLease() must be called before loading data");
-      }
-
-      // Nothing to do, If we have only the current log.
-      if (logs.size() == 1) {
-        LOG.trace("No state logs to replay.");
-        loader.setMaxProcId(0);
-        return;
-      }
-
-      // Load the old logs
-      final Iterator<ProcedureWALFile> it = logs.descendingIterator();
-      it.next(); // Skip the current log
-
-      ProcedureWALFormat.load(it, storeTracker, new ProcedureWALFormat.Loader() {
-        @Override
-        public void setMaxProcId(long maxProcId) {
-          loader.setMaxProcId(maxProcId);
-        }
-
-        @Override
-        public void load(ProcedureIterator procIter) throws IOException {
-          loader.load(procIter);
-        }
-
-        @Override
-        public void handleCorrupted(ProcedureIterator procIter) throws IOException {
-          loader.handleCorrupted(procIter);
-        }
-
-        @Override
-        public void markCorruptedWAL(ProcedureWALFile log, IOException e) {
-          if (corruptedLogs == null) {
-            corruptedLogs = new HashSet<>();
-          }
-          corruptedLogs.add(log);
-          // TODO: sideline corrupted log
-        }
-      });
-    } finally {
-      try {
-        // try to cleanup inactive wals and complete the operation
-        buildHoldingCleanupTracker();
-        tryCleanupLogsOnLoad();
-        loading.set(false);
-      } finally {
-        lock.unlock();
-      }
-    }
-  }
-
-  private void tryCleanupLogsOnLoad() {
-    // nothing to cleanup.
-    if (logs.size() <= 1) return;
-
-    // the config says to not cleanup wals on load.
-    if (!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY,
-      DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY)) {
-      LOG.debug("WALs cleanup on load is not enabled: " + getActiveLogs());
-      return;
-    }
-
-    try {
-      periodicRoll();
-    } catch (IOException e) {
-      LOG.warn("Unable to cleanup logs on load: " + e.getMessage(), e);
-    }
-  }
-
-  @Override
-  public void insert(final Procedure proc, final Procedure[] subprocs) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Insert " + proc + ", subproc=" + Arrays.toString(subprocs));
-    }
-
-    ByteSlot slot = acquireSlot();
-    try {
-      // Serialize the insert
-      long[] subProcIds = null;
-      if (subprocs != null) {
-        ProcedureWALFormat.writeInsert(slot, proc, subprocs);
-        subProcIds
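
The only behavioral change in this hunk is at the top of recoverLease(): the
log listing now happens inside the retry loop, so every attempt sees WALs that
another master created or deleted since the last pass. The shape of the
corrected loop, with placeholder bodies rather than the real implementation:

  while (isRunning()) {
    FileStatus[] oldLogs = getLogFiles();        // re-list on every attempt
    try {
      flushLogId = initOldLogs(oldLogs);         // recover lease on old logs
    } catch (FileNotFoundException e) {
      continue;                                  // logs deleted under us; retry
    }
    if (!rollWriter(flushLogId + 1)) {
      continue;                                  // someone else owns this log id
    }
    if (getMaxLogId(getLogFiles()) > flushLogId) {
      logs.getLast().removeFile(walArchiveDir);  // lost the race; drop ours
      continue;
    }
    break;                                       // lease acquired for flushLogId
  }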

[44/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index cedb178..3fecdff 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2018 The Apache Software Foundation
 
   File: 3601,
- Errors: 15876,
+ Errors: 15869,
  Warnings: 0,
  Infos: 0
   
@@ -601,7 +601,7 @@ under the License.
   0
 
 
-  6
+  4
 
   
   
@@ -5263,7 +5263,7 @@ under the License.
   0
 
 
-  9
+  6
 
   
   
@@ -10961,7 +10961,7 @@ under the License.
   0
 
 
-  10
+  9
 
   
   
@@ -21755,7 +21755,7 @@ under the License.
   0
 
 
-  6
+  5
 
   
   
@@ -34901,7 +34901,7 @@ under the License.
   0
 
 
-  0
+  1
 
   
   
@@ -35531,7 +35531,7 @@ under the License.
   0
 
 
-  3
+  1
 
   
   
@@ -38205,7 +38205,7 @@ under the License.
   0
 
 
-  1
+  2
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/coc.html
--
diff --git a/coc.html b/coc.html
index 1e5db94..a0bacd3 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -375,7 +375,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-04-11
+  Last Published: 
2018-04-12
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index d83a829..737aff5 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -440,7 +440,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-04-11
+  Last Published: 
2018-04-12
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 3f48b95..01b64bc 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -1105,7 +1105,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-04-11
+  Last Published: 
2018-04-12
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index c9c7fcf..dc32122 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Dependency Information
 
@@ -313,7 +313,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-04-11
+  Last Published: 
2018-04-12
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 179fa26..ade5a79 100644
--- a/dependency-management.html
+++ 

[49/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/index-all.html
--
diff --git a/apidocs/index-all.html b/apidocs/index-all.html
index 6f7301a..cc74120 100644
--- a/apidocs/index-all.html
+++ b/apidocs/index-all.html
@@ -1534,6 +1534,14 @@
 Create a new table by cloning the snapshot content, but does not block
  and wait for it to be completely cloned.

+cloneTableSchema(TableName, TableName, boolean) - Method in interface org.apache.hadoop.hbase.client.Admin
+
+Create a new table by cloning the existent table schema.
+
+cloneTableSchema(TableName, TableName, boolean) - Method in interface org.apache.hadoop.hbase.client.AsyncAdmin
+
+Create a new table by cloning the existent table schema.
+
 cloneTags(Cell) - Static method in class org.apache.hadoop.hbase.CellUtil

 Deprecated.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/apidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/TableName.html b/apidocs/org/apache/hadoop/hbase/class-use/TableName.html
index b4ccfa8..7d05122 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -656,6 +656,22 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.


 void
+Admin.cloneTableSchema(TableName tableName,
+                       TableName newTableName,
+                       boolean preserveSplits)
+Create a new table by cloning the existent table schema.
+
+CompletableFuture<Void>
+AsyncAdmin.cloneTableSchema(TableName tableName,
+                            TableName newTableName,
+                            boolean preserveSplits)
+Create a new table by cloning the existent table schema.
+
+void
 Admin.compact(TableName tableName)
 Compact a table.
 



[24/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
index e6e43ee..a8b77ae 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html

[31/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
index b862507..b6e7636 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
@@ -4235,7 +4235,22 @@
       }
     });
   }
-}
+
+  @Override
+  public void cloneTableSchema(final TableName tableName, final TableName newTableName,
+      final boolean preserveSplits) throws IOException {
+    checkTableExists(tableName);
+    if (tableExists(newTableName)) {
+      throw new TableExistsException(newTableName);
+    }
+    TableDescriptor htd = TableDescriptorBuilder.copy(newTableName, getTableDescriptor(tableName));
+    if (preserveSplits) {
+      createTable(htd, getTableSplits(tableName));
+    } else {
+      createTable(htd);
+    }
+  }
+}
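
The implementation above is a plain check-then-create: verify the source
exists, refuse an existing target, copy the descriptor under the new name, and
reuse the source's split points when asked. A caller-side sketch of the
synchronous API (the connection setup and table names are assumptions):

  try (Connection conn = ConnectionFactory.createConnection(conf);
       Admin admin = conn.getAdmin()) {
    // Fails if "source" is missing or "source_copy" already exists,
    // per checkTableExists()/tableExists() in the diff above.
    admin.cloneTableSchema(TableName.valueOf("source"),
        TableName.valueOf("source_copy"), true /* preserveSplits */);
  }

Note the check-then-create is not atomic; a concurrent create of the target
between tableExists() and createTable() should still surface as an error from
createTable() itself.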
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
index b862507..b6e7636 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
index b862507..b6e7636 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
index b862507..b6e7636 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html

[06/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html

[26/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
index cdb80a0..b442170 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
@@ -126,227 +126,226 @@
       final long openSeqNum = -1;

       // TODO: move under trace, now is visible for debugging
-      LOG.info(String.format("Load hbase:meta entry region=%s regionState=%s lastHost=%s regionLocation=%s",
-        regionInfo, state, lastHost, regionLocation));
-
-      visitor.visitRegionState(regionInfo, state, regionLocation, lastHost, openSeqNum);
-    }
-  }
-
-  public void updateRegionLocation(RegionStates.RegionStateNode regionStateNode)
-      throws IOException {
-    if (regionStateNode.getRegionInfo().isMetaRegion()) {
-      updateMetaLocation(regionStateNode.getRegionInfo(), regionStateNode.getRegionLocation());
-    } else {
-      long openSeqNum = regionStateNode.getState() == State.OPEN ?
-          regionStateNode.getOpenSeqNum() : HConstants.NO_SEQNUM;
-      updateUserRegionLocation(regionStateNode.getRegionInfo(), regionStateNode.getState(),
-          regionStateNode.getRegionLocation(), regionStateNode.getLastHost(), openSeqNum,
-          regionStateNode.getProcedure().getProcId());
-    }
-  }
-
-  private void updateMetaLocation(final RegionInfo regionInfo, final ServerName serverName)
-      throws IOException {
-    try {
-      MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName,
-        regionInfo.getReplicaId(), State.OPEN);
-    } catch (KeeperException e) {
-      throw new IOException(e);
-    }
-  }
-
-  private void updateUserRegionLocation(final RegionInfo regionInfo, final State state,
-      final ServerName regionLocation, final ServerName lastHost, final long openSeqNum,
-      final long pid)
-      throws IOException {
-    long time = EnvironmentEdgeManager.currentTime();
-    final int replicaId = regionInfo.getReplicaId();
-    final Put put = new Put(MetaTableAccessor.getMetaKeyForRegion(regionInfo), time);
-    MetaTableAccessor.addRegionInfo(put, regionInfo);
-    final StringBuilder info =
-      new StringBuilder("pid=").append(pid).append(" updating hbase:meta row=")
-        .append(regionInfo.getRegionNameAsString()).append(", regionState=").append(state);
-    if (openSeqNum >= 0) {
-      Preconditions.checkArgument(state == State.OPEN && regionLocation != null,
-          "Open region should be on a server");
-      MetaTableAccessor.addLocation(put, regionLocation, openSeqNum, replicaId);
-      // only update replication barrier for default replica
-      if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID &&
-        hasGlobalReplicationScope(regionInfo.getTable())) {
-        MetaTableAccessor.addReplicationBarrier(put, openSeqNum);
-      }
-      info.append(", openSeqNum=").append(openSeqNum);
-      info.append(", regionLocation=").append(regionLocation);
-    } else if (regionLocation != null && !regionLocation.equals(lastHost)) {
-      // Ideally, if no regionLocation, write null to the hbase:meta but this will confuse clients
-      // currently; they want a server to hit. TODO: Make clients wait if no location.
-      put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-          .setRow(put.getRow())
-          .setFamily(HConstants.CATALOG_FAMILY)
-          .setQualifier(getServerNameColumn(replicaId))
-          .setTimestamp(put.getTimestamp())
-          .setType(Cell.Type.Put)
-          .setValue(Bytes.toBytes(regionLocation.getServerName()))
-          .build());
-      info.append(", regionLocation=").append(regionLocation);
-    }
-    put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-        .setRow(put.getRow())
-        .setFamily(HConstants.CATALOG_FAMILY)
-        .setQualifier(getStateColumn(replicaId))
-        .setTimestamp(put.getTimestamp())
-        .setType(Cell.Type.Put)
-        .setValue(Bytes.toBytes(state.name()))
-        .build());
-    LOG.info(info.toString());
-    updateRegionLocation(regionInfo, state, put);
-  }
-
-  private void updateRegionLocation(RegionInfo regionInfo, State state, Put put)
-      throws IOException {
-    try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
-
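
Every meta write in updateUserRegionLocation() goes through the same
CellBuilderFactory pattern, so each cell reuses the Put's row and timestamp. A
condensed sketch of one such cell, assuming regionInfo, state, replicaId and
the getStateColumn() helper from the class above:

  Put put = new Put(MetaTableAccessor.getMetaKeyForRegion(regionInfo),
      EnvironmentEdgeManager.currentTime());
  put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(put.getRow())                     // same row as the Put
      .setFamily(HConstants.CATALOG_FAMILY)
      .setQualifier(getStateColumn(replicaId))  // per-replica state column
      .setTimestamp(put.getTimestamp())         // one timestamp for all cells
      .setType(Cell.Type.Put)
      .setValue(Bytes.toBytes(state.name()))
      .build());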

[02/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html

[01/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site f50447c3e -> d220bc5e7


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html

[05/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -2113,3031 +2113,3033 @@
 2105
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106tableName + " unable to 
delete dangling table state " + tableState);
 2107  }
-2108} else {
-2109  
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110  tableName + " has dangling 
table state " + tableState);
-2111}
-2112  }
-2113}
-2114// check that all tables have 
states
-2115for (TableName tableName : 
tablesInfo.keySet()) {
-2116  if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-2117if (fixMeta) {
-2118  
MetaTableAccessor.updateTableState(connection, tableName, 
TableState.State.ENABLED);
-2119  TableState newState = 
MetaTableAccessor.getTableState(connection, tableName);
-2120  if (newState == null) {
-2121
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122"Unable to change state 
for table " + tableName + " in meta ");
-2123  }
-2124} else {
-2125  
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126  tableName + " has no state 
in meta ");
-2127}
-2128  }
-2129}
-2130  }
-2131
-2132  private void preCheckPermission() 
throws IOException, AccessDeniedException {
-2133if 
(shouldIgnorePreCheckPermission()) {
-2134  return;
-2135}
-2136
-2137Path hbaseDir = 
FSUtils.getRootDir(getConf());
-2138FileSystem fs = 
hbaseDir.getFileSystem(getConf());
-2139UserProvider userProvider = 
UserProvider.instantiate(getConf());
-2140UserGroupInformation ugi = 
userProvider.getCurrent().getUGI();
-2141FileStatus[] files = 
fs.listStatus(hbaseDir);
-2142for (FileStatus file : files) {
-2143  try {
-2144FSUtils.checkAccess(ugi, file, 
FsAction.WRITE);
-2145  } catch (AccessDeniedException 
ace) {
-2146LOG.warn("Got 
AccessDeniedException when preCheckPermission ", ace);
-2147
errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + 
ugi.getUserName()
-2148  + " does not have write perms 
to " + file.getPath()
-2149  + ". Please rerun hbck as hdfs 
user " + file.getOwner());
-2150throw ace;
-2151  }
-2152}
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo 
hi) throws IOException {
-2159
deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] 
metaKey) throws IOException {
-2166Delete d = new Delete(metaKey);
-2167meta.delete(d);
-2168LOG.info("Deleted " + 
Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info 
in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo 
hi) throws IOException {
-2175RowMutations mutations = new 
RowMutations(hi.metaEntry.getRegionName());
-2176Delete d = new 
Delete(hi.metaEntry.getRegionName());
-2177
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179mutations.add(d);
-2180
-2181RegionInfo hri = 
RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182.setOffline(false)
-2183.setSplit(false)
-2184.build();
-2185Put p = 
MetaTableAccessor.makePutFromRegionInfo(hri, 
EnvironmentEdgeManager.currentTime());
-2186mutations.add(p);
-2187
-2188meta.mutateRow(mutations);
-2189LOG.info("Reset split parent " + 
hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This backwards-compatibility 
wrapper for permanently offlining a region
-2194   * that should not be alive.  If the 
region server does not support the
-2195   * "offline" method, it will use the 
closest unassign method instead.  This
-2196   * will basically work until one 
attempts to disable or delete the affected
-2197   * table.  The problem has to do with 
in-memory only master state, so
-2198   * restarting the HMaster or failing 
over to another should fix this.
-2199   */
-2200  private void offline(byte[] 
regionName) throws IOException {
-2201String regionString = 
Bytes.toStringBinary(regionName);
-2202if (!rsSupportsOffline) {
-2203  LOG.warn("Using unassign region " 
+ 

hbase git commit: HBASE-20397 Make it more explicit that monkey.properties is found on CLASSPATH

2018-04-12 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master d14a7ff18 -> 2912c9535


HBASE-20397 Make it more explicit that monkey.properties is found on CLASSPATH


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2912c953
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2912c953
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2912c953

Branch: refs/heads/master
Commit: 2912c953551bedbfbf30c32c156ed7bb187d54c3
Parents: d14a7ff
Author: Michael Stack 
Authored: Thu Apr 12 04:36:36 2018 -0700
Committer: Michael Stack 
Committed: Thu Apr 12 04:36:36 2018 -0700

--
 .../java/org/apache/hadoop/hbase/IntegrationTestBase.java   | 2 +-
 src/main/asciidoc/_chapters/developer.adoc  | 9 ++---
 2 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2912c953/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
index ee94078..125b7ca 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
@@ -100,7 +100,7 @@ public abstract class IntegrationTestBase extends 
AbstractHBaseTool {
   monkeyProps.load(this.getClass().getClassLoader()
   .getResourceAsStream(chaosMonkeyPropsFile));
 } catch (IOException e) {
-  LOG.warn(e.toString(), e);
+  LOG.warn("Failed load of monkey properties {} from CLASSPATH", 
chaosMonkeyPropsFile, e);
   System.exit(EXIT_FAILURE);
 }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2912c953/src/main/asciidoc/_chapters/developer.adoc
--
diff --git a/src/main/asciidoc/_chapters/developer.adoc 
b/src/main/asciidoc/_chapters/developer.adoc
index a6e9c3e..48dc79e 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -1465,9 +1465,8 @@ HBase ships with several ChaosMonkey policies, available 
in the
 [[chaos.monkey.properties]]
  Configuring Individual ChaosMonkey Actions
 
-Since HBase version 1.0.0 
(link:https://issues.apache.org/jira/browse/HBASE-11348[HBASE-11348]),
 ChaosMonkey integration tests can be configured per test run.
-Create a Java properties file in the HBase classpath and pass it to 
ChaosMonkey using
+Create a Java properties file in the HBase CLASSPATH and pass it to 
ChaosMonkey using
 the `-monkeyProps` configuration flag. Configurable properties, along with 
their default
 values if applicable, are listed in the 
`org.apache.hadoop.hbase.chaos.factories.MonkeyConstants`
 class. For properties that have defaults, you can override them by including 
them
@@ -1480,7 +1479,9 @@ The following example uses a properties file called 
<
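The diff is truncated here, but the workflow the section describes can be sketched as follows. This is an
illustration, not text from the commit: the file name, property values, and test class are example choices,
and the property keys follow the form documented in
org.apache.hadoop.hbase.chaos.factories.MonkeyConstants.

    # monkey.properties -- per-run ChaosMonkey overrides (values are illustrative)
    sdm.action1.period=120000
    sdm.action2.period=40000
    move.regions.sleep.time=80000

    # Put the file on the CLASSPATH, then point the test at it:
    bin/hbase org.apache.hadoop.hbase.IntegrationTestDDLMasterFailover \
        -monkey serverKilling -monkeyProps monkey.properties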

hbase git commit: HBASE-20349 [DOC] upgrade guide should call out removal of prefix-tree data block encoding

2018-04-12 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master c4ebf666b -> d14a7ff18


HBASE-20349 [DOC] upgrade guide should call out removal of prefix-tree data 
block encoding


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d14a7ff1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d14a7ff1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d14a7ff1

Branch: refs/heads/master
Commit: d14a7ff1876d68aab3d019b5179a7eff8f3e2356
Parents: c4ebf66
Author: Michael Stack 
Authored: Wed Apr 11 15:07:27 2018 -0700
Committer: Michael Stack 
Committed: Thu Apr 12 04:32:46 2018 -0700

--
 src/main/asciidoc/_chapters/troubleshooting.adoc |  4 ++--
 src/main/asciidoc/_chapters/upgrading.adoc   | 11 +++
 2 files changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d14a7ff1/src/main/asciidoc/_chapters/troubleshooting.adoc
--
diff --git a/src/main/asciidoc/_chapters/troubleshooting.adoc 
b/src/main/asciidoc/_chapters/troubleshooting.adoc
index 83f1989..52c0860 100644
--- a/src/main/asciidoc/_chapters/troubleshooting.adoc
+++ b/src/main/asciidoc/_chapters/troubleshooting.adoc
@@ -102,9 +102,9 @@ To disable, set the logging level back to `INFO` level.
 === JVM Garbage Collection Logs
 
 [NOTE]
-
+
 All example Garbage Collection logs in this section are based on Java 8 
output. The introduction of Unified Logging in Java 9 and newer will result in 
very different looking logs.
-
+
 
 HBase is memory intensive, and using the default GC you can see long pauses in 
all threads including the _Juliet Pause_ aka "GC of Death". To help debug this 
or confirm it is happening, GC logging can be turned on in the Java virtual 
machine.
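
For reference, the Java 8-style GC logging the note refers to is usually enabled by appending HotSpot flags
to HBASE_OPTS in hbase-env.sh; a minimal sketch (the log path is illustrative):

    # Java 8 GC logging; on Java 9+ the unified-logging equivalent is roughly -Xlog:gc*:file=...
    export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails \
        -XX:+PrintGCDateStamps -Xloggc:/var/log/hbase/gc-hbase.log"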
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d14a7ff1/src/main/asciidoc/_chapters/upgrading.adoc
--
diff --git a/src/main/asciidoc/_chapters/upgrading.adoc 
b/src/main/asciidoc/_chapters/upgrading.adoc
index 0c7edcc..046fc90 100644
--- a/src/main/asciidoc/_chapters/upgrading.adoc
+++ b/src/main/asciidoc/_chapters/upgrading.adoc
@@ -409,6 +409,17 @@ A brief summary of related changes:
 
 The Distributed Log Replay feature was broken and has been removed from HBase 
2.y+. As a consequence all related configs, metrics, RPC fields, and logging 
have also been removed. Note that this feature was found to be unreliable in 
the run up to HBase 1.0, defaulted to being unused, and was effectively removed 
in HBase 1.2.0 when we started ignoring the config that turns it on 
(link:https://issues.apache.org/jira/browse/HBASE-14465[HBASE-14465]). If you 
are currently using the feature, be sure to perform a clean shutdown, ensure 
all DLR work is complete, and disable the feature prior to upgrading.
 
+[[upgrade2.0.prefix-tree.removed]]
+._prefix-tree_ encoding removed
+
+The prefix-tree encoding was removed from HBase 2.0.0 
(link:https://issues.apache.org/jira/browse/HBASE-19179[HBASE-19179]).
+It was (late!) deprecated in hbase-1.2.7, hbase-1.4.0, and hbase-1.3.2.
+
+This feature was removed because it was not being actively maintained. If interested in reviving this
+sweet facility, which improved random read latencies at the expense of slower writes,
+write the HBase developers list at _dev at hbase dot apache dot org_.
+
+
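For anyone still on the encoding, the practical pre-upgrade step is to move each affected column family to
a different encoding and rewrite its store files. A sketch using the HBase shell (table, family, and
replacement encoding are illustrative):

    # Switch the family off PREFIX_TREE, then rewrite existing HFiles with the new encoding
    echo "alter 'mytable', { NAME => 'cf', DATA_BLOCK_ENCODING => 'FAST_DIFF' }
    major_compact 'mytable'" | bin/hbase shell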
 [[upgrade2.0.metrics]]
 .Changed metrics
 



hbase git commit: HBASE-20376 RowCounter and CellCounter documentations are incorrect

2018-04-12 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 e72a1c694 -> 263cc8d14


HBASE-20376 RowCounter and CellCounter documentations are incorrect


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/263cc8d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/263cc8d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/263cc8d1

Branch: refs/heads/branch-2.0
Commit: 263cc8d14371db4abeebcf2967393ee4a1ae614f
Parents: e72a1c6
Author: Peter Somogyi 
Authored: Thu Apr 12 10:05:17 2018 +0200
Committer: Peter Somogyi 
Committed: Thu Apr 12 10:06:42 2018 +0200

--
 bin/hbase   |  6 +++
 .../hadoop/hbase/mapreduce/CellCounter.java | 47 +++-
 .../hadoop/hbase/mapreduce/RowCounter.java  |  6 +--
 .../hadoop/hbase/mapreduce/TestRowCounter.java  | 22 +
 4 files changed, 45 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/263cc8d1/bin/hbase
--
diff --git a/bin/hbase b/bin/hbase
index 5087e59..78dbbdd 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -104,6 +104,8 @@ if [ $# = 0 ]; then
   echo "  canary  Run the Canary tool"
   echo "  version Print the version"
   echo "  regionsplitter  Run RegionSplitter tool"
+  echo "  rowcounter  Run RowCounter tool"
+  echo "  cellcounter Run CellCounter tool"
   echo "  CLASSNAME   Run the class named CLASSNAME"
   exit 1
 fi
@@ -459,6 +461,10 @@ elif [ "$COMMAND" = "version" ] ; then
   CLASS='org.apache.hadoop.hbase.util.VersionInfo'
 elif [ "$COMMAND" = "regionsplitter" ] ; then
   CLASS='org.apache.hadoop.hbase.util.RegionSplitter'
+elif [ "$COMMAND" = "rowcounter" ] ; then
+  CLASS='org.apache.hadoop.hbase.mapreduce.RowCounter'
+elif [ "$COMMAND" = "cellcounter" ] ; then
+  CLASS='org.apache.hadoop.hbase.mapreduce.CellCounter'
 else
   CLASS=$COMMAND
 fi

http://git-wip-us.apache.org/repos/asf/hbase/blob/263cc8d1/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
index aa79aac..ff0f01c 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
@@ -292,33 +292,38 @@ public class CellCounter extends Configured implements 
Tool {
   @Override
   public int run(String[] args) throws Exception {
 if (args.length < 2) {
-  System.err.println("ERROR: Wrong number of parameters: " + args.length);
-  System.err.println("Usage: CellCounter ");
-  System.err.println("  
[^[regex pattern] or " +
-"[Prefix] for row filter]] --starttime=[starttime] 
--endtime=[endtime]");
-  System.err.println("  Note: -D properties will be applied to the conf 
used. ");
-  System.err.println("  Additionally, all of the SCAN properties from 
TableInputFormat");
-  System.err.println("  can be specified to get fine grained control on 
what is counted..");
-  System.err.println("   -D " + TableInputFormat.SCAN_ROW_START + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_ROW_STOP + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_COLUMNS + "=\" 
...\"");
-  System.err.println("   -D " + TableInputFormat.SCAN_COLUMN_FAMILY + 
"=,, ...");
-  System.err.println("   -D " + TableInputFormat.SCAN_TIMESTAMP + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_TIMERANGE_START + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_TIMERANGE_END + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_MAXVERSIONS + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_CACHEDROWS + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_BATCHSIZE + 
"=");
-  System.err.println("  parameter can be used to override 
the default report separator " +
-  "string : used to separate the rowId/column family name and 
qualifier name.");
-  System.err.println(" [^[regex pattern] or [Prefix] parameter can be used 
to limit the cell counter count " +
-  "operation to a limited subset of rows from the table based on regex 
or prefix pattern.");
+  printUsage(args.length);
   return -1;
 }
 Job job = createSubmittableJob(getConf(), args);
 return (job.waitForCompletion(true) ? 0 : 1);
   }
 
+  private void printUsage(int parameterCount) {
+System.err.println("ERROR: Wrong number of 

hbase git commit: HBASE-20376 RowCounter and CellCounter documentations are incorrect

2018-04-12 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b0701434e -> e51ced4f1


HBASE-20376 RowCounter and CellCounter documentations are incorrect


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e51ced4f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e51ced4f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e51ced4f

Branch: refs/heads/branch-2
Commit: e51ced4f174759fd794a787ba82bf139875a6a96
Parents: b070143
Author: Peter Somogyi 
Authored: Thu Apr 12 10:05:17 2018 +0200
Committer: Peter Somogyi 
Committed: Thu Apr 12 10:05:45 2018 +0200

--
 bin/hbase   |  6 +++
 .../hadoop/hbase/mapreduce/CellCounter.java | 47 +++-
 .../hadoop/hbase/mapreduce/RowCounter.java  |  6 +--
 .../hadoop/hbase/mapreduce/TestRowCounter.java  | 22 +
 4 files changed, 45 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e51ced4f/bin/hbase
--
diff --git a/bin/hbase b/bin/hbase
index 5087e59..78dbbdd 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -104,6 +104,8 @@ if [ $# = 0 ]; then
   echo "  canary  Run the Canary tool"
   echo "  version Print the version"
   echo "  regionsplitter  Run RegionSplitter tool"
+  echo "  rowcounter  Run RowCounter tool"
+  echo "  cellcounter Run CellCounter tool"
   echo "  CLASSNAME   Run the class named CLASSNAME"
   exit 1
 fi
@@ -459,6 +461,10 @@ elif [ "$COMMAND" = "version" ] ; then
   CLASS='org.apache.hadoop.hbase.util.VersionInfo'
 elif [ "$COMMAND" = "regionsplitter" ] ; then
   CLASS='org.apache.hadoop.hbase.util.RegionSplitter'
+elif [ "$COMMAND" = "rowcounter" ] ; then
+  CLASS='org.apache.hadoop.hbase.mapreduce.RowCounter'
+elif [ "$COMMAND" = "cellcounter" ] ; then
+  CLASS='org.apache.hadoop.hbase.mapreduce.CellCounter'
 else
   CLASS=$COMMAND
 fi

http://git-wip-us.apache.org/repos/asf/hbase/blob/e51ced4f/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
index aa79aac..ff0f01c 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
@@ -292,33 +292,38 @@ public class CellCounter extends Configured implements 
Tool {
   @Override
   public int run(String[] args) throws Exception {
 if (args.length < 2) {
-  System.err.println("ERROR: Wrong number of parameters: " + args.length);
-  System.err.println("Usage: CellCounter ");
-  System.err.println("  
[^[regex pattern] or " +
-"[Prefix] for row filter]] --starttime=[starttime] 
--endtime=[endtime]");
-  System.err.println("  Note: -D properties will be applied to the conf 
used. ");
-  System.err.println("  Additionally, all of the SCAN properties from 
TableInputFormat");
-  System.err.println("  can be specified to get fine grained control on 
what is counted..");
-  System.err.println("   -D " + TableInputFormat.SCAN_ROW_START + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_ROW_STOP + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_COLUMNS + "=\" 
...\"");
-  System.err.println("   -D " + TableInputFormat.SCAN_COLUMN_FAMILY + 
"=,, ...");
-  System.err.println("   -D " + TableInputFormat.SCAN_TIMESTAMP + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_TIMERANGE_START + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_TIMERANGE_END + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_MAXVERSIONS + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_CACHEDROWS + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_BATCHSIZE + 
"=");
-  System.err.println("  parameter can be used to override 
the default report separator " +
-  "string : used to separate the rowId/column family name and 
qualifier name.");
-  System.err.println(" [^[regex pattern] or [Prefix] parameter can be used 
to limit the cell counter count " +
-  "operation to a limited subset of rows from the table based on regex 
or prefix pattern.");
+  printUsage(args.length);
   return -1;
 }
 Job job = createSubmittableJob(getConf(), args);
 return (job.waitForCompletion(true) ? 0 : 1);
   }
 
+  private void printUsage(int parameterCount) {
+System.err.println("ERROR: Wrong number of parameters: " 

hbase git commit: HBASE-20376 RowCounter and CellCounter documentations are incorrect

2018-04-12 Thread psomogyi
Repository: hbase
Updated Branches:
  refs/heads/master 5a69465ea -> c4ebf666b


HBASE-20376 RowCounter and CellCounter documentations are incorrect


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c4ebf666
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c4ebf666
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c4ebf666

Branch: refs/heads/master
Commit: c4ebf666b78f92a6d02652eece8dd95360bd0482
Parents: 5a69465
Author: Peter Somogyi 
Authored: Tue Apr 10 15:16:03 2018 +0200
Committer: Peter Somogyi 
Committed: Thu Apr 12 10:00:38 2018 +0200

--
 bin/hbase   |  6 +++
 .../hadoop/hbase/mapreduce/CellCounter.java | 47 +++-
 .../hadoop/hbase/mapreduce/RowCounter.java  |  6 +--
 .../hadoop/hbase/mapreduce/TestRowCounter.java  | 22 +
 src/main/asciidoc/_chapters/ops_mgt.adoc| 31 -
 5 files changed, 64 insertions(+), 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c4ebf666/bin/hbase
--
diff --git a/bin/hbase b/bin/hbase
index 8e37f5f..f1e2306 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -106,6 +106,8 @@ if [ $# = 0 ]; then
   echo "  backup  Backup tables for recovery"
   echo "  restore Restore tables from existing backup image"
   echo "  regionsplitter  Run RegionSplitter tool"
+  echo "  rowcounter  Run RowCounter tool"
+  echo "  cellcounter Run CellCounter tool"
   echo "  CLASSNAME   Run the class named CLASSNAME"
   exit 1
 fi
@@ -465,6 +467,10 @@ elif [ "$COMMAND" = "version" ] ; then
   CLASS='org.apache.hadoop.hbase.util.VersionInfo'
 elif [ "$COMMAND" = "regionsplitter" ] ; then
   CLASS='org.apache.hadoop.hbase.util.RegionSplitter'
+elif [ "$COMMAND" = "rowcounter" ] ; then
+  CLASS='org.apache.hadoop.hbase.mapreduce.RowCounter'
+elif [ "$COMMAND" = "cellcounter" ] ; then
+  CLASS='org.apache.hadoop.hbase.mapreduce.CellCounter'
 else
   CLASS=$COMMAND
 fi

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4ebf666/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
index aa79aac..ff0f01c 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
@@ -292,33 +292,38 @@ public class CellCounter extends Configured implements 
Tool {
   @Override
   public int run(String[] args) throws Exception {
 if (args.length < 2) {
-  System.err.println("ERROR: Wrong number of parameters: " + args.length);
-  System.err.println("Usage: CellCounter ");
-  System.err.println("  
[^[regex pattern] or " +
-"[Prefix] for row filter]] --starttime=[starttime] 
--endtime=[endtime]");
-  System.err.println("  Note: -D properties will be applied to the conf 
used. ");
-  System.err.println("  Additionally, all of the SCAN properties from 
TableInputFormat");
-  System.err.println("  can be specified to get fine grained control on 
what is counted..");
-  System.err.println("   -D " + TableInputFormat.SCAN_ROW_START + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_ROW_STOP + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_COLUMNS + "=\" 
...\"");
-  System.err.println("   -D " + TableInputFormat.SCAN_COLUMN_FAMILY + 
"=,, ...");
-  System.err.println("   -D " + TableInputFormat.SCAN_TIMESTAMP + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_TIMERANGE_START + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_TIMERANGE_END + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_MAXVERSIONS + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_CACHEDROWS + 
"=");
-  System.err.println("   -D " + TableInputFormat.SCAN_BATCHSIZE + 
"=");
-  System.err.println("  parameter can be used to override 
the default report separator " +
-  "string : used to separate the rowId/column family name and 
qualifier name.");
-  System.err.println(" [^[regex pattern] or [Prefix] parameter can be used 
to limit the cell counter count " +
-  "operation to a limited subset of rows from the table based on regex 
or prefix pattern.");
+  printUsage(args.length);
   return -1;
 }
 Job job = createSubmittableJob(getConf(), args);
 return (job.waitForCompletion(true) ? 0 : 1);
   }
 
+  private void