hbase git commit: Revert "Backport "HBASE-21126 Add ability for HBase Canary to ignore a configurable number of ZooKeeper down nodes" to branch-2.0"

2018-12-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 f60617ca0 -> ec39dc8c1


Revert "Backport "HBASE-21126 Add ability for HBase Canary to ignore a 
configurable number of ZooKeeper down nodes" to branch-2.0"

This reverts commit f60617ca0f7d142f22aced96f1e7865ae107a291.

Misapplied. Revert to fix.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ec39dc8c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ec39dc8c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ec39dc8c

Branch: refs/heads/branch-2.1
Commit: ec39dc8c149b9f89a91596d57d27de812973f0a9
Parents: f60617c
Author: stack 
Authored: Wed Dec 5 22:24:09 2018 -0800
Committer: stack 
Committed: Wed Dec 5 22:24:09 2018 -0800

--
 .../main/java/org/apache/hadoop/hbase/tool/Canary.java   | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ec39dc8c/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 71af23e..40f4aa6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -599,6 +599,7 @@ public final class Canary implements Tool {
* True if we are to run in zookeeper 'mode'.
*/
   private boolean zookeeperMode = false;
+
   private long permittedFailures = 0;
   private boolean regionServerAllRegions = false;
   private boolean writeSniffing = false;
@@ -891,8 +892,6 @@ public final class Canary implements Tool {
 "random one.");
 System.err.println(" -zookeeper  set 'zookeeper mode'; grab 
zookeeper.znode.parent on " +
 "each ensemble member");
-System.err.println(" -permittedZookeeperFailures  Ignore first N 
failures when attempting to " +
-"connect to individual zookeeper nodes in the ensemble");
 System.err.println(" -daemon continuous check at defined 
intervals.");
 System.err.println(" -intervalinterval between checks in seconds");
 System.err.println(" -e  consider table/regionserver argument 
as regular " +
@@ -958,7 +957,8 @@ public final class Canary implements Tool {
   monitor =
   new ZookeeperMonitor(connection, monitorTargets, this.useRegExp,
   getSink(connection.getConfiguration(), 
ZookeeperStdOutSink.class),
-  this.executor, this.treatFailureAsError, this.permittedFailures);
+  this.executor, this.treatFailureAsError,
+  this.permittedFailures);
 } else {
   monitor =
   new RegionMonitor(connection, monitorTargets, this.useRegExp,
@@ -1078,9 +1078,10 @@ public final class Canary implements Tool {
 public RegionMonitor(Connection connection, String[] monitorTargets, 
boolean useRegExp,
 Sink sink, ExecutorService executor, boolean writeSniffing, TableName 
writeTableName,
 boolean treatFailureAsError, HashMap 
configuredReadTableTimeouts,
-long configuredWriteTableTimeout, long allowedFailures) {
+long configuredWriteTableTimeout,
+long allowedFailures) {
   super(connection, monitorTargets, useRegExp, sink, executor, 
treatFailureAsError,
-  allowedFailures);
+  allowedFailures);
   Configuration conf = connection.getConfiguration();
   this.writeSniffing = writeSniffing;
   this.writeTableName = writeTableName;



hbase git commit: Backport "HBASE-21126 Add ability for HBase Canary to ignore a configurable number of ZooKeeper down nodes" to branch-2.0

2018-12-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 a93d4be03 -> 14f0f72ac


Backport "HBASE-21126 Add ability for HBase Canary to ignore a configurable 
number of ZooKeeper down nodes" to branch-2.0

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/14f0f72a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/14f0f72a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/14f0f72a

Branch: refs/heads/branch-2.0
Commit: 14f0f72ac49f986ab80678d8b85589076a9e38c5
Parents: a93d4be
Author: David Manning 
Authored: Wed Aug 29 12:06:59 2018 -0700
Committer: stack 
Committed: Wed Dec 5 22:22:11 2018 -0800

--
 .../org/apache/hadoop/hbase/tool/Canary.java| 50 +++-
 .../hadoop/hbase/tool/TestCanaryTool.java   | 35 +-
 2 files changed, 61 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/14f0f72a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 66cf637..e6aa0bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -599,6 +599,7 @@ public final class Canary implements Tool {
* True if we are to run in zookeeper 'mode'.
*/
   private boolean zookeeperMode = false;
+  private long permittedFailures = 0;
   private boolean regionServerAllRegions = false;
   private boolean writeSniffing = false;
   private long configuredWriteTableTimeout = DEFAULT_TIMEOUT;
@@ -755,6 +756,19 @@ public final class Canary implements Tool {
 }
 this.configuredReadTableTimeouts.put(nameTimeout[0], timeoutVal);
   }
+} else if (cmd.equals("-permittedZookeeperFailures")) {
+  i++;
+
+  if (i == args.length) {
+System.err.println("-permittedZookeeperFailures needs a numeric 
value argument.");
+printUsageAndExit();
+  }
+  try {
+this.permittedFailures = Long.parseLong(args[i]);
+  } catch (NumberFormatException e) {
+System.err.println("-permittedZookeeperFailures needs a numeric 
value argument.");
+printUsageAndExit();
+  }
 } else {
   // no options match
   System.err.println(cmd + " options is invalid.");
@@ -776,6 +790,10 @@ public final class Canary implements Tool {
 printUsageAndExit();
   }
 }
+if (this.permittedFailures != 0 && !this.zookeeperMode) {
+  System.err.println("-permittedZookeeperFailures requires -zookeeper 
mode.");
+  printUsageAndExit();
+}
 if (!this.configuredReadTableTimeouts.isEmpty() && (this.regionServerMode 
|| this.zookeeperMode)) {
   System.err.println("-readTableTimeouts can only be configured in region 
mode.");
   printUsageAndExit();
@@ -873,6 +891,8 @@ public final class Canary implements Tool {
 "random one.");
 System.err.println(" -zookeeper  set 'zookeeper mode'; grab 
zookeeper.znode.parent on " +
 "each ensemble member");
+System.err.println(" -permittedZookeeperFailures  Ignore first N 
failures when attempting to " +
+"connect to individual zookeeper nodes in the ensemble");
 System.err.println(" -daemon continuous check at defined 
intervals.");
 System.err.println(" -intervalinterval between checks in seconds");
 System.err.println(" -e  consider table/regionserver argument 
as regular " +
@@ -931,19 +951,19 @@ public final class Canary implements Tool {
   new RegionServerMonitor(connection, monitorTargets, this.useRegExp,
   getSink(connection.getConfiguration(), 
RegionServerStdOutSink.class),
   this.executor, this.regionServerAllRegions,
-  this.treatFailureAsError);
+  this.treatFailureAsError, this.permittedFailures);
 } else if (this.zookeeperMode) {
   monitor =
   new ZookeeperMonitor(connection, monitorTargets, this.useRegExp,
   getSink(connection.getConfiguration(), 
ZookeeperStdOutSink.class),
-  this.executor, this.treatFailureAsError);
+  this.executor, this.treatFailureAsError, this.permittedFailures);
 } else {
   monitor =
   new RegionMonitor(connection, monitorTargets, this.useRegExp,
   getSink(connection.getConfiguration(), RegionStdOutSink.class),
   this.executor, this.writeSniffing,
   this.writeTableName, this.treatFailureAsError, 

hbase git commit: Backport "HBASE-21126 Add ability for HBase Canary to ignore a configurable number of ZooKeeper down nodes" to branch-2.0

2018-12-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 7c1f15bd2 -> f60617ca0


Backport "HBASE-21126 Add ability for HBase Canary to ignore a configurable 
number of ZooKeeper down nodes" to branch-2.0

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f60617ca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f60617ca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f60617ca

Branch: refs/heads/branch-2.1
Commit: f60617ca0f7d142f22aced96f1e7865ae107a291
Parents: 7c1f15b
Author: David Manning 
Authored: Wed Aug 29 12:06:59 2018 -0700
Committer: stack 
Committed: Wed Dec 5 22:19:20 2018 -0800

--
 .../main/java/org/apache/hadoop/hbase/tool/Canary.java   | 11 +--
 1 file changed, 5 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f60617ca/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 40f4aa6..71af23e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -599,7 +599,6 @@ public final class Canary implements Tool {
* True if we are to run in zookeeper 'mode'.
*/
   private boolean zookeeperMode = false;
-
   private long permittedFailures = 0;
   private boolean regionServerAllRegions = false;
   private boolean writeSniffing = false;
@@ -892,6 +891,8 @@ public final class Canary implements Tool {
 "random one.");
 System.err.println(" -zookeeper  set 'zookeeper mode'; grab 
zookeeper.znode.parent on " +
 "each ensemble member");
+System.err.println(" -permittedZookeeperFailures  Ignore first N 
failures when attempting to " +
+"connect to individual zookeeper nodes in the ensemble");
 System.err.println(" -daemon continuous check at defined 
intervals.");
 System.err.println(" -intervalinterval between checks in seconds");
 System.err.println(" -e  consider table/regionserver argument 
as regular " +
@@ -957,8 +958,7 @@ public final class Canary implements Tool {
   monitor =
   new ZookeeperMonitor(connection, monitorTargets, this.useRegExp,
   getSink(connection.getConfiguration(), 
ZookeeperStdOutSink.class),
-  this.executor, this.treatFailureAsError,
-  this.permittedFailures);
+  this.executor, this.treatFailureAsError, this.permittedFailures);
 } else {
   monitor =
   new RegionMonitor(connection, monitorTargets, this.useRegExp,
@@ -1078,10 +1078,9 @@ public final class Canary implements Tool {
 public RegionMonitor(Connection connection, String[] monitorTargets, 
boolean useRegExp,
 Sink sink, ExecutorService executor, boolean writeSniffing, TableName 
writeTableName,
 boolean treatFailureAsError, HashMap 
configuredReadTableTimeouts,
-long configuredWriteTableTimeout,
-long allowedFailures) {
+long configuredWriteTableTimeout, long allowedFailures) {
   super(connection, monitorTargets, useRegExp, sink, executor, 
treatFailureAsError,
-  allowedFailures);
+  allowedFailures);
   Configuration conf = connection.getConfiguration();
   this.writeSniffing = writeSniffing;
   this.writeTableName = writeTableName;



hbase git commit: Add 'strong' notice that 2.1.1 and 2.0.3 have a memory leak

2018-12-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 67ab8b888 -> 12e75a8a6


Add 'strong' notice that 2.1.1 and 2.0.3 have a memory leak


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/12e75a8a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/12e75a8a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/12e75a8a

Branch: refs/heads/master
Commit: 12e75a8a635785b279900b6905c86a1617526c72
Parents: 67ab8b8
Author: stack 
Authored: Wed Dec 5 21:50:39 2018 -0800
Committer: stack 
Committed: Wed Dec 5 21:50:39 2018 -0800

--
 src/site/xdoc/downloads.xml | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/12e75a8a/src/site/xdoc/downloads.xml
--
diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml
index 7e81afd..5d3f2a6 100644
--- a/src/site/xdoc/downloads.xml
+++ b/src/site/xdoc/downloads.xml
@@ -32,6 +32,7 @@ under the License.
  <a href="https://www.apache.org/dyn/closer.cgi#verify">Verify The Integrity 
Of The Files</a> for
  how to verify your mirrored downloads.
   
+  NOTE: 2.1.1 and 2.0.3 have a serious memory 
leak. See HBASE-21551. We are working on replacement releases.
   
   
 



hbase git commit: HBASE-21558 Set version to 2.1.2 on branch-2.1 so can cut an RC

2018-12-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 0b181af4e -> 7c1f15bd2


HBASE-21558 Set version to 2.1.2 on branch-2.1 so can cut an RC


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c1f15bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c1f15bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c1f15bd

Branch: refs/heads/branch-2.1
Commit: 7c1f15bd2a2173ce2588332e2e1a0051fb31613f
Parents: 0b181af
Author: stack 
Authored: Wed Dec 5 21:24:04 2018 -0800
Committer: stack 
Committed: Wed Dec 5 21:24:04 2018 -0800

--
 hbase-annotations/pom.xml  | 2 +-
 hbase-archetypes/hbase-archetype-builder/pom.xml   | 2 +-
 hbase-archetypes/hbase-client-project/pom.xml  | 2 +-
 hbase-archetypes/hbase-shaded-client-project/pom.xml   | 2 +-
 hbase-archetypes/pom.xml   | 2 +-
 hbase-assembly/pom.xml | 2 +-
 hbase-build-configuration/pom.xml  | 2 +-
 hbase-build-support/hbase-error-prone/pom.xml  | 4 ++--
 hbase-build-support/pom.xml| 2 +-
 hbase-checkstyle/pom.xml   | 4 ++--
 hbase-client/pom.xml   | 2 +-
 hbase-common/pom.xml   | 2 +-
 hbase-endpoint/pom.xml | 2 +-
 hbase-examples/pom.xml | 2 +-
 hbase-external-blockcache/pom.xml  | 2 +-
 hbase-hadoop-compat/pom.xml| 2 +-
 hbase-hadoop2-compat/pom.xml   | 2 +-
 hbase-http/pom.xml | 2 +-
 hbase-it/pom.xml   | 2 +-
 hbase-mapreduce/pom.xml| 2 +-
 hbase-metrics-api/pom.xml  | 2 +-
 hbase-metrics/pom.xml  | 2 +-
 hbase-procedure/pom.xml| 2 +-
 hbase-protocol-shaded/pom.xml  | 2 +-
 hbase-protocol/pom.xml | 2 +-
 hbase-replication/pom.xml  | 2 +-
 hbase-resource-bundle/pom.xml  | 2 +-
 hbase-rest/pom.xml | 2 +-
 hbase-rsgroup/pom.xml  | 2 +-
 hbase-server/pom.xml   | 2 +-
 hbase-shaded/hbase-shaded-check-invariants/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-client-byo-hadoop/pom.xml| 2 +-
 hbase-shaded/hbase-shaded-client/pom.xml   | 2 +-
 hbase-shaded/hbase-shaded-mapreduce/pom.xml| 2 +-
 hbase-shaded/hbase-shaded-with-hadoop-check-invariants/pom.xml | 2 +-
 hbase-shaded/pom.xml   | 2 +-
 hbase-shell/pom.xml| 2 +-
 hbase-testing-util/pom.xml | 2 +-
 hbase-thrift/pom.xml   | 2 +-
 hbase-zookeeper/pom.xml| 2 +-
 pom.xml| 2 +-
 41 files changed, 43 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1f15bd/hbase-annotations/pom.xml
--
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index c177c1d..2a245d4 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-2.1.2-SNAPSHOT
+2.1.2
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1f15bd/hbase-archetypes/hbase-archetype-builder/pom.xml
--
diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml 
b/hbase-archetypes/hbase-archetype-builder/pom.xml
index 81582c7..71aab9c 100644
--- a/hbase-archetypes/hbase-archetype-builder/pom.xml
+++ b/hbase-archetypes/hbase-archetype-builder/pom.xml
@@ -25,7 +25,7 @@
   
 hbase-archetypes
 org.apache.hbase
-2.1.2-SNAPSHOT
+2.1.2
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c1f15bd/hbase-archetypes/hbase-client-project/pom.xml
--
diff --git a/hbase-archetypes/hbase-client-project/pom.xml 
b/hbase-archetypes/hbase-client-project/pom.xml
index 

hbase git commit: HBASE-21557 Set version to 2.0.4 on branch-2.0 so can cut an RC

2018-12-05 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 851606809 -> a93d4be03


HBASE-21557 Set version to 2.0.4 on branch-2.0 so can cut an RC


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a93d4be0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a93d4be0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a93d4be0

Branch: refs/heads/branch-2.0
Commit: a93d4be0355acf0bd64af3e11ad511071c61aa81
Parents: 8516068
Author: stack 
Authored: Wed Dec 5 21:19:48 2018 -0800
Committer: stack 
Committed: Wed Dec 5 21:19:48 2018 -0800

--
 hbase-annotations/pom.xml| 2 +-
 hbase-archetypes/hbase-archetype-builder/pom.xml | 2 +-
 hbase-archetypes/hbase-client-project/pom.xml| 2 +-
 hbase-archetypes/hbase-shaded-client-project/pom.xml | 2 +-
 hbase-archetypes/pom.xml | 2 +-
 hbase-assembly/pom.xml   | 2 +-
 hbase-build-configuration/pom.xml| 2 +-
 hbase-build-support/hbase-error-prone/pom.xml| 4 ++--
 hbase-build-support/pom.xml  | 2 +-
 hbase-checkstyle/pom.xml | 4 ++--
 hbase-client/pom.xml | 2 +-
 hbase-common/pom.xml | 2 +-
 hbase-endpoint/pom.xml   | 2 +-
 hbase-examples/pom.xml   | 2 +-
 hbase-external-blockcache/pom.xml| 2 +-
 hbase-hadoop-compat/pom.xml  | 2 +-
 hbase-hadoop2-compat/pom.xml | 2 +-
 hbase-http/pom.xml   | 2 +-
 hbase-it/pom.xml | 2 +-
 hbase-mapreduce/pom.xml  | 2 +-
 hbase-metrics-api/pom.xml| 2 +-
 hbase-metrics/pom.xml| 2 +-
 hbase-procedure/pom.xml  | 2 +-
 hbase-protocol-shaded/pom.xml| 2 +-
 hbase-protocol/pom.xml   | 2 +-
 hbase-replication/pom.xml| 2 +-
 hbase-resource-bundle/pom.xml| 2 +-
 hbase-rest/pom.xml   | 2 +-
 hbase-rsgroup/pom.xml| 2 +-
 hbase-server/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-check-invariants/pom.xml   | 2 +-
 hbase-shaded/hbase-shaded-client/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-mapreduce/pom.xml  | 2 +-
 hbase-shaded/pom.xml | 2 +-
 hbase-shell/pom.xml  | 2 +-
 hbase-testing-util/pom.xml   | 2 +-
 hbase-thrift/pom.xml | 2 +-
 hbase-zookeeper/pom.xml  | 2 +-
 pom.xml  | 2 +-
 39 files changed, 41 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a93d4be0/hbase-annotations/pom.xml
--
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index 1b60c52..5703752 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-2.0.4-SNAPSHOT
+2.0.4
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a93d4be0/hbase-archetypes/hbase-archetype-builder/pom.xml
--
diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml 
b/hbase-archetypes/hbase-archetype-builder/pom.xml
index 37fefdd..bb54287 100644
--- a/hbase-archetypes/hbase-archetype-builder/pom.xml
+++ b/hbase-archetypes/hbase-archetype-builder/pom.xml
@@ -25,7 +25,7 @@
   
 hbase-archetypes
 org.apache.hbase
-2.0.4-SNAPSHOT
+2.0.4
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a93d4be0/hbase-archetypes/hbase-client-project/pom.xml
--
diff --git a/hbase-archetypes/hbase-client-project/pom.xml 
b/hbase-archetypes/hbase-client-project/pom.xml
index 95e47d7..e219515 100644
--- a/hbase-archetypes/hbase-client-project/pom.xml
+++ b/hbase-archetypes/hbase-client-project/pom.xml
@@ -26,7 +26,7 @@
   
 hbase-archetypes
 org.apache.hbase
-2.0.4-SNAPSHOT
+2.0.4
 ..
   
   hbase-client-project

http://git-wip-us.apache.org/repos/asf/hbase/blob/a93d4be0/hbase-archetypes/hbase-shaded-client-project/pom.xml
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/pom.xml 

hbase git commit: HBASE-21551 Memory leak when use scan with STREAM at server side - (addendum)

2018-12-05 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 19ad63c16 -> 851606809


HBASE-21551 Memory leak when use scan with STREAM at server side - (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/85160680
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/85160680
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/85160680

Branch: refs/heads/branch-2.0
Commit: 85160680966b2f221b782777fd87611625f23a07
Parents: 19ad63c
Author: huzheng 
Authored: Thu Dec 6 11:26:52 2018 +0800
Committer: huzheng 
Committed: Thu Dec 6 11:43:09 2018 +0800

--
 .../hbase/regionserver/TestSwitchToStreamRead.java  | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/85160680/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
index 6e12fbe..7bdf2aa 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
@@ -44,9 +44,9 @@ import 
org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -70,8 +70,8 @@ public class TestSwitchToStreamRead {
 
   private static HRegion REGION;
 
-  @BeforeClass
-  public static void setUp() throws IOException {
+  @Before
+  public void setUp() throws IOException {
 UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 
2048);
 StringBuilder sb = new StringBuilder(256);
 for (int i = 0; i < 255; i++) {
@@ -95,8 +95,8 @@ public class TestSwitchToStreamRead {
 }
   }
 
-  @AfterClass
-  public static void tearDown() throws IOException {
+  @After
+  public void tearDown() throws IOException {
 REGION.close(true);
 UTIL.cleanupTestDir();
   }



hbase git commit: HBASE-21551 Memory leak when use scan with STREAM at server side - (addendum)

2018-12-05 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 e9b0d7379 -> 0b181af4e


HBASE-21551 Memory leak when use scan with STREAM at server side - (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b181af4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b181af4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b181af4

Branch: refs/heads/branch-2.1
Commit: 0b181af4ec45fc5eb5b2448a24e461d2c98b6448
Parents: e9b0d73
Author: huzheng 
Authored: Thu Dec 6 11:26:52 2018 +0800
Committer: huzheng 
Committed: Thu Dec 6 11:28:28 2018 +0800

--
 .../hbase/regionserver/TestSwitchToStreamRead.java  | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0b181af4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
index 037b13e..c1cecf8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
@@ -47,9 +47,9 @@ import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -74,8 +74,8 @@ public class TestSwitchToStreamRead {
 
   private static HRegion REGION;
 
-  @BeforeClass
-  public static void setUp() throws IOException {
+  @Before
+  public void setUp() throws IOException {
 UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 
2048);
 StringBuilder sb = new StringBuilder(256);
 for (int i = 0; i < 255; i++) {
@@ -99,8 +99,8 @@ public class TestSwitchToStreamRead {
 }
   }
 
-  @AfterClass
-  public static void tearDown() throws IOException {
+  @After
+  public void tearDown() throws IOException {
 REGION.close(true);
 UTIL.cleanupTestDir();
   }



hbase git commit: HBASE-21551 Memory leak when use scan with STREAM at server side - (addendum)

2018-12-05 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/master 3b854859f -> 67ab8b888


HBASE-21551 Memory leak when use scan with STREAM at server side - (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/67ab8b88
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/67ab8b88
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/67ab8b88

Branch: refs/heads/master
Commit: 67ab8b888f8b393979624a2bd7d527fefd9dd6d7
Parents: 3b85485
Author: huzheng 
Authored: Thu Dec 6 11:26:52 2018 +0800
Committer: huzheng 
Committed: Thu Dec 6 11:26:52 2018 +0800

--
 .../hbase/regionserver/TestSwitchToStreamRead.java  | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/67ab8b88/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
index 037b13e..c1cecf8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
@@ -47,9 +47,9 @@ import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -74,8 +74,8 @@ public class TestSwitchToStreamRead {
 
   private static HRegion REGION;
 
-  @BeforeClass
-  public static void setUp() throws IOException {
+  @Before
+  public void setUp() throws IOException {
 UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 
2048);
 StringBuilder sb = new StringBuilder(256);
 for (int i = 0; i < 255; i++) {
@@ -99,8 +99,8 @@ public class TestSwitchToStreamRead {
 }
   }
 
-  @AfterClass
-  public static void tearDown() throws IOException {
+  @After
+  public void tearDown() throws IOException {
 REGION.close(true);
 UTIL.cleanupTestDir();
   }



hbase git commit: HBASE-21551 Memory leak when use scan with STREAM at server side

2018-12-05 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2 96f8e0cbe -> 1a1a65b56


HBASE-21551 Memory leak when use scan with STREAM at server side


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1a1a65b5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1a1a65b5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1a1a65b5

Branch: refs/heads/branch-2
Commit: 1a1a65b565354e4a6349db3be22dd5630cf60179
Parents: 96f8e0c
Author: huzheng 
Authored: Wed Dec 5 22:57:49 2018 +0800
Committer: huzheng 
Committed: Thu Dec 6 11:20:38 2018 +0800

--
 .../hadoop/hbase/regionserver/HStoreFile.java   |  3 +-
 .../hbase/regionserver/StoreFileReader.java |  3 +
 .../regionserver/TestSwitchToStreamRead.java| 62 ++--
 3 files changed, 61 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1a1a65b5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
index 4aff949..9c94990 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
@@ -126,7 +126,8 @@ public class HStoreFile implements StoreFile, 
StoreFileReader.Listener {
   private final AtomicInteger refCount = new AtomicInteger(0);
 
   // Set implementation must be of concurrent type
-  private final Set streamReaders;
+  @VisibleForTesting
+  final Set streamReaders;
 
   private final boolean noReadahead;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1a1a65b5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index 3fbddf2..d9008b2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -186,6 +186,9 @@ public class StoreFileReader {
 if (!shared) {
   try {
 reader.close(false);
+if (this.listener != null) {
+  this.listener.storeFileReaderClosed(this);
+}
   } catch (IOException e) {
 LOG.warn("failed to close stream reader", e);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1a1a65b5/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
index 815643d..c1cecf8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
@@ -23,8 +23,13 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.stream.Collectors;
+
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -33,6 +38,7 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterBase;
@@ -41,8 +47,9 @@ import 
org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -67,8 +74,8 @@ public class TestSwitchToStreamRead {
 
   private static HRegion REGION;
 

hbase git commit: HBASE-21551 Memory leak when use scan with STREAM at server side

2018-12-05 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 361dea85c -> 19ad63c16


HBASE-21551 Memory leak when use scan with STREAM at server side


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/19ad63c1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/19ad63c1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/19ad63c1

Branch: refs/heads/branch-2.0
Commit: 19ad63c16b73d9cbd1fa8232e493319f7e12ebfe
Parents: 361dea8
Author: huzheng 
Authored: Wed Dec 5 22:57:49 2018 +0800
Committer: huzheng 
Committed: Thu Dec 6 11:02:49 2018 +0800

--
 .../hadoop/hbase/regionserver/HStoreFile.java   |  3 +-
 .../hbase/regionserver/StoreFileReader.java |  3 ++
 .../regionserver/TestSwitchToStreamRead.java| 50 
 3 files changed, 55 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/19ad63c1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
index 4a0c66f..17789d4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
@@ -123,7 +123,8 @@ public class HStoreFile implements StoreFile, 
StoreFileReader.Listener {
   private final AtomicInteger refCount = new AtomicInteger(0);
 
   // Set implementation must be of concurrent type
-  private final Set streamReaders;
+  @VisibleForTesting
+  final Set streamReaders;
 
   private final boolean noReadahead;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/19ad63c1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index b500abf..0a4efb0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -181,6 +181,9 @@ public class StoreFileReader {
 if (!shared) {
   try {
 reader.close(false);
+if (this.listener != null) {
+  this.listener.storeFileReaderClosed(this);
+}
   } catch (IOException e) {
 LOG.warn("failed to close stream reader", e);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/19ad63c1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
index 0af2970..6e12fbe 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
@@ -23,8 +23,13 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.stream.Collectors;
+
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -33,12 +38,14 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -94,6 +101,49 @@ public class TestSwitchToStreamRead {
 UTIL.cleanupTestDir();
   }
 
+  private Set getStreamReaders() {
+List stores = REGION.getStores();
+Assert.assertEquals(1, stores.size());
+HStore firstStore = stores.get(0);
+

hbase git commit: HBASE-21551 Memory leak when use scan with STREAM at server side

2018-12-05 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 58cfed60e -> e9b0d7379


HBASE-21551 Memory leak when use scan with STREAM at server side


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e9b0d737
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e9b0d737
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e9b0d737

Branch: refs/heads/branch-2.1
Commit: e9b0d737937e2b8557c5bc17feea3b820e282c19
Parents: 58cfed6
Author: huzheng 
Authored: Wed Dec 5 22:57:49 2018 +0800
Committer: huzheng 
Committed: Thu Dec 6 10:58:04 2018 +0800

--
 .../hadoop/hbase/regionserver/HStoreFile.java   |  3 +-
 .../hbase/regionserver/StoreFileReader.java |  3 ++
 .../regionserver/TestSwitchToStreamRead.java| 50 
 3 files changed, 55 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e9b0d737/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
index 4a0c66f..17789d4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
@@ -123,7 +123,8 @@ public class HStoreFile implements StoreFile, 
StoreFileReader.Listener {
   private final AtomicInteger refCount = new AtomicInteger(0);
 
   // Set implementation must be of concurrent type
-  private final Set streamReaders;
+  @VisibleForTesting
+  final Set streamReaders;
 
   private final boolean noReadahead;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e9b0d737/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index b500abf..0a4efb0 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -181,6 +181,9 @@ public class StoreFileReader {
 if (!shared) {
   try {
 reader.close(false);
+if (this.listener != null) {
+  this.listener.storeFileReaderClosed(this);
+}
   } catch (IOException e) {
 LOG.warn("failed to close stream reader", e);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e9b0d737/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
index 815643d..037b13e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
@@ -23,8 +23,13 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.stream.Collectors;
+
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -33,6 +38,7 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterBase;
@@ -42,6 +48,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Ignore;
@@ -98,6 +105,49 @@ public class TestSwitchToStreamRead {
 UTIL.cleanupTestDir();
   }
 
+  private Set getStreamReaders() {
+List stores = REGION.getStores();
+Assert.assertEquals(1, stores.size());
+HStore firstStore = 

hbase git commit: HBASE-21551 Memory leak when use scan with STREAM at server side

2018-12-05 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/master f49baf259 -> 3b854859f


HBASE-21551 Memory leak when use scan with STREAM at server side


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b854859
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b854859
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b854859

Branch: refs/heads/master
Commit: 3b854859f6fad44cbf31164374569a6ab23f3623
Parents: f49baf2
Author: huzheng 
Authored: Wed Dec 5 22:57:49 2018 +0800
Committer: huzheng 
Committed: Thu Dec 6 10:55:42 2018 +0800

--
 .../hadoop/hbase/regionserver/HStoreFile.java   |  3 +-
 .../hbase/regionserver/StoreFileReader.java |  3 ++
 .../regionserver/TestSwitchToStreamRead.java| 50 
 3 files changed, 55 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b854859/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
index 4aff949..9c94990 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
@@ -126,7 +126,8 @@ public class HStoreFile implements StoreFile, 
StoreFileReader.Listener {
   private final AtomicInteger refCount = new AtomicInteger(0);
 
   // Set implementation must be of concurrent type
-  private final Set streamReaders;
+  @VisibleForTesting
+  final Set streamReaders;
 
   private final boolean noReadahead;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3b854859/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index 3fbddf2..d9008b2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -186,6 +186,9 @@ public class StoreFileReader {
 if (!shared) {
   try {
 reader.close(false);
+if (this.listener != null) {
+  this.listener.storeFileReaderClosed(this);
+}
   } catch (IOException e) {
 LOG.warn("failed to close stream reader", e);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3b854859/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
index 815643d..037b13e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
@@ -23,8 +23,13 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.stream.Collectors;
+
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -33,6 +38,7 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterBase;
@@ -42,6 +48,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Ignore;
@@ -98,6 +105,49 @@ public class TestSwitchToStreamRead {
 UTIL.cleanupTestDir();
   }
 
+  private Set getStreamReaders() {
+List stores = REGION.getStores();
+Assert.assertEquals(1, stores.size());
+HStore firstStore = 

hbase git commit: HBASE-21534 TestAssignmentManager is flakey

2018-12-05 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 7e8ce1c5c -> 96f8e0cbe


HBASE-21534 TestAssignmentManager is flakey


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/96f8e0cb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/96f8e0cb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/96f8e0cb

Branch: refs/heads/branch-2
Commit: 96f8e0cbe9b6d1d6a5f4773c7b210b026c65b300
Parents: 7e8ce1c
Author: Duo Zhang 
Authored: Fri Nov 30 15:26:04 2018 +0800
Committer: Duo Zhang 
Committed: Thu Dec 6 09:54:18 2018 +0800

--
 .../assignment/TestAssignmentManagerBase.java   | 25 +---
 1 file changed, 22 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/96f8e0cb/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
index 7ab37bc..f666ab8 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertNotEquals;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.io.UncheckedIOException;
 import java.net.SocketTimeoutException;
 import java.util.Arrays;
 import java.util.NavigableMap;
@@ -38,8 +39,10 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
@@ -64,6 +67,8 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
@@ -110,9 +115,11 @@ public abstract class TestAssignmentManagerBase {
   protected long unassignSubmittedCount = 0;
   protected long unassignFailedCount = 0;
 
+  protected int newRsAdded;
+
   protected int getAssignMaxAttempts() {
 // Have many so we succeed eventually.
-return 100;
+return 1000;
   }
 
   protected void setupConfiguration(Configuration conf) throws Exception {
@@ -127,11 +134,13 @@ public abstract class TestAssignmentManagerBase {
   @Before
   public void setUp() throws Exception {
 util = new HBaseTestingUtility();
-this.executor = Executors.newSingleThreadScheduledExecutor();
+this.executor = Executors.newSingleThreadScheduledExecutor(new 
ThreadFactoryBuilder()
+  .setUncaughtExceptionHandler((t, e) -> LOG.warn("Uncaught: ", 
e)).build());
 setupConfiguration(util.getConfiguration());
 master = new MockMasterServices(util.getConfiguration(), 
this.regionsToRegionServers);
 rsDispatcher = new MockRSProcedureDispatcher(master);
 master.start(NSERVERS, rsDispatcher);
+newRsAdded = 0;
 am = master.getAssignmentManager();
 assignProcMetrics = 
am.getAssignmentManagerMetrics().getAssignProcMetrics();
 unassignProcMetrics = 
am.getAssignmentManagerMetrics().getUnassignProcMetrics();
@@ -186,7 +195,7 @@ public abstract class TestAssignmentManagerBase {
 
   protected byte[] waitOnFuture(final Future future) throws Exception {
 try {
-  return future.get(60, TimeUnit.SECONDS);
+  return future.get(3, TimeUnit.MINUTES);
 } catch (ExecutionException e) {
   LOG.info("ExecutionException", e);
   Exception ee = (Exception) e.getCause();
@@ -271,7 +280,17 @@ public abstract class TestAssignmentManagerBase {
   }
 
   protected void doCrash(final ServerName serverName) {
+this.master.getServerManager().moveFromOnlineToDeadServers(serverName);
 this.am.submitServerCrash(serverName, false/* No WALs here */);
+// add a new server to avoid killing all the region servers which may hang 
the UTs
+ServerName newSn = ServerName.valueOf("localhost", 1 + newRsAdded, 1);
+

svn commit: r31382 - in /dev/hbase/hbase-1.4.9RC1: ./ compat-check-report.html hbase-1.4.9-bin.tar.gz hbase-1.4.9-bin.tar.gz.asc hbase-1.4.9-bin.tar.gz.sha512 hbase-1.4.9-src.tar.gz hbase-1.4.9-src.ta

2018-12-05 Thread apurtell
Author: apurtell
Date: Thu Dec  6 01:21:58 2018
New Revision: 31382

Log:
Stage HBase 1.4.9RC1 artifacts

Added:
dev/hbase/hbase-1.4.9RC1/
dev/hbase/hbase-1.4.9RC1/compat-check-report.html
dev/hbase/hbase-1.4.9RC1/hbase-1.4.9-bin.tar.gz   (with props)
dev/hbase/hbase-1.4.9RC1/hbase-1.4.9-bin.tar.gz.asc
dev/hbase/hbase-1.4.9RC1/hbase-1.4.9-bin.tar.gz.sha512
dev/hbase/hbase-1.4.9RC1/hbase-1.4.9-src.tar.gz   (with props)
dev/hbase/hbase-1.4.9RC1/hbase-1.4.9-src.tar.gz.asc
dev/hbase/hbase-1.4.9RC1/hbase-1.4.9-src.tar.gz.sha512

Added: dev/hbase/hbase-1.4.9RC1/compat-check-report.html
==
--- dev/hbase/hbase-1.4.9RC1/compat-check-report.html (added)
+++ dev/hbase/hbase-1.4.9RC1/compat-check-report.html Thu Dec  6 01:21:58 2018
@@ -0,0 +1,635 @@
+
+
+http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
+http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
+
+
+
+
+
+hbase: rel/1.4.8 to 1.4.9RC1 compatibility report
+
+body {
+font-family:Arial, sans-serif;
+background-color:White;
+color:Black;
+}
+hr {
+color:Black;
+background-color:Black;
+height:1px;
+border:0;
+}
+h1 {
+margin-bottom:0px;
+padding-bottom:0px;
+font-size:1.625em;
+}
+h2 {
+margin-bottom:0px;
+padding-bottom:0px;
+font-size:1.25em;
+white-space:nowrap;
+}
+div.symbols {
+color:#003E69;
+}
+div.symbols i {
+color:Brown;
+}
+span.section {
+font-weight:bold;
+cursor:pointer;
+color:#003E69;
+white-space:nowrap;
+margin-left:0.3125em;
+}
+span:hover.section {
+color:#336699;
+}
+span.sect_aff {
+cursor:pointer;
+padding-left:1.55em;
+font-size:0.875em;
+color:#cc3300;
+}
+span.ext {
+font-weight:normal;
+}
+span.jar {
+color:#cc3300;
+font-size:0.875em;
+font-weight:bold;
+}
+div.jar_list {
+padding-left:0.4em;
+font-size:0.94em;
+}
+span.pkg_t {
+color:#408080;
+font-size:0.875em;
+}
+span.pkg {
+color:#408080;
+font-size:0.875em;
+font-weight:bold;
+}
+span.cname {
+color:Green;
+font-size:0.875em;
+font-weight:bold;
+}
+span.iname_b {
+font-weight:bold;
+}
+span.iname_a {
+color:#33;
+font-weight:bold;
+font-size:0.94em;
+}
+span.sym_p {
+font-weight:normal;
+white-space:normal;
+}
+span.sym_pd {
+white-space:normal;
+}
+span.sym_p span, span.sym_pd span {
+white-space:nowrap;
+}
+span.attr {
+color:Black;
+font-weight:normal;
+}
+span.deprecated {
+color:Red;
+font-weight:bold;
+font-family:Monaco, monospace;
+}
+div.affect {
+padding-left:1em;
+padding-bottom:10px;
+font-size:0.87em;
+font-style:italic;
+line-height:0.9em;
+}
+div.affected {
+padding-left:2em;
+padding-top:10px;
+}
+table.ptable {
+border-collapse:collapse;
+border:1px outset black;
+margin-left:0.95em;
+margin-top:3px;
+margin-bottom:3px;
+width:56.25em;
+}
+table.ptable td {
+border:1px solid Gray;
+padding:3px;
+font-size:0.875em;
+text-align:left;
+vertical-align:top;
+max-width:28em;
+word-wrap:break-word;
+}
+table.ptable th {
+background-color:#ee;
+font-weight:bold;
+color:#33;
+font-family:Verdana, Arial;
+font-size:0.875em;
+border:1px solid Gray;
+text-align:center;
+vertical-align:top;
+white-space:nowrap;
+padding:3px;
+}
+table.summary {
+border-collapse:collapse;
+border:1px outset black;
+}
+table.summary th {
+background-color:#ee;
+font-weight:normal;
+text-align:left;
+font-size:0.94em;
+white-space:nowrap;
+border:1px inset Gray;
+padding:3px;
+}
+table.summary td {
+text-align:right;
+white-space:nowrap;
+border:1px inset Gray;
+padding:3px 5px 3px 10px;
+}
+span.mngl {
+padding-left:1em;
+font-size:0.875em;
+cursor:text;
+color:#44;
+font-weight:bold;
+}
+span.pleft {
+padding-left:2.5em;
+}
+span.color_p {
+font-style:italic;
+color:Brown;
+}
+span.param {
+font-style:italic;
+}
+span.focus_p {
+font-style:italic;
+background-color:#DCDCDC;
+}
+span.ttype {
+font-weight:normal;
+}
+span.nowrap {
+white-space:nowrap;
+}
+span.value {
+white-space:nowrap;
+font-weight:bold;
+}
+.passed {
+background-color:#CCFFCC;
+font-weight:normal;
+}
+.warning {
+background-color:#F4F4AF;
+font-weight:normal;
+}
+.failed {
+background-color:#FF;
+font-weight:normal;
+}
+.new {
+background-color:#C6DEFF;
+font-weight:normal;
+}
+
+.compatible {
+background-color:#CCFFCC;
+font-weight:normal;
+}
+.almost_compatible {
+background-color:#FFDAA3;
+font-weight:normal;
+}
+.incompatible {
+background-color:#FF;
+font-weight:normal;
+}
+.gray {
+background-color:#DCDCDC;
+font-weight:normal;
+}
+
+.top_ref {
+font-size:0.69em;
+}

[2/2] hbase git commit: Update CHANGES.txt for 1.4.9rc1

2018-12-05 Thread apurtell
Update CHANGES.txt for 1.4.9rc1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d625b212
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d625b212
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d625b212

Branch: refs/heads/branch-1.4
Commit: d625b212e46d01cb17db9ac2e9e927fdb201afa1
Parents: e7c79c7
Author: Andrew Purtell 
Authored: Wed Dec 5 10:58:34 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 5 10:58:34 2018 -0800

--
 CHANGES.txt | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d625b212/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index ed5e3ca..7ad28d4 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -20,6 +20,7 @@ Release Notes - HBase - Version 1.4.9 12/7/2018
 * [HBASE-21445] - CopyTable by bulkload will write hfile into yarn's HDFS 
 * [HBASE-21464] - Splitting blocked with meta NSRE during split transaction
 * [HBASE-21504] - If enable FIFOCompactionPolicy, a compaction may write a 
"empty" hfile whose maxTimeStamp is long max. This kind of hfile will never be 
archived.
+* [HBASE-21546] - ConnectException in TestThriftHttpServer
 
 ** Improvement
 * [HBASE-21103] - nightly test cache of yetus install needs to be more 
thorough in verification



hbase git commit: HBASE-21464 Splitting blocked with meta NSRE during split transaction

2018-12-05 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 333cd1972 -> e80cc3286


HBASE-21464 Splitting blocked with meta NSRE during split transaction

Signed-off-by: Lars Hofhansl 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e80cc328
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e80cc328
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e80cc328

Branch: refs/heads/branch-1
Commit: e80cc3286c30c8184f826ffae6ae1c78c9e31719
Parents: 333cd19
Author: Andrew Purtell 
Authored: Fri Nov 30 15:23:34 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 5 10:54:35 2018 -0800

--
 .../hadoop/hbase/client/ConnectionManager.java  | 42 
 1 file changed, 25 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e80cc328/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 7cf09c2..35ffa3e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -1231,38 +1231,46 @@ class ConnectionManager {
   }
 }
 
+private volatile RegionLocations metaLocations = null;
+private volatile long lastMetaLookupTime = 
EnvironmentEdgeManager.currentTime();
+// cache meta location at most 10 seconds
+private final static long META_LOOKUP_CACHE_INTERVAL = 10000;
+
 private RegionLocations locateMeta(final TableName tableName,
 boolean useCache, int replicaId) throws IOException {
-  // HBASE-10785: We cache the location of the META itself, so that we are 
not overloading
-  // zookeeper with one request for every region lookup. We cache the META 
with empty row
-  // key in MetaCache.
-  byte[] metaCacheKey = HConstants.EMPTY_START_ROW; // use byte[0] as the 
row for meta
-  RegionLocations locations = null;
+  // We cache the location of the META itself, so that we are not 
overloading
+  // zookeeper with one request for every region lookup. If relocating, 
bypass
+  // the cache immediately.
   if (useCache) {
-locations = getCachedLocation(tableName, metaCacheKey);
-if (locations != null && locations.getRegionLocation(replicaId) != 
null) {
-  return locations;
+long now = EnvironmentEdgeManager.currentTime();
+if (now - lastMetaLookupTime < META_LOOKUP_CACHE_INTERVAL) {
+  if (metaLocations != null &&
+  metaLocations.getRegionLocation(replicaId) != null) {
+return metaLocations;
+  }
+} else {
+  useCache = false;
 }
   }
-
   // only one thread should do the lookup.
   synchronized (metaRegionLock) {
 // Check the cache again for a hit in case some other thread made the
 // same query while we were waiting on the lock.
 if (useCache) {
-  locations = getCachedLocation(tableName, metaCacheKey);
-  if (locations != null && locations.getRegionLocation(replicaId) != 
null) {
-return locations;
+  if (metaLocations != null &&
+  metaLocations.getRegionLocation(replicaId) != null) {
+return metaLocations;
   }
 }
-
 // Look up from zookeeper
-locations = this.registry.getMetaRegionLocation();
-if (locations != null) {
-  cacheLocation(tableName, locations);
+metaLocations = this.registry.getMetaRegionLocation();
+lastMetaLookupTime = EnvironmentEdgeManager.currentTime();
+if (metaLocations != null &&
+metaLocations.getRegionLocation(replicaId) != null) {
+  return metaLocations;
 }
+return null;
   }
-  return locations;
 }
 
 /*



[1/2] hbase git commit: HBASE-21464 Splitting blocked with meta NSRE during split transaction

2018-12-05 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 dab7b39f2 -> d625b212e
Updated Tags:  refs/tags/1.4.9RC1 [created] c5b401c4f


HBASE-21464 Splitting blocked with meta NSRE during split transaction

Signed-off-by: Lars Hofhansl 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e7c79c72
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e7c79c72
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e7c79c72

Branch: refs/heads/branch-1.4
Commit: e7c79c722376aa152dae6f976c01faebefed224f
Parents: dab7b39
Author: Andrew Purtell 
Authored: Fri Nov 30 15:23:34 2018 -0800
Committer: Andrew Purtell 
Committed: Wed Dec 5 10:54:59 2018 -0800

--
 .../hadoop/hbase/client/ConnectionManager.java  | 42 
 1 file changed, 25 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e7c79c72/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 7cf09c2..35ffa3e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -1231,38 +1231,46 @@ class ConnectionManager {
   }
 }
 
+private volatile RegionLocations metaLocations = null;
+private volatile long lastMetaLookupTime = 
EnvironmentEdgeManager.currentTime();
+// cache meta location at most 10 seconds
+private final static long META_LOOKUP_CACHE_INTERVAL = 1;
+
 private RegionLocations locateMeta(final TableName tableName,
 boolean useCache, int replicaId) throws IOException {
-  // HBASE-10785: We cache the location of the META itself, so that we are 
not overloading
-  // zookeeper with one request for every region lookup. We cache the META 
with empty row
-  // key in MetaCache.
-  byte[] metaCacheKey = HConstants.EMPTY_START_ROW; // use byte[0] as the 
row for meta
-  RegionLocations locations = null;
+  // We cache the location of the META itself, so that we are not 
overloading
+  // zookeeper with one request for every region lookup. If relocating, 
bypass
+  // the cache immediately.
   if (useCache) {
-locations = getCachedLocation(tableName, metaCacheKey);
-if (locations != null && locations.getRegionLocation(replicaId) != 
null) {
-  return locations;
+long now = EnvironmentEdgeManager.currentTime();
+if (now - lastMetaLookupTime < META_LOOKUP_CACHE_INTERVAL) {
+  if (metaLocations != null &&
+  metaLocations.getRegionLocation(replicaId) != null) {
+return metaLocations;
+  }
+} else {
+  useCache = false;
 }
   }
-
   // only one thread should do the lookup.
   synchronized (metaRegionLock) {
 // Check the cache again for a hit in case some other thread made the
 // same query while we were waiting on the lock.
 if (useCache) {
-  locations = getCachedLocation(tableName, metaCacheKey);
-  if (locations != null && locations.getRegionLocation(replicaId) != 
null) {
-return locations;
+  if (metaLocations != null &&
+  metaLocations.getRegionLocation(replicaId) != null) {
+return metaLocations;
   }
 }
-
 // Look up from zookeeper
-locations = this.registry.getMetaRegionLocation();
-if (locations != null) {
-  cacheLocation(tableName, locations);
+metaLocations = this.registry.getMetaRegionLocation();
+lastMetaLookupTime = EnvironmentEdgeManager.currentTime();
+if (metaLocations != null &&
+metaLocations.getRegionLocation(replicaId) != null) {
+  return metaLocations;
 }
+return null;
   }
-  return locations;
 }
 
 /*



hbase git commit: HBASE-21550 Add a new method preCreateTableRegionInfos for MasterObserver which allows CPs to modify the TableDescriptor

2018-12-05 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 564833754 -> 7e8ce1c5c


HBASE-21550 Add a new method preCreateTableRegionInfos for MasterObserver which 
allows CPs to modify the TableDescriptor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7e8ce1c5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7e8ce1c5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7e8ce1c5

Branch: refs/heads/branch-2
Commit: 7e8ce1c5cc9f0f15e5a456869435c96221fbfa8b
Parents: 5648337
Author: Duo Zhang 
Authored: Wed Dec 5 18:19:15 2018 +0800
Committer: zhangduo 
Committed: Thu Dec 6 08:30:37 2018 +0800

--
 .../hbase/coprocessor/MasterObserver.java   | 15 +
 .../org/apache/hadoop/hbase/master/HMaster.java | 68 ++--
 .../hbase/master/MasterCoprocessorHost.java | 14 
 .../hbase/coprocessor/TestMasterObserver.java   | 14 +++-
 4 files changed, 75 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7e8ce1c5/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index a37f21a..a1e9be5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -69,6 +69,21 @@ import org.apache.yetus.audience.InterfaceStability;
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
 public interface MasterObserver {
+
+  /**
+   * Called before we create the region infos for this table. Called as part 
of create table RPC
+   * call.
+   * @param ctx the environment to interact with the framework and master
+   * @param desc the TableDescriptor for the table
+   * @return the TableDescriptor used to create the table. Default is the one 
passed in. Return
+   * {@code null} means cancel the creation.
+   */
+  default TableDescriptor preCreateTableRegionsInfos(
+  final ObserverContext ctx, TableDescriptor 
desc)
+  throws IOException {
+return desc;
+  }
+
   /**
* Called before a new table is created by
* {@link org.apache.hadoop.hbase.master.HMaster}.  Called as part of create

http://git-wip-us.apache.org/repos/asf/hbase/blob/7e8ce1c5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ecc9bde..a023a4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2009,45 +2009,45 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   }
 
   @Override
-  public long createTable(
-  final TableDescriptor tableDescriptor,
-  final byte [][] splitKeys,
-  final long nonceGroup,
-  final long nonce) throws IOException {
+  public long createTable(final TableDescriptor tableDescriptor, final 
byte[][] splitKeys,
+  final long nonceGroup, final long nonce) throws IOException {
 checkInitialized();
-
-String namespace = tableDescriptor.getTableName().getNamespaceAsString();
+TableDescriptor desc = 
getMasterCoprocessorHost().preCreateTableRegionsInfos(tableDescriptor);
+if (desc == null) {
+  throw new IOException("Creation for " + tableDescriptor + " is canceled 
by CP");
+}
+String namespace = desc.getTableName().getNamespaceAsString();
 this.clusterSchemaService.getNamespace(namespace);
 
-RegionInfo[] newRegions = 
ModifyRegionUtils.createRegionInfos(tableDescriptor, splitKeys);
-sanityCheckTableDescriptor(tableDescriptor);
-
-return MasterProcedureUtil.submitProcedure(
-new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, 
nonce) {
-  @Override
-  protected void run() throws IOException {
-getMaster().getMasterCoprocessorHost().preCreateTable(tableDescriptor, 
newRegions);
-
-LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
-
-// TODO: We can handle/merge duplicate requests, and differentiate the 
case of
-//   TableExistsException by saying if the schema is the same or 
not.
-//
-// We need to wait for the procedure to potentially fail due to 
"prepare" sanity
-// checks. This will block only the beginning of the procedure. See 
HBASE-19953.
-   

hbase git commit: HBASE-21550 Add a new method preCreateTableRegionInfos for MasterObserver which allows CPs to modify the TableDescriptor

2018-12-05 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 8bf966c8e -> f49baf259


HBASE-21550 Add a new method preCreateTableRegionInfos for MasterObserver which 
allows CPs to modify the TableDescriptor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f49baf25
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f49baf25
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f49baf25

Branch: refs/heads/master
Commit: f49baf259ec6bc2c8634debd2dbfc592753245d3
Parents: 8bf966c
Author: Duo Zhang 
Authored: Wed Dec 5 18:19:15 2018 +0800
Committer: zhangduo 
Committed: Thu Dec 6 08:30:32 2018 +0800

--
 .../hbase/coprocessor/MasterObserver.java   | 15 +
 .../org/apache/hadoop/hbase/master/HMaster.java | 68 ++--
 .../hbase/master/MasterCoprocessorHost.java | 14 
 .../hbase/coprocessor/TestMasterObserver.java   | 14 +++-
 4 files changed, 75 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f49baf25/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 573ac7a..a0863e4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -70,6 +70,21 @@ import org.apache.yetus.audience.InterfaceStability;
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
 public interface MasterObserver {
+
+  /**
+   * Called before we create the region infos for this table. Called as part 
of create table RPC
+   * call.
+   * @param ctx the environment to interact with the framework and master
+   * @param desc the TableDescriptor for the table
+   * @return the TableDescriptor used to create the table. Default is the one 
passed in. Return
+   * {@code null} means cancel the creation.
+   */
+  default TableDescriptor preCreateTableRegionsInfos(
+  final ObserverContext ctx, TableDescriptor 
desc)
+  throws IOException {
+return desc;
+  }
+
   /**
* Called before a new table is created by
* {@link org.apache.hadoop.hbase.master.HMaster}.  Called as part of create

http://git-wip-us.apache.org/repos/asf/hbase/blob/f49baf25/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 132e271..e96dc36 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2030,45 +2030,45 @@ public class HMaster extends HRegionServer implements 
MasterServices {
   }
 
   @Override
-  public long createTable(
-  final TableDescriptor tableDescriptor,
-  final byte [][] splitKeys,
-  final long nonceGroup,
-  final long nonce) throws IOException {
+  public long createTable(final TableDescriptor tableDescriptor, final 
byte[][] splitKeys,
+  final long nonceGroup, final long nonce) throws IOException {
 checkInitialized();
-
-String namespace = tableDescriptor.getTableName().getNamespaceAsString();
+TableDescriptor desc = 
getMasterCoprocessorHost().preCreateTableRegionsInfos(tableDescriptor);
+if (desc == null) {
+  throw new IOException("Creation for " + tableDescriptor + " is canceled 
by CP");
+}
+String namespace = desc.getTableName().getNamespaceAsString();
 this.clusterSchemaService.getNamespace(namespace);
 
-RegionInfo[] newRegions = 
ModifyRegionUtils.createRegionInfos(tableDescriptor, splitKeys);
-sanityCheckTableDescriptor(tableDescriptor);
-
-return MasterProcedureUtil.submitProcedure(
-new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, 
nonce) {
-  @Override
-  protected void run() throws IOException {
-getMaster().getMasterCoprocessorHost().preCreateTable(tableDescriptor, 
newRegions);
-
-LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
-
-// TODO: We can handle/merge duplicate requests, and differentiate the 
case of
-//   TableExistsException by saying if the schema is the same or 
not.
-//
-// We need to wait for the procedure to potentially fail due to 
"prepare" sanity
-// checks. This will block only the beginning of the procedure. See 
HBASE-19953.
-   

hbase git commit: HBASE-21544 Backport HBASE-20734 Colocate recovered edits directory with hbase.wal.dir

2018-12-05 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 8e36aae9d -> 361dea85c


HBASE-21544 Backport HBASE-20734 Colocate recovered edits directory with 
hbase.wal.dir

JE: Fairly direct backport from >=branch-2.1 to solve an issue where
an over-aggressive check for hflush() breaks Azure-based FileSystems.

Amending-Author: Reid Chan 
Signed-off-by: Reid Chan 
Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/361dea85
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/361dea85
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/361dea85

Branch: refs/heads/branch-2.0
Commit: 361dea85c90229d3c2752a814bfc38073f14a2c9
Parents: 8e36aae
Author: Zach York 
Authored: Wed Jun 27 16:18:53 2018 -0700
Committer: Josh Elser 
Committed: Wed Dec 5 15:06:03 2018 -0500

--
 .../apache/hadoop/hbase/util/CommonFSUtils.java |  28 +++
 .../assignment/MergeTableRegionsProcedure.java  |   8 +-
 .../assignment/SplitTableRegionProcedure.java   |  10 +-
 .../AbstractStateMachineTableProcedure.java |   6 +
 .../hadoop/hbase/regionserver/HRegion.java  | 159 ++-
 .../apache/hadoop/hbase/wal/WALSplitter.java| 198 +--
 .../hadoop/hbase/master/AbstractTestDLS.java|   6 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |   8 +-
 .../regionserver/wal/AbstractTestWALReplay.java |   8 +-
 .../hbase/wal/TestReadWriteSeqIdFiles.java  |  18 +-
 .../apache/hadoop/hbase/wal/TestWALFactory.java |   2 +-
 .../apache/hadoop/hbase/wal/TestWALSplit.java   |  58 +++---
 12 files changed, 304 insertions(+), 205 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/361dea85/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index a34048a..a08f9f2 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -417,6 +417,34 @@ public abstract class CommonFSUtils {
   }
 
   /**
+   * Returns the WAL region directory based on the given table name and region 
name
+   * @param conf configuration to determine WALRootDir
+   * @param tableName Table that the region is under
+   * @param encodedRegionName Region name used for creating the final region 
directory
+   * @return the region directory used to store WALs under the WALRootDir
+   * @throws IOException if there is an exception determining the WALRootDir
+   */
+  public static Path getWALRegionDir(final Configuration conf,
+  final TableName tableName, final String encodedRegionName)
+  throws IOException {
+return new Path(getWALTableDir(conf, tableName),
+encodedRegionName);
+  }
+
+  /**
+   * Returns the Table directory under the WALRootDir for the specified table 
name
+   * @param conf configuration used to get the WALRootDir
+   * @param tableName Table to get the directory for
+   * @return a path to the WAL table directory for the specified table
+   * @throws IOException if there is an exception determining the WALRootDir
+   */
+  public static Path getWALTableDir(final Configuration conf, final TableName 
tableName)
+  throws IOException {
+return new Path(new Path(getWALRootDir(conf), 
tableName.getNamespaceAsString()),
+tableName.getQualifierAsString());
+  }
+
+  /**
* Returns the {@link org.apache.hadoop.fs.Path} object representing the 
table directory under
* path rootdir
*

http://git-wip-us.apache.org/repos/asf/hbase/blob/361dea85/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index accc051..a3ec9cc 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -824,14 +824,16 @@ public class MergeTableRegionsProcedure
   }
 
   private void writeMaxSequenceIdFile(MasterProcedureEnv env) throws 
IOException {
-FileSystem fs = 
env.getMasterServices().getMasterFileSystem().getFileSystem();
+FileSystem walFS = 
env.getMasterServices().getMasterWalManager().getFileSystem();
 long maxSequenceId = -1L;
 for (RegionInfo region : regionsToMerge) {
   

[hbase] Git Push Summary

2018-12-05 Thread apurtell
Repository: hbase
Updated Tags:  refs/tags/1.4.9RC0 [deleted] 660bd81bf


[41/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index 5572799..1b532de 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static enum HBaseFsck.ErrorReporter.ERROR_CODE
+public static enum HBaseFsck.ErrorReporter.ERROR_CODE
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumHBaseFsck.ErrorReporter.ERROR_CODE
 
 
@@ -315,7 +315,7 @@ the order they are declared.
 
 
 UNKNOWN
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE UNKNOWN
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE UNKNOWN
 
 
 
@@ -324,7 +324,7 @@ the order they are declared.
 
 
 NO_META_REGION
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE NO_META_REGION
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE NO_META_REGION
 
 
 
@@ -333,7 +333,7 @@ the order they are declared.
 
 
 NULL_META_REGION
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE NULL_META_REGION
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE NULL_META_REGION
 
 
 
@@ -342,7 +342,7 @@ the order they are declared.
 
 
 NO_VERSION_FILE
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE NO_VERSION_FILE
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE NO_VERSION_FILE
 
 
 
@@ -351,7 +351,7 @@ the order they are declared.
 
 
 NOT_IN_META_HDFS
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_IN_META_HDFS
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_IN_META_HDFS
 
 
 
@@ -360,7 +360,7 @@ the order they are declared.
 
 
 NOT_IN_META
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_IN_META
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_IN_META
 
 
 
@@ -369,7 +369,7 @@ the order they are declared.
 
 
 NOT_IN_META_OR_DEPLOYED
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_IN_META_OR_DEPLOYED
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_IN_META_OR_DEPLOYED
 
 
 
@@ -378,7 +378,7 @@ the order they are declared.
 
 
 NOT_IN_HDFS_OR_DEPLOYED
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_IN_HDFS_OR_DEPLOYED
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_IN_HDFS_OR_DEPLOYED
 
 
 
@@ -387,7 +387,7 @@ the order they are declared.
 
 
 NOT_IN_HDFS
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_IN_HDFS
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_IN_HDFS
 
 
 
@@ -396,7 +396,7 @@ the order they are declared.
 
 
 SERVER_DOES_NOT_MATCH_META
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE SERVER_DOES_NOT_MATCH_META
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE SERVER_DOES_NOT_MATCH_META
 
 
 
@@ -405,7 +405,7 @@ the order they are declared.
 
 
 NOT_DEPLOYED
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_DEPLOYED
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE NOT_DEPLOYED
 
 
 
@@ -414,7 +414,7 @@ the order they are declared.
 
 
 MULTI_DEPLOYED
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE MULTI_DEPLOYED
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE MULTI_DEPLOYED
 
 
 
@@ -423,7 +423,7 @@ the order they are declared.
 
 
 SHOULD_NOT_BE_DEPLOYED
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE SHOULD_NOT_BE_DEPLOYED
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE SHOULD_NOT_BE_DEPLOYED
 
 
 
@@ -432,7 +432,7 @@ the order they are declared.
 
 
 MULTI_META_REGION
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE MULTI_META_REGION
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE MULTI_META_REGION
 
 
 
@@ -441,7 +441,7 @@ the order they are declared.
 
 
 RS_CONNECT_FAILURE
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE RS_CONNECT_FAILURE
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE RS_CONNECT_FAILURE
 
 
 
@@ -450,7 +450,7 @@ the order they are declared.
 
 
 FIRST_REGION_STARTKEY_NOT_EMPTY
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE FIRST_REGION_STARTKEY_NOT_EMPTY
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE FIRST_REGION_STARTKEY_NOT_EMPTY
 
 
 
@@ -459,7 +459,7 @@ the order they are declared.
 
 
 LAST_REGION_ENDKEY_NOT_EMPTY
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE LAST_REGION_ENDKEY_NOT_EMPTY
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE LAST_REGION_ENDKEY_NOT_EMPTY
 
 
 
@@ -468,7 +468,7 @@ the order they are declared.
 
 
 DUPE_STARTKEYS
-public static finalHBaseFsck.ErrorReporter.ERROR_CODE DUPE_STARTKEYS
+public static finalHBaseFsck.ErrorReporter.ERROR_CODE 

[45/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
index 6c0477a..f14d23f 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MasterRpcServices
+public class MasterRpcServices
 extends RSRpcServices
 implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService.BlockingInterface,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService.BlockingInterface,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService.BlockingInterface
 Implements the master RPC services.
@@ -851,7 +851,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -860,7 +860,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 master
-private finalHMaster master
+private finalHMaster master
 
 
 
@@ -877,7 +877,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 MasterRpcServices
-publicMasterRpcServices(HMasterm)
+publicMasterRpcServices(HMasterm)
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Throws:
@@ -899,7 +899,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 createConfigurationSubset
-privateorg.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.BuildercreateConfigurationSubset()
+privateorg.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.BuildercreateConfigurationSubset()
 
 Returns:
 Subset of configuration to pass initializing regionservers: e.g.
@@ -913,7 +913,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 addConfig
-privateorg.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.BuilderaddConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builderresp,
+privateorg.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.BuilderaddConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builderresp,

  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringkey)
 
 
@@ -923,7 +923,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 getRpcSchedulerFactoryClass
-protectedhttps://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class?getRpcSchedulerFactoryClass()
+protectedhttps://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class?getRpcSchedulerFactoryClass()
 
 Overrides:
 getRpcSchedulerFactoryClassin
 classRSRpcServices
@@ -936,7 +936,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 createRpcServer
-protectedRpcServerInterfacecreateRpcServer(Serverserver,
+protectedRpcServerInterfacecreateRpcServer(Serverserver,
  
org.apache.hadoop.conf.Configurationconf,
  RpcSchedulerFactoryrpcSchedulerFactory,
  https://docs.oracle.com/javase/8/docs/api/java/net/InetSocketAddress.html?is-external=true;
 title="class or interface in java.net">InetSocketAddressbindAddress,
@@ -956,7 +956,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 createPriority
-protectedPriorityFunctioncreatePriority()
+protectedPriorityFunctioncreatePriority()
 
 Overrides:
 createPriorityin
 classRSRpcServices
@@ -969,7 +969,7 @@ implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
 
 
 rpcPreCheck
-privatevoidrpcPreCheck(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringrequestName)

[46/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
index dcf1bc9..4c3e262 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static enum MasterRpcServices.BalanceSwitchMode
+static enum MasterRpcServices.BalanceSwitchMode
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumMasterRpcServices.BalanceSwitchMode
 
 
@@ -210,7 +210,7 @@ the order they are declared.
 
 
 SYNC
-public static finalMasterRpcServices.BalanceSwitchMode SYNC
+public static finalMasterRpcServices.BalanceSwitchMode SYNC
 
 
 
@@ -219,7 +219,7 @@ the order they are declared.
 
 
 ASYNC
-public static finalMasterRpcServices.BalanceSwitchMode ASYNC
+public static finalMasterRpcServices.BalanceSwitchMode ASYNC
 
 
 



[39/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
index 3aa1909..f57507c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HBaseFsck.WorkItemOverlapMerge
+static class HBaseFsck.WorkItemOverlapMerge
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true;
 title="class or interface in java.util.concurrent">Callablehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
 
@@ -211,7 +211,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 
 
 handler
-privateTableIntegrityErrorHandler handler
+privateTableIntegrityErrorHandler handler
 
 
 
@@ -220,7 +220,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 
 
 overlapgroup
-https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHBaseFsck.HbckInfo overlapgroup
+https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHBaseFsck.HbckInfo overlapgroup
 
 
 
@@ -237,7 +237,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 
 
 WorkItemOverlapMerge
-WorkItemOverlapMerge(https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHBaseFsck.HbckInfooverlapgroup,
+WorkItemOverlapMerge(https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionHBaseFsck.HbckInfooverlapgroup,
  TableIntegrityErrorHandlerhandler)
 
 
@@ -255,7 +255,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 
 
 call
-publichttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Voidcall()
+publichttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Voidcall()
   throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
index ffea861..5a504a4 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HBaseFsck.WorkItemRegion
+static class HBaseFsck.WorkItemRegion
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true;
 title="class or interface in java.util.concurrent">Callablehttps://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
 title="class or interface in java.lang">Void
 Contact a region server and get all information from 
it
@@ -226,7 +226,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 
 
 hbck
-private finalHBaseFsck hbck
+private finalHBaseFsck hbck
 
 
 
@@ -235,7 +235,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 
 
 rsinfo
-private finalServerName rsinfo
+private finalServerName rsinfo
 
 
 
@@ -244,7 +244,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 
 
 errors
-private finalHBaseFsck.ErrorReporter 
errors
+private finalHBaseFsck.ErrorReporter 
errors
 
 
 
@@ -253,7 +253,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 
 
 connection
-private finalClusterConnection connection
+private finalClusterConnection connection
 
 
 
@@ -270,7 +270,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/util/concurre
 
 
 WorkItemRegion
-WorkItemRegion(HBaseFsckhbck,

hbase-site git commit: INFRA-10751 Empty commit

2018-12-05 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 275553168 -> 0f7d611e9


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/0f7d611e
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/0f7d611e
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/0f7d611e

Branch: refs/heads/asf-site
Commit: 0f7d611e986d776af5bb180277f1dbb495a601ac
Parents: 2755531
Author: jenkins 
Authored: Wed Dec 5 14:52:59 2018 +
Committer: jenkins 
Committed: Wed Dec 5 14:52:59 2018 +

--

--




[04/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a handful of arguments that
-211 * 

[13/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a handful of arguments that
-211 * can be used to limit the kinds of 

[10/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a 

[12/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.MetaEntry.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a handful of arguments that
-211 * can be used to limit the kinds of 

[09/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the 

[36/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.html 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.html
index cd0b50b..8167077 100644
--- a/devapidocs/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.html
+++ b/devapidocs/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":9,"i14":9,"i15":10,"i16":10,"i17":9,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -110,20 +110,17 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MetaTableLocator
+public final class MetaTableLocator
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-Utility class to perform operation (get/wait 
for/verify/set/delete) on znode in ZooKeeper
- which keeps hbase:meta region server location.
-
- Stateless class with a bunch of static methods. Doesn't manage resources 
passed in
- (e.g. Connection, ZKWatcher etc).
-
- Meta region location is set by RegionServerServices.
- This class doesn't use ZK watchers, rather accesses ZK directly.
-
- This class it stateless. The only reason it's not made a non-instantiable 
util class
- with a collection of static methods is that it'd be rather hard to mock 
properly in tests.
-
+Utility class to perform operation (get/wait 
for/verify/set/delete) on znode in ZooKeeper which
+ keeps hbase:meta region server location.
+ 
+ Stateless class with a bunch of static methods. Doesn't manage resources 
passed in (e.g.
+ Connection, ZKWatcher etc).
+ 
+ Meta region location is set by RegionServerServices. This class 
doesn't use ZK
+ watchers, rather accesses ZK directly.
+ 
  TODO: rewrite using RPC calls to master to find out about hbase:meta.
 
 
@@ -147,10 +144,6 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 private static org.slf4j.Logger
 LOG
 
-
-private boolean
-stopped
-
 
 
 
@@ -163,10 +156,12 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 Constructors
 
-Constructor and Description
+Modifier
+Constructor and Description
 
 
-MetaTableLocator()
+private 
+MetaTableLocator()
 
 
 
@@ -178,13 +173,13 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 Method Summary
 
-All MethodsStatic MethodsInstance MethodsConcrete Methods
+All MethodsStatic MethodsConcrete Methods
 
 Modifier and Type
 Method and Description
 
 
-ServerName
+static ServerName
 blockUntilAvailable(ZKWatcherzkw,
intreplicaId,
longtimeout)
@@ -192,14 +187,14 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-ServerName
+static ServerName
 blockUntilAvailable(ZKWatcherzkw,
longtimeout)
 Wait until the meta region is available and is not in 
transition.
 
 
 
-https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName
+static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName
 blockUntilAvailable(ZKWatcherzkw,
longtimeout,
org.apache.hadoop.conf.Configurationconf)
@@ -207,92 +202,77 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-void
+static void
 deleteMetaLocation(ZKWatcherzookeeper)
 Deletes the location of hbase:meta in 
ZooKeeper.
 
 
 
-void
+static void
 deleteMetaLocation(ZKWatcherzookeeper,
   intreplicaId)
 
 
-private static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface
-getCachedConnection(ClusterConnectionconnection,
-   ServerNamesn)
-
-
-private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
+private static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 

[35/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
index 53c524e..f69e20a 100644
--- a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
+++ b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.JaasConfiguration.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class ZKUtil.JaasConfiguration
+private static class ZKUtil.JaasConfiguration
 extends https://docs.oracle.com/javase/8/docs/api/javax/security/auth/login/Configuration.html?is-external=true;
 title="class or interface in javax.security.auth.login">Configuration
 A JAAS configuration that defines the login modules that we 
want to use for login.
 
@@ -280,7 +280,7 @@ extends https://docs.oracle.com/javase/8/docs/api/javax/security/auth/l
 
 
 SERVER_KEYTAB_KERBEROS_CONFIG_NAME
-private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String SERVER_KEYTAB_KERBEROS_CONFIG_NAME
+private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String SERVER_KEYTAB_KERBEROS_CONFIG_NAME
 
 See Also:
 Constant
 Field Values
@@ -293,7 +293,7 @@ extends https://docs.oracle.com/javase/8/docs/api/javax/security/auth/l
 
 
 CLIENT_KEYTAB_KERBEROS_CONFIG_NAME
-private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CLIENT_KEYTAB_KERBEROS_CONFIG_NAME
+private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String CLIENT_KEYTAB_KERBEROS_CONFIG_NAME
 
 See Also:
 Constant
 Field Values
@@ -306,7 +306,7 @@ extends https://docs.oracle.com/javase/8/docs/api/javax/security/auth/l
 
 
 BASIC_JAAS_OPTIONS
-private static finalhttps://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String BASIC_JAAS_OPTIONS
+private static finalhttps://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String BASIC_JAAS_OPTIONS
 
 
 
@@ -315,7 +315,7 @@ extends https://docs.oracle.com/javase/8/docs/api/javax/security/auth/l
 
 
 KEYTAB_KERBEROS_OPTIONS
-private static finalhttps://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String KEYTAB_KERBEROS_OPTIONS
+private static finalhttps://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String KEYTAB_KERBEROS_OPTIONS
 
 
 
@@ -324,7 +324,7 @@ extends https://docs.oracle.com/javase/8/docs/api/javax/security/auth/l
 
 
 KEYTAB_KERBEROS_LOGIN
-private static finalhttps://docs.oracle.com/javase/8/docs/api/javax/security/auth/login/AppConfigurationEntry.html?is-external=true;
 title="class or interface in 
javax.security.auth.login">AppConfigurationEntry KEYTAB_KERBEROS_LOGIN
+private static finalhttps://docs.oracle.com/javase/8/docs/api/javax/security/auth/login/AppConfigurationEntry.html?is-external=true;
 title="class or interface in 
javax.security.auth.login">AppConfigurationEntry KEYTAB_KERBEROS_LOGIN
 
 
 
@@ -333,7 +333,7 @@ extends https://docs.oracle.com/javase/8/docs/api/javax/security/auth/l
 
 
 KEYTAB_KERBEROS_CONF
-private static finalhttps://docs.oracle.com/javase/8/docs/api/javax/security/auth/login/AppConfigurationEntry.html?is-external=true;
 title="class or interface in 
javax.security.auth.login">AppConfigurationEntry[] KEYTAB_KERBEROS_CONF
+private 

[06/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If 

[33/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/MetaTableLocator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/MetaTableLocator.html 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/MetaTableLocator.html
index b297b54..25f088c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/MetaTableLocator.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/MetaTableLocator.html
@@ -72,107 +72,7 @@
 
 Uses of 
Classorg.apache.hadoop.hbase.zookeeper.MetaTableLocator
 
-
-
-
-
-Packages that use MetaTableLocator
-
-Package
-Description
-
-
-
-org.apache.hadoop.hbase
-
-
-
-org.apache.hadoop.hbase.regionserver
-
-
-
-org.apache.hadoop.hbase.replication.regionserver
-
-
-
-
-
-
-
-
-
-
-Uses of MetaTableLocator in org.apache.hadoop.hbase
-
-Methods in org.apache.hadoop.hbase
 that return MetaTableLocator
-
-Modifier and Type
-Method and Description
-
-
-
-MetaTableLocator
-Server.getMetaTableLocator()
-Returns instance of MetaTableLocator
- running inside this server.
-
-
-
-
-
-
-
-
-Uses of MetaTableLocator in org.apache.hadoop.hbase.regionserver
-
-Fields in org.apache.hadoop.hbase.regionserver
 declared as MetaTableLocator
-
-Modifier and Type
-Field and Description
-
-
-
-protected MetaTableLocator
-HRegionServer.metaTableLocator
-
-
-
-
-Methods in org.apache.hadoop.hbase.regionserver
 that return MetaTableLocator
-
-Modifier and Type
-Method and Description
-
-
-
-MetaTableLocator
-HRegionServer.getMetaTableLocator()
-
-
-
-
-
-
-
-Uses of MetaTableLocator in org.apache.hadoop.hbase.replication.regionserver
-
-Methods in org.apache.hadoop.hbase.replication.regionserver
 that return MetaTableLocator
-
-Modifier and Type
-Method and Description
-
-
-
-MetaTableLocator
-ReplicationSyncUp.DummyServer.getMetaTableLocator()
-
-
-
-
-
-
-
-
+No usage of 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/ZKWatcher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/ZKWatcher.html 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/ZKWatcher.html
index 804473a..7cd57f0 100644
--- a/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/ZKWatcher.html
+++ b/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/ZKWatcher.html
@@ -794,6 +794,42 @@
 
 
 
+
+Methods in org.apache.hadoop.hbase.rsgroup
 with parameters of type ZKWatcher
+
+Modifier and Type
+Method and Description
+
+
+
+private static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface
+Utility.getMetaServerConnection(ClusterConnectionconnection,
+   ZKWatcherzkw,
+   longtimeout,
+   intreplicaId)
+Gets a connection to the server hosting meta, as reported 
by ZooKeeper, waiting up to the
+ specified timeout for availability.
+
+
+
+static boolean
+Utility.verifyMetaRegionLocation(ClusterConnectionhConnection,
+ZKWatcherzkw,
+longtimeout)
+Verify hbase:meta is deployed and 
accessible.
+
+
+
+static boolean
+Utility.verifyMetaRegionLocation(ClusterConnectionconnection,
+ZKWatcherzkw,
+longtimeout,
+intreplicaId)
+Verify hbase:meta is deployed and 
accessible.
+
+
+
+
 
 
 
@@ -1072,7 +1108,7 @@
 
 
 
-ServerName
+static ServerName
 MetaTableLocator.blockUntilAvailable(ZKWatcherzkw,
intreplicaId,
longtimeout)
@@ -1080,14 +1116,14 @@
 
 
 
-ServerName
+static ServerName
 MetaTableLocator.blockUntilAvailable(ZKWatcherzkw,
longtimeout)
 Wait until the meta region is available and is not in 
transition.
 
 
 
-https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName
+static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName
 MetaTableLocator.blockUntilAvailable(ZKWatcherzkw,
longtimeout,
org.apache.hadoop.conf.Configurationconf)
@@ -1213,13 +1249,13 @@
 
 
 
-void
+static void
 MetaTableLocator.deleteMetaLocation(ZKWatcherzookeeper)
 Deletes the location of hbase:meta in 
ZooKeeper.
 
 
 
-void
+static void
 MetaTableLocator.deleteMetaLocation(ZKWatcherzookeeper,
   intreplicaId)
 
@@ -1335,37 +1371,37 @@
 
 
 
-ServerName
+static ServerName
 MetaTableLocator.getMetaRegionLocation(ZKWatcherzkw)
 Gets the meta region location, if available.
 
 
 
-ServerName
+static ServerName
 MetaTableLocator.getMetaRegionLocation(ZKWatcherzkw,
 

[14/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HbckInfo.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a handful of arguments that
-211 * can be used to limit the kinds of 

[16/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a handful of arguments that

[03/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a 

[32/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
index 7b680e9..a730a53 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
@@ -73,55 +73,54 @@
 065  throw new 
IllegalStateException("hbase:meta must be initialized first before we can " +
 066  "assign out its replicas");
 067}
-068ServerName metaServername =
-069
this.master.getMetaTableLocator().getMetaRegionLocation(this.master.getZooKeeper());
-070for (int i = 1; i  numReplicas; 
i++) {
-071  // Get current meta state for 
replica from zk.
-072  RegionState metaState = 
MetaTableLocator.getMetaRegionState(master.getZooKeeper(), i);
-073  RegionInfo hri = 
RegionReplicaUtil.getRegionInfoForReplica(
-074  
RegionInfoBuilder.FIRST_META_REGIONINFO, i);
-075  
LOG.debug(hri.getRegionNameAsString() + " replica region state from zookeeper=" 
+ metaState);
-076  if 
(metaServername.equals(metaState.getServerName())) {
-077metaState = null;
-078
LOG.info(hri.getRegionNameAsString() +
-079  " old location is same as 
current hbase:meta location; setting location as null...");
-080  }
-081  // These assigns run inline. All is 
blocked till they complete. Only interrupt is shutting
-082  // down hosting server which calls 
AM#stop.
-083  if (metaState != null  
metaState.getServerName() != null) {
-084// Try to retain old 
assignment.
-085assignmentManager.assign(hri, 
metaState.getServerName());
-086  } else {
-087assignmentManager.assign(hri);
-088  }
-089}
-090
unassignExcessMetaReplica(numReplicas);
-091  }
-092
-093  private void 
unassignExcessMetaReplica(int numMetaReplicasConfigured) {
-094final ZKWatcher zooKeeper = 
master.getZooKeeper();
-095// unassign the unneeded replicas 
(for e.g., if the previous master was configured
-096// with a replication of 3 and now it 
is 2, we need to unassign the 1 unneeded replica)
-097try {
-098  ListString 
metaReplicaZnodes = zooKeeper.getMetaReplicaNodes();
-099  for (String metaReplicaZnode : 
metaReplicaZnodes) {
-100int replicaId = 
zooKeeper.getZNodePaths().getMetaReplicaIdFromZnode(metaReplicaZnode);
-101if (replicaId = 
numMetaReplicasConfigured) {
-102  RegionState r = 
MetaTableLocator.getMetaRegionState(zooKeeper, replicaId);
-103  LOG.info("Closing excess 
replica of meta region " + r.getRegion());
-104  // send a close and wait for a 
max of 30 seconds
-105  
ServerManager.closeRegionSilentlyAndWait(master.getClusterConnection(),
-106  r.getServerName(), 
r.getRegion(), 3);
-107  ZKUtil.deleteNode(zooKeeper, 
zooKeeper.getZNodePaths().getZNodeForReplica(replicaId));
-108}
-109  }
-110} catch (Exception ex) {
-111  // ignore the exception since we 
don't want the master to be wedged due to potential
-112  // issues in the cleanup of the 
extra regions. We can do that cleanup via hbck or manually
-113  LOG.warn("Ignoring exception " + 
ex);
-114}
-115  }
-116}
+068ServerName metaServername = 
MetaTableLocator.getMetaRegionLocation(this.master.getZooKeeper());
+069for (int i = 1; i  numReplicas; 
i++) {
+070  // Get current meta state for 
replica from zk.
+071  RegionState metaState = 
MetaTableLocator.getMetaRegionState(master.getZooKeeper(), i);
+072  RegionInfo hri = 
RegionReplicaUtil.getRegionInfoForReplica(
+073  
RegionInfoBuilder.FIRST_META_REGIONINFO, i);
+074  
LOG.debug(hri.getRegionNameAsString() + " replica region state from zookeeper=" 
+ metaState);
+075  if 
(metaServername.equals(metaState.getServerName())) {
+076metaState = null;
+077
LOG.info(hri.getRegionNameAsString() +
+078  " old location is same as 
current hbase:meta location; setting location as null...");
+079  }
+080  // These assigns run inline. All is 
blocked till they complete. Only interrupt is shutting
+081  // down hosting server which calls 
AM#stop.
+082  if (metaState != null  
metaState.getServerName() != null) {
+083// Try to retain old 
assignment.
+084assignmentManager.assign(hri, 
metaState.getServerName());
+085  } else {
+086assignmentManager.assign(hri);
+087  }
+088}
+089
unassignExcessMetaReplica(numReplicas);
+090  }
+091
+092  private void 
unassignExcessMetaReplica(int numMetaReplicasConfigured) {
+093final ZKWatcher zooKeeper = 
master.getZooKeeper();
+094// unassign the 

[21/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
index 809f66f..9b60dd0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
@@ -765,146 +765,145 @@
 757found.set(true);
 758try {
 759  boolean rootMetaFound =
-760  
masterServices.getMetaTableLocator().verifyMetaRegionLocation(
-761  conn, 
masterServices.getZooKeeper(), 1);
-762  if (rootMetaFound) {
-763MetaTableAccessor.Visitor 
visitor = new DefaultVisitorBase() {
-764  @Override
-765  public boolean 
visitInternal(Result row) throws IOException {
-766RegionInfo info = 
MetaTableAccessor.getRegionInfo(row);
-767if (info != null) {
-768  Cell serverCell =
-769  
row.getColumnLatestCell(HConstants.CATALOG_FAMILY,
-770  
HConstants.SERVER_QUALIFIER);
-771  if 
(RSGROUP_TABLE_NAME.equals(info.getTable())  serverCell != null) {
-772ServerName sn =
-773
ServerName.parseVersionedServerName(CellUtil.cloneValue(serverCell));
-774if (sn == null) {
-775  found.set(false);
-776} else if 
(tsm.isTableState(RSGROUP_TABLE_NAME, TableState.State.ENABLED)) {
-777  try {
-778
ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
-779
ClientProtos.GetRequest request =
-780
RequestConverter.buildGetRequest(info.getRegionName(),
-781new 
Get(ROW_KEY));
-782rs.get(null, 
request);
-783
assignedRegions.add(info);
-784  } catch(Exception 
ex) {
-785LOG.debug("Caught 
exception while verifying group region", ex);
-786  }
-787}
-788
foundRegions.add(info);
-789  }
-790}
-791return true;
-792  }
-793};
-794
MetaTableAccessor.fullScanRegions(conn, visitor);
-795// if no regions in meta then 
we have to create the table
-796if (foundRegions.size()  
1  rootMetaFound  !createSent) {
-797  createRSGroupTable();
-798  createSent = true;
-799}
-800LOG.info("RSGroup table=" + 
RSGROUP_TABLE_NAME + " isOnline=" + found.get()
-801+ ", regionCount=" + 
foundRegions.size() + ", assignCount="
-802+ assignedRegions.size() 
+ ", rootMetaFound=" + rootMetaFound);
-803found.set(found.get() 
 assignedRegions.size() == foundRegions.size()
-804 
foundRegions.size()  0);
-805  } else {
-806LOG.info("Waiting for catalog 
tables to come online");
-807found.set(false);
-808  }
-809  if (found.get()) {
-810LOG.debug("With group table 
online, refreshing cached information.");
-811
RSGroupInfoManagerImpl.this.refresh(true);
-812online = true;
-813//flush any inconsistencies 
between ZK and HTable
-814
RSGroupInfoManagerImpl.this.flushConfig();
-815  }
-816} catch (RuntimeException e) {
-817  throw e;
-818} catch(Exception e) {
-819  found.set(false);
-820  LOG.warn("Failed to perform 
check", e);
-821}
-822try {
-823  Thread.sleep(100);
-824} catch (InterruptedException e) 
{
-825  LOG.info("Sleep interrupted", 
e);
-826}
-827  }
-828  return found.get();
-829}
-830
-831private void createRSGroupTable() 
throws IOException {
-832  Long procId = 
masterServices.createSystemTable(RSGROUP_TABLE_DESC);
-833  // wait for region to be online
-834  int tries = 600;
-835  while 
(!(masterServices.getMasterProcedureExecutor().isFinished(procId))
-836   
masterServices.getMasterProcedureExecutor().isRunning()
-837   tries  0) {
-838try {
-839  Thread.sleep(100);
-840} catch (InterruptedException e) 
{
-841  throw new IOException("Wait 
interrupted ", e);
-842}
-843tries--;
-844  }
-845  if(tries = 0) {
-846throw new 

[17/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a handful of arguments that
-211 * can be used 

[31/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
index 2cdee19..e6bc675 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
@@ -110,2406 +110,2407 @@
 102import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 103import 
org.apache.hadoop.hbase.util.Pair;
 104import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-105import 
org.apache.yetus.audience.InterfaceAudience;
-106import 
org.apache.zookeeper.KeeperException;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109
-110import 
org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-111import 
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-112import 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-113
-114import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-153import 

[51/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/27555316
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/27555316
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/27555316

Branch: refs/heads/asf-site
Commit: 27555316811b8f031d18508100f39b4345de49ce
Parents: e467988
Author: jenkins 
Authored: Wed Dec 5 14:52:35 2018 +
Committer: jenkins 
Committed: Wed Dec 5 14:52:35 2018 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 16482 -
 checkstyle.rss  |32 +-
 coc.html| 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html | 4 +-
 devapidocs/index-all.html   |73 +-
 devapidocs/org/apache/hadoop/hbase/Server.html  |51 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../NotAllMetaRegionsOnlineException.html   | 8 +-
 .../hadoop/hbase/class-use/ServerName.html  |76 +-
 .../client/class-use/ClusterConnection.html |   101 +-
 .../hbase/client/class-use/RegionInfo.html  |12 +-
 .../hadoop/hbase/client/package-tree.html   |26 +-
 .../apache/hadoop/hbase/client/package-use.html | 5 -
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 2 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../org/apache/hadoop/hbase/master/HMaster.html | 6 +-
 .../master/HMasterCommandLine.LocalHMaster.html | 6 +-
 .../hbase/master/MasterMetaBootstrap.html   | 2 +-
 .../MasterRpcServices.BalanceSwitchMode.html| 6 +-
 .../hadoop/hbase/master/MasterRpcServices.html  |   230 +-
 .../hadoop/hbase/master/MasterServices.html | 2 +-
 .../hbase/master/MasterStatusServlet.html   |12 +-
 .../hadoop/hbase/master/package-tree.html   | 4 +-
 .../hbase/master/procedure/package-tree.html| 2 +-
 .../hadoop/hbase/monitoring/package-tree.html   | 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |18 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 6 +-
 .../hadoop/hbase/quotas/package-tree.html   | 8 +-
 .../HRegionServer.CompactionChecker.html|14 +-
 .../HRegionServer.MovedRegionInfo.html  |16 +-
 .../HRegionServer.MovedRegionsCleaner.html  |16 +-
 .../HRegionServer.PeriodicMemStoreFlusher.html  |14 +-
 ...RegionServer.SystemExitWhenAbortTimeout.html | 6 +-
 .../hbase/regionserver/HRegionServer.html   |   883 +-
 .../regionserver/RegionServerServices.html  | 2 +-
 .../hadoop/hbase/regionserver/package-tree.html |20 +-
 .../regionserver/querymatcher/package-tree.html | 2 +-
 .../ReplicationSyncUp.DummyServer.html  |76 +-
 .../regionserver/ReplicationSyncUp.html |10 +-
 .../replication/regionserver/package-tree.html  | 2 +-
 .../hadoop/hbase/rest/model/package-tree.html   | 2 +-
 ...oupInfoManagerImpl.RSGroupStartupWorker.html | 4 +-
 .../hbase/rsgroup/RSGroupInfoManagerImpl.html   | 6 +-
 .../apache/hadoop/hbase/rsgroup/Utility.html|   222 +-
 .../hbase/security/access/package-tree.html | 4 +-
 .../hadoop/hbase/security/package-tree.html | 2 +-
 .../hadoop/hbase/thrift/package-tree.html   | 2 +-
 ...BaseFsck.CheckRegionConsistencyWorkItem.html |10 +-
 .../HBaseFsck.ErrorReporter.ERROR_CODE.html |80 +-
 .../hbase/util/HBaseFsck.ErrorReporter.html |30 +-
 .../hbase/util/HBaseFsck.FileLockCallable.html  |16 +-
 .../hbase/util/HBaseFsck.HBaseFsckTool.html | 6 +-
 .../hadoop/hbase/util/HBaseFsck.HbckInfo.html   |56 +-
 .../hadoop/hbase/util/HBaseFsck.HdfsEntry.html  |14 +-
 .../hadoop/hbase/util/HBaseFsck.MetaEntry.html  |18 +-
 .../hbase/util/HBaseFsck.OnlineEntry.html   |10 +-
 .../util/HBaseFsck.PrintingErrorReporter.html   |42 +-
 .../HBaseFsck.RegionBoundariesInformation.html  |16 +-
 .../util/HBaseFsck.RegionRepairException.html   | 8 +-
 .../HBaseFsck.TableInfo.HDFSIntegrityFixer.html |22 +-
 ...aseFsck.TableInfo.IntegrityFixSuggester.html |20 +-
 

[19/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is 

[34/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
index 5832df4..2d5212e 100644
--- a/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
+++ b/devapidocs/org/apache/hadoop/hbase/zookeeper/ZKUtil.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public final class ZKUtil
+public final class ZKUtil
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Internal HBase utility class for ZooKeeper.
 
@@ -738,7 +738,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 LOG
-private static finalorg.slf4j.Logger LOG
+private static finalorg.slf4j.Logger LOG
 
 
 
@@ -747,7 +747,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 zkDumpConnectionTimeOut
-private staticint zkDumpConnectionTimeOut
+private staticint zkDumpConnectionTimeOut
 
 
 
@@ -764,7 +764,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 ZKUtil
-privateZKUtil()
+privateZKUtil()
 
 
 
@@ -781,7 +781,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 connect
-public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,
+public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,

org.apache.zookeeper.Watcherwatcher)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Creates a new connection to ZooKeeper, pulling settings and 
ensemble config
@@ -805,7 +805,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 connect
-public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,
+public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringensemble,

org.apache.zookeeper.Watcherwatcher)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -821,7 +821,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 connect
-public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,
+public staticRecoverableZooKeeperconnect(org.apache.hadoop.conf.Configurationconf,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringensemble,

org.apache.zookeeper.Watcherwatcher,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringidentifier)
@@ -838,7 +838,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 loginServer
-public staticvoidloginServer(org.apache.hadoop.conf.Configurationconf,
+public staticvoidloginServer(org.apache.hadoop.conf.Configurationconf,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringkeytabFileKey,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserNameKey,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname)
@@ -866,7 +866,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 loginClient
-public staticvoidloginClient(org.apache.hadoop.conf.Configurationconf,
+public staticvoidloginClient(org.apache.hadoop.conf.Configurationconf,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringkeytabFileKey,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserNameKey,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname)
@@ -894,7 +894,7 @@ extends 

[24/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
index ea05301..26a93dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
@@ -269,3590 +269,3574 @@
 261   */
 262  protected ClusterConnection 
clusterConnection;
 263
-264  /*
-265   * Long-living meta table locator, 
which is created when the server is started and stopped
-266   * when server shuts down. References 
to this locator shall be used to perform according
-267   * operations in EventHandlers. Primary 
reason for this decision is to make it mockable
-268   * for tests.
-269   */
-270  protected MetaTableLocator 
metaTableLocator;
-271
-272  /**
-273   * Go here to get table descriptors.
-274   */
-275  protected TableDescriptors 
tableDescriptors;
-276
-277  // Replication services. If no 
replication, this handler will be null.
-278  protected ReplicationSourceService 
replicationSourceHandler;
-279  protected ReplicationSinkService 
replicationSinkHandler;
-280
-281  // Compactions
-282  public CompactSplit 
compactSplitThread;
-283
-284  /**
-285   * Map of regions currently being 
served by this region server. Key is the
-286   * encoded region name.  All access 
should be synchronized.
-287   */
-288  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
-289
-290  /**
-291   * Map of encoded region names to the 
DataNode locations they should be hosted on
-292   * We store the value as 
InetSocketAddress since this is used only in HDFS
-293   * API (create() that takes favored 
nodes as hints for placing file blocks).
-294   * We could have used ServerName here 
as the value class, but we'd need to
-295   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
-296   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
-297   * and here we really mean DataNode 
locations.
-298   */
-299  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
-300  new ConcurrentHashMap();
-301
-302  // Leases
-303  protected Leases leases;
+264  /**
+265   * Go here to get table descriptors.
+266   */
+267  protected TableDescriptors 
tableDescriptors;
+268
+269  // Replication services. If no 
replication, this handler will be null.
+270  protected ReplicationSourceService 
replicationSourceHandler;
+271  protected ReplicationSinkService 
replicationSinkHandler;
+272
+273  // Compactions
+274  public CompactSplit 
compactSplitThread;
+275
+276  /**
+277   * Map of regions currently being 
served by this region server. Key is the
+278   * encoded region name.  All access 
should be synchronized.
+279   */
+280  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
+281
+282  /**
+283   * Map of encoded region names to the 
DataNode locations they should be hosted on
+284   * We store the value as 
InetSocketAddress since this is used only in HDFS
+285   * API (create() that takes favored 
nodes as hints for placing file blocks).
+286   * We could have used ServerName here 
as the value class, but we'd need to
+287   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
+288   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
+289   * and here we really mean DataNode 
locations.
+290   */
+291  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
+292  new ConcurrentHashMap();
+293
+294  // Leases
+295  protected Leases leases;
+296
+297  // Instance of the hbase executor 
executorService.
+298  protected ExecutorService 
executorService;
+299
+300  // If false, the file system has become 
unavailable
+301  protected volatile boolean fsOk;
+302  protected HFileSystem fs;
+303  protected HFileSystem walFs;
 304
-305  // Instance of the hbase executor 
executorService.
-306  protected ExecutorService 
executorService;
-307
-308  // If false, the file system has become 
unavailable
-309  protected volatile boolean fsOk;
-310  protected HFileSystem fs;
-311  protected HFileSystem walFs;
-312
-313  // Set when a report to the master 
comes back with a message asking us to
-314  // shutdown. Also set by call to stop 
when debugging or running unit tests
-315  // of HRegionServer in isolation.
-316  private volatile boolean stopped = 
false;
-317
-318  // Go down hard. Used if file system 
becomes unavailable and also in
-319  // debugging and unit tests.
-320  private volatile 

[23/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index ea05301..26a93dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -269,3590 +269,3574 @@
 261   */
 262  protected ClusterConnection 
clusterConnection;
 263
-264  /*
-265   * Long-living meta table locator, 
which is created when the server is started and stopped
-266   * when server shuts down. References 
to this locator shall be used to perform according
-267   * operations in EventHandlers. Primary 
reason for this decision is to make it mockable
-268   * for tests.
-269   */
-270  protected MetaTableLocator 
metaTableLocator;
-271
-272  /**
-273   * Go here to get table descriptors.
-274   */
-275  protected TableDescriptors 
tableDescriptors;
-276
-277  // Replication services. If no 
replication, this handler will be null.
-278  protected ReplicationSourceService 
replicationSourceHandler;
-279  protected ReplicationSinkService 
replicationSinkHandler;
-280
-281  // Compactions
-282  public CompactSplit 
compactSplitThread;
-283
-284  /**
-285   * Map of regions currently being 
served by this region server. Key is the
-286   * encoded region name.  All access 
should be synchronized.
-287   */
-288  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
-289
-290  /**
-291   * Map of encoded region names to the 
DataNode locations they should be hosted on
-292   * We store the value as 
InetSocketAddress since this is used only in HDFS
-293   * API (create() that takes favored 
nodes as hints for placing file blocks).
-294   * We could have used ServerName here 
as the value class, but we'd need to
-295   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
-296   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
-297   * and here we really mean DataNode 
locations.
-298   */
-299  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
-300  new ConcurrentHashMap();
-301
-302  // Leases
-303  protected Leases leases;
+264  /**
+265   * Go here to get table descriptors.
+266   */
+267  protected TableDescriptors 
tableDescriptors;
+268
+269  // Replication services. If no 
replication, this handler will be null.
+270  protected ReplicationSourceService 
replicationSourceHandler;
+271  protected ReplicationSinkService 
replicationSinkHandler;
+272
+273  // Compactions
+274  public CompactSplit 
compactSplitThread;
+275
+276  /**
+277   * Map of regions currently being 
served by this region server. Key is the
+278   * encoded region name.  All access 
should be synchronized.
+279   */
+280  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
+281
+282  /**
+283   * Map of encoded region names to the 
DataNode locations they should be hosted on
+284   * We store the value as 
InetSocketAddress since this is used only in HDFS
+285   * API (create() that takes favored 
nodes as hints for placing file blocks).
+286   * We could have used ServerName here 
as the value class, but we'd need to
+287   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
+288   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
+289   * and here we really mean DataNode 
locations.
+290   */
+291  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
+292  new ConcurrentHashMap();
+293
+294  // Leases
+295  protected Leases leases;
+296
+297  // Instance of the hbase executor 
executorService.
+298  protected ExecutorService 
executorService;
+299
+300  // If false, the file system has become 
unavailable
+301  protected volatile boolean fsOk;
+302  protected HFileSystem fs;
+303  protected HFileSystem walFs;
 304
-305  // Instance of the hbase executor 
executorService.
-306  protected ExecutorService 
executorService;
-307
-308  // If false, the file system has become 
unavailable
-309  protected volatile boolean fsOk;
-310  protected HFileSystem fs;
-311  protected HFileSystem walFs;
-312
-313  // Set when a report to the master 
comes back with a message asking us to
-314  // shutdown. Also set by call to stop 
when debugging or running unit tests
-315  // of HRegionServer in isolation.
-316  private volatile boolean stopped = 
false;
-317
-318  // Go down hard. Used if file system 
becomes unavailable and also in
-319  // debugging and unit tests.
-320  private volatile boolean 
abortRequested;
-321  public static final String 
ABORT_TIMEOUT = "hbase.regionserver.abort.timeout";
-322  // Default abort timeout 

[38/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
index 422ad2f..97f206a 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":9,"i24":9,"i25":10,"i26":9,"i27":9,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":9,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":9,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":9,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":9,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":9,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":9,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,"i11
 
0":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":9,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":9,"i22":10,"i23":9,"i24":9,"i25":10,"i26":9,"i27":9,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":9,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":9,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":9,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":9,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":9,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":9,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,"i11
 
0":10,"i111":10,"i112":10,"i113":10,"i114":9,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.LimitedPrivate(value="Tools")
  @InterfaceStability.Evolving
-public class HBaseFsck
+public class HBaseFsck
 extends org.apache.hadoop.conf.Configured
 implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
 title="class or interface in java.io">Closeable
 HBaseFsck (hbck) is a tool for checking and repairing 
region consistency and
@@ -880,33 +880,29 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 getMaxOverlapsToSideline()
 
 
-private ServerName
-getMetaRegionServerName(intreplicaId)
-
-
 private HBaseFsck.HbckInfo
 getOrCreateInfo(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
 Gets the entry in regionInfo corresponding to the the given 
encoded
  region name.
 
 
-
+
 org.apache.hbase.thirdparty.com.google.common.collect.Multimapbyte[],HBaseFsck.HbckInfo
 getOverlapGroups(TableNametable)
 
-
+
 int
 getRetCode()
 
-
+
 private org.apache.hadoop.fs.Path
 getSidelineDir()
 
-
+
 (package private) TableDescriptor[]
 getTableDescriptors(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListTableNametableNames)
 
-
+
 (package private) TableDescriptor[]
 getTables(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">AtomicIntegernumSkipped)
 Return a list of user-space table names whose 

[30/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
index 2cdee19..e6bc675 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -110,2406 +110,2407 @@
 102import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 103import 
org.apache.hadoop.hbase.util.Pair;
 104import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-105import 
org.apache.yetus.audience.InterfaceAudience;
-106import 
org.apache.zookeeper.KeeperException;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109
-110import 
org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-111import 
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-112import 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-113
-114import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-154import 

[40/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
index 03f5fdc..3efb403 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HBaseFsck.OnlineEntry
+static class HBaseFsck.OnlineEntry
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Stores the regioninfo retrieved from Online region 
servers.
 
@@ -206,7 +206,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 hri
-RegionInfo hri
+RegionInfo hri
 
 
 
@@ -215,7 +215,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 hsa
-ServerName hsa
+ServerName hsa
 
 
 
@@ -232,7 +232,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 OnlineEntry
-OnlineEntry()
+OnlineEntry()
 
 
 
@@ -249,7 +249,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 toString
-publichttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
+publichttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringtoString()
 
 Overrides:
 https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--;
 title="class or interface in java.lang">toStringin 
classhttps://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index 47b9a9e..0be63ac 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HBaseFsck.PrintingErrorReporter
+static class HBaseFsck.PrintingErrorReporter
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements HBaseFsck.ErrorReporter
 
@@ -301,7 +301,7 @@ implements 
 
 errorCount
-publicint errorCount
+publicint errorCount
 
 
 
@@ -310,7 +310,7 @@ implements 
 
 showProgress
-privateint showProgress
+privateint showProgress
 
 
 
@@ -319,7 +319,7 @@ implements 
 
 progressThreshold
-private static finalint progressThreshold
+private static finalint progressThreshold
 
 See Also:
 Constant
 Field Values
@@ -332,7 +332,7 @@ implements 
 
 errorTables
-https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetHBaseFsck.TableInfo errorTables
+https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetHBaseFsck.TableInfo errorTables
 
 
 
@@ -341,7 +341,7 @@ implements 
 
 errorList
-privatehttps://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
 title="class or interface in java.util">ArrayListHBaseFsck.ErrorReporter.ERROR_CODE errorList
+privatehttps://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
 title="class or interface in java.util">ArrayListHBaseFsck.ErrorReporter.ERROR_CODE errorList
 
 
 
@@ -358,7 +358,7 @@ implements 
 
 PrintingErrorReporter
-PrintingErrorReporter()
+PrintingErrorReporter()
 
 
 
@@ -375,7 +375,7 @@ implements 
 
 clear
-publicvoidclear()
+publicvoidclear()
 
 Specified by:
 clearin
 interfaceHBaseFsck.ErrorReporter
@@ -388,7 +388,7 @@ implements 
 
 reportError
-publicvoidreportError(HBaseFsck.ErrorReporter.ERROR_CODEerrorCode,
+publicvoidreportError(HBaseFsck.ErrorReporter.ERROR_CODEerrorCode,
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringmessage)
 
 Specified by:
@@ -402,7 +402,7 @@ implements 
 
 reportError
-publicvoidreportError(HBaseFsck.ErrorReporter.ERROR_CODEerrorCode,
+publicvoidreportError(HBaseFsck.ErrorReporter.ERROR_CODEerrorCode,
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or 

[44/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/master/MasterServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterServices.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterServices.html
index 9e51da1..a620cbf 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterServices.html
@@ -471,7 +471,7 @@ extends Server
-createConnection,
 getChoreService,
 getClusterConnection,
 getConfiguration,
 getConnection,
 getCoordinatedStateManager,
 getFileSystem,
 getMetaTableLocator,
 getServerName,
 getZooKeeper, isStopping
+createConnection,
 getChoreService,
 getClusterConnection,
 getConfiguration,
 getConnection,
 getCoordinatedStateManager,
 getFileSystem,
 getServerName,
 getZooKeeper,
 isStopping
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/master/MasterStatusServlet.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterStatusServlet.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterStatusServlet.html
index a837a46..0dafaa4 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterStatusServlet.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterStatusServlet.html
@@ -124,7 +124,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MasterStatusServlet
+public class MasterStatusServlet
 extends javax.servlet.http.HttpServlet
 The servlet responsible for rendering the index page of the
  master.
@@ -242,7 +242,7 @@ extends javax.servlet.http.HttpServlet
 
 
 serialVersionUID
-private static finallong serialVersionUID
+private static finallong serialVersionUID
 
 See Also:
 Constant
 Field Values
@@ -263,7 +263,7 @@ extends javax.servlet.http.HttpServlet
 
 
 MasterStatusServlet
-publicMasterStatusServlet()
+publicMasterStatusServlet()
 
 
 
@@ -280,7 +280,7 @@ extends javax.servlet.http.HttpServlet
 
 
 doGet
-publicvoiddoGet(javax.servlet.http.HttpServletRequestrequest,
+publicvoiddoGet(javax.servlet.http.HttpServletRequestrequest,
   javax.servlet.http.HttpServletResponseresponse)
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -297,7 +297,7 @@ extends javax.servlet.http.HttpServlet
 
 
 getMetaLocationOrNull
-privateServerNamegetMetaLocationOrNull(HMastermaster)
+privateServerNamegetMetaLocationOrNull(HMastermaster)
 
 
 
@@ -306,7 +306,7 @@ extends javax.servlet.http.HttpServlet
 
 
 getFragmentationInfo
-privatehttps://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">IntegergetFragmentationInfo(HMastermaster,
+privatehttps://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">IntegergetFragmentationInfo(HMastermaster,
  
org.apache.hadoop.conf.Configurationconf)
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
index c03b1ea..9b187b9 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
@@ -347,10 +347,10 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.master.RegionState.State
 

[27/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index ea05301..26a93dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -269,3590 +269,3574 @@
 261   */
 262  protected ClusterConnection 
clusterConnection;
 263
-264  /*
-265   * Long-living meta table locator, 
which is created when the server is started and stopped
-266   * when server shuts down. References 
to this locator shall be used to perform according
-267   * operations in EventHandlers. Primary 
reason for this decision is to make it mockable
-268   * for tests.
-269   */
-270  protected MetaTableLocator 
metaTableLocator;
-271
-272  /**
-273   * Go here to get table descriptors.
-274   */
-275  protected TableDescriptors 
tableDescriptors;
-276
-277  // Replication services. If no 
replication, this handler will be null.
-278  protected ReplicationSourceService 
replicationSourceHandler;
-279  protected ReplicationSinkService 
replicationSinkHandler;
-280
-281  // Compactions
-282  public CompactSplit 
compactSplitThread;
-283
-284  /**
-285   * Map of regions currently being 
served by this region server. Key is the
-286   * encoded region name.  All access 
should be synchronized.
-287   */
-288  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
-289
-290  /**
-291   * Map of encoded region names to the 
DataNode locations they should be hosted on
-292   * We store the value as 
InetSocketAddress since this is used only in HDFS
-293   * API (create() that takes favored 
nodes as hints for placing file blocks).
-294   * We could have used ServerName here 
as the value class, but we'd need to
-295   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
-296   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
-297   * and here we really mean DataNode 
locations.
-298   */
-299  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
-300  new ConcurrentHashMap();
-301
-302  // Leases
-303  protected Leases leases;
+264  /**
+265   * Go here to get table descriptors.
+266   */
+267  protected TableDescriptors 
tableDescriptors;
+268
+269  // Replication services. If no 
replication, this handler will be null.
+270  protected ReplicationSourceService 
replicationSourceHandler;
+271  protected ReplicationSinkService 
replicationSinkHandler;
+272
+273  // Compactions
+274  public CompactSplit 
compactSplitThread;
+275
+276  /**
+277   * Map of regions currently being 
served by this region server. Key is the
+278   * encoded region name.  All access 
should be synchronized.
+279   */
+280  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
+281
+282  /**
+283   * Map of encoded region names to the 
DataNode locations they should be hosted on
+284   * We store the value as 
InetSocketAddress since this is used only in HDFS
+285   * API (create() that takes favored 
nodes as hints for placing file blocks).
+286   * We could have used ServerName here 
as the value class, but we'd need to
+287   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
+288   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
+289   * and here we really mean DataNode 
locations.
+290   */
+291  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
+292  new ConcurrentHashMap();
+293
+294  // Leases
+295  protected Leases leases;
+296
+297  // Instance of the hbase executor 
executorService.
+298  protected ExecutorService 
executorService;
+299
+300  // If false, the file system has become 
unavailable
+301  protected volatile boolean fsOk;
+302  protected HFileSystem fs;
+303  protected HFileSystem walFs;
 304
-305  // Instance of the hbase executor 
executorService.
-306  protected ExecutorService 
executorService;
-307
-308  // If false, the file system has become 
unavailable
-309  protected volatile boolean fsOk;
-310  protected HFileSystem fs;
-311  protected HFileSystem walFs;
-312
-313  // Set when a report to the master 
comes back with a message asking us to
-314  // shutdown. Also set by call to stop 
when debugging or running unit tests
-315  // of HRegionServer in isolation.
-316  private volatile boolean stopped = 
false;
-317
-318  // Go down hard. Used if file system 
becomes unavailable and also in
-319  // debugging and unit tests.
-320  private volatile boolean 
abortRequested;
-321  public static final String 

[50/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index e1d46ba..ce593d3 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -291,10 +291,10 @@
 Warnings
 Errors
 
-3816
+3817
 0
 0
-14791
+14743
 
 Files
 
@@ -669,170 +669,160 @@
 0
 5
 
-org/apache/hadoop/hbase/Server.java
-0
-0
-1
-
 org/apache/hadoop/hbase/ServerLoad.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/ServerName.java
 0
 0
 25
-
+
 org/apache/hadoop/hbase/SplitLogCounters.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/SplitLogTask.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/TableDescriptors.java
 0
 0
 12
-
+
 org/apache/hadoop/hbase/TableInfoMissingException.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/TableName.java
 0
 0
 17
-
+
 org/apache/hadoop/hbase/TableNotDisabledException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TableNotEnabledException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TableNotFoundException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TagType.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TestCellUtil.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TestClassFinder.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/TestClientClusterStatus.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/TestClientOperationTimeout.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TestClusterPortAssignment.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
 0
 0
 6
-
+
 org/apache/hadoop/hbase/TestHBaseConfiguration.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TestHBaseTestingUtility.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/TestHColumnDescriptor.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TestHTableDescriptor.java
 0
 0
 11
-
+
 org/apache/hadoop/hbase/TestIOFencing.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/TestInfoServers.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/TestJMXConnectorServer.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/TestKeyValue.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/TestLocalHBaseCluster.java
 0
 0
 1
-
-org/apache/hadoop/hbase/TestMetaTableAccessor.java
-0
-0
-8
 
-org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
+org/apache/hadoop/hbase/TestMetaTableAccessor.java
 0
 0
 7
 
-org/apache/hadoop/hbase/TestMetaTableLocator.java
+org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java
 0
 0
-41
+7
 
 org/apache/hadoop/hbase/TestMovedRegionsCleaner.java
 0
@@ -4602,7 +4592,7 @@
 org/apache/hadoop/hbase/master/MasterStatusServlet.java
 0
 0
-4
+3
 
 org/apache/hadoop/hbase/master/MasterWalManager.java
 0
@@ -5487,7 +5477,7 @@
 org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
 0
 0
-2
+1
 
 org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
 0
@@ -6182,7 +6172,7 @@
 org/apache/hadoop/hbase/regionserver/HRegionServer.java
 0
 0
-74
+73
 
 org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
 0
@@ -9027,7 +9017,7 @@
 org/apache/hadoop/hbase/util/HBaseFsck.java
 0
 0
-104
+102
 
 org/apache/hadoop/hbase/util/HBaseFsckRepair.java
 0
@@ -9674,7 +9664,7 @@
 
 blocks
 http://checkstyle.sourceforge.net/config_blocks.html#EmptyBlock;>EmptyBlock
-41
+40
 Error
 
 
@@ -9684,12 +9674,12 @@
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#NeedBraces;>NeedBraces
-1760
+1756
 Error
 
 coding
 http://checkstyle.sourceforge.net/config_coding.html#EmptyStatement;>EmptyStatement
-30
+29
 Error
 
 
@@ -9751,7 +9741,7 @@
 sortStaticImportsAlphabetically: true
 groups: 
*,org.apache.hbase.thirdparty,org.apache.hadoop.hbase.shaded
 option: top
-1131
+1128
 Error
 
 
@@ -9763,7 +9753,7 @@
 http://checkstyle.sourceforge.net/config_imports.html#UnusedImports;>UnusedImports
 
 processJavadoc: true
-95
+94
 Error
 
 indentation
@@ -9774,19 +9764,19 @@
 caseIndent: 2
 basicOffset: 2
 lineWrappingIndentation: 2
-4663
+4649
 Error
 
 javadoc
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation;>JavadocTagContinuationIndentation
 
 offset: 2
-730
+728
 Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription;>NonEmptyAtclauseDescription
-3436
+3417
 Error
 
 misc
@@ -9804,7 +9794,7 @@
 
 max: 100
 ignorePattern: ^package.*|^import.*|a 
href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated
-1429
+1426
 Error
 
 
@@ -17320,21 +17310,6 @@
 'if' construct must use '{}'s.
 274
 
-org/apache/hadoop/hbase/Server.java
-
-
-Severity
-Category
-Rule
-Message
-Line
-
-Error
-imports
-ImportOrder
-Wrong order for 'java.io.IOException' import.
-29
-
 

[28/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index ea05301..26a93dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -269,3590 +269,3574 @@
 261   */
 262  protected ClusterConnection 
clusterConnection;
 263
-264  /*
-265   * Long-living meta table locator, 
which is created when the server is started and stopped
-266   * when server shuts down. References 
to this locator shall be used to perform according
-267   * operations in EventHandlers. Primary 
reason for this decision is to make it mockable
-268   * for tests.
-269   */
-270  protected MetaTableLocator 
metaTableLocator;
-271
-272  /**
-273   * Go here to get table descriptors.
-274   */
-275  protected TableDescriptors 
tableDescriptors;
-276
-277  // Replication services. If no 
replication, this handler will be null.
-278  protected ReplicationSourceService 
replicationSourceHandler;
-279  protected ReplicationSinkService 
replicationSinkHandler;
-280
-281  // Compactions
-282  public CompactSplit 
compactSplitThread;
-283
-284  /**
-285   * Map of regions currently being 
served by this region server. Key is the
-286   * encoded region name.  All access 
should be synchronized.
-287   */
-288  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
-289
-290  /**
-291   * Map of encoded region names to the 
DataNode locations they should be hosted on
-292   * We store the value as 
InetSocketAddress since this is used only in HDFS
-293   * API (create() that takes favored 
nodes as hints for placing file blocks).
-294   * We could have used ServerName here 
as the value class, but we'd need to
-295   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
-296   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
-297   * and here we really mean DataNode 
locations.
-298   */
-299  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
-300  new ConcurrentHashMap();
-301
-302  // Leases
-303  protected Leases leases;
+264  /**
+265   * Go here to get table descriptors.
+266   */
+267  protected TableDescriptors 
tableDescriptors;
+268
+269  // Replication services. If no 
replication, this handler will be null.
+270  protected ReplicationSourceService 
replicationSourceHandler;
+271  protected ReplicationSinkService 
replicationSinkHandler;
+272
+273  // Compactions
+274  public CompactSplit 
compactSplitThread;
+275
+276  /**
+277   * Map of regions currently being 
served by this region server. Key is the
+278   * encoded region name.  All access 
should be synchronized.
+279   */
+280  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
+281
+282  /**
+283   * Map of encoded region names to the 
DataNode locations they should be hosted on
+284   * We store the value as 
InetSocketAddress since this is used only in HDFS
+285   * API (create() that takes favored 
nodes as hints for placing file blocks).
+286   * We could have used ServerName here 
as the value class, but we'd need to
+287   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
+288   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
+289   * and here we really mean DataNode 
locations.
+290   */
+291  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
+292  new ConcurrentHashMap();
+293
+294  // Leases
+295  protected Leases leases;
+296
+297  // Instance of the hbase executor 
executorService.
+298  protected ExecutorService 
executorService;
+299
+300  // If false, the file system has become 
unavailable
+301  protected volatile boolean fsOk;
+302  protected HFileSystem fs;
+303  protected HFileSystem walFs;
 304
-305  // Instance of the hbase executor 
executorService.
-306  protected ExecutorService 
executorService;
-307
-308  // If false, the file system has become 
unavailable
-309  protected volatile boolean fsOk;
-310  protected HFileSystem fs;
-311  protected HFileSystem walFs;
-312
-313  // Set when a report to the master 
comes back with a message asking us to
-314  // shutdown. Also set by call to stop 
when debugging or running unit tests
-315  // of HRegionServer in isolation.
-316  private volatile boolean stopped = 
false;
-317
-318  // Go down hard. Used if file system 
becomes unavailable and also in
-319  // debugging and unit tests.
-320  private volatile boolean 
abortRequested;
-321  public static final 

[48/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
index 4da24d26..25fe0d6 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
@@ -120,10 +120,6 @@
 org.apache.hadoop.hbase.util
 
 
-
-org.apache.hadoop.hbase.zookeeper
-
-
 
 
 
@@ -794,49 +790,8 @@
 
 
 
-
-
-
-
-Uses of ClusterConnection in org.apache.hadoop.hbase.util
-
-Fields in org.apache.hadoop.hbase.util
 declared as ClusterConnection
-
-Modifier and Type
-Field and Description
-
-
-
-private ClusterConnection
-HBaseFsck.connection
-
-
-private ClusterConnection
-HBaseFsck.WorkItemRegion.connection
-
-
-
-
-Constructors in org.apache.hadoop.hbase.util
 with parameters of type ClusterConnection
-
-Constructor and Description
-
-
-
-WorkItemRegion(HBaseFsckhbck,
-  ServerNameinfo,
-  HBaseFsck.ErrorReportererrors,
-  ClusterConnectionconnection)
-
-
-
-
-
-
-
-Uses of ClusterConnection in org.apache.hadoop.hbase.zookeeper
 
-Methods in org.apache.hadoop.hbase.zookeeper
 with parameters of type ClusterConnection
+Methods in org.apache.hadoop.hbase.rsgroup
 with parameters of type ClusterConnection
 
 Modifier and Type
 Method and Description
@@ -844,12 +799,12 @@
 
 
 private static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface
-MetaTableLocator.getCachedConnection(ClusterConnectionconnection,
+Utility.getCachedConnection(ClusterConnectionconnection,
ServerNamesn)
 
 
-private 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface
-MetaTableLocator.getMetaServerConnection(ClusterConnectionconnection,
+private static 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface
+Utility.getMetaServerConnection(ClusterConnectionconnection,
ZKWatcherzkw,
longtimeout,
intreplicaId)
@@ -858,16 +813,16 @@
 
 
 
-boolean
-MetaTableLocator.verifyMetaRegionLocation(ClusterConnectionhConnection,
+static boolean
+Utility.verifyMetaRegionLocation(ClusterConnectionhConnection,
 ZKWatcherzkw,
 longtimeout)
 Verify hbase:meta is deployed and 
accessible.
 
 
 
-boolean
-MetaTableLocator.verifyMetaRegionLocation(ClusterConnectionconnection,
+static boolean
+Utility.verifyMetaRegionLocation(ClusterConnectionconnection,
 ZKWatcherzkw,
 longtimeout,
 intreplicaId)
@@ -875,8 +830,8 @@
 
 
 
-private boolean
-MetaTableLocator.verifyRegionLocation(ClusterConnectionconnection,
+private static boolean
+Utility.verifyRegionLocation(ClusterConnectionconnection,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterfacehostingServer,
 ServerNameaddress,
 byte[]regionName)
@@ -887,6 +842,42 @@
 
 
 
+
+
+
+Uses of ClusterConnection in org.apache.hadoop.hbase.util
+
+Fields in org.apache.hadoop.hbase.util
 declared as ClusterConnection
+
+Modifier and Type
+Field and Description
+
+
+
+private ClusterConnection
+HBaseFsck.connection
+
+
+private ClusterConnection
+HBaseFsck.WorkItemRegion.connection
+
+
+
+
+Constructors in org.apache.hadoop.hbase.util
 with parameters of type ClusterConnection
+
+Constructor and Description
+
+
+
+WorkItemRegion(HBaseFsckhbck,
+  ServerNameinfo,
+  HBaseFsck.ErrorReportererrors,
+  ClusterConnectionconnection)
+
+
+
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
index 094abbe..4e44863 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
@@ -7046,28 +7046,28 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
+private static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 

[29/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterStatusServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterStatusServlet.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterStatusServlet.html
index 98a70a6..8f8bcd8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterStatusServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterStatusServlet.html
@@ -30,82 +30,78 @@
 022import java.util.List;
 023import java.util.Map;
 024import java.util.Set;
-025
-026import javax.servlet.http.HttpServlet;
-027import 
javax.servlet.http.HttpServletRequest;
-028import 
javax.servlet.http.HttpServletResponse;
-029
-030import 
org.apache.yetus.audience.InterfaceAudience;
-031import 
org.apache.hadoop.conf.Configuration;
-032import 
org.apache.hadoop.hbase.ServerName;
-033import 
org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
-034import 
org.apache.hadoop.hbase.util.FSUtils;
-035import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-036
-037/**
-038 * The servlet responsible for rendering 
the index page of the
-039 * master.
-040 */
-041@InterfaceAudience.Private
-042public class MasterStatusServlet extends 
HttpServlet {
-043  private static final long 
serialVersionUID = 1L;
-044
-045  @Override
-046  public void doGet(HttpServletRequest 
request, HttpServletResponse response)
-047throws IOException
-048  {
-049HMaster master = (HMaster) 
getServletContext().getAttribute(HMaster.MASTER);
-050assert master != null : "No Master in 
context!";
+025import javax.servlet.http.HttpServlet;
+026import 
javax.servlet.http.HttpServletRequest;
+027import 
javax.servlet.http.HttpServletResponse;
+028import 
org.apache.hadoop.conf.Configuration;
+029import 
org.apache.hadoop.hbase.ServerName;
+030import 
org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
+031import 
org.apache.hadoop.hbase.util.FSUtils;
+032import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+033import 
org.apache.yetus.audience.InterfaceAudience;
+034
+035/**
+036 * The servlet responsible for rendering 
the index page of the
+037 * master.
+038 */
+039@InterfaceAudience.Private
+040public class MasterStatusServlet extends 
HttpServlet {
+041  private static final long 
serialVersionUID = 1L;
+042
+043  @Override
+044  public void doGet(HttpServletRequest 
request, HttpServletResponse response)
+045throws IOException
+046  {
+047HMaster master = (HMaster) 
getServletContext().getAttribute(HMaster.MASTER);
+048assert master != null : "No Master in 
context!";
+049
+050
response.setContentType("text/html");
 051
-052
response.setContentType("text/html");
+052Configuration conf = 
master.getConfiguration();
 053
-054Configuration conf = 
master.getConfiguration();
-055
-056MapString, Integer frags = 
getFragmentationInfo(master, conf);
-057ServerName metaLocation = null;
-058ListServerName servers = 
null;
-059SetServerName deadServers = 
null;
-060
-061if(master.isActiveMaster()) {
-062  metaLocation = 
getMetaLocationOrNull(master);
-063  ServerManager serverManager = 
master.getServerManager();
-064  if (serverManager != null) {
-065deadServers = 
serverManager.getDeadServers().copyServerNames();
-066servers = 
serverManager.getOnlineServersList();
-067  }
-068}
-069
-070MasterStatusTmpl tmpl = new 
MasterStatusTmpl()
-071  .setFrags(frags)
-072  .setMetaLocation(metaLocation)
-073  .setServers(servers)
-074  .setDeadServers(deadServers)
-075  
.setCatalogJanitorEnabled(master.isCatalogJanitorEnabled());
-076
-077if (request.getParameter("filter") != 
null)
-078  
tmpl.setFilter(request.getParameter("filter"));
-079if (request.getParameter("format") != 
null)
-080  
tmpl.setFormat(request.getParameter("format"));
-081tmpl.render(response.getWriter(), 
master);
-082  }
-083
-084  private ServerName 
getMetaLocationOrNull(HMaster master) {
-085MetaTableLocator metaTableLocator = 
master.getMetaTableLocator();
-086return metaTableLocator == null ? 
null :
-087  
metaTableLocator.getMetaRegionLocation(master.getZooKeeper());
-088  }
-089
-090  private MapString, Integer 
getFragmentationInfo(
-091  HMaster master, Configuration conf) 
throws IOException {
-092boolean showFragmentation = 
conf.getBoolean(
-093
"hbase.master.ui.fragmentation.enabled", false);
-094if (showFragmentation) {
-095  return 
FSUtils.getTableFragmentation(master);
-096} else {
-097  return null;
-098}
-099  }
-100}
+054MapString, Integer frags = 
getFragmentationInfo(master, conf);
+055ServerName metaLocation = null;
+056ListServerName servers = 
null;
+057SetServerName deadServers = 
null;
+058
+059if(master.isActiveMaster()) {
+060  

[26/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index ea05301..26a93dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -269,3590 +269,3574 @@
 261   */
 262  protected ClusterConnection 
clusterConnection;
 263
-264  /*
-265   * Long-living meta table locator, 
which is created when the server is started and stopped
-266   * when server shuts down. References 
to this locator shall be used to perform according
-267   * operations in EventHandlers. Primary 
reason for this decision is to make it mockable
-268   * for tests.
-269   */
-270  protected MetaTableLocator 
metaTableLocator;
-271
-272  /**
-273   * Go here to get table descriptors.
-274   */
-275  protected TableDescriptors 
tableDescriptors;
-276
-277  // Replication services. If no 
replication, this handler will be null.
-278  protected ReplicationSourceService 
replicationSourceHandler;
-279  protected ReplicationSinkService 
replicationSinkHandler;
-280
-281  // Compactions
-282  public CompactSplit 
compactSplitThread;
-283
-284  /**
-285   * Map of regions currently being 
served by this region server. Key is the
-286   * encoded region name.  All access 
should be synchronized.
-287   */
-288  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
-289
-290  /**
-291   * Map of encoded region names to the 
DataNode locations they should be hosted on
-292   * We store the value as 
InetSocketAddress since this is used only in HDFS
-293   * API (create() that takes favored 
nodes as hints for placing file blocks).
-294   * We could have used ServerName here 
as the value class, but we'd need to
-295   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
-296   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
-297   * and here we really mean DataNode 
locations.
-298   */
-299  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
-300  new ConcurrentHashMap();
-301
-302  // Leases
-303  protected Leases leases;
+264  /**
+265   * Go here to get table descriptors.
+266   */
+267  protected TableDescriptors 
tableDescriptors;
+268
+269  // Replication services. If no 
replication, this handler will be null.
+270  protected ReplicationSourceService 
replicationSourceHandler;
+271  protected ReplicationSinkService 
replicationSinkHandler;
+272
+273  // Compactions
+274  public CompactSplit 
compactSplitThread;
+275
+276  /**
+277   * Map of regions currently being 
served by this region server. Key is the
+278   * encoded region name.  All access 
should be synchronized.
+279   */
+280  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
+281
+282  /**
+283   * Map of encoded region names to the 
DataNode locations they should be hosted on
+284   * We store the value as 
InetSocketAddress since this is used only in HDFS
+285   * API (create() that takes favored 
nodes as hints for placing file blocks).
+286   * We could have used ServerName here 
as the value class, but we'd need to
+287   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
+288   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
+289   * and here we really mean DataNode 
locations.
+290   */
+291  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
+292  new ConcurrentHashMap();
+293
+294  // Leases
+295  protected Leases leases;
+296
+297  // Instance of the hbase executor 
executorService.
+298  protected ExecutorService 
executorService;
+299
+300  // If false, the file system has become 
unavailable
+301  protected volatile boolean fsOk;
+302  protected HFileSystem fs;
+303  protected HFileSystem walFs;
 304
-305  // Instance of the hbase executor 
executorService.
-306  protected ExecutorService 
executorService;
-307
-308  // If false, the file system has become 
unavailable
-309  protected volatile boolean fsOk;
-310  protected HFileSystem fs;
-311  protected HFileSystem walFs;
-312
-313  // Set when a report to the master 
comes back with a message asking us to
-314  // shutdown. Also set by call to stop 
when debugging or running unit tests
-315  // of HRegionServer in isolation.
-316  private volatile boolean stopped = 
false;
-317
-318  // Go down hard. Used if file system 
becomes unavailable and also in
-319  // debugging and unit tests.
-320  private volatile boolean 
abortRequested;
-321  public 

[22/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
index 6f82cee..3bf3150 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.DummyServer.html
@@ -40,162 +40,156 @@
 032import 
org.apache.hadoop.hbase.client.ClusterConnection;
 033import 
org.apache.hadoop.hbase.client.Connection;
 034import 
org.apache.hadoop.hbase.util.FSUtils;
-035import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-036import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-037import org.apache.hadoop.util.Tool;
-038import 
org.apache.hadoop.util.ToolRunner;
-039import 
org.apache.yetus.audience.InterfaceAudience;
-040
-041/**
-042 * In a scenario of Replication based 
Disaster/Recovery, when hbase Master-Cluster crashes, this
-043 * tool is used to sync-up the delta from 
Master to Slave using the info from ZooKeeper. The tool
-044 * will run on Master-Cluser, and assume 
ZK, Filesystem and NetWork still available after hbase
-045 * crashes
-046 *
-047 * pre
-048 * hbase 
org.apache.hadoop.hbase.replication.regionserver.ReplicationSyncUp
-049 * /pre
-050 */
-051@InterfaceAudience.Private
-052public class ReplicationSyncUp extends 
Configured implements Tool {
-053
-054  private static final long SLEEP_TIME = 
1;
-055
-056  /**
-057   * Main program
-058   */
-059  public static void main(String[] args) 
throws Exception {
-060int ret = 
ToolRunner.run(HBaseConfiguration.create(), new ReplicationSyncUp(), args);
-061System.exit(ret);
-062  }
-063
-064  @Override
-065  public int run(String[] args) throws 
Exception {
-066Abortable abortable = new Abortable() 
{
-067  @Override
-068  public void abort(String why, 
Throwable e) {
-069  }
-070
-071  @Override
-072  public boolean isAborted() {
-073return false;
-074  }
-075};
-076Configuration conf = getConf();
-077try (ZKWatcher zkw =
-078  new ZKWatcher(conf, 
"syncupReplication" + System.currentTimeMillis(), abortable, true)) {
-079  Path walRootDir = 
FSUtils.getWALRootDir(conf);
-080  FileSystem fs = 
FSUtils.getWALFileSystem(conf);
-081  Path oldLogDir = new 
Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-082  Path logDir = new Path(walRootDir, 
HConstants.HREGION_LOGDIR_NAME);
-083
-084  System.out.println("Start 
Replication Server start");
-085  Replication replication = new 
Replication();
-086  replication.initialize(new 
DummyServer(zkw), fs, logDir, oldLogDir, null);
-087  ReplicationSourceManager manager = 
replication.getReplicationManager();
-088  manager.init().get();
-089  while 
(manager.activeFailoverTaskCount()  0) {
-090Thread.sleep(SLEEP_TIME);
-091  }
-092  while 
(manager.getOldSources().size()  0) {
-093Thread.sleep(SLEEP_TIME);
-094  }
-095  manager.join();
-096} catch (InterruptedException e) {
-097  System.err.println("didn't wait 
long enough:" + e);
-098  return -1;
-099}
-100return 0;
-101  }
-102
-103  class DummyServer implements Server {
-104String hostname;
-105ZKWatcher zkw;
-106
-107DummyServer(ZKWatcher zkw) {
-108  // an unique name in case the first 
run fails
-109  hostname = 
System.currentTimeMillis() + ".SyncUpTool.replication.org";
-110  this.zkw = zkw;
-111}
-112
-113DummyServer(String hostname) {
-114  this.hostname = hostname;
-115}
-116
-117@Override
-118public Configuration 
getConfiguration() {
-119  return getConf();
-120}
-121
-122@Override
-123public ZKWatcher getZooKeeper() {
-124  return zkw;
-125}
-126
-127@Override
-128public CoordinatedStateManager 
getCoordinatedStateManager() {
-129  return null;
-130}
-131
-132@Override
-133public MetaTableLocator 
getMetaTableLocator() {
-134  return null;
-135}
-136
-137@Override
-138public ServerName getServerName() {
-139  return ServerName.valueOf(hostname, 
1234, 1L);
-140}
-141
-142@Override
-143public void abort(String why, 
Throwable e) {
-144}
-145
-146@Override
-147public boolean isAborted() {
-148  return false;
-149}
-150
-151@Override
-152public void stop(String why) {
-153}
-154
-155@Override
-156public boolean isStopped() {
-157  return false;
-158}
-159
-160@Override
-161public ClusterConnection 
getConnection() {
-162  return null;
-163}
-164
-165

[49/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 8560113..195a4f2 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2018 The Apache Software Foundation
 
-  File: 3816,
- Errors: 14791,
+  File: 3817,
+ Errors: 14743,
  Warnings: 0,
  Infos: 0
   
@@ -8651,7 +8651,7 @@ under the License.
   0
 
 
-  8
+  7
 
   
   
@@ -14727,7 +14727,7 @@ under the License.
   0
 
 
-  74
+  73
 
   
   
@@ -16449,7 +16449,7 @@ under the License.
   0
 
 
-  2
+  1
 
   
   
@@ -18717,7 +18717,7 @@ under the License.
   0
 
 
-  4
+  3
 
   
   
@@ -24359,7 +24359,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -29082,6 +29082,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.rsgroup.TestUtility.java;>org/apache/hadoop/hbase/rsgroup/TestUtility.java
+
+
+  0
+
+
+  0
+
+
+  0
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.regionserver.MetricsRegionAggregateSourceImpl.java;>org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java
 
 
@@ -39927,7 +39941,7 @@ under the License.
   0
 
 
-  41
+  0
 
   
   
@@ -51911,7 +51925,7 @@ under the License.
   0
 
 
-  104
+  102
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/coc.html
--
diff --git a/coc.html b/coc.html
index 5f9e984..241b964 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -385,7 +385,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-12-03
+  Last Published: 
2018-12-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 5da6a60..0afcaff 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -450,7 +450,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-12-03
+  Last Published: 
2018-12-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index d5278d1..ea79107 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -680,7 +680,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-12-03
+  Last Published: 
2018-12-05
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/dependency-info.html
--
diff --git a/dependency-info.html 

[47/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index 8080e27..dc3be8f 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -481,7 +481,7 @@ implements HRegionServer
-ABORT_TIMEOUT,
 ABORT_TIMEOUT_TASK,
 cacheConfig,
 cacheFlusher,
 clusterConnection,
 clusterId,
 clusterStatusTracker,
 compactSplitThread,
 conf, configurationManager,
 csm,
 executorService,
 fs,
 fsOk,
 fsUtilizationChore,
 hMemManager,
 infoServer,
 leases, lock,
 MASTER_HOSTNAME_KEY,
 metaTableLocator,
 movedRegions,
 msgInterval,
 numRegionsToReport,
 onlineRegions,
 regionFavoredNodesMap,
 REGIONSERVER, regionsInTransitionInRS,
 replicationSinkHandler,
 replicationSourceHandler,
 rpcServices,
 secureBulkLoadManager,
 serverName,
 sleeper,
 startcode, tableDescriptors,
 TEST_SKIP_REPORTING_TRANSITION,
 threadWakeFrequency,
 useThisHostnameInstead,
 walFactory,
 walFs,
 walRoller,
 zooKeeper
+ABORT_TIMEOUT,
 ABORT_TIMEOUT_TASK,
 cacheConfig,
 cacheFlusher,
 clusterConnection,
 clusterId,
 clusterStatusTracker,
 compactSplitThread,
 conf, configurationManager,
 csm,
 executorService,
 fs,
 fsOk,
 fsUtilizationChore,
 hMemManager,
 infoServer,
 leases, lock,
 MASTER_HOSTNAME_KEY,
 movedRegions,
 msgInterval,
 numRegionsToReport,
 onlineRegions,
 regionFavoredNodesMap,
 REGIONSERVER,
 regionsInTransitionInRS, 
replicationSinkHandler,
 replicationSourceHandler,
 rpcServices,
 secureBulkLoadManager,
 serverName,
 sleeper,
 startcode,
 tableDescriptors, TEST_SKIP_REPORTING_TRANSITION,
 threadWakeFrequency,
 useThisHostnameInstead,
 walFactory,
 walFs,
 walRoller,
 zooKeeper
 
 
 
@@ -1467,7 +1467,7 @@ implements HRegionServer
-abort,
 addRegion,
 addToMovedRegions,
 checkFileSystem,
 cleanMovedRegions,
 clearRegionBlockCache,
 closeAllRegions, href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#closeAndOfflineRegionForSplitOrMerge-java.util.List-">closeAndOfflineRegionForSplitOrMerge,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#closeRegion-java.lang.String-boolean-org.apache.hadoop.hbase.ServerName-">closeRegion,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#constructRegionServer-java.lang.Class-org.apache.hadoop.conf.Configuration-">constructRegionServer,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#convertThrowableToIOE-java.lang.Throwable-java.lang.String-">convertThrowableToIOE,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#createConnection-org.apache.hadoop.conf.Configuration-">createConnection,
 > href="../../../../../org/apache/hadoop/hbase/regionserver/HRegionServer.html#createRegionLoad-java.lang.String-">createRegionLoad,
  createRegionServerStatusStub,
 createRegionServerStatusStub,
 execRegionServerService,
 executeProcedure,
 getCacheConfig,
 getChoreServi
 ce, getClusterConnection,
 getClusterId,
 getCompactionPressure,
 getCompactionRequestor,
 getCompactSplitThread,
 getConfiguration,
 getConfigurationManager,
 getConnection, getCoordinatedStateManager,
 getEventLoopGroupConfig,
 getExecutorService,
 getFavoredNodesForRegion,
 getFileSystem,
 getFlushPressure,
 getFlushRequester,
 getFlushThroughputController,
 getFsTableDescriptors,
 getHeapMemoryManager,
 getInfoServer,
 getLastSequenceId,
 getLeases,
 getMasterAddressTracker,
 getMetaTableLocator,
 getMetrics,
 getMostLoadedRegions,
 getNonceManager,
 getNumberOfOnlineRegions,
 getOnlineRegion,
 getOnlineRegionsLocalContext,
 getOnlineTables,
 getRegion,
 getRegion,
 getRegionBlockLocations,
 getRegionByEncodedName,
 getRegionByEncodedName,
 getRegions,
 getRegions,
 getRegionServerAccounting,
 getRegionServerCoprocessorHost,
 getRegionServerCoprocessors,
 getRegionServerMetrics,
 getRegionServerRpcQuotaManager,
 getRegionServerSpaceQuotaManager,
 getRegionsInTransitionInRS,
 getReplicationSinkService,
 getReplicationSourceService,
 getRootDir,
 getRpcServer,
 getRSRpcServices,
 getSecureBulkLoadManager,
 getStartcode,
 getTableDescriptors,
 getThreadWakeFrequency,
 getWAL,
 getWALFileSystem,
 getWalGroupsReplicationStatus,
 getWalRoller,
 getWALRootDir,
 getWALs,
 handleReportForDutyResponse,
 initializeMemStoreChunkCreator,
 isAborted,
 isClusterUp,
 isOnline,
 isShutDown,
 isStopped,
 isStopping,
 kill,
 mo
 vedRegionCleanerPeriod, onConfigurationChange,
 postOpenDeployTasks,
 regionLock,
 remoteProcedureComplete,
 removeRegion,
 reportFileArchivalForQuotas,
 reportRegionSizesForQuotas,
 reportRegionStateTransition,
 sendShutdownInterrupt,
 setupClusterConnection,
 stop,
 toString,
 tryRegionServerReport,
 unassign,
 updateConfiguration,
 

[42/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.html
index 5549ccc..399a075 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.html
@@ -304,7 +304,7 @@ extends Server
-createConnection,
 getChoreService,
 getClusterConnection,
 getConfiguration,
 getConnection,
 getCoordinatedStateManager,
 getFileSystem,
 getMetaTableLocator,
 getServerName,
 getZooKeeper, isStopping
+createConnection,
 getChoreService,
 getClusterConnection,
 getConfiguration,
 getConnection,
 getCoordinatedStateManager,
 getFileSystem,
 getServerName,
 getZooKeeper,
 isStopping
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 52bcb5f..39721a5 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -716,20 +716,20 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
-org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
+org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
 org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
-org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.FlushType
+org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
 org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
 org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
+org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.FlushType
+org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index 2731576..3bd22b5 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -131,8 +131,8 @@
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
-org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
 org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
+org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
 
 
 


[37/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html 
b/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
index 397f886..da375bf 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
@@ -2077,11 +2077,11 @@ Input/OutputFormats, a table indexing MapReduce job, 
and utility methods.
 
 
 
-https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
+static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
 MetaTableLocator.getMetaRegionsAndLocations(ZKWatcherzkw)
 
 
-https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
+static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
 MetaTableLocator.getMetaRegionsAndLocations(ZKWatcherzkw,
   intreplicaId)
 Gets the meta regions and their locations for the given 
path and replica ID.
@@ -2097,7 +2097,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
+private static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 MetaTableLocator.getListOfRegionInfos(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerNamepairs)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
index 0661cfe..f9a1d76 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
@@ -540,14 +540,14 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
+org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer
 (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
+org.apache.hadoop.hbase.util.ChecksumType
+org.apache.hadoop.hbase.util.PrettyPrinter.Unit
 org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer
 (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
+org.apache.hadoop.hbase.util.Order
 org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE
-org.apache.hadoop.hbase.util.ChecksumType
 org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType
-org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer
 (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
 org.apache.hadoop.hbase.util.PoolMap.PoolType
-org.apache.hadoop.hbase.util.PrettyPrinter.Unit
-org.apache.hadoop.hbase.util.Order
 
 
 



[43/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 8d1bfab..cd14215 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":9,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":9,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109
 
":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":9,"i132":10,"i133":9,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":9,"i153":10,"i154":9,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":9,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":9,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109
 
":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":9,"i131":10,"i132":9,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":9,"i152":10,"i153":9,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -351,82 +351,78 @@ implements MASTERLESS_CONFIG_NAME
 
 
-protected MetaTableLocator
-metaTableLocator
-
-
 (package private) MetricsRegionServer
 metricsRegionServer
 
-
+
 (package private) MetricsTable
 metricsTable
 
-
+
 (package private) MobCacheConfig
 mobCacheConfig
 
-
+
 protected https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,HRegionServer.MovedRegionInfo
 movedRegions
 
-
+
 private HRegionServer.MovedRegionsCleaner
 movedRegionsCleaner
 Chore to clean periodically the moved region list
 
 
-
+
 protected int
 msgInterval
 
-
+
 (package private) ServerNonceManager
 nonceManager
 Nonce manager.
 
 
-
+
 private ScheduledChore
 nonceManagerChore
 The nonce manager chore.
 
 
-
+
 protected int
 numRegionsToReport
 
-
+
 (package private) int
 

[25/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
index ea05301..26a93dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
@@ -269,3590 +269,3574 @@
 261   */
 262  protected ClusterConnection 
clusterConnection;
 263
-264  /*
-265   * Long-living meta table locator, 
which is created when the server is started and stopped
-266   * when server shuts down. References 
to this locator shall be used to perform according
-267   * operations in EventHandlers. Primary 
reason for this decision is to make it mockable
-268   * for tests.
-269   */
-270  protected MetaTableLocator 
metaTableLocator;
-271
-272  /**
-273   * Go here to get table descriptors.
-274   */
-275  protected TableDescriptors 
tableDescriptors;
-276
-277  // Replication services. If no 
replication, this handler will be null.
-278  protected ReplicationSourceService 
replicationSourceHandler;
-279  protected ReplicationSinkService 
replicationSinkHandler;
-280
-281  // Compactions
-282  public CompactSplit 
compactSplitThread;
-283
-284  /**
-285   * Map of regions currently being 
served by this region server. Key is the
-286   * encoded region name.  All access 
should be synchronized.
-287   */
-288  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
-289
-290  /**
-291   * Map of encoded region names to the 
DataNode locations they should be hosted on
-292   * We store the value as 
InetSocketAddress since this is used only in HDFS
-293   * API (create() that takes favored 
nodes as hints for placing file blocks).
-294   * We could have used ServerName here 
as the value class, but we'd need to
-295   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
-296   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
-297   * and here we really mean DataNode 
locations.
-298   */
-299  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
-300  new ConcurrentHashMap();
-301
-302  // Leases
-303  protected Leases leases;
+264  /**
+265   * Go here to get table descriptors.
+266   */
+267  protected TableDescriptors 
tableDescriptors;
+268
+269  // Replication services. If no 
replication, this handler will be null.
+270  protected ReplicationSourceService 
replicationSourceHandler;
+271  protected ReplicationSinkService 
replicationSinkHandler;
+272
+273  // Compactions
+274  public CompactSplit 
compactSplitThread;
+275
+276  /**
+277   * Map of regions currently being 
served by this region server. Key is the
+278   * encoded region name.  All access 
should be synchronized.
+279   */
+280  protected final MapString, 
HRegion onlineRegions = new ConcurrentHashMap();
+281
+282  /**
+283   * Map of encoded region names to the 
DataNode locations they should be hosted on
+284   * We store the value as 
InetSocketAddress since this is used only in HDFS
+285   * API (create() that takes favored 
nodes as hints for placing file blocks).
+286   * We could have used ServerName here 
as the value class, but we'd need to
+287   * convert it to InetSocketAddress at 
some point before the HDFS API call, and
+288   * it seems a bit weird to store 
ServerName since ServerName refers to RegionServers
+289   * and here we really mean DataNode 
locations.
+290   */
+291  protected final MapString, 
InetSocketAddress[] regionFavoredNodesMap =
+292  new ConcurrentHashMap();
+293
+294  // Leases
+295  protected Leases leases;
+296
+297  // Instance of the hbase executor 
executorService.
+298  protected ExecutorService 
executorService;
+299
+300  // If false, the file system has become 
unavailable
+301  protected volatile boolean fsOk;
+302  protected HFileSystem fs;
+303  protected HFileSystem walFs;
 304
-305  // Instance of the hbase executor 
executorService.
-306  protected ExecutorService 
executorService;
-307
-308  // If false, the file system has become 
unavailable
-309  protected volatile boolean fsOk;
-310  protected HFileSystem fs;
-311  protected HFileSystem walFs;
-312
-313  // Set when a report to the master 
comes back with a message asking us to
-314  // shutdown. Also set by call to stop 
when debugging or running unit tests
-315  // of HRegionServer in isolation.
-316  private volatile boolean stopped = 
false;
-317
-318  // Go down hard. Used if file system 
becomes unavailable and also in
-319  // debugging and unit tests.
-320  private volatile boolean 

[18/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 

[05/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a handful of arguments that
-211 * can be used to limit the kinds of 

[02/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a handful 

[01/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site e46798831 -> 275553168


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If 

[15/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a handful of arguments that
-211 * can be used 

[07/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.HDFSIntegrityFixer.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from 

[08/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a 

[11/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If hbck is run from the command line, 
there are a handful of arguments that
-211 * can be used to limit 

[20/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.html
index 809f66f..9b60dd0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.html
@@ -765,146 +765,145 @@
 757found.set(true);
 758try {
 759  boolean rootMetaFound =
-760  
masterServices.getMetaTableLocator().verifyMetaRegionLocation(
-761  conn, 
masterServices.getZooKeeper(), 1);
-762  if (rootMetaFound) {
-763MetaTableAccessor.Visitor 
visitor = new DefaultVisitorBase() {
-764  @Override
-765  public boolean 
visitInternal(Result row) throws IOException {
-766RegionInfo info = 
MetaTableAccessor.getRegionInfo(row);
-767if (info != null) {
-768  Cell serverCell =
-769  
row.getColumnLatestCell(HConstants.CATALOG_FAMILY,
-770  
HConstants.SERVER_QUALIFIER);
-771  if 
(RSGROUP_TABLE_NAME.equals(info.getTable())  serverCell != null) {
-772ServerName sn =
-773
ServerName.parseVersionedServerName(CellUtil.cloneValue(serverCell));
-774if (sn == null) {
-775  found.set(false);
-776} else if 
(tsm.isTableState(RSGROUP_TABLE_NAME, TableState.State.ENABLED)) {
-777  try {
-778
ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
-779
ClientProtos.GetRequest request =
-780
RequestConverter.buildGetRequest(info.getRegionName(),
-781new 
Get(ROW_KEY));
-782rs.get(null, 
request);
-783
assignedRegions.add(info);
-784  } catch(Exception 
ex) {
-785LOG.debug("Caught 
exception while verifying group region", ex);
-786  }
-787}
-788
foundRegions.add(info);
-789  }
-790}
-791return true;
-792  }
-793};
-794
MetaTableAccessor.fullScanRegions(conn, visitor);
-795// if no regions in meta then 
we have to create the table
-796if (foundRegions.size()  
1  rootMetaFound  !createSent) {
-797  createRSGroupTable();
-798  createSent = true;
-799}
-800LOG.info("RSGroup table=" + 
RSGROUP_TABLE_NAME + " isOnline=" + found.get()
-801+ ", regionCount=" + 
foundRegions.size() + ", assignCount="
-802+ assignedRegions.size() 
+ ", rootMetaFound=" + rootMetaFound);
-803found.set(found.get() 
 assignedRegions.size() == foundRegions.size()
-804 
foundRegions.size()  0);
-805  } else {
-806LOG.info("Waiting for catalog 
tables to come online");
-807found.set(false);
-808  }
-809  if (found.get()) {
-810LOG.debug("With group table 
online, refreshing cached information.");
-811
RSGroupInfoManagerImpl.this.refresh(true);
-812online = true;
-813//flush any inconsistencies 
between ZK and HTable
-814
RSGroupInfoManagerImpl.this.flushConfig();
-815  }
-816} catch (RuntimeException e) {
-817  throw e;
-818} catch(Exception e) {
-819  found.set(false);
-820  LOG.warn("Failed to perform 
check", e);
-821}
-822try {
-823  Thread.sleep(100);
-824} catch (InterruptedException e) 
{
-825  LOG.info("Sleep interrupted", 
e);
-826}
-827  }
-828  return found.get();
-829}
-830
-831private void createRSGroupTable() 
throws IOException {
-832  Long procId = 
masterServices.createSystemTable(RSGROUP_TABLE_DESC);
-833  // wait for region to be online
-834  int tries = 600;
-835  while 
(!(masterServices.getMasterProcedureExecutor().isFinished(procId))
-836   
masterServices.getMasterProcedureExecutor().isRunning()
-837   tries  0) {
-838try {
-839  Thread.sleep(100);
-840} catch (InterruptedException e) 
{
-841  throw new IOException("Wait 
interrupted ", e);
-842}
-843tries--;
-844  }
-845  if(tries = 0) {
-846throw new IOException("Failed to 
create group table in a given time.");
-847  } else {
-848Procedure? result =