[18/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
index 214b37c..4d1bf1e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.html
@@ -67,381 +67,390 @@
 059   // apply it to a region in this state, as it may lead to data loss as we
 060   // may have some data in recovered edits.
 061
-062    /**
-063     * Convert to protobuf ClusterStatusProtos.RegionState.State
-064     */
-065    public ClusterStatusProtos.RegionState.State convert() {
-066      ClusterStatusProtos.RegionState.State rs;
-067      switch (this) {
-068        case OFFLINE:
-069          rs = ClusterStatusProtos.RegionState.State.OFFLINE;
-070          break;
-071        case OPENING:
-072          rs = ClusterStatusProtos.RegionState.State.OPENING;
-073          break;
-074        case OPEN:
-075          rs = ClusterStatusProtos.RegionState.State.OPEN;
-076          break;
-077        case CLOSING:
-078          rs = ClusterStatusProtos.RegionState.State.CLOSING;
+062    public boolean matches(State... expected) {
+063      for (State state : expected) {
+064        if (this == state) {
+065          return true;
+066        }
+067      }
+068      return false;
+069    }
+070
+071    /**
+072     * Convert to protobuf ClusterStatusProtos.RegionState.State
+073     */
+074    public ClusterStatusProtos.RegionState.State convert() {
+075      ClusterStatusProtos.RegionState.State rs;
+076      switch (this) {
+077        case OFFLINE:
+078          rs = ClusterStatusProtos.RegionState.State.OFFLINE;
 079          break;
-080        case CLOSED:
-081          rs = ClusterStatusProtos.RegionState.State.CLOSED;
+080        case OPENING:
+081          rs = ClusterStatusProtos.RegionState.State.OPENING;
 082          break;
-083        case SPLITTING:
-084          rs = ClusterStatusProtos.RegionState.State.SPLITTING;
+083        case OPEN:
+084          rs = ClusterStatusProtos.RegionState.State.OPEN;
 085          break;
-086        case SPLIT:
-087          rs = ClusterStatusProtos.RegionState.State.SPLIT;
+086        case CLOSING:
+087          rs = ClusterStatusProtos.RegionState.State.CLOSING;
 088          break;
-089        case FAILED_OPEN:
-090          rs = ClusterStatusProtos.RegionState.State.FAILED_OPEN;
+089        case CLOSED:
+090          rs = ClusterStatusProtos.RegionState.State.CLOSED;
 091          break;
-092        case FAILED_CLOSE:
-093          rs = ClusterStatusProtos.RegionState.State.FAILED_CLOSE;
+092        case SPLITTING:
+093          rs = ClusterStatusProtos.RegionState.State.SPLITTING;
 094          break;
-095        case MERGING:
-096          rs = ClusterStatusProtos.RegionState.State.MERGING;
+095        case SPLIT:
+096          rs = ClusterStatusProtos.RegionState.State.SPLIT;
 097          break;
-098        case MERGED:
-099          rs = ClusterStatusProtos.RegionState.State.MERGED;
+098        case FAILED_OPEN:
+099          rs = ClusterStatusProtos.RegionState.State.FAILED_OPEN;
 100          break;
-101        case SPLITTING_NEW:
-102          rs = ClusterStatusProtos.RegionState.State.SPLITTING_NEW;
+101        case FAILED_CLOSE:
+102          rs = ClusterStatusProtos.RegionState.State.FAILED_CLOSE;
 103          break;
-104        case MERGING_NEW:
-105          rs = ClusterStatusProtos.RegionState.State.MERGING_NEW;
+104        case MERGING:
+105          rs = ClusterStatusProtos.RegionState.State.MERGING;
 106          break;
-107        case ABNORMALLY_CLOSED:
-108          rs = ClusterStatusProtos.RegionState.State.ABNORMALLY_CLOSED;
+107        case MERGED:
+108          rs = ClusterStatusProtos.RegionState.State.MERGED;
 109          break;
-110        default:
-111          throw new IllegalStateException("");
-112      }
-113      return rs;
-114    }
-115
-116    /**
-117     * Convert a protobuf HBaseProtos.RegionState.State to a RegionState.State
-118     *
-119     * @return the RegionState.State
-120     */
-121    public static State convert(ClusterStatusProtos.RegionState.State protoState) {
-122      State state;
-123      switch (protoState) {
-124        case OFFLINE:
-125          state = OFFLINE;
-126          break;
-127        case PENDING_OPEN:
-128        case OPENING:
-129          state = OPENING;
-130          break;
-131        case OPEN:
-132          state = OPEN;
-133          break;
-134        case PENDING_CLOSE:
-135        case CLOSING:
-136          state = CLOSING;
-137          break;
-138        case CLOSED:
-139          state = CLOSED;
-140          break;
-141        case

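For reference, a minimal, self-contained sketch of the varargs matches(...) helper this diff adds to RegionState.State. The trimmed-down enum is illustrative only; the real enum carries many more states (SPLITTING, MERGING, FAILED_OPEN, ...) plus the protobuf convert() round trip shown above.

// StateMatchSketch.java - illustrative only, not the real RegionState.State.
public class StateMatchSketch {
  enum State {
    OFFLINE, OPENING, OPEN, CLOSING, CLOSED;

    // True if this state equals any of the expected states.
    boolean matches(State... expected) {
      for (State state : expected) {
        if (this == state) {
          return true;
        }
      }
      return false;
    }
  }

  public static void main(String[] args) {
    State s = State.OPENING;
    // One call replaces a chain of == comparisons at the call site.
    System.out.println(s.matches(State.OPEN, State.OPENING));   // true
    System.out.println(s.matches(State.CLOSED, State.OFFLINE)); // false
  }
}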
[18/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.html
index 0740f3f..7e363ab 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -124,7 +124,7 @@ var activeTableTab = "activeTableTab";


 @InterfaceAudience.Private
-public class ServerCrashProcedure
+public class ServerCrashProcedure
 extends StateMachineProcedure<MasterProcedureEnv, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState>
 implements ServerProcedureInterface
 Handle crashed server. This is a port to ProcedureV2 of what used to be euphemistically called
@@ -290,112 +290,126 @@ implements
+private void
+cleanupSplitDir(MasterProcedureEnv env)
+
+private Procedure[]
+createSplittingWalProcedures(MasterProcedureEnv env, boolean splitMeta)
+
 protected void
 deserializeStateData(ProcedureStateSerializer serializer)
 Called on store load to allow the user to decode the previously serialized state.

 protected StateMachineProcedure.Flow
 executeFromState(MasterProcedureEnv env, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState state)
 called to perform a single step of the specified 'state' of the procedure

 private boolean
 filterDefaultMetaRegions()

 protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState
 getInitialState()
 Return the initial state object that will be used for the first call to executeFromState().

 protected ProcedureMetrics
 getProcedureMetrics(MasterProcedureEnv env)
 Override this method to provide procedure specific counters for submitted count, failed count and time histogram.

 ServerName
 getServerName()

 ServerProcedureInterface.ServerOperationType
 getServerOperationType()
 Given an operation type we can take decisions about what to do with pending operations.

 protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState
 getState(int stateId)
 Convert an ordinal (or state id) to an Enum (or more descriptive) state object.

 protected int
 getStateId(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState state)
 Convert the Enum (or more descriptive) state object to an ordinal (or state id).

 boolean
 hasMetaTableRegion()

 protected boolean
 holdLock(MasterProcedureEnv env)
 Used to keep the procedure lock even when the procedure is yielding or suspended.

 private boolean
 isDefaultMetaRegion(RegionInfo hri)

 boolean
 isInRecoverMetaState()

+private boolean
+isSplittingDone(MasterProcedureEnv env, boolean splitMeta)
+
 protected void
 releaseLock(MasterProcedureEnv env)
 The user should override this method, and release lock if necessary.

 protected void
 rollbackState(MasterProcedureEnv env, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState state)
 called to perform the rollback of the specified state

 protected void
 serializeStateData(ProcedureStateSerializer serializer)
 The user-level code of the procedure may have some state to persist (e.g.

 protected boolean
 shouldWaitClientAck(MasterProcedureEnv env)
 By default, the executor will keep the procedure result around until the eviction TTL is expired.

 private void
 splitLogs(MasterProcedureEnv env)

 private void
 splitMetaLogs(MasterProcedureEnv env)

 void
 toStringClassDetails(StringBuilder sb)
 Extend the toString() information with the procedure details
@@ -444,7 +458,7 @@ implements

 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG

@@ -453,7

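For context, a hypothetical sketch (not HBase's actual code) of the StateMachineProcedure idea behind ServerCrashProcedure: getInitialState() seeds the loop and executeFromState() performs one step per call, naming the next state; helpers like the new createSplittingWalProcedures/isSplittingDone slot into such steps. The states and transitions below are invented for illustration.

// StateMachineSketch.java - illustrative states, not MasterProcedureProtos.ServerCrashState.
public class StateMachineSketch {
  enum CrashState { SPLIT_LOGS, ASSIGN_REGIONS, FINISH }

  // One step per call; returns the next state, or null when done.
  static CrashState executeFromState(CrashState state) {
    switch (state) {
      case SPLIT_LOGS:
        System.out.println("split WALs of the crashed server");
        return CrashState.ASSIGN_REGIONS;
      case ASSIGN_REGIONS:
        System.out.println("reassign its regions to live servers");
        return CrashState.FINISH;
      default:
        return null; // FINISH: nothing left to do
    }
  }

  public static void main(String[] args) {
    CrashState s = CrashState.SPLIT_LOGS; // stands in for getInitialState()
    while (s != null) {
      s = executeFromState(s);
    }
  }
}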
[18/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder.html
index 2e150bc..0b315b8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder.html
@@ -25,22 +25,22 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
-021import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
-022import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+020import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+021import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
+022import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
 023
-024import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-025
-026import java.util.List;
-027import java.util.concurrent.CompletableFuture;
-028import java.util.concurrent.TimeUnit;
-029
-030import org.apache.hadoop.hbase.HRegionLocation;
-031import org.apache.hadoop.hbase.ServerName;
-032import org.apache.hadoop.hbase.TableName;
-033import org.apache.yetus.audience.InterfaceAudience;
-034import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-035import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+024import java.util.List;
+025import java.util.concurrent.CompletableFuture;
+026import java.util.concurrent.TimeUnit;
+027import org.apache.hadoop.hbase.HRegionLocation;
+028import org.apache.hadoop.hbase.ServerName;
+029import org.apache.hadoop.hbase.TableName;
+030import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+031import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+032import org.apache.yetus.audience.InterfaceAudience;
+033
+034import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+035
 036import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 037import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
 038
@@ -83,432 +83,441 @@
 075
 076    private RegionLocateType locateType = RegionLocateType.CURRENT;
 077
-078    public SingleRequestCallerBuilder<T> table(TableName tableName) {
-079      this.tableName = tableName;
-080      return this;
-081    }
-082
-083    public SingleRequestCallerBuilder<T> row(byte[] row) {
-084      this.row = row;
-085      return this;
-086    }
-087
-088    public SingleRequestCallerBuilder<T> action(
-089        AsyncSingleRequestRpcRetryingCaller.Callable<T> callable) {
-090      this.callable = callable;
-091      return this;
-092    }
-093
-094    public SingleRequestCallerBuilder<T> operationTimeout(long operationTimeout, TimeUnit unit) {
-095      this.operationTimeoutNs = unit.toNanos(operationTimeout);
-096      return this;
-097    }
-098
-099    public SingleRequestCallerBuilder<T> rpcTimeout(long rpcTimeout, TimeUnit unit) {
-100      this.rpcTimeoutNs = unit.toNanos(rpcTimeout);
-101      return this;
-102    }
-103
-104    public SingleRequestCallerBuilder<T> locateType(RegionLocateType locateType) {
-105      this.locateType = locateType;
-106      return this;
-107    }
-108
-109    public SingleRequestCallerBuilder<T> pause(long pause, TimeUnit unit) {
-110      this.pauseNs = unit.toNanos(pause);
-111      return this;
-112    }
-113
-114    public SingleRequestCallerBuilder<T> maxAttempts(int maxAttempts) {
-115      this.maxAttempts = maxAttempts;
-116      return this;
-117    }
-118
-119    public SingleRequestCallerBuilder<T> startLogErrorsCnt(int startLogErrorsCnt) {
-120      this.startLogErrorsCnt = startLogErrorsCnt;
-121      return this;
-122    }
-123
-124    public AsyncSingleRequestRpcRetryingCaller<T> build() {
-125      return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn,
-126          checkNotNull(tableName, "tableName is null"), checkNotNull(row, "row is null"),
-127          checkNotNull(locateType, "locateType is null"), checkNotNull(callable, "action is null"),
-128          pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
+078    private int replicaId = RegionReplicaUtil.DEFAULT_REPLICA_ID;
+079
+080    public SingleRequestCallerBuilder<T> table(TableName tableName) {
+081      this.tableName = tableName;
+082      return this;
+083    }
+084
+085    public SingleRequestCallerBuilder<T> row(byte[]

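For reference, a hypothetical sketch of the fluent builder pattern SingleRequestCallerBuilder uses: every setter returns this so calls chain, and build() defers validation of required fields, mirroring the checkNotNull calls above. The class and field names below are simplified stand-ins, not the HBase API.

// SingleCallBuilder.java - invented names, illustrating the pattern only.
import java.util.Objects;
import java.util.concurrent.TimeUnit;

class SingleCallBuilder {
  private String tableName;
  private byte[] row;
  private long operationTimeoutNs;

  SingleCallBuilder table(String tableName) {
    this.tableName = tableName;
    return this; // returning this is what makes chained calls possible
  }

  SingleCallBuilder row(byte[] row) {
    this.row = row;
    return this;
  }

  SingleCallBuilder operationTimeout(long timeout, TimeUnit unit) {
    this.operationTimeoutNs = unit.toNanos(timeout);
    return this;
  }

  String build() {
    // Validation is deferred to build(), like the checkNotNull calls above.
    Objects.requireNonNull(tableName, "tableName is null");
    Objects.requireNonNull(row, "row is null");
    return tableName + "/" + new String(row) + "/" + operationTimeoutNs + "ns";
  }

  public static void main(String[] args) {
    String caller = new SingleCallBuilder()
        .table("test")
        .row("r1".getBytes())
        .operationTimeout(30, TimeUnit.SECONDS)
        .build();
    System.out.println(caller);
  }
}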
[18/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.HttpKerberosServerAction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.HttpKerberosServerAction.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.HttpKerberosServerAction.html
index 4ddef9a..5828be2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.HttpKerberosServerAction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.HttpKerberosServerAction.html
@@ -26,8 +26,8 @@
 018
 019package org.apache.hadoop.hbase.thrift;
 020
-021import static org.apache.hadoop.hbase.thrift.ThriftServerRunner.THRIFT_SPNEGO_KEYTAB_FILE_KEY;
-022import static org.apache.hadoop.hbase.thrift.ThriftServerRunner.THRIFT_SPNEGO_PRINCIPAL_KEY;
+021import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SPNEGO_KEYTAB_FILE_KEY;
+022import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SPNEGO_PRINCIPAL_KEY;
 023
 024import java.io.IOException;
 025import java.security.PrivilegedExceptionAction;
@@ -66,7 +66,7 @@
 058  private static final Logger LOG = LoggerFactory.getLogger(ThriftHttpServlet.class.getName());
 059  private final transient UserGroupInformation serviceUGI;
 060  private final transient UserGroupInformation httpUGI;
-061  private final transient ThriftServerRunner.HBaseHandler hbaseHandler;
+061  private final transient HBaseServiceHandler handler;
 062  private final boolean doAsEnabled;
 063  private final boolean securityEnabled;
 064
@@ -75,11 +75,11 @@
 067
 068  public ThriftHttpServlet(TProcessor processor, TProtocolFactory protocolFactory,
 069      UserGroupInformation serviceUGI, Configuration conf,
-070      ThriftServerRunner.HBaseHandler hbaseHandler, boolean securityEnabled, boolean doAsEnabled)
+070      HBaseServiceHandler handler, boolean securityEnabled, boolean doAsEnabled)
 071      throws IOException {
 072    super(processor, protocolFactory);
 073    this.serviceUGI = serviceUGI;
-074    this.hbaseHandler = hbaseHandler;
+074    this.handler = handler;
 075    this.securityEnabled = securityEnabled;
 076    this.doAsEnabled = doAsEnabled;
 077
@@ -154,7 +154,7 @@
 146      }
 147      effectiveUser = doAsUserFromQuery;
 148    }
-149    hbaseHandler.setEffectiveUser(effectiveUser);
+149    handler.setEffectiveUser(effectiveUser);
 150    super.doPost(request, response);
 151  }
 152

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.RemoteUserIdentity.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.RemoteUserIdentity.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.RemoteUserIdentity.html
index 4ddef9a..5828be2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.RemoteUserIdentity.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.RemoteUserIdentity.html
@@ -26,8 +26,8 @@
 018
 019package org.apache.hadoop.hbase.thrift;
 020
-021import static org.apache.hadoop.hbase.thrift.ThriftServerRunner.THRIFT_SPNEGO_KEYTAB_FILE_KEY;
-022import static org.apache.hadoop.hbase.thrift.ThriftServerRunner.THRIFT_SPNEGO_PRINCIPAL_KEY;
+021import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SPNEGO_KEYTAB_FILE_KEY;
+022import static org.apache.hadoop.hbase.thrift.Constants.THRIFT_SPNEGO_PRINCIPAL_KEY;
 023
 024import java.io.IOException;
 025import java.security.PrivilegedExceptionAction;
@@ -66,7 +66,7 @@
 058  private static final Logger LOG = LoggerFactory.getLogger(ThriftHttpServlet.class.getName());
 059  private final transient UserGroupInformation serviceUGI;
 060  private final transient UserGroupInformation httpUGI;
-061  private final transient ThriftServerRunner.HBaseHandler hbaseHandler;
+061  private final transient HBaseServiceHandler handler;
 062  private final boolean doAsEnabled;
 063  private final boolean securityEnabled;
 064
@@ -75,11 +75,11 @@
 067
 068  public ThriftHttpServlet(TProcessor processor, TProtocolFactory protocolFactory,
 069      UserGroupInformation serviceUGI, Configuration conf,
-070      ThriftServerRunner.HBaseHandler hbaseHandler, boolean securityEnabled, boolean doAsEnabled)
+070      HBaseServiceHandler handler, boolean securityEnabled, boolean doAsEnabled)
 071      throws IOException {
 072    super(processor, protocolFactory);
 073    this.serviceUGI = serviceUGI;
-074    this.hbaseHandler = hbaseHandler;
+074    this.handler = handler;
 075    this.securityEnabled = securityEnabled;
 076    this.doAsEnabled = doAsEnabled;
 077
@@ -154,7 +154,7 @@
 146      }
 147      effectiveUser = doAsUserFromQuery;
 
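For context, a hypothetical sketch of the doAs flow visible in the hunks above: the servlet resolves an effective user (the authenticated principal, optionally overridden by a doAs request parameter), pushes it into the handler, then delegates the request. The types below are stand-ins for the HBase ones.

// DoAsSketch.java - invented types illustrating the effective-user hand-off.
interface ServiceHandler {
  void setEffectiveUser(String user);
}

class DoAsSketch {
  private final ServiceHandler handler;
  private final boolean doAsEnabled;

  DoAsSketch(ServiceHandler handler, boolean doAsEnabled) {
    this.handler = handler;
    this.doAsEnabled = doAsEnabled;
  }

  void handlePost(String authenticatedUser, String doAsUserFromQuery) {
    String effectiveUser = authenticatedUser;
    if (doAsEnabled && doAsUserFromQuery != null) {
      // Real code must first verify authenticatedUser is allowed to proxy.
      effectiveUser = doAsUserFromQuery;
    }
    handler.setEffectiveUser(effectiveUser);
    // ...then delegate to the normal request processing (super.doPost(...)).
  }

  public static void main(String[] args) {
    DoAsSketch servlet =
        new DoAsSketch(user -> System.out.println("effective user: " + user), true);
    servlet.handlePost("alice", "bob"); // prints: effective user: bob
  }
}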

[18/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/http/FilterContainer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/FilterContainer.html b/devapidocs/org/apache/hadoop/hbase/http/FilterContainer.html
index 9a0bbeb..a84d8ba 100644
--- a/devapidocs/org/apache/hadoop/hbase/http/FilterContainer.html
+++ b/devapidocs/org/apache/hadoop/hbase/http/FilterContainer.html
@@ -264,6 +264,6 @@ public interface
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/http/FilterInitializer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/FilterInitializer.html b/devapidocs/org/apache/hadoop/hbase/http/FilterInitializer.html
index 5e0f823..e326827 100644
--- a/devapidocs/org/apache/hadoop/hbase/http/FilterInitializer.html
+++ b/devapidocs/org/apache/hadoop/hbase/http/FilterInitializer.html
@@ -283,6 +283,6 @@ extends java.lang.Object
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/http/HtmlQuoting.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/HtmlQuoting.html b/devapidocs/org/apache/hadoop/hbase/http/HtmlQuoting.html
index b7b1d9c..4fcb49d 100644
--- a/devapidocs/org/apache/hadoop/hbase/http/HtmlQuoting.html
+++ b/devapidocs/org/apache/hadoop/hbase/http/HtmlQuoting.html
@@ -517,6 +517,6 @@ extends java.lang.Object
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/http/HttpConfig.Policy.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/HttpConfig.Policy.html b/devapidocs/org/apache/hadoop/hbase/http/HttpConfig.Policy.html
index 685afdc..b88e387 100644
--- a/devapidocs/org/apache/hadoop/hbase/http/HttpConfig.Policy.html
+++ b/devapidocs/org/apache/hadoop/hbase/http/HttpConfig.Policy.html
@@ -392,6 +392,6 @@ not permitted.)
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/http/HttpConfig.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/HttpConfig.html b/devapidocs/org/apache/hadoop/hbase/http/HttpConfig.html
index a9c9564..354dd26 100644
--- a/devapidocs/org/apache/hadoop/hbase/http/HttpConfig.html
+++ b/devapidocs/org/apache/hadoop/hbase/http/HttpConfig.html
@@ -364,6 +364,6 @@ extends java.lang.Object
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLog.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLog.html b/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLog.html
index 59b735f..77799a0 100644
--- a/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLog.html
+++ b/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLog.html
@@ -333,6 +333,6 @@ extends java.lang.Object
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLogAppender.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLogAppender.html

[18/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html
index 2c6f38a..c537059 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.MutationType.html
@@ -28,1039 +28,1086 @@
 020package org.apache.hadoop.hbase.coprocessor;
 021
 022import java.io.IOException;
-023import java.util.List;
-024import java.util.Map;
-025
-026import org.apache.hadoop.fs.FileSystem;
-027import org.apache.hadoop.fs.Path;
-028import org.apache.hadoop.hbase.Cell;
-029import org.apache.hadoop.hbase.CompareOperator;
-030import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-031import org.apache.hadoop.hbase.client.Append;
-032import org.apache.hadoop.hbase.client.Delete;
-033import org.apache.hadoop.hbase.client.Durability;
-034import org.apache.hadoop.hbase.client.Get;
-035import org.apache.hadoop.hbase.client.Increment;
-036import org.apache.hadoop.hbase.client.Mutation;
-037import org.apache.hadoop.hbase.client.Put;
-038import org.apache.hadoop.hbase.client.RegionInfo;
-039import org.apache.hadoop.hbase.client.Result;
-040import org.apache.hadoop.hbase.client.Scan;
-041import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-042import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-043import org.apache.hadoop.hbase.io.Reference;
-044import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-045import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
-046import org.apache.hadoop.hbase.regionserver.InternalScanner;
-047import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
-048import org.apache.hadoop.hbase.regionserver.OperationStatus;
-049import org.apache.hadoop.hbase.regionserver.Region;
-050import org.apache.hadoop.hbase.regionserver.Region.Operation;
-051import org.apache.hadoop.hbase.regionserver.RegionScanner;
-052import org.apache.hadoop.hbase.regionserver.ScanOptions;
-053import org.apache.hadoop.hbase.regionserver.ScanType;
-054import org.apache.hadoop.hbase.regionserver.Store;
-055import org.apache.hadoop.hbase.regionserver.StoreFile;
-056import org.apache.hadoop.hbase.regionserver.StoreFileReader;
-057import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-058import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-059import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
-060import org.apache.hadoop.hbase.util.Pair;
-061import org.apache.hadoop.hbase.wal.WALEdit;
-062import org.apache.hadoop.hbase.wal.WALKey;
-063import org.apache.yetus.audience.InterfaceAudience;
-064import org.apache.yetus.audience.InterfaceStability;
-065
-066/**
-067 * Coprocessors implement this interface to observe and mediate client actions on the region.
-068 * <p>
-069 * Since most implementations will be interested in only a subset of hooks, this class uses
-070 * 'default' functions to avoid having to add unnecessary overrides. When the functions are
-071 * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type. It
-072 * is done in a way that these default definitions act as no-op. So our suggestion to implementation
-073 * would be to not call these 'default' methods from overrides.
-074 * <p>
-075 * <h3>Exception Handling</h3><br>
-076 * For all functions, exception handling is done as follows:
-077 * <ul>
-078 * <li>Exceptions of type {@link IOException} are reported back to client.</li>
-079 * <li>For any other kind of exception:
-080 * <ul>
-081 * <li>If the configuration {@link CoprocessorHost#ABORT_ON_ERROR_KEY} is set to true, then the
-082 * server aborts.</li>
-083 * <li>Otherwise, coprocessor is removed from the server and
-084 * {@link org.apache.hadoop.hbase.DoNotRetryIOException} is returned to the client.</li>
-085 * </ul>
-086 * </li>
-087 * </ul>
-088 * <p>
-089 * <h3>For Split Related Hooks</h3> <br>
-090 * In hbase2/AMv2, master runs splits, so the split related hooks are moved to
-091 * {@link MasterObserver}.
-092 * <p>
-093 * <h3>Increment Column Value</h3><br>
-094 * We do not call this hook anymore.
-095 */
-096@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-097@InterfaceStability.Evolving
-098// TODO as method signatures need to break, update to
-099// ObserverContext<? extends RegionCoprocessorEnvironment>
-100// so we can use additional environment state that isn't exposed to coprocessors.
-101public interface RegionObserver {
-102  /** Mutation type for postMutationBeforeWAL hook */
-103  enum MutationType {
-104    APPEND, INCREMENT
-105  }
-106
-107  /**
-108   * Called

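For reference, a small sketch of the default-method design the Javadoc describes: the interface ships no-op default hooks, so an implementation overrides only the hooks it cares about. The tiny interface below mimics the shape; it is not the real RegionObserver surface.

// ObserverSketch.java - invented hook names, illustrating default-method hooks.
import java.io.IOException;

interface ObserverSketch {
  // No-op defaults: implementors that don't care about puts inherit these.
  default void prePut(String row) throws IOException {
  }

  default void postPut(String row) throws IOException {
  }
}

// An implementation overrides only the single hook it is interested in.
class AuditingObserver implements ObserverSketch {
  @Override
  public void postPut(String row) {
    System.out.println("audit: put on row " + row);
  }

  public static void main(String[] args) throws IOException {
    ObserverSketch obs = new AuditingObserver();
    obs.prePut("r1");  // inherited no-op
    obs.postPut("r1"); // overridden hook fires
  }
}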
[18/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 6e51c4e..5a2a23d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -18,9 +18,9 @@
 010  public static final String version = "3.0.0-SNAPSHOT";
 011  public static final String revision = "";
 012  public static final String user = "jenkins";
-013  public static final String date = "Sat Dec 15 14:43:55 UTC 2018";
+013  public static final String date = "Tue Dec 18 14:46:13 UTC 2018";
 014  public static final String url = "git://jenkins-websites1.apache.org/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "25dbb3fd216cbc954b8b2ebf17377a47";
+015  public static final String srcChecksum = "0ff9598693406383f0355b09c8c7ea64";
 016}
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.ExternalBlockCaches.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.ExternalBlockCaches.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.ExternalBlockCaches.html
new file mode 100644
index 000..2a70085
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheFactory.ExternalBlockCaches.html
@@ -0,0 +1,298 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 *     http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.io.hfile;
+019
+020import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
+021import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
+022
+023import java.io.IOException;
+024
+025import org.apache.hadoop.conf.Configuration;
+026import org.apache.hadoop.hbase.HConstants;
+027import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
+028import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
+029import org.apache.hadoop.hbase.util.ReflectionUtils;
+030import org.apache.hadoop.util.StringUtils;
+031import org.apache.yetus.audience.InterfaceAudience;
+032import org.slf4j.Logger;
+033import org.slf4j.LoggerFactory;
+034
+035@InterfaceAudience.Private
+036public final class BlockCacheFactory {
+037
+038  private static final Logger LOG = LoggerFactory.getLogger(BlockCacheFactory.class.getName());
+039
+040  /**
+041   * Configuration keys for Bucket cache
+042   */
+043
+044  /**
+045   * If the chosen ioengine can persist its state across restarts, the path to the file to persist
+046   * to. This file is NOT the data file. It is a file into which we will serialize the map of
+047   * what is in the data file. For example, if you pass the following argument as
+048   * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"),
+049   * <code>file:/tmp/bucketcache.data</code>, then we will write the bucketcache data to the file
+050   * <code>/tmp/bucketcache.data</code> but the metadata on where the data is in the supplied file
+051   * is an in-memory map that needs to be persisted across restarts. Where to store this
+052   * in-memory state is what you supply here: e.g. <code>/tmp/bucketcache.map</code>.
+053   */
+054  public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path";
+055
+056  public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads";
+057
+058  public static final String BUCKET_CACHE_WRITER_QUEUE_KEY = "hbase.bucketcache.writer.queuelength";
+059
+060  /**
+061   * A comma-delimited array of values for use as bucket sizes.
+062   */
+063  public static final String BUCKET_CACHE_BUCKETS_KEY = "hbase.bucketcache.bucket.sizes";
+064
+065  /**
+066   * Defaults for Bucket cache
+067

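For context, a minimal sketch (assuming a standard Hadoop/HBase classpath) of supplying the bucket-cache keys named above through a Hadoop Configuration. The values are illustrative, not recommendations.

// BucketCacheConfigSketch.java - keys come from the source above; values are made up.
import org.apache.hadoop.conf.Configuration;

public class BucketCacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Backing engine; a file: ioengine can persist across restarts.
    conf.set("hbase.bucketcache.ioengine", "file:/tmp/bucketcache.data");
    // Per the Javadoc above: where to persist the in-memory map of what is
    // in the data file, so the cache contents survive a restart.
    conf.set("hbase.bucketcache.persistent.path", "/tmp/bucketcache.map");
    // Writer parallelism, queue length, and comma-delimited bucket sizes.
    conf.set("hbase.bucketcache.writer.threads", "3");
    conf.set("hbase.bucketcache.writer.queuelength", "64");
    conf.set("hbase.bucketcache.bucket.sizes", "8192,16384,65536");
    System.out.println(conf.get("hbase.bucketcache.ioengine"));
  }
}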
[18/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import org.apache.hadoop.hbase.wal.WALFactory;
 136import org.apache.hadoop.hbase.wal.WALSplitter;
-137import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import org.apache.hadoop.ipc.RemoteException;
-143import org.apache.hadoop.security.UserGroupInformation;
-144import org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import org.apache.hadoop.util.ToolRunner;
-147import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import org.apache.yetus.audience.InterfaceAudience;
-150import org.apache.yetus.audience.InterfaceStability;
-151import org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
-168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
-171 *
-172 * <p>
-173 * Region consistency checks verify that hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS (.regioninfo files) all are in
-175 * accordance.
-176 * <p>
-177 * Table integrity checks verify that all possible row keys resolve to exactly
-178 * one region of a table.  This means there are no individual degenerate
-179 * or backwards regions; no holes between regions; and that there are no
-180 * overlapping regions.
-181 * <p>
-182 * The general repair strategy works in two phases:
-183 * <ol>
-184 * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
-185 * <li> Repair Region Consistency with hbase:meta and assignments
-186 * </ol>
-187 * <p>
-188 * For table integrity repairs, the tables' region directories are scanned
-189 * for .regioninfo files.  Each table's integrity is then verified.  If there
-190 * are any orphan regions (regions with no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
-193 * a new region is created and all data is merged into the new region.
-194 * <p>
-195 * Table integrity repairs deal solely with HDFS and could potentially be done
-196 * offline -- the hbase region servers or master do not need to be running.
-197 * This phase can eventually be used to completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * <p>
-200 * Region consistency requires three conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at the regionserver that was assigned to
-203 * with proper state in the master.
-204 * <p>
-205 * Region consistency repairs require hbase to be online so that hbck can
-206 * contact the HBase master and region servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of the region consistency information
-208 * is transient and less risky to repair.
-209 * <p>
-210 * If hbck is run from the command line,

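For reference, a tiny sketch of the three region-consistency conditions the Javadoc enumerates; the booleans stand in for the real HDFS, hbase:meta and assignment lookups that hbck performs.

// RegionConsistencySketch.java - illustrative only, not hbck's implementation.
public class RegionConsistencySketch {
  static boolean isConsistent(boolean regioninfoInHdfs,
                              boolean rowInMeta,
                              boolean deployedOnlyOnAssignedServer) {
    // 1) valid .regioninfo file in the HDFS region dir,
    // 2) valid row with .regioninfo data in hbase:meta,
    // 3) deployed only at the regionserver the master assigned it to.
    return regioninfoInHdfs && rowInMeta && deployedOnlyOnAssignedServer;
  }

  public static void main(String[] args) {
    System.out.println(isConsistent(true, true, true));  // consistent
    System.out.println(isConsistent(true, false, true)); // hole in meta
  }
}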
[18/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
index 36d3241..b062755 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";


-static class HRegion.MutationBatchOperation
+static class HRegion.MutationBatchOperation
 extends HRegion.BatchOperation<Mutation>
 Batch of mutation operations. Base class is shared with HRegion.ReplayBatchOperation as most
 of the logic is same.
@@ -342,7 +342,7 @@ extends

 nonceGroup
-private long nonceGroup
+private long nonceGroup
@@ -351,7 +351,7 @@ extends

 nonce
-private long nonce
+private long nonce
@@ -368,7 +368,7 @@ extends

 MutationBatchOperation
-public MutationBatchOperation(HRegion region,
+public MutationBatchOperation(HRegion region,
    Mutation[] operations,
    boolean atomic,
    long nonceGroup,
@@ -389,7 +389,7 @@ extends

 getMutation
-public Mutation getMutation(int index)
+public Mutation getMutation(int index)

 Specified by:
 getMutation in class HRegion.BatchOperation<Mutation>
@@ -402,7 +402,7 @@ extends

 getNonceGroup
-public long getNonceGroup(int index)
+public long getNonceGroup(int index)

 Specified by:
 getNonceGroup in class HRegion.BatchOperation<Mutation>
@@ -415,7 +415,7 @@ extends

 getNonce
-public long getNonce(int index)
+public long getNonce(int index)

 Specified by:
 getNonce in class HRegion.BatchOperation<Mutation>
@@ -428,7 +428,7 @@ extends

 getMutationsForCoprocs
-public Mutation[] getMutationsForCoprocs()
+public Mutation[] getMutationsForCoprocs()
 Description copied from class: HRegion.BatchOperation
 This method is potentially expensive and useful mostly for non-replay CP path.

@@ -443,7 +443,7 @@ extends

 isInReplay
-public boolean isInReplay()
+public boolean isInReplay()

 Specified by:
 isInReplay in class HRegion.BatchOperation<Mutation>
@@ -456,7 +456,7 @@ extends

 getOrigLogSeqNum
-public long getOrigLogSeqNum()
+public long getOrigLogSeqNum()

 Specified by:
 getOrigLogSeqNum in class HRegion.BatchOperation<Mutation>
@@ -469,7 +469,7 @@ extends

 startRegionOperation
-public void startRegionOperation()
+public void startRegionOperation()
    throws IOException

 Specified by:
@@ -485,7 +485,7 @@ extends

 closeRegionOperation
-public void closeRegionOperation()
+public void closeRegionOperation()
    throws IOException

 Specified by:
@@ -501,7 +501,7 @@ extends

 checkAndPreparePut
-public void checkAndPreparePut(Put p)
+public void checkAndPreparePut(Put p)
    throws IOException
 Description copied from class: HRegion.BatchOperation
 Implement any Put request specific check and prepare logic here. Please refer to
@@ -520,7 +520,7 @@ extends

 checkAndPrepare
-public void checkAndPrepare()
+public void checkAndPrepare()
    throws IOException
 Description copied from class: HRegion.BatchOperation
 Validates each mutation and prepares a batch for write. If necessary (non-replay case), runs
@@ -542,7 +542,7 @@ extends

 prepareMiniBatchOperations
-public void prepareMiniBatchOperations(MiniBatchOperationInProgress<Mutation> miniBatchOp,
+public void prepareMiniBatchOperations(MiniBatchOperationInProgress<Mutation> miniBatchOp,
    long timestamp,
    List<Region.RowLock> acquiredRowLocks)
    throws IOException
@@ -563,7 +563,7 @@ extends

 buildWALEdits
-public List<Pair<NonceKey,WALEdit>> buildWALEdits(MiniBatchOperationInProgress<Mutation> miniBatchOp)

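For context, a hypothetical sketch of the template-method shape this page documents: the base batch operation fixes the lifecycle (validate, then build WAL edits) and subclasses like MutationBatchOperation fill in the steps. The skeleton below is illustrative; HBase's real BatchOperation is far richer.

// BatchLifecycleSketch.java - invented names, not the HBase classes.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

abstract class BatchLifecycleSketch {
  abstract void checkAndPrepare() throws IOException; // validate every op
  abstract List<String> buildWalEdits();              // one edit per op

  // The fixed lifecycle all batch types share.
  final List<String> run() throws IOException {
    checkAndPrepare();
    return buildWalEdits();
  }
}

class MutationBatchSketch extends BatchLifecycleSketch {
  private final List<String> mutations;

  MutationBatchSketch(List<String> mutations) {
    this.mutations = mutations;
  }

  @Override
  void checkAndPrepare() throws IOException {
    for (String m : mutations) {
      if (m.isEmpty()) {
        throw new IOException("empty mutation");
      }
    }
  }

  @Override
  List<String> buildWalEdits() {
    List<String> edits = new ArrayList<>();
    for (String m : mutations) {
      edits.add("edit:" + m);
    }
    return edits;
  }
}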
[18/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/src-html/org/apache/hadoop/hbase/security/access/Permission.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/Permission.html b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/Permission.html
index 042bf4a..cb2cfdc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/Permission.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/Permission.html
@@ -30,194 +30,226 @@
 022import java.io.DataOutput;
 023import java.io.IOException;
 024import java.util.Arrays;
-025import java.util.Map;
-026
-027import org.apache.yetus.audience.InterfaceAudience;
-028import org.slf4j.Logger;
-029import org.slf4j.LoggerFactory;
-030import org.apache.hadoop.hbase.util.Bytes;
-031import org.apache.hadoop.io.VersionedWritable;
-032
-033import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-034
-035/**
-036 * Base permissions instance representing the ability to perform a given set
-037 * of actions.
-038 *
-039 * @see TablePermission
-040 */
-041@InterfaceAudience.Public
-042public class Permission extends VersionedWritable {
-043  protected static final byte VERSION = 0;
-044
-045  @InterfaceAudience.Public
-046  public enum Action {
-047    READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A');
-048
-049    private final byte code;
-050    Action(char code) {
-051      this.code = (byte)code;
-052    }
-053
-054    public byte code() { return code; }
-055  }
-056
-057  private static final Logger LOG = LoggerFactory.getLogger(Permission.class);
-058  protected static final Map<Byte, Action> ACTION_BY_CODE = Maps.newHashMap();
-059
-060  protected Action[] actions;
+025import java.util.EnumSet;
+026import java.util.Map;
+027
+028import org.apache.yetus.audience.InterfaceAudience;
+029import org.slf4j.Logger;
+030import org.slf4j.LoggerFactory;
+031import org.apache.hadoop.hbase.util.Bytes;
+032import org.apache.hadoop.io.VersionedWritable;
+033
+034import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
+035
+036/**
+037 * Base permissions instance representing the ability to perform a given set
+038 * of actions.
+039 *
+040 * @see TablePermission
+041 */
+042@InterfaceAudience.Public
+043public class Permission extends VersionedWritable {
+044  protected static final byte VERSION = 0;
+045
+046  @InterfaceAudience.Public
+047  public enum Action {
+048    READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A');
+049
+050    private final byte code;
+051    Action(char code) {
+052      this.code = (byte) code;
+053    }
+054
+055    public byte code() { return code; }
+056  }
+057
+058  @InterfaceAudience.Private
+059  protected enum Scope {
+060    GLOBAL('G'), NAMESPACE('N'), TABLE('T'), EMPTY('E');
 061
-062  static {
-063    for (Action a : Action.values()) {
-064      ACTION_BY_CODE.put(a.code(), a);
+062    private final byte code;
+063    Scope(char code) {
+064      this.code = (byte) code;
 065    }
-066  }
-067
-068  /** Empty constructor for Writable implementation.  <b>Do not use.</b> */
-069  public Permission() {
-070    super();
-071  }
-072
-073  public Permission(Action... assigned) {
-074    if (assigned != null && assigned.length > 0) {
-075      actions = Arrays.copyOf(assigned, assigned.length);
-076    }
-077  }
-078
-079  public Permission(byte[] actionCodes) {
-080    if (actionCodes != null) {
-081      Action acts[] = new Action[actionCodes.length];
-082      int j = 0;
-083      for (int i = 0; i < actionCodes.length; i++) {
-084        byte b = actionCodes[i];
-085        Action a = ACTION_BY_CODE.get(b);
-086        if (a == null) {
-087          LOG.error("Ignoring unknown action code '" +
-088              Bytes.toStringBinary(new byte[]{b}) + "'");
-089          continue;
-090        }
-091        acts[j++] = a;
-092      }
-093      this.actions = Arrays.copyOf(acts, j);
-094    }
+066
+067    public byte code() {
+068      return code;
+069    }
+070  }
+071
+072  private static final Logger LOG = LoggerFactory.getLogger(Permission.class);
+073
+074  protected static final Map<Byte, Action> ACTION_BY_CODE;
+075  protected static final Map<Byte, Scope> SCOPE_BY_CODE;
+076
+077  protected EnumSet<Action> actions = EnumSet.noneOf(Action.class);
+078  protected Scope scope = Scope.EMPTY;
+079
+080  static {
+081    ACTION_BY_CODE = ImmutableMap.of(
+082      Action.READ.code, Action.READ,
+083      Action.WRITE.code, Action.WRITE,
+084      Action.EXEC.code, Action.EXEC,
+085      Action.CREATE.code, Action.CREATE,
+086      Action.ADMIN.code, Action.ADMIN
+087    );
+088
+089    SCOPE_BY_CODE = ImmutableMap.of(
+090      Scope.GLOBAL.code, Scope.GLOBAL,
+091      Scope.NAMESPACE.code, Scope.NAMESPACE,
+092      Scope.TABLE.code, Scope.TABLE,
+093      Scope.EMPTY.code, Scope.EMPTY
+094    );
 095  }
 096
-097  public

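For reference, a minimal, self-contained sketch of the Action-code round trip shown in the Permission diff: each action carries a one-byte code, a static map decodes bytes back to actions, and the held actions live in an EnumSet. This mirrors the pattern, not the exact HBase class.

// PermissionSketch.java - illustrative only, not org.apache.hadoop.hbase.security.access.Permission.
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;

public class PermissionSketch {
  enum Action {
    READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A');

    final byte code;

    Action(char code) {
      this.code = (byte) code;
    }
  }

  static final Map<Byte, Action> ACTION_BY_CODE = new HashMap<>();
  static {
    for (Action a : Action.values()) {
      ACTION_BY_CODE.put(a.code, a);
    }
  }

  public static void main(String[] args) {
    // Decode a byte[] of action codes, skipping unknown codes as the diff does.
    byte[] codes = {'R', 'W', '?'};
    EnumSet<Action> actions = EnumSet.noneOf(Action.class);
    for (byte b : codes) {
      Action a = ACTION_BY_CODE.get(b);
      if (a != null) {
        actions.add(a);
      }
    }
    System.out.println(actions); // [READ, WRITE]
  }
}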
[18/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index b2a9771..bf81ebb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -46,3768 +46,3806 @@
 038import java.util.Objects;
 039import java.util.Set;
 040import java.util.SortedMap;
-041import java.util.TreeMap;
-042import java.util.TreeSet;
-043import java.util.concurrent.ConcurrentHashMap;
-044import java.util.concurrent.ConcurrentMap;
-045import java.util.concurrent.ConcurrentSkipListMap;
-046import java.util.concurrent.atomic.AtomicBoolean;
-047import java.util.concurrent.locks.ReentrantReadWriteLock;
-048import java.util.function.Function;
-049import javax.management.MalformedObjectNameException;
-050import javax.servlet.http.HttpServlet;
-051import org.apache.commons.lang3.RandomUtils;
-052import org.apache.commons.lang3.StringUtils;
-053import org.apache.commons.lang3.SystemUtils;
-054import org.apache.hadoop.conf.Configuration;
-055import org.apache.hadoop.fs.FileSystem;
-056import org.apache.hadoop.fs.Path;
-057import org.apache.hadoop.hbase.Abortable;
-058import org.apache.hadoop.hbase.CacheEvictionStats;
-059import org.apache.hadoop.hbase.ChoreService;
-060import org.apache.hadoop.hbase.ClockOutOfSyncException;
-061import org.apache.hadoop.hbase.CoordinatedStateManager;
-062import org.apache.hadoop.hbase.DoNotRetryIOException;
-063import org.apache.hadoop.hbase.HBaseConfiguration;
-064import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-065import org.apache.hadoop.hbase.HConstants;
-066import org.apache.hadoop.hbase.HealthCheckChore;
-067import org.apache.hadoop.hbase.MetaTableAccessor;
-068import org.apache.hadoop.hbase.NotServingRegionException;
-069import org.apache.hadoop.hbase.PleaseHoldException;
-070import org.apache.hadoop.hbase.ScheduledChore;
-071import org.apache.hadoop.hbase.ServerName;
-072import org.apache.hadoop.hbase.Stoppable;
-073import org.apache.hadoop.hbase.TableDescriptors;
-074import org.apache.hadoop.hbase.TableName;
-075import org.apache.hadoop.hbase.YouAreDeadException;
-076import org.apache.hadoop.hbase.ZNodeClearer;
-077import org.apache.hadoop.hbase.client.ClusterConnection;
-078import org.apache.hadoop.hbase.client.Connection;
-079import org.apache.hadoop.hbase.client.ConnectionUtils;
-080import org.apache.hadoop.hbase.client.RegionInfo;
-081import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-082import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-083import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-084import org.apache.hadoop.hbase.client.locking.EntityLock;
-085import org.apache.hadoop.hbase.client.locking.LockServiceClient;
-086import org.apache.hadoop.hbase.conf.ConfigurationManager;
-087import org.apache.hadoop.hbase.conf.ConfigurationObserver;
-088import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination;
-089import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
-090import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-091import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-092import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
-093import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-094import org.apache.hadoop.hbase.executor.ExecutorService;
-095import org.apache.hadoop.hbase.executor.ExecutorType;
-096import org.apache.hadoop.hbase.fs.HFileSystem;
-097import org.apache.hadoop.hbase.http.InfoServer;
-098import org.apache.hadoop.hbase.io.hfile.BlockCache;
-099import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-100import org.apache.hadoop.hbase.io.hfile.HFile;
-101import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
-102import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-103import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper;
-104import org.apache.hadoop.hbase.ipc.RpcClient;
-105import org.apache.hadoop.hbase.ipc.RpcClientFactory;
-106import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-107import org.apache.hadoop.hbase.ipc.RpcServer;
-108import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-109import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-110import org.apache.hadoop.hbase.ipc.ServerRpcController;
-111import org.apache.hadoop.hbase.log.HBaseMarkers;
-112import org.apache.hadoop.hbase.master.HMaster;
-113import org.apache.hadoop.hbase.master.LoadBalancer;
-114import org.apache.hadoop.hbase.master.RegionState.State;
-115import org.apache.hadoop.hbase.mob.MobCacheConfig;
-116import

[18/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
index c7d99b2..9d1542c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCostFunction.html
@@ -382,1357 +382,1365 @@
 374for (int i = 0; i  
this.curFunctionCosts.length; i++) {
 375  curFunctionCosts[i] = 
tempFunctionCosts[i];
 376}
-377LOG.info("start 
StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-378+ functionCost());
+377double initCost = currentCost;
+378double newCost = currentCost;
 379
-380double initCost = currentCost;
-381double newCost = currentCost;
-382
-383long computedMaxSteps;
-384if (runMaxSteps) {
-385  computedMaxSteps = 
Math.max(this.maxSteps,
-386  ((long)cluster.numRegions * 
(long)this.stepsPerRegion * (long)cluster.numServers));
-387} else {
-388  computedMaxSteps = 
Math.min(this.maxSteps,
-389  ((long)cluster.numRegions * 
(long)this.stepsPerRegion * (long)cluster.numServers));
-390}
-391// Perform a stochastic walk to see 
if we can get a good fit.
-392long step;
-393
-394for (step = 0; step  
computedMaxSteps; step++) {
-395  Cluster.Action action = 
nextAction(cluster);
-396
-397  if (action.type == Type.NULL) {
-398continue;
-399  }
-400
-401  cluster.doAction(action);
-402  updateCostsWithAction(cluster, 
action);
-403
-404  newCost = computeCost(cluster, 
currentCost);
-405
-406  // Should this be kept?
-407  if (newCost  currentCost) {
-408currentCost = newCost;
-409
-410// save for JMX
-411curOverallCost = currentCost;
-412for (int i = 0; i  
this.curFunctionCosts.length; i++) {
-413  curFunctionCosts[i] = 
tempFunctionCosts[i];
-414}
-415  } else {
-416// Put things back the way they 
were before.
-417// TODO: undo by remembering old 
values
-418Action undoAction = 
action.undoAction();
-419cluster.doAction(undoAction);
-420updateCostsWithAction(cluster, 
undoAction);
-421  }
-422
-423  if 
(EnvironmentEdgeManager.currentTime() - startTime 
-424  maxRunningTime) {
-425break;
-426  }
-427}
-428long endTime = 
EnvironmentEdgeManager.currentTime();
-429
-430
metricsBalancer.balanceCluster(endTime - startTime);
-431
-432// update costs metrics
-433updateStochasticCosts(tableName, 
curOverallCost, curFunctionCosts);
-434if (initCost  currentCost) {
-435  plans = 
createRegionPlans(cluster);
-436  LOG.info("Finished computing new 
load balance plan. Computation took {}" +
-437" to try {} different iterations. 
 Found a solution that moves " +
-438"{} regions; Going from a 
computed cost of {}" +
-439" to a new cost of {}", 
java.time.Duration.ofMillis(endTime - startTime),
-440step, plans.size(), initCost, 
currentCost);
-441  return plans;
-442}
-443LOG.info("Could not find a better 
load balance plan.  Tried {} different configurations in " +
-444  "{}, and did not find anything with 
a computed cost less than {}", step,
-445  java.time.Duration.ofMillis(endTime 
- startTime), initCost);
-446return null;
-447  }
-448
-449  /**
-450   * update costs to JMX
-451   */
-452  private void 
updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) 
{
-453if (tableName == null) return;
-454
-455// check if the metricsBalancer is 
MetricsStochasticBalancer before casting
-456if (metricsBalancer instanceof 
MetricsStochasticBalancer) {
-457  MetricsStochasticBalancer balancer 
= (MetricsStochasticBalancer) metricsBalancer;
-458  // overall cost
-459  
balancer.updateStochasticCost(tableName.getNameAsString(),
-460"Overall", "Overall cost", 
overall);
-461
-462  // each cost function
-463  for (int i = 0; i < costFunctions.length; i++) {
-464CostFunction costFunction = 
costFunctions[i];
-465String costFunctionName = 
costFunction.getClass().getSimpleName();
-466Double costPercent = (overall == 
0) ? 0 : (subCosts[i] / overall);
-467// TODO: cost function may need a 
specific description
-468
balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-469  "The percent of " + 
costFunctionName, costPercent);
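The excerpt above is the heart of the balancer: propose a random action, apply it, recompute the cost, keep the action only if the cost dropped, otherwise undo it, all under a step budget and a wall-clock budget. A self-contained Java sketch of that accept-or-undo pattern (Cluster, Action, costOf and randomAction are hypothetical stand-ins, not the HBase balancer types):

    interface Action { Action undo(); }
    interface Cluster { void apply(Action a); }

    final class StochasticWalk {
      // Greedy stochastic walk: only strict cost improvements are kept.
      static double walk(Cluster cluster,
          java.util.function.ToDoubleFunction<Cluster> costOf,
          java.util.function.Function<Cluster, Action> randomAction,
          long maxSteps, long maxRunningTimeMs) {
        long start = System.currentTimeMillis();
        double currentCost = costOf.applyAsDouble(cluster);
        for (long step = 0; step < maxSteps; step++) {
          Action action = randomAction.apply(cluster);   // propose a random move
          cluster.apply(action);
          double newCost = costOf.applyAsDouble(cluster);
          if (newCost < currentCost) {
            currentCost = newCost;                       // keep the improvement
          } else {
            cluster.apply(action.undo());                // revert a bad move
          }
          if (System.currentTimeMillis() - start > maxRunningTimeMs) {
            break;                                       // respect the time budget
          }
        }
        return currentCost;
      }
    }

The real balancer layers weighted cost functions on top of this loop (the curFunctionCosts bookkeeping above) and only emits region plans when the final cost beats the initial one.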

[18/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
index 5ecb88d..62158d8 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":9,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":
 
10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":9,"i131":10};
+var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":9,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":
 
10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":9,"i132":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -868,22 +868,26 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 void
-testScanner_DeleteOneFamilyNotAnother()
+testReverseScanWhenPutCellsAfterOpenReverseScan()
 
 
 void
-testScanner_ExplicitColumns_FromFilesOnly_EnforceVersions()
+testScanner_DeleteOneFamilyNotAnother()
 
 
 void
-testScanner_ExplicitColumns_FromMemStore_EnforceVersions()
+testScanner_ExplicitColumns_FromFilesOnly_EnforceVersions()
 
 
 void
-testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions()
+testScanner_ExplicitColumns_FromMemStore_EnforceVersions()
 
 
 void
+testScanner_ExplicitColumns_FromMemStoreAndFiles_EnforceVersions()
+
+
+void
 testScanner_JoinedScanners()
 Added for HBASE-5416
 
@@ -891,7 +895,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
  conditions.
 
 
-
+
 void
 testScanner_JoinedScannersWithLimits()
 HBASE-5416
@@ -899,82 +903,82 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
  Test case when scan limits amount of KVs returned on each next() call.
 
 
-
+
 void
 testScanner_StopRow1542()
 
-
+
 void
 testScanner_Wildcard_FromFilesOnly_EnforceVersions()
 
-
+
 void
 testScanner_Wildcard_FromMemStore_EnforceVersions()
 
-
+
 void
 testScanner_Wildcard_FromMemStoreAndFiles_EnforceVersions()
 
-
+
 void
 testSequenceId()
 Test that I can use the max flushed sequence id after the 
close.
 
 
-
+
 void
 testSkipRecoveredEditsReplay()
 
-
+
 void
 testSkipRecoveredEditsReplayAllIgnored()
 
-
+
 void
 testSkipRecoveredEditsReplaySomeIgnored()
 
-
+
 void
 testSkipRecoveredEditsReplayTheLastFileIgnored()
 
-
+
 void
 testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization()
 Testcase to check state of region initialization task set 
to ABORTED 

[18/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
index dd2a3cf..9763618 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
@@ -377,7 +377,7 @@ extends Procedure
-addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode, getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner, setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout, shouldWaitClientAck,
 skipPersistence,
 toString,
 toStringClass,
 toStringClassDetails,
 toStringDetails,
 toStringSimpleSB,
 updateMetricsOnFinish, updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
+addStackIndex,
 afterReplay,
 beforeReplay,
 bypass,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId, getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent, incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex, setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner, setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime, setTimeout,
 shouldWaitClientAck,
 skipPersistence,
 toString,
 toStringClass,
 toStringClassDetails,
 toStringDetails,
 toStringSimpleSB, updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/replication/RecoverStandbyProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/RecoverStandbyProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/replication/RecoverStandbyProcedure.html
index dd93830..e7e7564 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/replication/RecoverStandbyProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/replication/RecoverStandbyProcedure.html
@@ -310,7 +310,7 @@ extends Procedure
-addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode, getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner, setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout, shouldWaitClientAck,
 skipPersistence,
 toString,
 toStringClass,
 toStringClassDetails,
 toStringDetails,
 toStringSimpleSB,
 updateMetricsOnFinish, updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
+addStackIndex,
 afterReplay,
 beforeReplay,
 bypass,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId, getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
[18/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestMobRestoreSnapshotFromClientGetCompactionState.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestMobRestoreSnapshotFromClientGetCompactionState.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestMobRestoreSnapshotFromClientGetCompactionState.html
new file mode 100644
index 000..41e1f47
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestMobRestoreSnapshotFromClientGetCompactionState.html
@@ -0,0 +1,125 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+
+
+
+
+Uses of Class 
org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientGetCompactionState
 (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+
+
+
+Uses of 
Classorg.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientGetCompactionState
+
+No usage of 
org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientGetCompactionState
+
+
+
+
+
+
+
+
+
+
+
+
+
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestMobRestoreSnapshotFromClientSchemaChange.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestMobRestoreSnapshotFromClientSchemaChange.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestMobRestoreSnapshotFromClientSchemaChange.html
new file mode 100644
index 000..4075001
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestMobRestoreSnapshotFromClientSchemaChange.html
@@ -0,0 +1,125 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+
+
+
+
+Uses of Class 
org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientSchemaChange 
(Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+
+
+
+Uses of 
Classorg.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientSchemaChange
+
+No usage of 
org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientSchemaChange
+
+
+
+
+
+
+
+
+
+
+
+
+
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestMobRestoreSnapshotFromClientSimple.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/TestMobRestoreSnapshotFromClientSimple.html
 

[18/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
index bd1b3f6..0c69df9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.html
@@ -31,430 +31,441 @@
 023import java.util.Iterator;
 024import java.util.Map;
 025import java.util.TreeMap;
-026
+026import java.util.stream.LongStream;
 027import 
org.apache.yetus.audience.InterfaceAudience;
 028import 
org.apache.yetus.audience.InterfaceStability;
-029import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-030
-031/**
-032 * Keeps track of live procedures.
-033 *
-034 * It can be used by the ProcedureStore 
to identify which procedures are already
-035 * deleted/completed to avoid the 
deserialization step on restart
-036 */
-037@InterfaceAudience.Private
-038@InterfaceStability.Evolving
-039public class ProcedureStoreTracker {
-040  // Key is procedure id corresponding to 
first bit of the bitmap.
-041  private final TreeMap<Long, BitSetNode> map = new TreeMap<>();
-042
-043  /**
-044   * If true, do not remove bits 
corresponding to deleted procedures. Note that this can result
-045   * in huge bitmaps overtime.
-046   * Currently, it's set to true only 
when building tracker state from logs during recovery. During
-047   * recovery, if we are sure that a 
procedure has been deleted, reading its old update entries
-048   * can be skipped.
-049   */
-050  private boolean keepDeletes = false;
-051  /**
-052   * If true, it means tracker has 
incomplete information about the active/deleted procedures.
-053   * It's set to true only when 
recovering from old logs. See {@link #isDeleted(long)} docs to
-054   * understand it's real use.
-055   */
-056  boolean partial = false;
-057
-058  private long minModifiedProcId = 
Long.MAX_VALUE;
-059  private long maxModifiedProcId = 
Long.MIN_VALUE;
-060
-061  public enum DeleteState { YES, NO, 
MAYBE }
-062
-063  public void 
resetToProto(ProcedureProtos.ProcedureStoreTracker trackerProtoBuf) {
-064reset();
-065for 
(ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode: 
trackerProtoBuf.getNodeList()) {
-066  final BitSetNode node = new 
BitSetNode(protoNode);
-067  map.put(node.getStart(), node);
-068}
-069  }
-070
-071  /**
-072   * Resets internal state to same as 
given {@code tracker}. Does deep copy of the bitmap.
-073   */
-074  public void 
resetTo(ProcedureStoreTracker tracker) {
-075resetTo(tracker, false);
-076  }
-077
-078  /**
-079   * Resets internal state to same as 
given {@code tracker}, and change the deleted flag according
-080   * to the modified flag if {@code 
resetDelete} is true. Does deep copy of the bitmap.
-081   * <p/>
-082   * The {@code resetDelete} will be set 
to true when building cleanup tracker, please see the
-083   * comments in {@link 
BitSetNode#BitSetNode(BitSetNode, boolean)} to learn how we change the
-084   * deleted flag if {@code resetDelete} 
is true.
-085   */
-086  public void 
resetTo(ProcedureStoreTracker tracker, boolean resetDelete) {
-087reset();
-088this.partial = tracker.partial;
-089this.minModifiedProcId = 
tracker.minModifiedProcId;
-090this.maxModifiedProcId = 
tracker.maxModifiedProcId;
-091this.keepDeletes = 
tracker.keepDeletes;
-092for (Map.Entry<Long, BitSetNode> entry : tracker.map.entrySet()) {
-093  map.put(entry.getKey(), new BitSetNode(entry.getValue(), resetDelete));
-094}
-095  }
-096
-097  public void insert(long procId) {
-098insert(null, procId);
-099  }
-100
-101  public void insert(long[] procIds) {
-102for (int i = 0; i < procIds.length; ++i) {
-103  insert(procIds[i]);
-104}
-105  }
-106
-107  public void insert(long procId, long[] 
subProcIds) {
-108BitSetNode node = update(null, 
procId);
-109for (int i = 0; i < subProcIds.length; ++i) {
-110  node = insert(node, subProcIds[i]);
-111}
-112  }
-113
-114  private BitSetNode insert(BitSetNode 
node, long procId) {
-115if (node == null || 
!node.contains(procId)) {
-116  node = getOrCreateNode(procId);
-117}
-118node.insertOrUpdate(procId);
-119trackProcIds(procId);
-120return node;
-121  }
-122
-123  public void update(long procId) {
-124update(null, procId);
-125  }
-126
-127  private BitSetNode update(BitSetNode 
node, long procId) {
-128node = lookupClosestNode(node, 
procId);
-129assert node != null : "expected node 
to update procId=" + procId;
-130assert node.contains(procId) : 
"expected procId=" + procId + " in the node";
-131
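The javadoc above describes a per-procedure bitmap keyed by procedure id, so a restart can skip deserializing procedures that are already deleted or completed. A much-simplified sketch of that idea with a single java.util.BitSet (the real tracker shards ids across BitSetNode chunks in a TreeMap, as the code above shows):

    import java.util.BitSet;

    final class SimpleProcTracker {
      private final BitSet live = new BitSet();   // bit set => procedure still live

      void insert(long procId) { live.set(checkedIndex(procId)); }
      void delete(long procId) { live.clear(checkedIndex(procId)); }
      boolean isDeleted(long procId) { return !live.get(checkedIndex(procId)); }

      // The sketch only handles ids that fit in an int; the real class
      // supports the full long range by partitioning ids into nodes.
      private static int checkedIndex(long procId) {
        if (procId < 0 || procId > Integer.MAX_VALUE) {
          throw new IllegalArgumentException("procId out of sketch range: " + procId);
        }
        return (int) procId;
      }
    }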

[18/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index 43c66a8..061ce80 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -23,2136 +23,2142 @@
 015 * See the License for the specific 
language governing permissions and
 016 * limitations under the License.
 017 */
-018
-019package 
org.apache.hadoop.hbase.procedure2;
-020
-021import java.io.IOException;
-022import java.util.ArrayDeque;
-023import java.util.ArrayList;
-024import java.util.Arrays;
-025import java.util.Collection;
-026import java.util.Deque;
-027import java.util.HashSet;
-028import java.util.Iterator;
-029import java.util.List;
-030import java.util.Map;
-031import java.util.Objects;
-032import java.util.Set;
-033import 
java.util.concurrent.ConcurrentHashMap;
-034import 
java.util.concurrent.CopyOnWriteArrayList;
-035import java.util.concurrent.TimeUnit;
-036import 
java.util.concurrent.atomic.AtomicBoolean;
-037import 
java.util.concurrent.atomic.AtomicInteger;
-038import 
java.util.concurrent.atomic.AtomicLong;
-039import java.util.stream.Collectors;
-040import java.util.stream.Stream;
-041
-042import 
org.apache.hadoop.conf.Configuration;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-045import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-046import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-047import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-048import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-049import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-050import 
org.apache.hadoop.hbase.security.User;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052import 
org.apache.hadoop.hbase.util.IdLock;
-053import 
org.apache.hadoop.hbase.util.NonceKey;
-054import 
org.apache.hadoop.hbase.util.Threads;
-055import 
org.apache.yetus.audience.InterfaceAudience;
-056import org.slf4j.Logger;
-057import org.slf4j.LoggerFactory;
-058
-059import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-060import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+018package 
org.apache.hadoop.hbase.procedure2;
+019
+020import java.io.IOException;
+021import java.util.ArrayDeque;
+022import java.util.ArrayList;
+023import java.util.Arrays;
+024import java.util.Collection;
+025import java.util.Deque;
+026import java.util.HashSet;
+027import java.util.Iterator;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Objects;
+031import java.util.Set;
+032import 
java.util.concurrent.ConcurrentHashMap;
+033import 
java.util.concurrent.CopyOnWriteArrayList;
+034import java.util.concurrent.TimeUnit;
+035import 
java.util.concurrent.atomic.AtomicBoolean;
+036import 
java.util.concurrent.atomic.AtomicInteger;
+037import 
java.util.concurrent.atomic.AtomicLong;
+038import java.util.stream.Collectors;
+039import java.util.stream.Stream;
+040import 
org.apache.hadoop.conf.Configuration;
+041import 
org.apache.hadoop.hbase.HConstants;
+042import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+043import 
org.apache.hadoop.hbase.log.HBaseMarkers;
+044import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+045import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+046import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+047import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
+048import 
org.apache.hadoop.hbase.security.User;
+049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+050import 
org.apache.hadoop.hbase.util.IdLock;
+051import 
org.apache.hadoop.hbase.util.NonceKey;
+052import 
org.apache.hadoop.hbase.util.Threads;
+053import 
org.apache.yetus.audience.InterfaceAudience;
+054import org.slf4j.Logger;
+055import org.slf4j.LoggerFactory;
+056
+057import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+058import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+059
+060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 061
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-063
-064/**
-065 * Thread Pool that executes the 
submitted procedures.
-066 * The executor has a ProcedureStore 
associated.
-067 * Each operation is logged and on 
restart the pending procedures are resumed.
-068 *
-069 * Unless the Procedure code throws an 
error (e.g. invalid user input)
-070 * the 

[18/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 976894f..721035e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -3020,926 +3020,927 @@
 3012}
 3013  }
 3014
-3015  void checkServiceStarted() throws 
ServerNotRunningYetException {
-3016if (!serviceStarted) {
-3017  throw new 
ServerNotRunningYetException("Server is not running yet");
-3018}
-3019  }
-3020
-3021  public static class 
MasterStoppedException extends DoNotRetryIOException {
-3022MasterStoppedException() {
-3023  super();
-3024}
-3025  }
-3026
-3027  void checkInitialized() throws 
PleaseHoldException, ServerNotRunningYetException,
-3028  MasterNotRunningException, 
MasterStoppedException {
-3029checkServiceStarted();
-3030if (!isInitialized()) {
-3031  throw new 
PleaseHoldException("Master is initializing");
-3032}
-3033if (isStopped()) {
-3034  throw new 
MasterStoppedException();
-3035}
-3036  }
-3037
-3038  /**
-3039   * Report whether this master is 
currently the active master or not.
-3040   * If not active master, we are parked 
on ZK waiting to become active.
-3041   *
-3042   * This method is used for testing.
-3043   *
-3044   * @return true if active master, 
false if not.
-3045   */
-3046  @Override
-3047  public boolean isActiveMaster() {
-3048return activeMaster;
-3049  }
-3050
-3051  /**
-3052   * Report whether this master has 
completed with its initialization and is
-3053   * ready.  If ready, the master is 
also the active master.  A standby master
-3054   * is never ready.
-3055   *
-3056   * This method is used for testing.
-3057   *
-3058   * @return true if master is ready to 
go, false if not.
-3059   */
-3060  @Override
-3061  public boolean isInitialized() {
-3062return initialized.isReady();
-3063  }
-3064
-3065  /**
-3066   * Report whether this master is in 
maintenance mode.
-3067   *
-3068   * @return true if master is in 
maintenanceMode
-3069   */
-3070  @Override
-3071  public boolean isInMaintenanceMode() 
throws IOException {
-3072if (!isInitialized()) {
-3073  throw new 
PleaseHoldException("Master is initializing");
-3074}
-3075return 
maintenanceModeTracker.isInMaintenanceMode();
-3076  }
-3077
-3078  @VisibleForTesting
-3079  public void setInitialized(boolean 
isInitialized) {
-3080
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-3081  }
-3082
-3083  @Override
-3084  public ProcedureEvent? 
getInitializedEvent() {
-3085return initialized;
-3086  }
-3087
-3088  /**
-3089   * Compute the average load across all 
region servers.
-3090   * Currently, this uses a very naive 
computation - just uses the number of
-3091   * regions being served, ignoring 
stats about number of requests.
-3092   * @return the average load
-3093   */
-3094  public double getAverageLoad() {
-3095if (this.assignmentManager == null) 
{
-3096  return 0;
-3097}
-3098
-3099RegionStates regionStates = 
this.assignmentManager.getRegionStates();
-3100if (regionStates == null) {
-3101  return 0;
-3102}
-3103return 
regionStates.getAverageLoad();
-3104  }
-3105
-3106  /*
-3107   * @return the count of region split 
plans executed
-3108   */
-3109  public long getSplitPlanCount() {
-3110return splitPlanCount;
-3111  }
-3112
-3113  /*
-3114   * @return the count of region merge 
plans executed
-3115   */
-3116  public long getMergePlanCount() {
-3117return mergePlanCount;
-3118  }
-3119
-3120  @Override
-3121  public boolean registerService(Service 
instance) {
-3122/*
-3123 * No stacking of instances is 
allowed for a single service name
-3124 */
-3125Descriptors.ServiceDescriptor 
serviceDesc = instance.getDescriptorForType();
-3126String serviceName = 
CoprocessorRpcUtils.getServiceName(serviceDesc);
-3127if 
(coprocessorServiceHandlers.containsKey(serviceName)) {
-3128  LOG.error("Coprocessor service 
"+serviceName+
-3129  " already registered, 
rejecting request from "+instance
-3130  );
-3131  return false;
-3132}
-3133
-3134
coprocessorServiceHandlers.put(serviceName, instance);
-3135if (LOG.isDebugEnabled()) {
-3136  LOG.debug("Registered master 
coprocessor service: service="+serviceName);
-3137}
-3138return true;
-3139  }
-3140
-3141  /**
-3142   * Utility for constructing an 
instance of the passed HMaster class.
-3143   * @param masterClass
-3144   * @return HMaster instance.
-3145   */
-3146  public static HMaster 
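getAverageLoad() above is explicit that the computation is naive: the number of regions being served divided by the number of servers, ignoring request statistics. As arithmetic (hypothetical helper, not HMaster code):

    final class LoadMath {
      // e.g. 300 regions across 4 live servers => average load 75.0
      static double naiveAverageLoad(int totalRegions, int liveServers) {
        return liveServers == 0 ? 0 : (double) totalRegions / liveServers;
      }
    }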

[18/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index 8cc5add..34858d6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -2188,1428 +2188,1428 @@
 2180  }
 2181
 2182  @Override
-2183  public void postOpenDeployTasks(final 
PostOpenDeployContext context)
-2184  throws KeeperException, 
IOException {
-2185HRegion r = context.getRegion();
-2186long masterSystemTime = 
context.getMasterSystemTime();
-2187rpcServices.checkOpen();
-2188LOG.info("Post open deploy tasks for 
" + r.getRegionInfo().getRegionNameAsString());
-2189// Do checks to see if we need to 
compact (references or too many files)
-2190for (HStore s : r.stores.values()) 
{
-2191  if (s.hasReferences() || 
s.needsCompaction()) {
-2192
this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
-2193  }
-2194}
-2195long openSeqNum = 
r.getOpenSeqNum();
-2196if (openSeqNum == 
HConstants.NO_SEQNUM) {
-2197  // If we opened a region, we 
should have read some sequence number from it.
-2198  LOG.error("No sequence number 
found when opening " +
-2199
r.getRegionInfo().getRegionNameAsString());
-2200  openSeqNum = 0;
-2201}
-2202
-2203// Notify master
-2204if (!reportRegionStateTransition(new RegionStateTransitionContext(
-2205TransitionCode.OPENED, openSeqNum, masterSystemTime, r.getRegionInfo()))) {
-2206  throw new IOException("Failed to 
report opened region to master: "
-2207+ 
r.getRegionInfo().getRegionNameAsString());
-2208}
-2209
-2210triggerFlushInPrimaryRegion(r);
-2211
-2212LOG.debug("Finished post open deploy 
task for " + r.getRegionInfo().getRegionNameAsString());
-2213  }
-2214
-2215  @Override
-2216  public boolean 
reportRegionStateTransition(final RegionStateTransitionContext context) {
-2217TransitionCode code = 
context.getCode();
-2218long openSeqNum = 
context.getOpenSeqNum();
-2219long masterSystemTime = 
context.getMasterSystemTime();
-2220RegionInfo[] hris = 
context.getHris();
-2221
-2222if (TEST_SKIP_REPORTING_TRANSITION) {
-2223  // This is for testing only in 
case there is no master
-2224  // to handle the region transition 
report at all.
-2225  if (code == TransitionCode.OPENED) 
{
-2226Preconditions.checkArgument(hris 
!= null  hris.length == 1);
-2227if (hris[0].isMetaRegion()) {
-2228  try {
-2229
MetaTableLocator.setMetaLocation(getZooKeeper(), serverName,
-2230
hris[0].getReplicaId(),State.OPEN);
-2231  } catch (KeeperException e) 
{
-2232LOG.info("Failed to update 
meta location", e);
-2233return false;
-2234  }
-2235} else {
-2236  try {
-2237
MetaTableAccessor.updateRegionLocation(clusterConnection,
-2238  hris[0], serverName, 
openSeqNum, masterSystemTime);
-2239  } catch (IOException e) {
-2240LOG.info("Failed to update 
meta", e);
-2241return false;
-2242  }
-2243}
-2244  }
-2245  return true;
-2246}
-2247
-2248
ReportRegionStateTransitionRequest.Builder builder =
-2249  
ReportRegionStateTransitionRequest.newBuilder();
-2250
builder.setServer(ProtobufUtil.toServerName(serverName));
-2251RegionStateTransition.Builder 
transition = builder.addTransitionBuilder();
-2252
transition.setTransitionCode(code);
-2253if (code == TransitionCode.OPENED && openSeqNum >= 0) {
-2254  
transition.setOpenSeqNum(openSeqNum);
-2255}
-2256for (RegionInfo hri: hris) {
-2257  
transition.addRegionInfo(ProtobufUtil.toRegionInfo(hri));
-2258}
-2259ReportRegionStateTransitionRequest 
request = builder.build();
-2260int tries = 0;
-2261long pauseTime = 
INIT_PAUSE_TIME_MS;
-2262// Keep looping till we get an 
error. We want to send reports even though server is going down.
-2263// Only go down if clusterConnection 
is null. It is set to null almost as last thing as the
-2264// HRegionServer does down.
-2265while (this.clusterConnection != null && !this.clusterConnection.isClosed()) {
-2266  
RegionServerStatusService.BlockingInterface rss = rssStub;
-2267  try {
-2268if (rss == null) {
-2269  
createRegionServerStatusStub();
-2270  continue;
-2271}
-2272
ReportRegionStateTransitionResponse response =
-2273  

[18/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseHbck.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseHbck.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseHbck.html
index 370686c..2cd495b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseHbck.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseHbck.html
@@ -26,81 +26,140 @@
 018package org.apache.hadoop.hbase.client;
 019
 020import java.io.IOException;
-021import 
org.apache.hadoop.conf.Configuration;
-022import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-023import 
org.apache.yetus.audience.InterfaceAudience;
-024import org.slf4j.Logger;
-025import org.slf4j.LoggerFactory;
-026import 
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-027import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-028import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
-029import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService.BlockingInterface;
-030
-031
-032/**
-033 * Use {@link 
ClusterConnection#getHbck()} to obtain an instance of {@link Hbck} instead of
-034 * constructing
-035 * an HBaseHbck directly. This will be 
mostly used by hbck tool.
-036 *
-037 * <p>Connection should be an <i>unmanaged</i> connection obtained via
-038 * {@link ConnectionFactory#createConnection(Configuration)}.</p>
-039 *
-040 * <p>An instance of this class is lightweight and not thread-safe. A new instance should be created
-041 * by each thread. Pooling or caching of the instance is not recommended.</p>
-042 *
-043 * @see ConnectionFactory
-044 * @see ClusterConnection
-045 * @see Hbck
-046 */
-047@InterfaceAudience.Private
-048public class HBaseHbck implements Hbck 
{
-049  private static final Logger LOG = 
LoggerFactory.getLogger(HBaseHbck.class);
-050
-051  private boolean aborted;
-052  private final BlockingInterface hbck;
-053
-054  private RpcControllerFactory 
rpcControllerFactory;
-055
-056  HBaseHbck(ClusterConnection connection, 
BlockingInterface hbck) throws IOException {
-057this.hbck = hbck;
-058this.rpcControllerFactory = 
connection.getRpcControllerFactory();
-059  }
-060
-061  @Override
-062  public void close() throws IOException 
{
-063// currently does nothing
-064  }
-065
-066  @Override
-067  public void abort(String why, Throwable 
e) {
-068this.aborted = true;
-069// Currently does nothing but throw 
the passed message and exception
-070throw new RuntimeException(why, e);
-071  }
-072
-073  @Override
-074  public boolean isAborted() {
-075return this.aborted;
-076  }
-077
-078  /**
-079   * NOTE: This is a dangerous action, as 
existing running procedures for the table or regions
-080   * which belong to the table may get 
confused.
-081   */
-082  @Override
-083  public TableState 
setTableStateInMeta(TableState state) throws IOException {
-084try {
-085  GetTableStateResponse response = 
hbck.setTableStateInMeta(
-086  
rpcControllerFactory.newController(),
-087  
RequestConverter.buildSetTableStateInMetaRequest(state));
-088  return 
TableState.convert(state.getTableName(), response.getTableState());
-089} catch (ServiceException se) {
-090  LOG.debug("ServiceException while 
updating table state in meta. table={}, state={}",
-091  state.getTableName(), 
state.getState());
-092  throw new IOException(se);
-093}
-094  }
-095}
+021import java.util.List;
+022import java.util.concurrent.Callable;
+023import java.util.stream.Collectors;
+024
+025import 
org.apache.hadoop.conf.Configuration;
+026import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+027import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+028import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+029import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
+030import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
+031import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService.BlockingInterface;
+032
+033import 
org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+034
+035import 
org.apache.yetus.audience.InterfaceAudience;
+036
+037import org.slf4j.Logger;
+038import org.slf4j.LoggerFactory;
+039
+040
+041/**
+042 * Use {@link 
ClusterConnection#getHbck()} to obtain an instance of {@link Hbck} instead of
+043 * constructing an HBaseHbck directly.
+044 *
+045 * <p>Connection should be an <i>unmanaged</i> connection obtained via
+046 * {@link ConnectionFactory#createConnection(Configuration)}.</p>
+047 *
+048 * <p>NOTE: The methods in here can do damage to a cluster if applied in the wrong sequence or at
+049 * the wrong time. Use with caution. For experts only. These methods are only for the
+050 * extreme case 
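Per the javadoc above, callers obtain an Hbck from ClusterConnection#getHbck() over an unmanaged connection and can then, for example, force a table state in meta. A hedged usage sketch; the TableName/TableState helper calls are assumptions based on the surrounding HBase client API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Hbck;
    import org.apache.hadoop.hbase.client.TableState;

    final class HbckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (ClusterConnection conn =
                (ClusterConnection) ConnectionFactory.createConnection(conf);
             Hbck hbck = conn.getHbck()) {
          // Dangerous by design (see the NOTE above): overwrite the table
          // state recorded in meta, bypassing normal procedure handling.
          hbck.setTableStateInMeta(
              new TableState(TableName.valueOf("t1"), TableState.State.ENABLED));
        }
      }
    }

As the code above shows, setTableStateInMeta() wraps a single RPC and converts the response back into a TableState for the caller.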

[18/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index fe4e081..eecf20f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -44,1858 +44,1838 @@
 036import 
org.apache.hadoop.hbase.HBaseIOException;
 037import 
org.apache.hadoop.hbase.HConstants;
 038import 
org.apache.hadoop.hbase.PleaseHoldException;
-039import 
org.apache.hadoop.hbase.RegionException;
-040import 
org.apache.hadoop.hbase.ServerName;
-041import 
org.apache.hadoop.hbase.TableName;
-042import 
org.apache.hadoop.hbase.UnknownRegionException;
-043import 
org.apache.hadoop.hbase.YouAreDeadException;
-044import 
org.apache.hadoop.hbase.client.DoNotRetryRegionException;
-045import 
org.apache.hadoop.hbase.client.RegionInfo;
-046import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-047import 
org.apache.hadoop.hbase.client.Result;
-048import 
org.apache.hadoop.hbase.client.TableState;
-049import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-050import 
org.apache.hadoop.hbase.favored.FavoredNodesManager;
-051import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-052import 
org.apache.hadoop.hbase.master.LoadBalancer;
-053import 
org.apache.hadoop.hbase.master.MasterServices;
-054import 
org.apache.hadoop.hbase.master.MetricsAssignmentManager;
-055import 
org.apache.hadoop.hbase.master.NoSuchProcedureException;
-056import 
org.apache.hadoop.hbase.master.RegionPlan;
-057import 
org.apache.hadoop.hbase.master.RegionState;
-058import 
org.apache.hadoop.hbase.master.RegionState.State;
-059import 
org.apache.hadoop.hbase.master.ServerListener;
-060import 
org.apache.hadoop.hbase.master.TableStateManager;
-061import 
org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
-062import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-063import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-064import 
org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-065import 
org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-066import 
org.apache.hadoop.hbase.procedure2.Procedure;
-067import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-068import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-069import 
org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
-070import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-071import 
org.apache.hadoop.hbase.regionserver.SequenceId;
-072import 
org.apache.hadoop.hbase.util.Bytes;
-073import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-074import 
org.apache.hadoop.hbase.util.HasThread;
-075import 
org.apache.hadoop.hbase.util.Pair;
-076import 
org.apache.hadoop.hbase.util.Threads;
-077import 
org.apache.hadoop.hbase.util.VersionInfo;
-078import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-079import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-080import 
org.apache.yetus.audience.InterfaceAudience;
-081import 
org.apache.zookeeper.KeeperException;
-082import org.slf4j.Logger;
-083import org.slf4j.LoggerFactory;
+039import 
org.apache.hadoop.hbase.ServerName;
+040import 
org.apache.hadoop.hbase.TableName;
+041import 
org.apache.hadoop.hbase.UnknownRegionException;
+042import 
org.apache.hadoop.hbase.YouAreDeadException;
+043import 
org.apache.hadoop.hbase.client.DoNotRetryRegionException;
+044import 
org.apache.hadoop.hbase.client.RegionInfo;
+045import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
+046import 
org.apache.hadoop.hbase.client.Result;
+047import 
org.apache.hadoop.hbase.client.TableState;
+048import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
+049import 
org.apache.hadoop.hbase.favored.FavoredNodesManager;
+050import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
+051import 
org.apache.hadoop.hbase.master.LoadBalancer;
+052import 
org.apache.hadoop.hbase.master.MasterServices;
+053import 
org.apache.hadoop.hbase.master.MetricsAssignmentManager;
+054import 
org.apache.hadoop.hbase.master.RegionPlan;
+055import 
org.apache.hadoop.hbase.master.RegionState;
+056import 
org.apache.hadoop.hbase.master.RegionState.State;
+057import 
org.apache.hadoop.hbase.master.ServerListener;
+058import 
org.apache.hadoop.hbase.master.TableStateManager;
+059import 
org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
+060import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+061import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
+062import 
org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
+063import 

[18/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
index 51bf304..db6ac15 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
@@ -199,614 +199,635 @@
 191  protected static final int 
DEFAULT_WARN_RESPONSE_TIME = 1; // milliseconds
 192  protected static final int 
DEFAULT_WARN_RESPONSE_SIZE = 100 * 1024 * 1024;
 193
-194  protected static final ObjectMapper 
MAPPER = new ObjectMapper();
-195
-196  protected final int maxRequestSize;
-197  protected final int warnResponseTime;
-198  protected final int warnResponseSize;
+194  protected static final int 
DEFAULT_TRACE_LOG_MAX_LENGTH = 1000;
+195  protected static final String 
TRACE_LOG_MAX_LENGTH = "hbase.ipc.trace.log.max.length";
+196  protected static final String 
KEY_WORD_TRUNCATED = " TRUNCATED";
+197
+198  protected static final ObjectMapper 
MAPPER = new ObjectMapper();
 199
-200  protected final int 
minClientRequestTimeout;
-201
-202  protected final Server server;
-203  protected final List<BlockingServiceAndInterface> services;
-204
-205  protected final RpcScheduler 
scheduler;
-206
-207  protected UserProvider userProvider;
+200  protected final int maxRequestSize;
+201  protected final int warnResponseTime;
+202  protected final int warnResponseSize;
+203
+204  protected final int 
minClientRequestTimeout;
+205
+206  protected final Server server;
+207  protected final 
ListBlockingServiceAndInterface services;
 208
-209  protected final ByteBufferPool 
reservoir;
-210  // The requests and response will use 
buffers from ByteBufferPool, when the size of the
-211  // request/response is at least this 
size.
-212  // We make this to be 1/6th of the pool 
buffer size.
-213  protected final int 
minSizeForReservoirUse;
-214
-215  protected volatile boolean 
allowFallbackToSimpleAuth;
-216
-217  /**
-218   * Used to get details for scan with a scanner_id<br/>
-219   * TODO try to figure out a better way 
and remove reference from regionserver package later.
-220   */
-221  private RSRpcServices rsRpcServices;
-222
-223  @FunctionalInterface
-224  protected static interface CallCleanup 
{
-225void run();
-226  }
-227
-228  /**
-229   * Datastructure for passing a {@link 
BlockingService} and its associated class of
-230   * protobuf service interface.  For 
example, a server that fielded what is defined
-231   * in the client protobuf service would 
pass in an implementation of the client blocking service
-232   * and then its 
ClientService.BlockingInterface.class.  Used checking connection setup.
-233   */
-234  public static class 
BlockingServiceAndInterface {
-235private final BlockingService 
service;
-236private final Class? 
serviceInterface;
-237public 
BlockingServiceAndInterface(final BlockingService service,
-238final Class? 
serviceInterface) {
-239  this.service = service;
-240  this.serviceInterface = 
serviceInterface;
-241}
-242public Class? 
getServiceInterface() {
-243  return this.serviceInterface;
-244}
-245public BlockingService 
getBlockingService() {
-246  return this.service;
-247}
-248  }
-249
-250  /**
-251   * Constructs a server listening on the 
named port and address.
-252   * @param server hosting instance of 
{@link Server}. We will do authentications if an
-253   * instance else pass null for no 
authentication check.
-254   * @param name Used keying this rpc 
servers' metrics and for naming the Listener thread.
-255   * @param services A list of 
services.
-256   * @param bindAddress Where to listen
-257   * @param conf
-258   * @param scheduler
-259   * @param reservoirEnabled Enable 
ByteBufferPool or not.
-260   */
-261  public RpcServer(final Server server, 
final String name,
-262  final List<BlockingServiceAndInterface> services,
-263  final InetSocketAddress 
bindAddress, Configuration conf,
-264  RpcScheduler scheduler, boolean 
reservoirEnabled) throws IOException {
-265if (reservoirEnabled) {
-266  int poolBufSize = 
conf.getInt(ByteBufferPool.BUFFER_SIZE_KEY,
-267  
ByteBufferPool.DEFAULT_BUFFER_SIZE);
-268  // The max number of buffers to be 
pooled in the ByteBufferPool. The default value been
-269  // selected based on the #handlers 
configured. When it is read request, 2 MB is the max size
-270  // at which we will send back one 
RPC request. Means max we need 2 MB for creating the
-271  // response cell block. (Well it 
might be much lesser than this because in 2 MB size calc, 
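The constructor comment above fixes the reservoir threshold at one sixth of the pool buffer size: requests and responses smaller than that never draw from the ByteBufferPool. The arithmetic, as a hypothetical helper:

    final class ReservoirMath {
      // e.g. poolBufSize = 65536 => pooled buffers used for payloads >= 10922 bytes
      static int minSizeForReservoirUse(int poolBufSize) {
        return poolBufSize / 6;
      }
    }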

[18/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html 
b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
index 1e87652..03b7595 100644
--- a/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
+++ b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -1962,7 +1962,7 @@ public
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testapidocs/org/apache/hadoop/hbase/StartMiniClusterOption.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/StartMiniClusterOption.html 
b/testapidocs/org/apache/hadoop/hbase/StartMiniClusterOption.html
new file mode 100644
index 000..938e342
--- /dev/null
+++ b/testapidocs/org/apache/hadoop/hbase/StartMiniClusterOption.html
@@ -0,0 +1,402 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+
+
+
+
+StartMiniClusterOption (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
StartMiniClusterOption
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.StartMiniClusterOption
+
+
+
+
+
+
+
+
+@InterfaceAudience.Public
+public final class StartMiniClusterOption
+extends Object
+Options for starting up a mini cluster (including hbase, dfs and zookeeper clusters) in test.
+ The options include HDFS options to build the mini dfs cluster, Zookeeper options to build the mini zk
+ cluster, and mostly HBase options to build the mini hbase cluster.
+
+ To create an object, use a StartMiniClusterOption.Builder.
+ Example usage:
+ 
+StartMiniClusterOption option = StartMiniClusterOption.builder()
+    .numMasters(3).rsClass(MyRegionServer.class).createWALDir(true).build();
+ 
+
+ Default values can be found in StartMiniClusterOption.Builder.
+
+
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+static StartMiniClusterOption.Builder
+builder()
+
+
+String[]
+getDataNodeHosts()
+
+
+Class<? extends HMaster>
+getMasterClass()
+
+
+int
+getNumDataNodes()
+
+
+int
+getNumMasters()
+
+
+int
+getNumRegionServers()
+
+
+int
+getNumZkServers()
+
+
+Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer>
+getRsClass()
+
+
+List<Integer>
+getRsPorts()
+
+
+boolean
+isCreateRootDir()
+
+
+boolean
+isCreateWALDir()
+
+
+String
+toString()
+
+
+
+
+
+
+Methods inherited from class java.lang.Object

[18/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/filter/FilterList.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/FilterList.html 
b/devapidocs/org/apache/hadoop/hbase/filter/FilterList.html
index 04bffdc..021000b 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/FilterList.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/FilterList.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":42,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":9,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":42,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public final class FilterList
+public final class FilterList
 extends FilterBase
 Implementation of Filter that represents an 
ordered List of Filters which will be
  evaluated with a specified boolean operator FilterList.Operator.MUST_PASS_ALL
 (AND) or
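Given the description above (an ordered list of filters combined under MUST_PASS_ALL, i.e. logical AND, or MUST_PASS_ONE, i.e. logical OR), a hedged usage sketch; the concrete filter, qualifier and scan details are illustrative only:

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.FilterList;
    import org.apache.hadoop.hbase.filter.QualifierFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    final class FilterListSketch {
      static Scan buildScan() {
        FilterList list = new FilterList(FilterList.Operator.MUST_PASS_ALL);
        list.addFilter(new QualifierFilter(CompareOperator.EQUAL,
            new BinaryComparator(Bytes.toBytes("q1"))));   // every filter must pass (AND)
        return new Scan().setFilter(list);
      }
    }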
@@ -265,24 +265,28 @@ extends 
 
 boolean
+equals(Object obj)
+
+
+boolean
 filterAllRemaining()
 Filters that never filter all remaining can inherit this 
implementation that
  never stops the filter early.
 
 
-
+
 Filter.ReturnCode
 filterCell(Cellc)
 A way to filter based on the column family, column 
qualifier and/or the column value.
 
 
-
+
 Filter.ReturnCode
 filterKeyValue(Cellc)
 Deprecated.
 
 
-
+
 boolean
 filterRow()
 Filters that never filter by rows based on previously 
gathered state from
@@ -290,14 +294,14 @@ extends 
 
 
-
+
 void
+filterRowCells(List<Cell> cells)
 Filters that never filter by modifying the returned List of 
Cells can inherit this
  implementation that does nothing.
 
 
-
+
 boolean
 filterRowKey(byte[]rowKey,
 intoffset,
@@ -306,82 +310,86 @@ extends 
 
 
-
+
 boolean
 filterRowKey(CellfirstRowCell)
 Filters a row based on the row key.
 
 
-
+
List<Filter>
 getFilters()
 Get the filters.
 
 
-
+
 Cell
 getNextCellHint(CellcurrentCell)
 Filters that are not sure which key must be next seeked to, 
can inherit
  this implementation that, by default, returns a null Cell.
 
 
-
+
 FilterList.Operator
 getOperator()
 Get the operator.
 
 
-
+
 boolean
 hasFilterRow()
+Filters that never filter by modifying the returned List of Cells can
  inherit this implementation that does nothing.
 
 
-
+
+int
+hashCode()
+
+
 boolean
 isFamilyEssential(byte[]name)
 By default, we require all scan's column families to be 
present.
 
 
-
+
 boolean
 isReversed()
 
-
+
 static FilterList
 parseFrom(byte[]pbBytes)
 
-
+
 void
 reset()
 Filters that are purely stateless and do nothing in their 
reset() methods can inherit
  this null/empty implementation.
 
 
-
+
 void
 setReversed(booleanreversed)
 alter the reversed scan flag
 
 
-
+
 int
 size()
 
-
+
 byte[]
 toByteArray()
 Return length 0 byte array for Filters that don't require 
special serialization
 
 
-
+
 String
 toString()
 Return filter's info for debugging and logging 
purpose.
 
 
-
+
 Cell
 transformCell(Cellc)
 By default no transformation takes place
@@ -402,7 +410,7 @@ extends 
 
 Methods inherited from class java.lang.Object:
 clone, equals, finalize, getClass, hashCode, ...

[18/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index db8431b..a8cb7c4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -885,7766 +885,7797 @@
 877   * @return What the next sequence 
(edit) id should be.
 878   * @throws IOException e
 879   */
-880  private long initialize(final 
CancelableProgressable reporter) throws IOException {
-881
-882//Refuse to open the region if there 
is no column family in the table
-883if 
(htableDescriptor.getColumnFamilyCount() == 0) {
-884  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
-885  " should have at least one 
column family.");
-886}
-887
-888MonitoredTask status = 
TaskMonitor.get().createStatus("Initializing region " + this);
-889long nextSeqId = -1;
-890try {
-891  nextSeqId = 
initializeRegionInternals(reporter, status);
-892  return nextSeqId;
-893} finally {
-894  // nextSeqId will be -1 if the 
initialization fails.
-895  // At least it will be 0 
otherwise.
-896  if (nextSeqId == -1) {
-897status.abort("Exception during 
region " + getRegionInfo().getRegionNameAsString() +
-898  " initialization.");
-899  }
-900}
-901  }
-902
-903  private long 
initializeRegionInternals(final CancelableProgressable reporter,
-904  final MonitoredTask status) throws 
IOException {
-905if (coprocessorHost != null) {
-906  status.setStatus("Running 
coprocessor pre-open hook");
-907  coprocessorHost.preOpen();
-908}
-909
-910// Write HRI to a file in case we 
need to recover hbase:meta
-911// Only the primary replica should 
write .regioninfo
-912if 
(this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-913  status.setStatus("Writing region 
info on filesystem");
-914  fs.checkRegionInfoOnFilesystem();
-915}
-916
-917// Initialize all the HStores
-918status.setStatus("Initializing all 
the Stores");
-919long maxSeqId = 
initializeStores(reporter, status);
-920this.mvcc.advanceTo(maxSeqId);
-921if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-922  Collection<HStore> stores = 
this.stores.values();
-923  try {
-924// update the stores that we are 
replaying
-925LOG.debug("replaying wal for " + 
this.getRegionInfo().getEncodedName());
-926
stores.forEach(HStore::startReplayingFromWAL);
-927// Recover any edits if 
available.
-928maxSeqId = Math.max(maxSeqId,
-929  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-930// Make sure mvcc is up to max.
-931this.mvcc.advanceTo(maxSeqId);
-932  } finally {
-933LOG.debug("stopping wal replay 
for " + this.getRegionInfo().getEncodedName());
-934// update the stores that we are 
done replaying
-935
stores.forEach(HStore::stopReplayingFromWAL);
-936  }
-937}
-938this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-939
-940
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-941this.writestate.flushRequested = 
false;
-942this.writestate.compacting.set(0);
-943
-944if (this.writestate.writesEnabled) 
{
-945  LOG.debug("Cleaning up temporary 
data for " + this.getRegionInfo().getEncodedName());
-946  // Remove temporary data left over 
from old regions
-947  status.setStatus("Cleaning up 
temporary data from old regions");
-948  fs.cleanupTempDir();
-949}
-950
-951if (this.writestate.writesEnabled) 
{
-952  status.setStatus("Cleaning up 
detritus from prior splits");
-953  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-954  // these directories here on open.  
We may be opening a region that was
-955  // being split but we crashed in 
the middle of it all.
-956  LOG.debug("Cleaning up detritus for 
" + this.getRegionInfo().getEncodedName());
-957  fs.cleanupAnySplitDetritus();
-958  fs.cleanupMergesDir();
-959}
+880  @VisibleForTesting
+881  long initialize(final 
CancelableProgressable reporter) throws IOException {
+882
+883//Refuse to open the region if there 
is no column family in the table
+884if 
(htableDescriptor.getColumnFamilyCount() == 0) {
+885  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
+886  " should have at least one 
column family.");
+887}
+888
+889MonitoredTask status = 

[18/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/TransitRegionStateProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/TransitRegionStateProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/TransitRegionStateProcedure.html
new file mode 100644
index 000..7fb1829
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/TransitRegionStateProcedure.html
@@ -0,0 +1,315 @@
+Uses of Class 
org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure (Apache 
HBase 3.0.0-SNAPSHOT API)
+Uses of 
Class org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure
+
+
+
+
+
+Packages that use TransitRegionStateProcedure
+
+Package
+Description
+
+
+
+org.apache.hadoop.hbase.master.assignment
+
+
+
+
+
+
+
+
+
+
+Uses of TransitRegionStateProcedure in 
org.apache.hadoop.hbase.master.assignment
+
+Fields in org.apache.hadoop.hbase.master.assignment
 declared as TransitRegionStateProcedure
+
+Modifier and Type
+Field and Description
+
+
+
+private TransitRegionStateProcedure
+RegionStateNode.procedure
+
+
+
+
+Methods in org.apache.hadoop.hbase.master.assignment
 that return TransitRegionStateProcedure
+
+Modifier and Type
+Method and Description
+
+
+
+static TransitRegionStateProcedure
+TransitRegionStateProcedure.assign(MasterProcedureEnvenv,
+  RegionInforegion,
+  ServerNametargetServer)
+
+
+private TransitRegionStateProcedure
+AssignmentManager.createAssignProcedure(RegionStateNoderegionNode,
+ ServerNametargetServer)
+
+
+TransitRegionStateProcedure[]
+AssignmentManager.createAssignProcedures(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfohris)
+Create an array of TransitRegionStateProcedure w/o 
specifying a target server.
+
+
+
+private TransitRegionStateProcedure[]
+AssignmentManager.createAssignProcedures(https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">MapServerName,https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfoassignments)
+
+
+private TransitRegionStateProcedure[]
+SplitTableRegionProcedure.createAssignProcedures(MasterProcedureEnvenv)
+
+
+private TransitRegionStateProcedure[]
+MergeTableRegionsProcedure.createAssignProcedures(MasterProcedureEnvenv)
+
+
+private static TransitRegionStateProcedure[]
+AssignmentManagerUtil.createAssignProcedures(MasterProcedureEnvenv,
+  https://docs.oracle.com/javase/8/docs/api/java/util/stream/Stream.html?is-external=true;
 title="class or interface in java.util.stream">StreamRegionInforegions,
+  intregionReplication,
+  ServerNametargetServer)
+
+
+(package private) static TransitRegionStateProcedure[]
+AssignmentManagerUtil.createAssignProceduresForOpeningNewRegions(MasterProcedureEnvenv,
+  https://docs.oracle.com/javase/8/docs/api/java/util/stream/Stream.html?is-external=true;
 title="class or interface in java.util.stream">StreamRegionInforegions,
+  intregionReplication,
+  ServerNametargetServer)
+
+
+private TransitRegionStateProcedure
+AssignmentManager.createMoveRegionProcedure(RegionInforegionInfo,
+ ServerNametargetServer)
+
+
+TransitRegionStateProcedure[]
+AssignmentManager.createRoundRobinAssignProcedures(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfohris)
+Create round-robin assigns.
+
+
+
+private TransitRegionStateProcedure[]
+SplitTableRegionProcedure.createUnassignProcedures(MasterProcedureEnvenv)
+
+
+private TransitRegionStateProcedure[]
+MergeTableRegionsProcedure.createUnassignProcedures(MasterProcedureEnvenv)
+
+
+TransitRegionStateProcedure[]
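 A hedged sketch of how the assign factory above is typically wired into the
 procedure framework (env, regionInfo, targetServer, and procedureExecutor
 are assumed master-side context, not part of this table):

  // Build a single-region assign and submit it; the returned id is the
  // procedure's Root-ProcID.
  TransitRegionStateProcedure assignProc =
      TransitRegionStateProcedure.assign(env, regionInfo, targetServer);
  long procId = procedureExecutor.submitProcedure(assignProc);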

[18/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/thrift/HttpDoAsClient.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/HttpDoAsClient.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/HttpDoAsClient.html
index b87dfff..0480193 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/HttpDoAsClient.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/HttpDoAsClient.html
@@ -61,238 +61,241 @@
 053import org.ietf.jgss.GSSManager;
 054import org.ietf.jgss.GSSName;
 055import org.ietf.jgss.Oid;
-056
-057/**
-058 * See the instructions under 
hbase-examples/README.txt
-059 */
-060@InterfaceAudience.Private
-061public class HttpDoAsClient {
-062
-063  static protected int port;
-064  static protected String host;
-065  CharsetDecoder decoder = null;
-066  private static boolean secure = 
false;
-067  static protected String doAsUser = 
null;
-068  static protected String principal = 
null;
-069
-070  public static void main(String[] args) 
throws Exception {
-071
-072if (args.length < 3 || args.length 
 > 4) {
-073
-074  System.out.println("Invalid 
arguments!");
-075  System.out.println("Usage: 
HttpDoAsClient host port doAsUserName [security=true]");
-076  System.exit(-1);
-077}
-078
-079host = args[0];
-080port = Integer.parseInt(args[1]);
-081doAsUser = args[2];
-082if (args.length > 3) {
-083  secure = 
Boolean.parseBoolean(args[3]);
-084  principal = 
getSubject().getPrincipals().iterator().next().getName();
-085}
-086
-087final HttpDoAsClient client = new 
HttpDoAsClient();
-088Subject.doAs(getSubject(),
-089new 
PrivilegedExceptionAction<Void>() {
-090  @Override
-091  public Void run() throws 
Exception {
-092client.run();
-093return null;
-094  }
-095});
-096  }
-097
-098  HttpDoAsClient() {
-099decoder = 
Charset.forName("UTF-8").newDecoder();
-100  }
-101
-102  // Helper to translate byte[]'s to UTF8 
strings
-103  private String utf8(byte[] buf) {
-104try {
-105  return 
decoder.decode(ByteBuffer.wrap(buf)).toString();
-106} catch (CharacterCodingException e) 
{
-107  return "[INVALID UTF-8]";
-108}
-109  }
-110
-111  // Helper to translate strings to UTF8 
bytes
-112  private byte[] bytes(String s) {
-113try {
-114  return s.getBytes("UTF-8");
-115} catch (UnsupportedEncodingException 
e) {
-116  e.printStackTrace();
-117  return null;
-118}
-119  }
-120
-121  private void run() throws Exception {
-122TTransport transport = new 
TSocket(host, port);
+056import org.slf4j.Logger;
+057import org.slf4j.LoggerFactory;
+058
+059/**
+060 * See the instructions under 
hbase-examples/README.txt
+061 */
+062@InterfaceAudience.Private
+063public class HttpDoAsClient {
+064  private static final Logger LOG = 
LoggerFactory.getLogger(HttpDoAsClient.class);
+065
+066  static protected int port;
+067  static protected String host;
+068  CharsetDecoder decoder = null;
+069  private static boolean secure = 
false;
+070  static protected String doAsUser = 
null;
+071  static protected String principal = 
null;
+072
+073  public static void main(String[] args) 
throws Exception {
+074
+075if (args.length < 3 || args.length 
 > 4) {
+076
+077  System.out.println("Invalid 
arguments!");
+078  System.out.println("Usage: 
HttpDoAsClient host port doAsUserName [security=true]");
+079  System.exit(-1);
+080}
+081
+082host = args[0];
+083port = Integer.parseInt(args[1]);
+084doAsUser = args[2];
+085if (args.length > 3) {
+086  secure = 
Boolean.parseBoolean(args[3]);
+087  principal = 
getSubject().getPrincipals().iterator().next().getName();
+088}
+089
+090final HttpDoAsClient client = new 
HttpDoAsClient();
+091Subject.doAs(getSubject(),
+092new 
PrivilegedExceptionAction<Void>() {
+093  @Override
+094  public Void run() throws 
Exception {
+095client.run();
+096return null;
+097  }
+098});
+099  }
+100
+101  HttpDoAsClient() {
+102decoder = 
Charset.forName("UTF-8").newDecoder();
+103  }
+104
+105  // Helper to translate byte[]'s to UTF8 
strings
+106  private String utf8(byte[] buf) {
+107try {
+108  return 
decoder.decode(ByteBuffer.wrap(buf)).toString();
+109} catch (CharacterCodingException e) 
{
+110  return "[INVALID UTF-8]";
+111}
+112  }
+113
+114  // Helper to translate strings to UTF8 
bytes
+115  private byte[] bytes(String s) {
+116try {
+117  return s.getBytes("UTF-8");
+118} catch (UnsupportedEncodingException 
e) {
+119  LOG.error("CharSetName {} not 
supported", s, e);
+120  return null;
+121}
+122  }
 123
-124transport.open();
-125String url = "http://; + host + ":" + 
port;
-126THttpClient 
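 The bytes() helper above must catch UnsupportedEncodingException only
 because it names the charset as a string. A minimal alternative sketch,
 assuming Java 7+ (not part of this diff), avoids the checked exception:

  import java.nio.charset.StandardCharsets;

  // StandardCharsets.UTF_8 is guaranteed to exist, so nothing can throw here.
  private byte[] bytes(String s) {
    return s.getBytes(StandardCharsets.UTF_8);
  }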

[18/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
index bd3c59e..21e240a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
@@ -33,62 +33,62 @@
 025import java.io.FileNotFoundException;
 026import java.io.FileOutputStream;
 027import java.io.IOException;
-028import java.io.ObjectInputStream;
-029import java.io.ObjectOutputStream;
-030import java.io.Serializable;
-031import java.nio.ByteBuffer;
-032import java.util.ArrayList;
-033import java.util.Comparator;
-034import java.util.HashSet;
-035import java.util.Iterator;
-036import java.util.List;
-037import java.util.Map;
-038import java.util.NavigableSet;
-039import java.util.PriorityQueue;
-040import java.util.Set;
-041import 
java.util.concurrent.ArrayBlockingQueue;
-042import 
java.util.concurrent.BlockingQueue;
-043import 
java.util.concurrent.ConcurrentHashMap;
-044import 
java.util.concurrent.ConcurrentMap;
-045import 
java.util.concurrent.ConcurrentSkipListSet;
-046import java.util.concurrent.Executors;
-047import 
java.util.concurrent.ScheduledExecutorService;
-048import java.util.concurrent.TimeUnit;
-049import 
java.util.concurrent.atomic.AtomicInteger;
-050import 
java.util.concurrent.atomic.AtomicLong;
-051import 
java.util.concurrent.atomic.LongAdder;
-052import java.util.concurrent.locks.Lock;
-053import 
java.util.concurrent.locks.ReentrantLock;
-054import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-055import 
org.apache.hadoop.conf.Configuration;
-056import 
org.apache.hadoop.hbase.HBaseConfiguration;
-057import 
org.apache.hadoop.hbase.io.HeapSize;
-058import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-063import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-064import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-066import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-068import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-069import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-070import 
org.apache.hadoop.hbase.nio.ByteBuff;
-071import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-072import 
org.apache.hadoop.hbase.util.HasThread;
-073import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-075import 
org.apache.hadoop.hbase.util.UnsafeAvailChecker;
-076import 
org.apache.hadoop.util.StringUtils;
-077import 
org.apache.yetus.audience.InterfaceAudience;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080
-081import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-082import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-083import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+028import java.io.Serializable;
+029import java.nio.ByteBuffer;
+030import java.util.ArrayList;
+031import java.util.Comparator;
+032import java.util.HashSet;
+033import java.util.Iterator;
+034import java.util.List;
+035import java.util.Map;
+036import java.util.NavigableSet;
+037import java.util.PriorityQueue;
+038import java.util.Set;
+039import 
java.util.concurrent.ArrayBlockingQueue;
+040import 
java.util.concurrent.BlockingQueue;
+041import 
java.util.concurrent.ConcurrentHashMap;
+042import 
java.util.concurrent.ConcurrentMap;
+043import 
java.util.concurrent.ConcurrentSkipListSet;
+044import java.util.concurrent.Executors;
+045import 
java.util.concurrent.ScheduledExecutorService;
+046import java.util.concurrent.TimeUnit;
+047import 
java.util.concurrent.atomic.AtomicInteger;
+048import 
java.util.concurrent.atomic.AtomicLong;
+049import 
java.util.concurrent.atomic.LongAdder;
+050import java.util.concurrent.locks.Lock;
+051import 
java.util.concurrent.locks.ReentrantLock;
+052import 
java.util.concurrent.locks.ReentrantReadWriteLock;
+053import 
org.apache.hadoop.conf.Configuration;
+054import 
org.apache.hadoop.hbase.HBaseConfiguration;
+055import 
org.apache.hadoop.hbase.io.HeapSize;
+056import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
+057import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+058import 

[18/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index e65cf97..e0d5c9f 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,6 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
- @InterfaceStability.Evolving
 public class ProcedureExecutor<TEnvironment>
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Thread Pool that executes the submitted procedures.
@@ -150,7 +149,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 private static class
-ProcedureExecutor.CompletedProcedureRetainer
+ProcedureExecutor.CompletedProcedureRetainer<TEnvironment>
 
 
 static class
@@ -204,7 +203,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 checkOwnerSet
 
 
-private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html?is-external=true;
 title="class or interface in java.util.concurrent">ConcurrentHashMaphttps://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long,ProcedureExecutor.CompletedProcedureRetainer
+private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html?is-external=true;
 title="class or interface in java.util.concurrent">ConcurrentHashMaphttps://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long,ProcedureExecutor.CompletedProcedureRetainerTEnvironment
 completed
Map the procId returned by submitProcedure(), the 
Root-ProcID, to the Procedure.
 
@@ -256,13 +255,13 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html?is-external=true;
 title="class or interface in java.util.concurrent">ConcurrentHashMaphttps://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long,Procedure
+private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html?is-external=true;
 title="class or interface in java.util.concurrent">ConcurrentHashMaphttps://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long,ProcedureTEnvironment
 procedures
 Helper map to lookup the live procedures by ID.
 
 
 
-private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html?is-external=true;
 title="class or interface in java.util.concurrent">ConcurrentHashMaphttps://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long,RootProcedureState
+private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html?is-external=true;
 title="class or interface in java.util.concurrent">ConcurrentHashMaphttps://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long,RootProcedureStateTEnvironment
 rollbackStack
Map the procId returned by submitProcedure(), the 
Root-ProcID, to the 
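 A hedged sketch of the submit-and-wait flow these maps support (generics
 simplified; the executor is assumed to be started):

  // The returned procId keys into the live procedures map; once finished,
  // the procedure is moved to the completed retainer map for lookup.
  static void submitAndWait(ProcedureExecutor<MasterProcedureEnv> executor,
      Procedure<MasterProcedureEnv> proc) throws InterruptedException {
    long procId = executor.submitProcedure(proc);
    while (!executor.isFinished(procId)) {
      Thread.sleep(100);
    }
  }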

[18/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
@@ -540,1205 +540,1204 @@
 532  
sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) - {
 533DequeBalancerRegionLoad 
rLoads = oldLoads.get(Bytes.toString(regionName));
 534if (rLoads == null) {
-535  // There was nothing there
-536  rLoads = new 
ArrayDeque<>();
-537} else if (rLoads.size() >= 
numRegionLoadsToRemember) {
-538  rLoads.remove();
-539}
-540rLoads.add(new 
BalancerRegionLoad(rm));
-541
loads.put(Bytes.toString(regionName), rLoads);
-542  });
-543});
-544
-545for(CostFromRegionLoadFunction cost : 
regionLoadFunctions) {
-546  cost.setLoads(loads);
-547}
-548  }
-549
-550  protected void initCosts(Cluster 
cluster) {
-551for (CostFunction c:costFunctions) 
{
-552  c.init(cluster);
-553}
-554  }
-555
-556  protected void 
updateCostsWithAction(Cluster cluster, Action action) {
-557for (CostFunction c : costFunctions) 
{
-558  c.postAction(action);
-559}
-560  }
-561
-562  /**
-563   * Get the names of the cost 
functions
-564   */
-565  public String[] getCostFunctionNames() 
{
-566if (costFunctions == null) return 
null;
-567String[] ret = new 
String[costFunctions.length];
-568for (int i = 0; i < 
costFunctions.length; i++) {
-569  CostFunction c = 
costFunctions[i];
-570  ret[i] = 
c.getClass().getSimpleName();
-571}
-572
-573return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It 
will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be 
combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the 
cluster
-581   * @param previousCost the previous 
cost. This is used as an early out.
-582   * @return a double of a cost 
associated with the proposed cluster state.  This cost is an
-583   * aggregate of all individual 
cost functions.
-584   */
-585  protected double computeCost(Cluster 
cluster, double previousCost) {
-586double total = 0;
-587
-588for (int i = 0; i < 
costFunctions.length; i++) {
-589  CostFunction c = 
costFunctions[i];
-590  this.tempFunctionCosts[i] = 0.0;
-591
-592  if (c.getMultiplier() <= 0) {
-593continue;
-594  }
-595
-596  Float multiplier = 
c.getMultiplier();
-597  Double cost = c.cost();
-598
-599  this.tempFunctionCosts[i] = 
multiplier*cost;
-600  total += 
this.tempFunctionCosts[i];
-601
-602  if (total > previousCost) {
-603break;
-604  }
-605}
-606
-607return total;
-608  }
-609
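 To make the computeCost early-out concrete (illustrative numbers only): with
 two cost functions whose multipliers are 500 and 5 and whose raw costs come
 back as 0.02 and 0.4, the weighted total is 500 * 0.02 + 5 * 0.4 = 12. If
 previousCost were 11, the running total first exceeds 11 only after the
 second term is added, at which point any remaining cost functions would be
 skipped by the early-out check above.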
-610  /** Generates a candidate action to be 
applied to the cluster for cost function search */
-611  abstract static class 
CandidateGenerator {
-612abstract Cluster.Action 
generate(Cluster cluster);
-613
-614/**
-615 * From a list of regions pick a 
random one. Null can be returned which
-616 * {@link 
StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region 
move
-617 * rather than swap.
-618 *
-619 * @param clusterThe state of 
the cluster
-620 * @param server index of the 
server
-621 * @param chanceOfNoSwap Chance that 
this will decide to try a move rather
-622 *   than a 
swap.
-623 * @return a random {@link 
RegionInfo} or null if an asymmetrical move is
-624 * suggested.
-625 */
-626protected int 
pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627  // Check to see if this is just a 
move.
-628  if 
(cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < 
chanceOfNoSwap) {
-629// signal a move only.
-630return -1;
-631  }
-632  int rand = 
RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633  return 
cluster.regionsPerServer[server][rand];
-634
-635}
-636protected int 
pickRandomServer(Cluster cluster) {
-637  if (cluster.numServers < 1) {
-638return -1;
-639  }
-640
-641  return 
RANDOM.nextInt(cluster.numServers);
-642}
-643
-644protected int pickRandomRack(Cluster 
cluster) {
-645  if (cluster.numRacks < 1) {
-646return -1;
-647  

[18/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/ServerMetrics.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/ServerMetrics.html 
b/apidocs/org/apache/hadoop/hbase/ServerMetrics.html
index 5e17cdd..29337c5 100644
--- a/apidocs/org/apache/hadoop/hbase/ServerMetrics.html
+++ b/apidocs/org/apache/hadoop/hbase/ServerMetrics.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":18,"i13":18};
-var tabs = 
{65535:["t0","所有方法"],2:["t2","实例方法"],4:["t3","抽象方法"],16:["t5","默认方法"]};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-ç±»
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+PrevClass
+NextClass
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 

[18/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/ServerLoad.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/ServerLoad.html 
b/apidocs/org/apache/hadoop/hbase/ServerLoad.html
index f9ac436..83435a4 100644
--- a/apidocs/org/apache/hadoop/hbase/ServerLoad.html
+++ b/apidocs/org/apache/hadoop/hbase/ServerLoad.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42,"i40":42,"i41":42,"i42":42,"i43":42,"i44":42,"i45":42,"i46":42,"i47":42};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
+var tabs = {65535:["t0","所有方法"],2:["t2","实例方法"],8:["t4","å…
·ä½“方法"],32:["t6","已过时的方法"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+ç±»
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-PrevClass
-NextClass
+上一个类
+下一个类
 
 
-Frames
-NoFrames
+框架
+无框架
 
 
-AllClasses
+所有类
 
 
 

[18/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

2018-07-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/package-tree.html
index f824378..4814bd4 100644
--- a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/package-tree.html
@@ -86,6 +86,7 @@
 org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheTmplImpl (implements 
org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheTmpl.Intf)
 org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheViewTmplImpl (implements 
org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheViewTmpl.Intf)
 org.apache.hadoop.hbase.tmpl.regionserver.RegionListTmplImpl (implements 
org.apache.hadoop.hbase.tmpl.regionserver.RegionListTmpl.Intf)
+org.apache.hadoop.hbase.tmpl.regionserver.ReplicationStatusTmplImpl (implements 
org.apache.hadoop.hbase.tmpl.regionserver.ReplicationStatusTmpl.Intf)
 org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmplImpl (implements 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.Intf)
 org.apache.hadoop.hbase.tmpl.regionserver.ServerMetricsTmplImpl (implements 
org.apache.hadoop.hbase.tmpl.regionserver.ServerMetricsTmpl.Intf)
 
@@ -95,6 +96,7 @@
 org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheTmpl
 org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheViewTmpl
 org.apache.hadoop.hbase.tmpl.regionserver.RegionListTmpl
+org.apache.hadoop.hbase.tmpl.regionserver.ReplicationStatusTmpl
 org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl
 org.apache.hadoop.hbase.tmpl.regionserver.ServerMetricsTmpl
 
@@ -104,6 +106,7 @@
 org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheTmpl.ImplData
 org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheViewTmpl.ImplData
 org.apache.hadoop.hbase.tmpl.regionserver.RegionListTmpl.ImplData
+org.apache.hadoop.hbase.tmpl.regionserver.ReplicationStatusTmpl.ImplData
 org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData
 org.apache.hadoop.hbase.tmpl.regionserver.ServerMetricsTmpl.ImplData
 
@@ -118,6 +121,7 @@
 org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheTmpl.Intf
 org.apache.hadoop.hbase.tmpl.regionserver.BlockCacheViewTmpl.Intf
 org.apache.hadoop.hbase.tmpl.regionserver.RegionListTmpl.Intf
+org.apache.hadoop.hbase.tmpl.regionserver.ReplicationStatusTmpl.Intf
 org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.Intf
 org.apache.hadoop.hbase.tmpl.regionserver.ServerMetricsTmpl.Intf
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/package-use.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/package-use.html
index 78ba6e0..4814761 100644
--- a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/package-use.html
@@ -117,6 +117,12 @@
 RegionListTmpl.Intf
 
 
+ReplicationStatusTmpl.ImplData
+
+
+ReplicationStatusTmpl.Intf
+
+
 RSStatusTmpl
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
index d61aa1c..9894900 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
@@ -533,14 +533,14 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.util.Order
-org.apache.hadoop.hbase.util.ChecksumType
-org.apache.hadoop.hbase.util.PrettyPrinter.Unit
+org.apache.hadoop.hbase.util.PoolMap.PoolType
 org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.PureJavaComparer
 (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
 org.apache.hadoop.hbase.util.Bytes.LexicographicalComparerHolder.UnsafeComparer
 (implements org.apache.hadoop.hbase.util.Bytes.ComparerT)
+org.apache.hadoop.hbase.util.PrettyPrinter.Unit
 org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE
+org.apache.hadoop.hbase.util.Order
+org.apache.hadoop.hbase.util.ChecksumType
 org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType

[18/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

2018-06-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 0287bfb..7da2489 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = 
"3.0.0-SNAPSHOT";
-011  public static final String revision = 
"6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff";
+011  public static final String revision = 
"85b41f36e01214b6485c9352875c84ebf877dab3";
 012  public static final String user = 
"jenkins";
-013  public static final String date = "Thu 
Jun 28 14:39:45 UTC 2018";
+013  public static final String date = "Fri 
Jun 29 16:23:43 UTC 2018";
 014  public static final String url = 
"git://jenkins-websites1.apache.org/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum 
= "7ee64003a6b54ac58a4f6db4dd4dd6ee";
+015  public static final String srcChecksum 
= "e33d90cf6b0a22ff92d3c16af518f498";
 016}
 
 



[18/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

2018-06-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
index 3582e2d..f2a9b9f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
@@ -1102,12 +1102,21 @@
 
 
 default void
+MasterObserver.postTransitReplicationPeerSyncReplicationState(ObserverContextMasterCoprocessorEnvironmentctx,
+  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
+  SyncReplicationStatefrom,
+  SyncReplicationStateto)
+Called after transiting the current cluster state for the 
specified synchronous replication peer
+
+
+
+default void
 MasterObserver.postTruncateTable(ObserverContextMasterCoprocessorEnvironmentctx,
  TableNametableName)
 Called after the truncateTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postUnassign(ObserverContextMasterCoprocessorEnvironmentctx,
 RegionInforegionInfo,
@@ -1115,7 +1124,7 @@
 Called after the region unassignment has been 
requested.
 
 
-
+
 default void
 MasterObserver.postUpdateReplicationPeerConfig(ObserverContextMasterCoprocessorEnvironmentctx,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
@@ -1123,7 +1132,7 @@
 Called after update peerConfig for the specified peer
 
 
-
+
 default void
 RegionObserver.postWALRestore(ObserverContext? extends RegionCoprocessorEnvironmentctx,
   RegionInfoinfo,
@@ -1133,7 +1142,7 @@
  replayed for this region.
 
 
-
+
 default void
 WALObserver.postWALRoll(ObserverContext? extends WALCoprocessorEnvironmentctx,
org.apache.hadoop.fs.PatholdPath,
@@ -1141,7 +1150,7 @@
 Called after rolling the current WAL
 
 
-
+
 default void
 WALObserver.postWALWrite(ObserverContext? extends WALCoprocessorEnvironmentctx,
 RegionInfoinfo,
@@ -1153,14 +1162,14 @@
 
 
 
-
+
 default void
 MasterObserver.preAbortProcedure(ObserverContextMasterCoprocessorEnvironmentctx,
  longprocId)
Called before an abortProcedure request has been 
processed.
 
 
-
+
 default void
 MasterObserver.preAddReplicationPeer(ObserverContextMasterCoprocessorEnvironmentctx,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
@@ -1168,70 +1177,70 @@
 Called before add a replication peer
 
 
-
+
 default void
 MasterObserver.preAddRSGroup(ObserverContextMasterCoprocessorEnvironmentctx,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
 Called before a new region server group is added
 
 
-
+
 default Result
 RegionObserver.preAppend(ObserverContextRegionCoprocessorEnvironmentc,
  Appendappend)
 Called before Append.
 
 
-
+
 default Result
 RegionObserver.preAppendAfterRowLock(ObserverContextRegionCoprocessorEnvironmentc,
  Appendappend)
 Called before Append but after acquiring rowlock.
 
 
-
+
 default void
 MasterObserver.preAssign(ObserverContextMasterCoprocessorEnvironmentctx,
  RegionInforegionInfo)
 Called prior to assigning a specific region.
 
 
-
+
 default void
 MasterObserver.preBalance(ObserverContextMasterCoprocessorEnvironmentctx)
 Called prior to requesting rebalancing of the cluster 
regions, though after
  the initial checks for regions in transition and the balance switch 
flag.
 
 
-
+
 default void
 MasterObserver.preBalanceRSGroup(ObserverContextMasterCoprocessorEnvironmentctx,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgroupName)
 Called before a region server group is removed
 
 
-
+
 default void
 MasterObserver.preBalanceSwitch(ObserverContextMasterCoprocessorEnvironmentctx,
 booleannewValue)
 Called prior to modifying the flag used to enable/disable 
region balancing.
 
 
-
+
 default void
 RegionObserver.preBatchMutate(ObserverContextRegionCoprocessorEnvironmentc,
   MiniBatchOperationInProgressMutationminiBatchOp)
 This will be called for every batch mutation operation 
happening at the server.
 
 
-
+
 default void
 RegionObserver.preBulkLoadHFile(ObserverContextRegionCoprocessorEnvironmentctx,
 

[18/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

2018-06-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 8e5146d..084d210 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HRegion.WriteState
+static class HRegion.WriteState
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -239,7 +239,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 flushing
-volatileboolean flushing
+volatileboolean flushing
 
 
 
@@ -248,7 +248,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 flushRequested
-volatileboolean flushRequested
+volatileboolean flushRequested
 
 
 
@@ -257,7 +257,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 compacting
-https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">AtomicInteger compacting
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true;
 title="class or interface in java.util.concurrent.atomic">AtomicInteger compacting
 
 
 
@@ -266,7 +266,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 writesEnabled
-volatileboolean writesEnabled
+volatileboolean writesEnabled
 
 
 
@@ -275,7 +275,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 readOnly
-volatileboolean readOnly
+volatileboolean readOnly
 
 
 
@@ -284,7 +284,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 readsEnabled
-volatileboolean readsEnabled
+volatileboolean readsEnabled
 
 
 
@@ -293,7 +293,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 HEAP_SIZE
-static finallong HEAP_SIZE
+static finallong HEAP_SIZE
 
 
 
@@ -310,7 +310,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 WriteState
-WriteState()
+WriteState()
 
 
 
@@ -327,7 +327,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 setReadOnly
-voidsetReadOnly(booleanonOff)
+voidsetReadOnly(booleanonOff)
 Set flags that make this region read-only.
 
 Parameters:
@@ -341,7 +341,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 isReadOnly
-booleanisReadOnly()
+booleanisReadOnly()
 
 
 
@@ -350,7 +350,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 isFlushRequested
-booleanisFlushRequested()
+booleanisFlushRequested()
 
 
 
@@ -359,7 +359,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 setReadsEnabled
-voidsetReadsEnabled(booleanreadsEnabled)
+voidsetReadsEnabled(booleanreadsEnabled)
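 Taken together, the members above form a small concurrent state holder; a
 hedged re-sketch of that shape (initial values are assumptions):

  import java.util.concurrent.atomic.AtomicInteger;

  static class WriteState {
    volatile boolean flushing;            // a memstore flush is in progress
    volatile boolean flushRequested;      // a flush has been queued
    final AtomicInteger compacting = new AtomicInteger(0); // active compactions
    volatile boolean writesEnabled = true;
    volatile boolean readOnly;
    volatile boolean readsEnabled = true;

    synchronized void setReadOnly(boolean onOff) {
      this.writesEnabled = !onOff;        // read-only also disables writes
      this.readOnly = onOff;
    }
  }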
 
 
 



[18/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

2018-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
index b6e7636..592c2cc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
@@ -356,3901 +356,3924 @@
 348  public Future<Void> 
modifyTableAsync(TableDescriptor td) throws IOException {
 349ModifyTableResponse response = 
executeCallable(
 350  new 
MasterCallable<ModifyTableResponse>(getConnection(), 
getRpcControllerFactory()) {
-351@Override
-352protected ModifyTableResponse 
rpcCall() throws Exception {
-353  
setPriority(td.getTableName());
-354  ModifyTableRequest request = 
RequestConverter.buildModifyTableRequest(
-355td.getTableName(), td, 
ng.getNonceGroup(), ng.newNonce());
-356  return 
master.modifyTable(getRpcController(), request);
-357}
-358  });
-359return new ModifyTableFuture(this, 
td.getTableName(), response);
-360  }
-361
-362  @Override
-363  public List<TableDescriptor> 
listTableDescriptorsByNamespace(byte[] name) throws IOException {
-364return executeCallable(new 
MasterCallable<List<TableDescriptor>>(getConnection(),
-365getRpcControllerFactory()) {
-366  @Override
-367  protected 
List<TableDescriptor> rpcCall() throws Exception {
-368return 
master.listTableDescriptorsByNamespace(getRpcController(),
-369
ListTableDescriptorsByNamespaceRequest.newBuilder()
-370  
.setNamespaceName(Bytes.toString(name)).build())
-371.getTableSchemaList()
-372.stream()
-373
.map(ProtobufUtil::toTableDescriptor)
-374
.collect(Collectors.toList());
-375  }
-376});
-377  }
-378
-379  @Override
-380  public List<TableDescriptor> 
listTableDescriptors(List<TableName> tableNames) throws IOException {
-381return executeCallable(new 
MasterCallable<List<TableDescriptor>>(getConnection(),
-382getRpcControllerFactory()) {
-383  @Override
-384  protected 
List<TableDescriptor> rpcCall() throws Exception {
-385GetTableDescriptorsRequest req 
=
-386
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-387  return 
ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-388  req));
-389  }
-390});
-391  }
-392
-393  @Override
-394  public List<RegionInfo> 
getRegions(final ServerName sn) throws IOException {
-395AdminService.BlockingInterface admin 
= this.connection.getAdmin(sn);
-396// TODO: There is no timeout on this 
controller. Set one!
-397HBaseRpcController controller = 
rpcControllerFactory.newController();
-398return 
ProtobufUtil.getOnlineRegions(controller, admin);
-399  }
-400
-401  @Override
-402  public List<RegionInfo> 
getRegions(TableName tableName) throws IOException {
-403if 
(TableName.isMetaTableName(tableName)) {
-404  return 
Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-405} else {
-406  return 
MetaTableAccessor.getTableRegions(connection, tableName, true);
-407}
-408  }
-409
-410  private static class 
AbortProcedureFuture extends ProcedureFuture<Boolean> {
-411private boolean isAbortInProgress;
-412
-413public AbortProcedureFuture(
-414final HBaseAdmin admin,
-415final Long procId,
-416final Boolean abortProcResponse) 
{
-417  super(admin, procId);
-418  this.isAbortInProgress = 
abortProcResponse;
-419}
-420
-421@Override
-422public Boolean get(long timeout, 
TimeUnit unit)
-423throws InterruptedException, 
ExecutionException, TimeoutException {
-424  if (!this.isAbortInProgress) {
-425return false;
-426  }
-427  super.get(timeout, unit);
-428  return true;
-429}
-430  }
-431
-432  /** @return Connection used by this 
object. */
-433  @Override
-434  public Connection getConnection() {
-435return connection;
-436  }
-437
-438  @Override
-439  public boolean tableExists(final 
TableName tableName) throws IOException {
-440return executeCallable(new 
RpcRetryingCallable<Boolean>() {
-441  @Override
-442  protected Boolean rpcCall(int 
callTimeout) throws Exception {
-443return 
MetaTableAccessor.tableExists(connection, tableName);
-444  }
-445});
-446  }
-447
-448  @Override
-449  public HTableDescriptor[] listTables() 
throws IOException {
-450return listTables((Pattern)null, 
false);
-451  }
-452
-453  @Override
-454  public HTableDescriptor[] 
listTables(Pattern pattern) throws 
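 The MasterCallable plumbing above is what backs the public Admin API; a
 hedged client-side usage sketch (the table name and max-file-size value are
 illustrative):

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

  public class ModifyTableSketch {
    static void bumpMaxFileSize(Connection conn) throws Exception {
      try (Admin admin = conn.getAdmin()) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(admin.getDescriptor(TableName.valueOf("t1")))
            .setMaxFileSize(10L * 1024 * 1024 * 1024)  // 10 GB max region size
            .build();
        admin.modifyTableAsync(td).get();  // wait for the master-side procedure
      }
    }
  }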

[18/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

2018-06-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
index fea2b5a..c7a6cc4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
@@ -1354,816 +1354,824 @@
 1346   */
 1347  public static void 
putsToMetaTable(final Connection connection, final List<Put> ps)
 1348  throws IOException {
-1349try (Table t = 
getMetaHTable(connection)) {
-1350  debugLogMutations(ps);
-1351  t.put(ps);
-1352}
-1353  }
-1354
-1355  /**
-1356   * Delete the passed 
<code>d</code> from the <code>hbase:meta</code> 
table.
-1357   * @param connection connection we're 
using
-1358   * @param d Delete to add to 
hbase:meta
-1359   */
-1360  private static void 
deleteFromMetaTable(final Connection connection, final Delete d)
-1361  throws IOException {
-1362List<Delete> dels = new 
ArrayList<>(1);
-1363dels.add(d);
-1364deleteFromMetaTable(connection, 
dels);
-1365  }
-1366
-1367  /**
-1368   * Delete the passed 
<code>deletes</code> from the <code>hbase:meta</code> 
table.
-1369   * @param connection connection we're 
using
-1370   * @param deletes Deletes to add to 
hbase:meta  This list should support #remove.
-1371   */
-1372  private static void 
deleteFromMetaTable(final Connection connection, final List<Delete> 
deletes)
-1373  throws IOException {
-1374try (Table t = 
getMetaHTable(connection)) {
-1375  debugLogMutations(deletes);
-1376  t.delete(deletes);
-1377}
-1378  }
-1379
-1380  /**
-1381   * Deletes some replica columns 
corresponding to replicas for the passed rows
-1382   * @param metaRows rows in 
hbase:meta
-1383   * @param replicaIndexToDeleteFrom the 
replica ID we would start deleting from
-1384   * @param numReplicasToRemove how many 
replicas to remove
-1385   * @param connection connection we're 
using to access meta table
-1386   */
-1387  public static void 
removeRegionReplicasFromMeta(Set<byte[]> metaRows,
-1388int replicaIndexToDeleteFrom, int 
numReplicasToRemove, Connection connection)
-1389  throws IOException {
-1390int absoluteIndex = 
replicaIndexToDeleteFrom + numReplicasToRemove;
-1391for (byte[] row : metaRows) {
-1392  long now = 
EnvironmentEdgeManager.currentTime();
-1393  Delete deleteReplicaLocations = 
new Delete(row);
-1394  for (int i = 
replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
-1395
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1396  getServerColumn(i), now);
-1397
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1398  getSeqNumColumn(i), now);
-1399
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1400  getStartCodeColumn(i), now);
-1401  }
-1402  deleteFromMetaTable(connection, 
deleteReplicaLocations);
-1403}
-1404  }
-1405
-1406  /**
-1407   * Execute the passed 
<code>mutations</code> against <code>hbase:meta</code> 
table.
-1408   * @param connection connection we're 
using
-1409   * @param mutations Puts and Deletes 
to execute on hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void 
mutateMetaTable(final Connection connection,
-1413 
final List<Mutation> mutations)
-1414throws IOException {
-1415Table t = 
getMetaHTable(connection);
-1416try {
-1417  debugLogMutations(mutations);
-1418  t.batch(mutations, null);
-1419} catch (InterruptedException e) {
-1420  InterruptedIOException ie = new 
InterruptedIOException(e.getMessage());
-1421  ie.initCause(e);
-1422  throw ie;
-1423} finally {
-1424  t.close();
-1425}
-1426  }
-1427
-1428  private static void 
addRegionStateToPut(Put put, RegionState.State state) throws IOException {
-1429
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-1430.setRow(put.getRow())
-1431
.setFamily(HConstants.CATALOG_FAMILY)
-1432
.setQualifier(getRegionStateColumn())
-1433
.setTimestamp(put.getTimestamp())
-1434.setType(Cell.Type.Put)
-1435
.setValue(Bytes.toBytes(state.name()))
-1436.build());
-1437  }
-1438
-1439  /**
-1440   * Adds daughter region infos to 
hbase:meta row for the specified region. Note that this does not
-1441   * add its daughter's as different 
rows, but adds information about the daughters in the same row
-1442   * as the parent. Use
-1443   * {@link #splitRegion(Connection, 
RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)}
-1444   * if you want to do that.
-1445   * @param connection connection we're 
using
-1446   * 
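 For a concrete reading of the removeRegionReplicasFromMeta arithmetic above
 (illustrative numbers): with replicaIndexToDeleteFrom = 2 and
 numReplicasToRemove = 3, absoluteIndex = 2 + 3 = 5, so the loop deletes the
 server, seqnum, and startcode columns for replica IDs 2, 3, and 4.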

[18/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

2018-06-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
index 51fcd6e..6b9e2a8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
@@ -54,1029 +54,1026 @@
 046import org.apache.hadoop.hbase.ServerMetricsBuilder;
 047import org.apache.hadoop.hbase.ServerName;
 048import org.apache.hadoop.hbase.YouAreDeadException;
-049import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-050import org.apache.hadoop.hbase.client.ClusterConnection;
-051import org.apache.hadoop.hbase.client.RegionInfo;
-052import org.apache.hadoop.hbase.client.RetriesExhaustedException;
-053import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-054import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-055import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-056import org.apache.hadoop.hbase.regionserver.HRegionServer;
-057import org.apache.hadoop.hbase.util.Bytes;
-058import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-059import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-060import org.apache.yetus.audience.InterfaceAudience;
-061import org.apache.zookeeper.KeeperException;
-062import org.slf4j.Logger;
-063import org.slf4j.LoggerFactory;
-064
-065import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-066import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-067
-068import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-069import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-070import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-071import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
-072import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
-073
-074/**
-075 * The ServerManager class manages info about region servers.
-076 * <p>
-077 * Maintains lists of online and dead servers.  Processes the startups,
-078 * shutdowns, and deaths of region servers.
-079 * <p>
-080 * Servers are distinguished in two different ways.  A given server has a
-081 * location, specified by hostname and port, and of which there can only be one
-082 * online at any given time.  A server instance is specified by the location
-083 * (hostname and port) as well as the startcode (timestamp from when the server
-084 * was started).  This is used to differentiate a restarted instance of a given
-085 * server from the original instance.
-086 * <p>
-087 * If a server is known not to be running any more, it is called dead. The dead
-088 * server needs to be handled by a ServerShutdownHandler.  If the handler is not
-089 * enabled yet, the server can't be handled right away so it is queued up.
-090 * After the handler is enabled, the server will be submitted to a handler to handle.
-091 * However, the handler may be just partially enabled.  If so,
-092 * the server cannot be fully processed, and is queued up for further processing.
-093 * A server is fully processed only after the handler is fully enabled
-094 * and has completed the handling.
-095 */
-096@InterfaceAudience.Private
-097public class ServerManager {
-098  public static final String WAIT_ON_REGIONSERVERS_MAXTOSTART =
-099      "hbase.master.wait.on.regionservers.maxtostart";
-100
-101  public static final String WAIT_ON_REGIONSERVERS_MINTOSTART =
-102      "hbase.master.wait.on.regionservers.mintostart";
-103
-104  public static final String WAIT_ON_REGIONSERVERS_TIMEOUT =
-105      "hbase.master.wait.on.regionservers.timeout";
-106
-107  public static final String WAIT_ON_REGIONSERVERS_INTERVAL =
-108      "hbase.master.wait.on.regionservers.interval";
-109
-110  private static final Logger LOG = LoggerFactory.getLogger(ServerManager.class);
-111
-112  // Set if we are to shutdown the cluster.
-113  private AtomicBoolean clusterShutdown = new AtomicBoolean(false);
-114
-115  /**
-116   * The last flushed sequence id for a region.
-117   */
-118  private final ConcurrentNavigableMap<byte[], Long> flushedSequenceIdByRegion =
-119    new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
-120
-121  /**
-122   * The last flushed sequence id for a store in a region.
-123   */
-124  private final ConcurrentNavigableMap<byte[], ConcurrentNavigableMap<byte[], Long>>
-125    storeFlushedSequenceIdsByRegion = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
-126
-127  /** Map of registered servers to their current load */
-128  private final ConcurrentNavigableMap<ServerName, ServerMetrics> onlineServers =
-129    new
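
The class comment above hinges on the startcode: two ServerName values with the same host and port but different startcodes are different server instances. A small sketch; the hostname, port, and timestamps are made up:

import org.apache.hadoop.hbase.ServerName;

public class ServerIdentitySketch {
  public static void main(String[] args) {
    ServerName original  = ServerName.valueOf("rs1.example.com", 16020, 1528900000000L);
    ServerName restarted = ServerName.valueOf("rs1.example.com", 16020, 1528900123456L);
    // Same location (hostname and port)...
    System.out.println(original.getHostname().equals(restarted.getHostname())
        && original.getPort() == restarted.getPort());   // true
    // ...but not the same server instance: the startcode differs.
    System.out.println(original.equals(restarted));      // false
  }
}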

[18/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
index 5420d82..6ea3672 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
@@ -316,687 +316,728 @@
 308    }
 309  }
 310
-311  public enum ServerState { ONLINE, SPLITTING, OFFLINE }
-312  public static class ServerStateNode implements Comparable<ServerStateNode> {
-313    private final ServerReportEvent reportEvent;
-314
-315    private final Set<RegionStateNode> regions;
-316    private final ServerName serverName;
-317
-318    private volatile ServerState state = ServerState.ONLINE;
-319    private volatile int versionNumber = 0;
-320
-321    public ServerStateNode(final ServerName serverName) {
-322      this.serverName = serverName;
-323      this.regions = ConcurrentHashMap.newKeySet();
-324      this.reportEvent = new ServerReportEvent(serverName);
-325    }
-326
-327    public ServerName getServerName() {
-328      return serverName;
-329    }
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315    /**
+316     * Initial state. Available.
+317     */
+318    ONLINE,
+319
+320    /**
+321     * Server expired/crashed. Currently undergoing WAL splitting.
+322     */
+323    SPLITTING,
+324
+325    /**
+326     * WAL splitting done.
+327     */
+328    OFFLINE
+329  }
 330
-331    public ServerState getState() {
-332      return state;
-333    }
-334
-335    public int getVersionNumber() {
-336      return versionNumber;
-337    }
-338
-339    public ProcedureEvent<?> getReportEvent() {
-340      return reportEvent;
-341    }
+331  /**
+332   * State of Server; list of hosted regions, etc.
+333   */
+334  public static class ServerStateNode implements Comparable<ServerStateNode> {
+335    private final ServerReportEvent reportEvent;
+336
+337    private final Set<RegionStateNode> regions;
+338    private final ServerName serverName;
+339
+340    private volatile ServerState state = ServerState.ONLINE;
+341    private volatile int versionNumber = 0;
 342
-343    public boolean isInState(final ServerState... expected) {
-344      boolean expectedState = false;
-345      if (expected != null) {
-346        for (int i = 0; i < expected.length; ++i) {
-347          expectedState |= (state == expected[i]);
-348        }
-349      }
-350      return expectedState;
+343    public ServerStateNode(final ServerName serverName) {
+344      this.serverName = serverName;
+345      this.regions = ConcurrentHashMap.newKeySet();
+346      this.reportEvent = new ServerReportEvent(serverName);
+347    }
+348
+349    public ServerName getServerName() {
+350      return serverName;
 351    }
 352
-353    public void setState(final ServerState state) {
-354      this.state = state;
+353    public ServerState getState() {
+354      return state;
 355    }
 356
-357    public void setVersionNumber(final int versionNumber) {
-358      this.versionNumber = versionNumber;
+357    public int getVersionNumber() {
+358      return versionNumber;
 359    }
 360
-361    public Set<RegionStateNode> getRegions() {
-362      return regions;
+361    public ProcedureEvent<?> getReportEvent() {
+362      return reportEvent;
 363    }
 364
-365    public int getRegionCount() {
-366      return regions.size();
+365    public boolean isOffline() {
+366      return this.state.equals(ServerState.OFFLINE);
 367    }
 368
-369    public ArrayList<RegionInfo> getRegionInfoList() {
-370      ArrayList<RegionInfo> hris = new ArrayList<RegionInfo>(regions.size());
-371      for (RegionStateNode region: regions) {
-372        hris.add(region.getRegionInfo());
-373      }
-374      return hris;
-375    }
-376
-377    public void addRegion(final RegionStateNode regionNode) {
-378      this.regions.add(regionNode);
-379    }
-380
-381    public void removeRegion(final RegionStateNode regionNode) {
-382      this.regions.remove(regionNode);
-383    }
-384
-385    @Override
-386    public int compareTo(final ServerStateNode other) {
-387      return getServerName().compareTo(other.getServerName());
-388    }
-389
-390    @Override
-391    public int hashCode() {
-392      return getServerName().hashCode();
+369    public boolean isInState(final ServerState... expected) {
+370      boolean expectedState = false;
+371      if (expected != null) {
+372        for (int i = 0; i < expected.length; ++i) {
+373          expectedState |= (state == expected[i]);
+374        }
+375      }
+376      return expectedState
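
The isInState(ServerState...) idiom above is worth seeing in isolation. A self-contained sketch of the same varargs matching; the enum and class names here are local stand-ins, not the HBase types:

enum ServerState { ONLINE, SPLITTING, OFFLINE }

public class StateMatchSketch {
  static boolean isInState(ServerState current, ServerState... expected) {
    boolean matches = false;
    if (expected != null) {
      for (ServerState s : expected) {
        // OR-accumulate so any one match wins, exactly as above
        matches |= (current == s);
      }
    }
    return matches;
  }

  public static void main(String[] args) {
    ServerState state = ServerState.SPLITTING;
    System.out.println(isInState(state, ServerState.SPLITTING, ServerState.OFFLINE)); // true
    System.out.println(isInState(state, ServerState.ONLINE));                         // false
  }
}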

[18/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

2018-05-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 3da432b..d30fa8f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -928,7690 +928,7698 @@
 920      Collection<HStore> stores = this.stores.values();
 921      try {
 922        // update the stores that we are replaying
-923        stores.forEach(HStore::startReplayingFromWAL);
-924        // Recover any edits if available.
-925        maxSeqId = Math.max(maxSeqId,
-926          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-927        // Make sure mvcc is up to max.
-928        this.mvcc.advanceTo(maxSeqId);
-929      } finally {
-930        // update the stores that we are done replaying
-931        stores.forEach(HStore::stopReplayingFromWAL);
-932      }
-933    }
-934    this.lastReplayedOpenRegionSeqId = maxSeqId;
-935
-936    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-937    this.writestate.flushRequested = false;
-938    this.writestate.compacting.set(0);
-939
-940    if (this.writestate.writesEnabled) {
-941      // Remove temporary data left over from old regions
-942      status.setStatus("Cleaning up temporary data from old regions");
-943      fs.cleanupTempDir();
-944    }
-945
-946    if (this.writestate.writesEnabled) {
-947      status.setStatus("Cleaning up detritus from prior splits");
-948      // Get rid of any splits or merges that were lost in-progress.  Clean out
-949      // these directories here on open.  We may be opening a region that was
-950      // being split but we crashed in the middle of it all.
-951      fs.cleanupAnySplitDetritus();
-952      fs.cleanupMergesDir();
-953    }
-954
-955    // Initialize split policy
-956    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-957
-958    // Initialize flush policy
-959    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-960
-961    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-962    for (HStore store: stores.values()) {
-963      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-964    }
-965
-966    // Use maximum of log sequenceid or that which was found in stores
-967    // (particularly if no recovered edits, seqid will be -1).
-968    long maxSeqIdFromFile =
-969      WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
-970    long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1;
-971    if (writestate.writesEnabled) {
-972      WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), nextSeqId - 1);
-973    }
-974
-975    LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
-976
-977    // A region can be reopened if failed a split; reset flags
-978    this.closing.set(false);
-979    this.closed.set(false);
-980
-981    if (coprocessorHost != null) {
-982      status.setStatus("Running coprocessor post-open hooks");
-983      coprocessorHost.postOpen();
-984    }
+923        LOG.debug("replaying wal for " + this.getRegionInfo().getEncodedName());
+924        stores.forEach(HStore::startReplayingFromWAL);
+925        // Recover any edits if available.
+926        maxSeqId = Math.max(maxSeqId,
+927          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+928        // Make sure mvcc is up to max.
+929        this.mvcc.advanceTo(maxSeqId);
+930      } finally {
+931        LOG.debug("stopping wal replay for " + this.getRegionInfo().getEncodedName());
+932        // update the stores that we are done replaying
+933        stores.forEach(HStore::stopReplayingFromWAL);
+934      }
+935    }
+936    this.lastReplayedOpenRegionSeqId = maxSeqId;
+937
+938    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+939    this.writestate.flushRequested = false;
+940    this.writestate.compacting.set(0);
+941
+942    if (this.writestate.writesEnabled) {
+943      LOG.debug("Cleaning up temporary data for " + this.getRegionInfo().getEncodedName());
+944      // Remove temporary data left over from old regions
+945      status.setStatus("Cleaning up temporary data from old regions");
+946      fs.cleanupTempDir();
+947    }
+948
+949    if (this.writestate.writesEnabled) {
+950      status.setStatus("Cleaning up detritus from prior splits");
+951      // Get rid of any splits or merges that were lost in-progress.  Clean out
+952      // these directories here on open.  We may be
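
The sequence-id bookkeeping in this initialize path reduces to taking the maximum of what WAL replay recovered and what a prior instance persisted, plus one. A toy illustration with made-up values:

public class NextSeqIdSketch {
  public static void main(String[] args) {
    long maxSeqIdFromStores = 41; // e.g. result of replaying recovered edits
    long maxSeqIdFromFile = 57;   // e.g. the persisted region sequence-id file
    // Mirrors: long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1;
    long nextSeqId = Math.max(maxSeqIdFromStores, maxSeqIdFromFile) + 1;
    System.out.println("next sequenceid=" + nextSeqId); // 58
  }
}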

[18/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

2018-05-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
index a99b4a7..119472c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.html
@@ -109,528 +109,529 @@
 101  protected FileSystem fs;
 102  // id of this cluster
 103  private UUID clusterId;
-104  // id of the other cluster
-105  private UUID peerClusterId;
-106  // total number of edits we replicated
-107  private AtomicLong totalReplicatedEdits = new AtomicLong(0);
-108  // The znode we currently play with
-109  protected String queueId;
-110  // Maximum number of retries before taking bold actions
-111  private int maxRetriesMultiplier;
-112  // Indicates if this particular source is running
-113  private volatile boolean sourceRunning = false;
-114  // Metrics for this source
-115  private MetricsSource metrics;
-116  // WARN threshold for the number of queued logs, defaults to 2
-117  private int logQueueWarnThreshold;
-118  // ReplicationEndpoint which will handle the actual replication
-119  private volatile ReplicationEndpoint replicationEndpoint;
-120  // A filter (or a chain of filters) for the WAL entries.
-121  protected WALEntryFilter walEntryFilter;
-122  // throttler
-123  private ReplicationThrottler throttler;
-124  private long defaultBandwidth;
-125  private long currentBandwidth;
-126  private WALFileLengthProvider walFileLengthProvider;
-127  protected final ConcurrentHashMap<String, ReplicationSourceShipper> workerThreads =
-128      new ConcurrentHashMap<>();
+104  // total number of edits we replicated
+105  private AtomicLong totalReplicatedEdits = new AtomicLong(0);
+106  // The znode we currently play with
+107  protected String queueId;
+108  // Maximum number of retries before taking bold actions
+109  private int maxRetriesMultiplier;
+110  // Indicates if this particular source is running
+111  private volatile boolean sourceRunning = false;
+112  // Metrics for this source
+113  private MetricsSource metrics;
+114  // WARN threshold for the number of queued logs, defaults to 2
+115  private int logQueueWarnThreshold;
+116  // ReplicationEndpoint which will handle the actual replication
+117  private volatile ReplicationEndpoint replicationEndpoint;
+118  // A filter (or a chain of filters) for the WAL entries.
+119  protected volatile WALEntryFilter walEntryFilter;
+120  // throttler
+121  private ReplicationThrottler throttler;
+122  private long defaultBandwidth;
+123  private long currentBandwidth;
+124  private WALFileLengthProvider walFileLengthProvider;
+125  protected final ConcurrentHashMap<String, ReplicationSourceShipper> workerThreads =
+126      new ConcurrentHashMap<>();
+127
+128  private AtomicLong totalBufferUsed;
 129
-130  private AtomicLong totalBufferUsed;
-131
-132  public static final String WAIT_ON_ENDPOINT_SECONDS =
-133    "hbase.replication.wait.on.endpoint.seconds";
-134  public static final int DEFAULT_WAIT_ON_ENDPOINT_SECONDS = 30;
-135  private int waitOnEndpointSeconds = -1;
+130  public static final String WAIT_ON_ENDPOINT_SECONDS =
+131    "hbase.replication.wait.on.endpoint.seconds";
+132  public static final int DEFAULT_WAIT_ON_ENDPOINT_SECONDS = 30;
+133  private int waitOnEndpointSeconds = -1;
+134
+135  private Thread initThread;
 136
-137  private Thread initThread;
-138
-139  /**
-140   * Instantiation method used by region servers
-141   * @param conf configuration to use
-142   * @param fs file system to use
-143   * @param manager replication manager to ping to
-144   * @param server the server for this region server
-145   * @param queueId the id of our replication queue
-146   * @param clusterId unique UUID for the cluster
-147   * @param metrics metrics for replication source
-148   */
-149  @Override
-150  public void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager,
-151      ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server,
-152      String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider,
-153      MetricsSource metrics) throws IOException {
-154    this.server = server;
-155    this.conf = HBaseConfiguration.create(conf);
-156    this.waitOnEndpointSeconds =
-157      this.conf.getInt(WAIT_ON_ENDPOINT_SECONDS, DEFAULT_WAIT_ON_ENDPOINT_SECONDS);
-158    decorateConf();
-159    this.sleepForRetries =
-160      this.conf.getLong("replication.source.sleepforretries", 1000);    // 1 second
-161    this.maxRetriesMultiplier =
-162
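
The init() body above reads its tunables straight from the Configuration. A minimal sketch using only the two keys and defaults that appear in this diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ReplicationConfSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Both keys and both defaults are taken from the source above.
    int waitOnEndpointSeconds =
        conf.getInt("hbase.replication.wait.on.endpoint.seconds", 30);
    long sleepForRetries =
        conf.getLong("replication.source.sleepforretries", 1000); // 1 second
    System.out.println(waitOnEndpointSeconds + " " + sleepForRetries);
  }
}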

[18/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
index 0653ad2..edce544 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
@@ -386,26 +386,30 @@ implements org.apache.hadoop.util.Tool
 ONE_GB

+(package private) static String
+PE_COMMAND_SHORTNAME
+
 private static org.apache.hadoop.fs.Path
 PERF_EVAL_DIR

 (package private) static String
 RANDOM_READ

 (package private) static String
 RANDOM_SEEK_SCAN

 static int
 ROW_LENGTH

 static String
 TABLE_NAME

 private static int
 TAG_LENGTH

@@ -545,7 +549,7 @@ implements org.apache.hadoop.util.Tool
 protected static void
-printUsage(String className,
+printUsage(String shortName,
            String message)

@@ -644,13 +648,26 @@ implements org.apache.hadoop.util.Tool
+PE_COMMAND_SHORTNAME
+static final String PE_COMMAND_SHORTNAME
+
+See Also:
+Constant Field Values
+
 LOG
 private static final org.slf4j.Logger LOG

 MAPPER
 private static final com.fasterxml.jackson.databind.ObjectMapper MAPPER

 TABLE_NAME
 public static final String TABLE_NAME

 See Also:
 Constant Field Values

 FAMILY_NAME_BASE
 public static final String FAMILY_NAME_BASE

 See Also:
 Constant Field Values

 FAMILY_ZERO
 public static final byte[] FAMILY_ZERO

 COLUMN_ZERO
 public static final byte[] COLUMN_ZERO

 DEFAULT_VALUE_LENGTH
 public static final int DEFAULT_VALUE_LENGTH

 See Also:
 Constant Field Values

 ROW_LENGTH
 public static final int ROW_LENGTH

 See Also:
 Constant Field Values

 ONE_GB
 private static final int ONE_GB

 See Also:
 Constant Field Values

 DEFAULT_ROWS_PER_GB
 private static final int DEFAULT_ROWS_PER_GB

 See Also:
 Constant Field Values

 TAG_LENGTH
 private static final int TAG_LENGTH

 See Also:
 Constant Field Values

 FMT
 private static final java.text.DecimalFormat FMT

[18/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

2018-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 4a879bb..7d27402 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -300,7 +300,7 @@
 292  private Map<String, com.google.protobuf.Service> coprocessorServiceHandlers = Maps.newHashMap();
 293
 294  // Track data size in all memstores
-295  private final MemStoreSizing memStoreSize = new MemStoreSizing();
+295  private final MemStoreSizing memStoreSizing = new ThreadSafeMemStoreSizing();
 296  private final RegionServicesForStores regionServicesForStores = new RegionServicesForStores(this);
 297
 298  // Debug possible data loss due to WAL off
@@ -1218,7389 +1218,7399 @@
 1210   * Increase the size of mem store in this region and the size of global mem
 1211   * store
 1212   */
-1213  public void incMemStoreSize(MemStoreSize memStoreSize) {
-1214    if (this.rsAccounting != null) {
-1215      rsAccounting.incGlobalMemStoreSize(memStoreSize);
-1216    }
-1217    long dataSize;
-1218    synchronized (this.memStoreSize) {
-1219      this.memStoreSize.incMemStoreSize(memStoreSize);
-1220      dataSize = this.memStoreSize.getDataSize();
-1221    }
-1222    checkNegativeMemStoreDataSize(dataSize, memStoreSize.getDataSize());
-1223  }
-1224
-1225  public void decrMemStoreSize(MemStoreSize memStoreSize) {
-1226    if (this.rsAccounting != null) {
-1227      rsAccounting.decGlobalMemStoreSize(memStoreSize);
-1228    }
-1229    long size;
-1230    synchronized (this.memStoreSize) {
-1231      this.memStoreSize.decMemStoreSize(memStoreSize);
-1232      size = this.memStoreSize.getDataSize();
+1213  void incMemStoreSize(MemStoreSize mss) {
+1214    incMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1215  }
+1216
+1217  void incMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1218    if (this.rsAccounting != null) {
+1219      rsAccounting.incGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1220    }
+1221    long dataSize =
+1222      this.memStoreSizing.incMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1223    checkNegativeMemStoreDataSize(dataSize, dataSizeDelta);
+1224  }
+1225
+1226  void decrMemStoreSize(MemStoreSize mss) {
+1227    decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1228  }
+1229
+1230  void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1231    if (this.rsAccounting != null) {
+1232      rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
 1233    }
-1234    checkNegativeMemStoreDataSize(size, -memStoreSize.getDataSize());
-1235  }
-1236
-1237  private void checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) {
-1238    // This is extremely bad if we make memStoreSize negative. Log as much info on the offending
-1239    // caller as possible. (memStoreSize might be a negative value already -- freeing memory)
-1240    if (memStoreDataSize < 0) {
-1241      LOG.error("Asked to modify this region's (" + this.toString()
-1242          + ") memStoreSize to a negative value which is incorrect. Current memStoreSize="
-1243          + (memStoreDataSize - delta) + ", delta=" + delta, new Exception());
-1244    }
-1245  }
-1246
-1247  @Override
-1248  public RegionInfo getRegionInfo() {
-1249    return this.fs.getRegionInfo();
-1250  }
-1251
-1252  /**
-1253   * @return Instance of {@link RegionServerServices} used by this HRegion.
-1254   * Can be null.
-1255   */
-1256  RegionServerServices getRegionServerServices() {
-1257    return this.rsServices;
-1258  }
-1259
-1260  @Override
-1261  public long getReadRequestsCount() {
-1262    return readRequestsCount.sum();
-1263  }
-1264
-1265  @Override
-1266  public long getFilteredReadRequestsCount() {
-1267    return filteredReadRequestsCount.sum();
-1268  }
-1269
-1270  @Override
-1271  public long getWriteRequestsCount() {
-1272    return writeRequestsCount.sum();
-1273  }
-1274
-1275  @Override
-1276  public long getMemStoreDataSize() {
-1277    return memStoreSize.getDataSize();
-1278  }
-1279
-1280  @Override
-1281  public long getMemStoreHeapSize() {
-1282    return memStoreSize.getHeapSize();
-1283  }
-1284
-1285  @Override
-1286  public long getMemStoreOffHeapSize() {
-1287    return memStoreSize.getOffHeapSize();
-1288  }
-1289
-1290  /** @return store services for this region, to access services required by store level needs */
-1291  public
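
The + side above replaces a synchronized MemStoreSizing with a lock-free, thread-safe delta accumulator. A minimal sketch of that idea using AtomicLong; this is an illustration of the pattern, not the ThreadSafeMemStoreSizing implementation:

import java.util.concurrent.atomic.AtomicLong;

public class SizingSketch {
  private final AtomicLong dataSize = new AtomicLong();
  private final AtomicLong heapSize = new AtomicLong();
  private final AtomicLong offHeapSize = new AtomicLong();

  /** Applies the deltas and returns the new data size, like incMemStoreSize above. */
  long incMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta) {
    heapSize.addAndGet(heapDelta);
    offHeapSize.addAndGet(offHeapDelta);
    return dataSize.addAndGet(dataDelta);
  }

  public static void main(String[] args) {
    SizingSketch sizing = new SizingSketch();
    long newDataSize = sizing.incMemStoreSize(1024, 2048, 0);
    if (newDataSize < 0) {
      // mirrors checkNegativeMemStoreDataSize(): a negative running size
      // means a caller decremented more than was ever added
      throw new IllegalStateException("negative memstore size");
    }
    System.out.println(newDataSize); // 1024
  }
}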

[18/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

2018-05-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
index 2510283..418c60c 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
@@ -77,77 +77,77 @@
 069import org.apache.hadoop.hbase.client.RowMutations;
 070import org.apache.hadoop.hbase.client.Scan;
 071import org.apache.hadoop.hbase.client.Table;
-072import org.apache.hadoop.hbase.filter.BinaryComparator;
-073import org.apache.hadoop.hbase.filter.Filter;
-074import org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import org.apache.hadoop.hbase.filter.FilterList;
-076import org.apache.hadoop.hbase.filter.PageFilter;
-077import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import org.apache.hadoop.hbase.io.compress.Compression;
-080import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import org.apache.hadoop.hbase.regionserver.BloomType;
-084import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import org.apache.hadoop.hbase.trace.TraceUtil;
-088import org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import org.apache.hadoop.hbase.util.Bytes;
-090import org.apache.hadoop.hbase.util.Hash;
-091import org.apache.hadoop.hbase.util.MurmurHash;
-092import org.apache.hadoop.hbase.util.Pair;
-093import org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import org.apache.hadoop.mapreduce.Mapper;
-098import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import org.apache.hadoop.util.ToolRunner;
-103import org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import org.apache.htrace.core.TraceScope;
-106import org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used for evaluating HBase performance and scalability.  Runs an HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the
-121 * <a href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132  static final String RANDOM_READ = "randomRead";
-133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper MAPPER = new ObjectMapper();
-135  static {
-136    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = "TestTable";
-140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import org.apache.hadoop.hbase.filter.Filter;
+075import org.apache.hadoop.hbase.filter.FilterAllFilter;
+076import
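
Since PerformanceEvaluation implements org.apache.hadoop.util.Tool, it can be launched in-process as well as from the command line. A sketch; the arguments ("--nomapred", the "randomRead" command, one client) come straight from the usage described in the class comment above, while everything else is illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.util.ToolRunner;

public class PeRunnerSketch {
  public static void main(String[] args) throws Exception {
    // Runs the multithreaded (non-mapreduce) random-read test with 1 client.
    int exitCode = ToolRunner.run(HBaseConfiguration.create(),
        new PerformanceEvaluation(HBaseConfiguration.create()),
        new String[] { "--nomapred", "randomRead", "1" });
    System.exit(exitCode);
  }
}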

[18/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

2018-05-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index e1bc325..63e7421 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
@@ -66,5125 +66,5224 @@
 058import java.util.concurrent.TimeoutException;
 059import java.util.concurrent.atomic.AtomicBoolean;
 060import java.util.concurrent.atomic.AtomicInteger;
-061import org.apache.commons.io.IOUtils;
-062import org.apache.commons.lang3.RandomStringUtils;
-063import org.apache.commons.lang3.StringUtils;
-064import org.apache.hadoop.conf.Configuration;
-065import org.apache.hadoop.conf.Configured;
-066import org.apache.hadoop.fs.FSDataOutputStream;
-067import org.apache.hadoop.fs.FileStatus;
-068import org.apache.hadoop.fs.FileSystem;
-069import org.apache.hadoop.fs.Path;
-070import org.apache.hadoop.fs.permission.FsAction;
-071import org.apache.hadoop.fs.permission.FsPermission;
-072import org.apache.hadoop.hbase.Abortable;
-073import org.apache.hadoop.hbase.Cell;
-074import org.apache.hadoop.hbase.CellUtil;
-075import org.apache.hadoop.hbase.ClusterMetrics;
-076import org.apache.hadoop.hbase.ClusterMetrics.Option;
-077import org.apache.hadoop.hbase.HBaseConfiguration;
-078import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-079import org.apache.hadoop.hbase.HConstants;
-080import org.apache.hadoop.hbase.HRegionInfo;
-081import org.apache.hadoop.hbase.HRegionLocation;
-082import org.apache.hadoop.hbase.KeyValue;
-083import org.apache.hadoop.hbase.MasterNotRunningException;
-084import org.apache.hadoop.hbase.MetaTableAccessor;
-085import org.apache.hadoop.hbase.RegionLocations;
-086import org.apache.hadoop.hbase.ServerName;
-087import org.apache.hadoop.hbase.TableName;
-088import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-089import org.apache.hadoop.hbase.client.Admin;
-090import org.apache.hadoop.hbase.client.ClusterConnection;
-091import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-092import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-093import org.apache.hadoop.hbase.client.Connection;
-094import org.apache.hadoop.hbase.client.ConnectionFactory;
-095import org.apache.hadoop.hbase.client.Delete;
-096import org.apache.hadoop.hbase.client.Get;
-097import org.apache.hadoop.hbase.client.Put;
-098import org.apache.hadoop.hbase.client.RegionInfo;
-099import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-100import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-101import org.apache.hadoop.hbase.client.Result;
-102import org.apache.hadoop.hbase.client.RowMutations;
-103import org.apache.hadoop.hbase.client.Table;
-104import org.apache.hadoop.hbase.client.TableDescriptor;
-105import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-106import org.apache.hadoop.hbase.client.TableState;
-107import org.apache.hadoop.hbase.io.FileLink;
-108import org.apache.hadoop.hbase.io.HFileLink;
-109import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-110import org.apache.hadoop.hbase.io.hfile.HFile;
-111import org.apache.hadoop.hbase.log.HBaseMarkers;
-112import org.apache.hadoop.hbase.master.MasterFileSystem;
-113import org.apache.hadoop.hbase.master.RegionState;
-114import org.apache.hadoop.hbase.regionserver.HRegion;
-115import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-116import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-117import org.apache.hadoop.hbase.replication.ReplicationException;
-118import org.apache.hadoop.hbase.security.AccessDeniedException;
-119import org.apache.hadoop.hbase.security.UserProvider;
-120import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-121import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
-122import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
-123import org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
-124import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
-125import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-126import org.apache.hadoop.hbase.wal.WAL;
-127import org.apache.hadoop.hbase.wal.WALFactory;
-128import org.apache.hadoop.hbase.wal.WALSplitter;
-129import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-130import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-131import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-132import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-133import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-134import org.apache.hadoop.ipc.RemoteException;
-135import

[18/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
@@ -2113,3031 +2113,3033 @@
 2105              errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106                  tableName + " unable to delete dangling table state " + tableState);
 2107          }
-2108        } else {
-2109          errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110              tableName + " has dangling table state " + tableState);
-2111        }
-2112      }
-2113    }
-2114    // check that all tables have states
-2115    for (TableName tableName : tablesInfo.keySet()) {
-2116      if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-2117        if (fixMeta) {
-2118          MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED);
-2119          TableState newState = MetaTableAccessor.getTableState(connection, tableName);
-2120          if (newState == null) {
-2121            errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122                "Unable to change state for table " + tableName + " in meta ");
-2123          }
-2124        } else {
-2125          errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126              tableName + " has no state in meta ");
-2127        }
-2128      }
-2129    }
-2130  }
-2131
-2132  private void preCheckPermission() throws IOException, AccessDeniedException {
-2133    if (shouldIgnorePreCheckPermission()) {
-2134      return;
-2135    }
-2136
-2137    Path hbaseDir = FSUtils.getRootDir(getConf());
-2138    FileSystem fs = hbaseDir.getFileSystem(getConf());
-2139    UserProvider userProvider = UserProvider.instantiate(getConf());
-2140    UserGroupInformation ugi = userProvider.getCurrent().getUGI();
-2141    FileStatus[] files = fs.listStatus(hbaseDir);
-2142    for (FileStatus file : files) {
-2143      try {
-2144        FSUtils.checkAccess(ugi, file, FsAction.WRITE);
-2145      } catch (AccessDeniedException ace) {
-2146        LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
-2147        errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
-2148          + " does not have write perms to " + file.getPath()
-2149          + ". Please rerun hbck as hdfs user " + file.getOwner());
-2150        throw ace;
-2151      }
-2152    }
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo hi) throws IOException {
-2159    deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] metaKey) throws IOException {
-2166    Delete d = new Delete(metaKey);
-2167    meta.delete(d);
-2168    LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo hi) throws IOException {
-2175    RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
-2176    Delete d = new Delete(hi.metaEntry.getRegionName());
-2177    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179    mutations.add(d);
-2180
-2181    RegionInfo hri = RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182        .setOffline(false)
-2183        .setSplit(false)
-2184        .build();
-2185    Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
-2186    mutations.add(p);
-2187
-2188    meta.mutateRow(mutations);
-2189    LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This backwards-compatibility wrapper for permanently offlining a region
-2194   * that should not be alive.  If the region server does not support the
-2195   * "offline" method, it will use the closest unassign method instead.  This
-2196   * will basically work until one attempts to disable or delete the affected
-2197   * table.  The problem has to do with in-memory only master state, so
-2198   * restarting the HMaster or failing over to another should fix this.
-2199   */
-2200  private void offline(byte[] regionName) throws IOException {
-2201    String regionString = Bytes.toStringBinary(regionName);
-2202    if
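
resetSplitParent() above shows the standard recipe for atomically rewriting one meta row: stack a Delete and a Put into a RowMutations and submit them together. A sketch of the same recipe; the "state" qualifier and value written by the Put are hypothetical placeholders, not the columns hbck writes:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitParentResetSketch {
  static void resetSplitColumns(Connection connection, byte[] regionName) throws Exception {
    RowMutations mutations = new RowMutations(regionName);
    Delete d = new Delete(regionName);
    // Same qualifiers the method above clears.
    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
    mutations.add(d);
    Put p = new Put(regionName);
    // Placeholder column; illustrative only.
    p.addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("state"), Bytes.toBytes("OPEN"));
    mutations.add(p);
    try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
      meta.mutateRow(mutations); // both mutations succeed or fail together
    }
  }
}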

[18/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

2018-03-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
index d7aa8b1..98a45a0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
@@ -680,1330 +680,1333 @@
 672    }
 673    List<HRegionLocation> locations = new ArrayList<>();
 674    for (RegionInfo regionInfo : regions) {
-675      RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
-676      if (list != null) {
-677        for (HRegionLocation loc : list.getRegionLocations()) {
-678          if (loc != null) {
-679            locations.add(loc);
-680          }
-681        }
-682      }
-683    }
-684    return locations;
-685  }
-686
-687  @Override
-688  public HRegionLocation locateRegion(final TableName tableName, final byte[] row)
-689      throws IOException {
-690    RegionLocations locations = locateRegion(tableName, row, true, true);
-691    return locations == null ? null : locations.getRegionLocation();
-692  }
-693
-694  @Override
-695  public HRegionLocation relocateRegion(final TableName tableName, final byte[] row)
-696      throws IOException {
-697    RegionLocations locations =
-698      relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-699    return locations == null ? null
-700      : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
-701  }
-702
-703  @Override
-704  public RegionLocations relocateRegion(final TableName tableName,
-705      final byte [] row, int replicaId) throws IOException{
-706    // Since this is an explicit request not to use any caching, finding
-707    // disabled tables should not be desirable.  This will ensure that an exception is thrown when
-708    // the first time a disabled table is interacted with.
-709    if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) {
-710      throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
-711    }
-712
-713    return locateRegion(tableName, row, false, true, replicaId);
-714  }
+675      if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
+676        continue;
+677      }
+678      RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
+679      if (list != null) {
+680        for (HRegionLocation loc : list.getRegionLocations()) {
+681          if (loc != null) {
+682            locations.add(loc);
+683          }
+684        }
+685      }
+686    }
+687    return locations;
+688  }
+689
+690  @Override
+691  public HRegionLocation locateRegion(final TableName tableName, final byte[] row)
+692      throws IOException {
+693    RegionLocations locations = locateRegion(tableName, row, true, true);
+694    return locations == null ? null : locations.getRegionLocation();
+695  }
+696
+697  @Override
+698  public HRegionLocation relocateRegion(final TableName tableName, final byte[] row)
+699      throws IOException {
+700    RegionLocations locations =
+701      relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
+702    return locations == null ? null
+703      : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
+704  }
+705
+706  @Override
+707  public RegionLocations relocateRegion(final TableName tableName,
+708      final byte [] row, int replicaId) throws IOException{
+709    // Since this is an explicit request not to use any caching, finding
+710    // disabled tables should not be desirable.  This will ensure that an exception is thrown when
+711    // the first time a disabled table is interacted with.
+712    if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) {
+713      throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
+714    }
 715
-716  @Override
-717  public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
-718      boolean retry) throws IOException {
-719    return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-720  }
-721
-722  @Override
-723  public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
-724      boolean retry, int replicaId) throws IOException {
-725    checkClosed();
-726    if (tableName == null || tableName.getName().length == 0) {
-727      throw new IllegalArgumentException("table name cannot be null or zero length");
-728    }
-729
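
The only behavioral change in this hunk is the new guard that skips non-default replicas before locating regions. The same filter in isolation; the surrounding list handling is a synthetic sketch:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;

public class ReplicaFilterSketch {
  static List<RegionInfo> defaultReplicasOnly(List<RegionInfo> regions) {
    List<RegionInfo> result = new ArrayList<>();
    for (RegionInfo regionInfo : regions) {
      if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
        continue; // secondary replicas share the primary's start key; skip them
      }
      result.add(regionInfo);
    }
    return result;
  }
}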

[18/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

2018-03-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html b/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
index 8fcef9c..9db8fac 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/CellComparator.html
@@ -275,16 +275,16 @@
 DataBlockEncoder.EncodedSeeker
+DiffKeyDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+
+DataBlockEncoder.EncodedSeeker
 DataBlockEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
 Create a HFileBlock seeker which find KeyValues within a block.

-DataBlockEncoder.EncodedSeeker
-CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
-
 DataBlockEncoder.EncodedSeeker
 PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
@@ -292,17 +292,17 @@
 DataBlockEncoder.EncodedSeeker
-FastDiffDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+RowIndexCodecV1.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)

 DataBlockEncoder.EncodedSeeker
-DiffKeyDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+FastDiffDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)

 DataBlockEncoder.EncodedSeeker
-RowIndexCodecV1.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
@@ -341,30 +341,30 @@
 private CellComparator
-HFileBlockIndex.CellBasedKeyBlockIndexReader.comparator
-Needed doing lookup on blocks.
+HFileReaderImpl.comparator
+Key comparator

 protected CellComparator
-HFile.WriterFactory.comparator
-
-protected CellComparator
 CompoundBloomFilterBase.comparator
 Comparator used to compare Bloom filter keys

 protected CellComparator
 HFileWriterImpl.comparator
 Key comparator.

+protected CellComparator
+HFile.WriterFactory.comparator
+
 private CellComparator
-HFileReaderImpl.comparator
-Key comparator
+HFileBlockIndex.CellBasedKeyBlockIndexReader.comparator
+Needed doing lookup on blocks.
@@ -386,11 +386,11 @@
 CellComparator
-HFile.Reader.getComparator()
+HFileReaderImpl.getComparator()

 CellComparator
-HFileReaderImpl.getComparator()
+HFile.Reader.getComparator()
@@ -547,19 +547,19 @@
 private CellComparator
-StoreFileWriter.Builder.comparator
+AbstractMemStore.comparator

 protected CellComparator
 StripeStoreFlusher.StripeFlushRequest.comparator

-private CellComparator
-StoreScanner.comparator
+protected CellComparator
+StripeMultiFileWriter.comparator

-private CellComparator
-AbstractMemStore.comparator
+protected CellComparator
+HRegion.RegionScannerImpl.comparator

 private CellComparator
@@ -567,23 +567,23 @@
 private CellComparator
-Segment.comparator
+ScanInfo.comparator

-protected CellComparator
-HRegion.RegionScannerImpl.comparator
+private CellComparator
+StoreScanner.comparator

 protected CellComparator
-StripeMultiFileWriter.comparator
+HStore.comparator

 private CellComparator
-ScanInfo.comparator
+Segment.comparator

-protected CellComparator
-HStore.comparator
+private CellComparator
+StoreFileWriter.Builder.comparator

 protected CellComparator
@@ -600,39 +600,37 @@
 CellComparator
-Region.getCellComparator()
-The comparator to be used with the region
-
+HRegion.getCellComparator()

 CellComparator
-HRegion.getCellComparator()
+Region.getCellComparator()
+The comparator to be used with the region
+

-(package private) CellComparator
-StoreFileScanner.getComparator()
-
 protected CellComparator
 AbstractMemStore.getComparator()

+CellComparator
+KeyValueHeap.KVScannerComparator.getComparator()
+
 CellComparator
 StoreFileReader.getComparator()

 CellComparator
-StoreFile.getComparator()
-Get the comparator for comparing two cells.
-
+HStoreFile.getComparator()

 CellComparator
-KeyValueHeap.KVScannerComparator.getComparator()
+ScanInfo.getComparator()

 CellComparator
-HStoreFile.getComparator()
+HStore.getComparator()

 protected CellComparator
@@ -641,16 +639,18 @@
+(package private) CellComparator
+StoreFileScanner.getComparator()
+
 CellComparator
-ScanInfo.getComparator()
+StoreFile.getComparator()
+Get the comparator for comparing two cells.
+

 CellComparator
-Store.getComparator()
+Store.getComparator()

-CellComparator
-HStore.getComparator()
@@ -699,13 +699,13 @@
 protected void
-DateTieredStoreEngine.createComponents(org.apache.hadoop.conf.Configuration conf,
+DefaultStoreEngine.createComponents(org.apache.hadoop.conf.Configuration conf,
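
All of the fields and factory methods catalogued above traffic in the same comparator contract. A small usage sketch, assuming an HBase 2.x client where CellComparator.getInstance() is available; the cells are synthetic:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class ComparatorSketch {
  public static void main(String[] args) {
    Cell a = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    Cell b = new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    // Negative result: "row1" sorts before "row2" in HBase's total order.
    System.out.println(CellComparator.getInstance().compare(a, b));
  }
}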
[18/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

2018-03-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
index 12b5bec..a85184f 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
@@ -292,7 +292,7 @@ service.
 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
+AsyncMetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -301,7 +301,7 @@ service.
 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
+MetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result

-static RegionLocations
-MetaTableAccessor.getRegionLocations(Result r)
+private static Optional<RegionLocations>
+AsyncMetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocationList extracted from the result.

-private static Optional<RegionLocations>
-AsyncMetaTableAccessor.getRegionLocations(Result r)
+static RegionLocations
+MetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocationList extracted from the result.

 private static long
-MetaTableAccessor.getSeqNumDuringOpen(Result r, int replicaId)
+AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r, int replicaId)
 The latest seqnum that the server writing to meta observed when opening the region.

 private static long
-AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r, int replicaId)
+MetaTableAccessor.getSeqNumDuringOpen(Result r, int replicaId)
 The latest seqnum that the server writing to meta observed when opening the region.

-static ServerName
-MetaTableAccessor.getServerName(Result r, int replicaId)
+private static Optional<ServerName>
+AsyncMetaTableAccessor.getServerName(Result r, int replicaId)
 Returns a ServerName from catalog table Result.

-private static Optional<ServerName>
-AsyncMetaTableAccessor.getServerName(Result r, int replicaId)
+static ServerName
+MetaTableAccessor.getServerName(Result r, int replicaId)
 Returns a ServerName from catalog table Result.

+private static Optional<TableState>
+AsyncMetaTableAccessor.getTableState(Result r)
+
 static TableState
 MetaTableAccessor.getTableState(Result r)
 Decode table state from META Result.

-private static Optional<TableState>
-AsyncMetaTableAccessor.getTableState(Result r)
-
 void
 AsyncMetaTableAccessor.MetaTableScanResultConsumer.onNext(Result[] results,
@@ -465,13 +465,13 @@ service.
 ClientScanner.cache
 
 
-private https://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true;
 title="class or interface in java.util">DequeResult
-BatchScanResultCache.partialResults
-
-
 private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
 CompleteScanResultCache.partialResults
 
+
+private https://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true;
 title="class or interface in java.util">DequeResult
+BatchScanResultCache.partialResults
+
 
 private https://docs.oracle.com/javase/8/docs/api/java/util/Queue.html?is-external=true;
 title="class or interface in java.util">QueueResult
 AsyncTableResultScanner.queue
@@ -494,7 +494,7 @@ service.
 
 
 Result[]
-BatchScanResultCache.addAndGet(Result[]results,
+AllowPartialScanResultCache.addAndGet(Result[]results,
  booleanisHeartbeatMessage)
 
 
@@ -504,20 +504,24 @@ service.
 
 
 Result[]
-AllowPartialScanResultCache.addAndGet(Result[]results,
+BatchScanResultCache.addAndGet(Result[]results,
  booleanisHeartbeatMessage)
 
 
 Result
-HTable.append(Appendappend)
-
-
-Result
 Table.append(Appendappend)
 Appends values to one or more columns within a single 

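Table.append, whose row closes the hunk above, is the one public entry point in this table. A minimal usage sketch (hedged: assumes a stock hbase-client 2.x; table, family, and qualifier names are invented):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("demo"))) {
      // Atomically appends bytes to an existing cell value within a single row.
      Append append = new Append(Bytes.toBytes("row-1"));
      append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes("|event"));
      Result result = table.append(append);   // returns the new cell values
      System.out.println(result);
    }
  }
}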
[18/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

2018-03-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
index 21dd94d..f794fc9 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
@@ -106,7 +106,7 @@
 
 
 private RegionLocateType
-AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType
+AsyncSingleRequestRpcRetryingCaller.locateType
 
 
 RegionLocateType
@@ -114,7 +114,7 @@
 
 
 private RegionLocateType
-AsyncSingleRequestRpcRetryingCaller.locateType
+AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
index 195c3ee..f6e7bf3 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
@@ -230,13 +230,13 @@ service.

 private RegionLocator
-TableInputFormatBase.regionLocator
-The RegionLocator of the table.
-
+HFileOutputFormat2.TableInfo.regionLocator

 private RegionLocator
-HFileOutputFormat2.TableInfo.regionLocator
+TableInputFormatBase.regionLocator
+The RegionLocator of the table.
+
 
 
 
@@ -248,15 +248,15 @@ service.

+RegionLocator
+HFileOutputFormat2.TableInfo.getRegionLocator()
+
 protected RegionLocator
 TableInputFormatBase.getRegionLocator()
 Allows subclasses to get the RegionLocator.

-
-RegionLocator
-HFileOutputFormat2.TableInfo.getRegionLocator()
-
 
 
 

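Both fields shuffled above hold a RegionLocator, which client code obtains from a Connection. A short sketch under the same hbase-client 2.x assumption (table name invented):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("demo"))) {
      // Find which region (and which server) hosts a given row key.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row-1"));
      System.out.println(loc.getRegion().getRegionNameAsString()
          + " on " + loc.getServerName());
    }
  }
}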


[18/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

2018-03-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
index d9048c2..455c3d2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.html
@@ -469,7 +469,7 @@
 461      throw new FileNotFoundException(buildPath.toString());
 462    }
 463    if (LOG.isDebugEnabled()) {
-464      LOG.debug("Committing store file " + buildPath + " as " + dstPath);
+464      LOG.debug("Committing " + buildPath + " as " + dstPath);
 465    }
 466    return dstPath;
 467  }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index 7f2e325..32a8ed1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -3429,8 +3429,8 @@
 3421      LOG.warn("Not adding moved region record: " + encodedName + " to self.");
 3422      return;
 3423    }
-3424    LOG.info("Adding moved region record: "
-3425      + encodedName + " to " + destination + " as of " + closeSeqNum);
+3424    LOG.info("Adding " + encodedName + " move to " + destination + " record at close sequenceid=" +
+3425        closeSeqNum);
 3426    movedRegions.put(encodedName, new MovedRegionInfo(destination, closeSeqNum));
 3427  }
 3428

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index 7f2e325..32a8ed1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -3429,8 +3429,8 @@
 3421      LOG.warn("Not adding moved region record: " + encodedName + " to self.");
 3422      return;
 3423    }
-3424    LOG.info("Adding moved region record: "
-3425      + encodedName + " to " + destination + " as of " + closeSeqNum);
+3424    LOG.info("Adding " + encodedName + " move to " + destination + " record at close sequenceid=" +
+3425        closeSeqNum);
 3426    movedRegions.put(encodedName, new MovedRegionInfo(destination, closeSeqNum));
 3427  }
 3428

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index 7f2e325..32a8ed1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -3429,8 +3429,8 @@
 3421      LOG.warn("Not adding moved region record: " + encodedName + " to self.");
 3422      return;
 3423    }
-3424    LOG.info("Adding moved region record: "
-3425      + encodedName + " to " + destination + " as of " + closeSeqNum);
+3424    LOG.info("Adding " + encodedName + " move to " + destination + " record at close sequenceid=" +
+3425        closeSeqNum);
 3426    movedRegions.put(encodedName, new MovedRegionInfo(destination, closeSeqNum));
 3427  }
 3428

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
index 7f2e325..32a8ed1 100644
--- 

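The three identical hunks above are one change rendered into three inner-class pages of HRegionServer. The bookkeeping they touch is small enough to restate as a toy (Java 16+ records; MovedRegionInfo here is an invented stand-in, not the real inner class):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Toy sketch: a region server remembers where a region went at close time so
// that clients holding stale locations can be redirected.
public class MovedRegionsSketch {
  record MovedRegionInfo(String destination, long closeSeqNum) {}

  private final Map<String, MovedRegionInfo> movedRegions = new ConcurrentHashMap<>();

  void addToMovedRegions(String encodedName, String destination, long closeSeqNum) {
    movedRegions.put(encodedName, new MovedRegionInfo(destination, closeSeqNum));
  }

  MovedRegionInfo lookup(String encodedName) {
    return movedRegions.get(encodedName);
  }
}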
[18/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
index 74fbf67..33418d0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
@@ -27,287 +27,296 @@
 019package org.apache.hadoop.hbase.io.hfile.bucket;
 020
 021import java.io.File;
-022import java.io.FileNotFoundException;
-023import java.io.IOException;
-024import java.io.RandomAccessFile;
-025import java.nio.ByteBuffer;
-026import java.nio.channels.ClosedChannelException;
-027import java.nio.channels.FileChannel;
-028import java.util.Arrays;
-029import org.apache.hadoop.hbase.io.hfile.Cacheable;
-030import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-031import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-032import org.apache.hadoop.hbase.nio.ByteBuff;
-033import org.apache.hadoop.hbase.nio.SingleByteBuff;
-034import org.apache.hadoop.util.StringUtils;
-035import org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038
-039import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-040import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-041
-042/**
-043 * IO engine that stores data to a file on the local file system.
-044 */
-045@InterfaceAudience.Private
-046public class FileIOEngine implements IOEngine {
-047  private static final Logger LOG = LoggerFactory.getLogger(FileIOEngine.class);
-048  public static final String FILE_DELIMITER = ",";
-049  private final String[] filePaths;
-050  private final FileChannel[] fileChannels;
-051  private final RandomAccessFile[] rafs;
-052
-053  private final long sizePerFile;
-054  private final long capacity;
-055
-056  private FileReadAccessor readAccessor = new FileReadAccessor();
-057  private FileWriteAccessor writeAccessor = new FileWriteAccessor();
-058
-059  public FileIOEngine(long capacity, boolean maintainPersistence, String... filePaths)
-060      throws IOException {
-061    this.sizePerFile = capacity / filePaths.length;
-062    this.capacity = this.sizePerFile * filePaths.length;
-063    this.filePaths = filePaths;
-064    this.fileChannels = new FileChannel[filePaths.length];
-065    if (!maintainPersistence) {
-066      for (String filePath : filePaths) {
-067        File file = new File(filePath);
-068        if (file.exists()) {
-069          if (LOG.isDebugEnabled()) {
-070            LOG.debug("File " + filePath + " already exists. Deleting!!");
-071          }
-072          file.delete();
-073          // If deletion fails still we can manage with the writes
-074        }
-075      }
-076    }
-077    this.rafs = new RandomAccessFile[filePaths.length];
-078    for (int i = 0; i < filePaths.length; i++) {
-079      String filePath = filePaths[i];
-080      try {
-081        rafs[i] = new RandomAccessFile(filePath, "rw");
-082        long totalSpace = new File(filePath).getTotalSpace();
-083        if (totalSpace < sizePerFile) {
-084          // The next setting length will throw exception; logging this message
-085          // is just used for the detail reason of exception,
-086          String msg = "Only " + StringUtils.byteDesc(totalSpace)
-087              + " total space under " + filePath + ", not enough for requested "
-088              + StringUtils.byteDesc(sizePerFile);
-089          LOG.warn(msg);
-090        }
-091        rafs[i].setLength(sizePerFile);
-092        fileChannels[i] = rafs[i].getChannel();
-093        LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile)
-094            + ", on the path:" + filePath);
-095      } catch (IOException fex) {
-096        LOG.error("Failed allocating cache on " + filePath, fex);
-097        shutdown();
-098        throw fex;
-099      }
-100    }
-101  }
-102
-103  @Override
-104  public String toString() {
-105    return "ioengine=" + this.getClass().getSimpleName() + ", paths="
-106        + Arrays.asList(filePaths) + ", capacity=" + String.format("%,d", this.capacity);
-107  }
-108
-109  /**
-110   * File IO engine is always able to support persistent storage for the cache
-111   * @return true
-112   */
-113  @Override
-114  public boolean isPersistent() {
-115    return true;
-116  }
-117
-118  /**
-119   * Transfers data from file to the given byte buffer
-120   * @param offset The offset in the file where the first byte to be read
-121   * @param length The length of buffer that should be allocated for reading

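For orientation, the constructor shown in this hunk can be exercised directly, though FileIOEngine is @InterfaceAudience.Private and the BucketCache normally builds it from hbase.bucketcache.ioengine=file:... configuration. A hedged sketch (paths and sizes invented):

import org.apache.hadoop.hbase.io.hfile.bucket.FileIOEngine;

public class FileIOEngineSketch {
  public static void main(String[] args) throws Exception {
    // 64 MB of cache spread across two backing files; capacity is split evenly,
    // so each file is sized to 32 MB via RandomAccessFile.setLength().
    long capacity = 64L * 1024 * 1024;
    FileIOEngine engine =
        new FileIOEngine(capacity, false, "/tmp/bucket1.cache", "/tmp/bucket2.cache");
    System.out.println(engine);                  // ioengine=FileIOEngine, paths=..., capacity=...
    System.out.println(engine.isPersistent());   // always true for the file engine
  }
}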
[18/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

2018-03-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
index 05c0542..2d09bf8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
@@ -35,1393 +35,1419 @@
 027import java.util.HashSet;
 028import java.util.List;
 029import java.util.Map;
-030import java.util.Set;
-031import java.util.TreeMap;
-032import java.util.TreeSet;
-033import java.util.function.Function;
-034import java.util.regex.Matcher;
-035import org.apache.hadoop.fs.Path;
-036import org.apache.hadoop.hbase.Coprocessor;
-037import org.apache.hadoop.hbase.HConstants;
-038import org.apache.hadoop.hbase.TableName;
-039import org.apache.hadoop.hbase.exceptions.DeserializationException;
-040import org.apache.hadoop.hbase.security.User;
-041import org.apache.hadoop.hbase.util.Bytes;
-042import org.apache.yetus.audience.InterfaceAudience;
-043import org.slf4j.Logger;
-044import org.slf4j.LoggerFactory;
-045
-046import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-047import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-048
-049/**
-050 * @since 2.0.0
-051 */
-052@InterfaceAudience.Public
-053public class TableDescriptorBuilder {
-054  public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class);
-055  @InterfaceAudience.Private
-056  public static final String SPLIT_POLICY = "SPLIT_POLICY";
-057  private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
-058  /**
-059   * Used by HBase Shell interface to access this metadata
-060   * attribute which denotes the maximum size of the store file after which a
-061   * region split occurs.
-062   */
-063  @InterfaceAudience.Private
-064  public static final String MAX_FILESIZE = "MAX_FILESIZE";
-065  private static final Bytes MAX_FILESIZE_KEY
-066      = new Bytes(Bytes.toBytes(MAX_FILESIZE));
-067
-068  @InterfaceAudience.Private
-069  public static final String OWNER = "OWNER";
+030import java.util.Objects;
+031import java.util.Optional;
+032import java.util.Set;
+033import java.util.TreeMap;
+034import java.util.TreeSet;
+035import java.util.function.Function;
+036import java.util.regex.Matcher;
+037import java.util.regex.Pattern;
+038import org.apache.hadoop.hbase.Coprocessor;
+039import org.apache.hadoop.hbase.HConstants;
+040import org.apache.hadoop.hbase.TableName;
+041import org.apache.hadoop.hbase.exceptions.DeserializationException;
+042import org.apache.hadoop.hbase.security.User;
+043import org.apache.hadoop.hbase.util.Bytes;
+044import org.apache.yetus.audience.InterfaceAudience;
+045import org.slf4j.Logger;
+046import org.slf4j.LoggerFactory;
+047
+048import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+049import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+050
+051/**
+052 * @since 2.0.0
+053 */
+054@InterfaceAudience.Public
+055public class TableDescriptorBuilder {
+056  public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class);
+057  @InterfaceAudience.Private
+058  public static final String SPLIT_POLICY = "SPLIT_POLICY";
+059  private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
+060  /**
+061   * Used by HBase Shell interface to access this metadata
+062   * attribute which denotes the maximum size of the store file after which a
+063   * region split occurs.
+064   */
+065  @InterfaceAudience.Private
+066  public static final String MAX_FILESIZE = "MAX_FILESIZE";
+067  private static final Bytes MAX_FILESIZE_KEY
+068      = new Bytes(Bytes.toBytes(MAX_FILESIZE));
+069
 070  @InterfaceAudience.Private
-071  public static final Bytes OWNER_KEY
-072      = new Bytes(Bytes.toBytes(OWNER));
-073
-074  /**
-075   * Used by rest interface to access this metadata attribute
-076   * which denotes if the table is Read Only.
-077   */
-078  @InterfaceAudience.Private
-079  public static final String READONLY = "READONLY";
-080  private static final Bytes READONLY_KEY
-081      = new Bytes(Bytes.toBytes(READONLY));
-082
-083  /**
-084   * Used by HBase Shell interface to access this metadata
-085   * attribute which denotes if the table is compaction enabled.
-086   */
-087  @InterfaceAudience.Private
-088  public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED";
-089  private static final Bytes COMPACTION_ENABLED_KEY
-090      = new Bytes(Bytes.toBytes(COMPACTION_ENABLED));
-091
-092  /**
-093   * Used by HBase Shell interface to access this metadata
-094   * attribute which represents the maximum

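A short sketch of the public builder API this source backs (hedged: mirrors the stock 2.0 client API; names invented). MAX_FILESIZE above is the attribute that setMaxFileSize() populates:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Descriptors are immutable; all customization goes through the builder.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setMaxFileSize(10L * 1024 * 1024 * 1024)   // split regions past ~10 GB
        .build();
    System.out.println(desc.getTableName());
  }
}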
[18/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

2018-03-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.html
index 202ef3a..e8092b6 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -124,7 +124,7 @@ var activeTableTab = "activeTableTab";

 @InterfaceAudience.LimitedPrivate(value="Configuration")
-public class MasterFlushTableProcedureManager
+public class MasterFlushTableProcedureManager
 extends MasterProcedureManager

@@ -226,34 +226,42 @@ extends MasterProcedureManager

 void
+checkPermissions(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription desc,
+                 AccessChecker accessChecker,
+                 User user)
+Check for required permissions before executing the procedure.
+
+
+void
 execProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription desc)
 Execute a distributed procedure on cluster

 String
 getProcedureSignature()
 Return the unique signature of the procedure.

 void
 initialize(MasterServices master,
            MetricsMaster metricsMaster)
 Initialize a globally barriered procedure for master.

 boolean
 isProcedureDone(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription desc)
 Check if the procedure is finished successfully

 boolean
 isStopped()

 void
 stop(String why)
 Stop this service.
@@ -301,7 +309,7 @@ extends MasterProcedureManager

 FLUSH_TABLE_PROCEDURE_SIGNATURE
-public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE
+public static final String FLUSH_TABLE_PROCEDURE_SIGNATURE

 See Also:
 Constant Field Values
@@ -314,7 +322,7 @@ extends MasterProcedureManager

 FLUSH_TIMEOUT_MILLIS_KEY
-private static final String FLUSH_TIMEOUT_MILLIS_KEY
+private static final String FLUSH_TIMEOUT_MILLIS_KEY

 See Also:
 Constant Field Values
@@ -327,7 +335,7 @@ extends MasterProcedureManager

 FLUSH_TIMEOUT_MILLIS_DEFAULT
-private static final int FLUSH_TIMEOUT_MILLIS_DEFAULT
+private static final int FLUSH_TIMEOUT_MILLIS_DEFAULT

 See Also:
 Constant Field Values
@@ -340,7 +348,7 @@ extends MasterProcedureManager

 FLUSH_WAKE_MILLIS_KEY
-private static final String FLUSH_WAKE_MILLIS_KEY
+private static final String FLUSH_WAKE_MILLIS_KEY

 See Also:
 Constant Field Values
@@ -353,7 +361,7 @@ extends MasterProcedureManager

 FLUSH_WAKE_MILLIS_DEFAULT
-private static final int FLUSH_WAKE_MILLIS_DEFAULT
+private static final int FLUSH_WAKE_MILLIS_DEFAULT

 See Also:
 Constant Field Values
@@ -366,7 +374,7 @@ extends MasterProcedureManager

 FLUSH_PROC_POOL_THREADS_KEY
-private static final String FLUSH_PROC_POOL_THREADS_KEY
+private static final String FLUSH_PROC_POOL_THREADS_KEY

 See Also:
 Constant Field Values
@@ -379,7 +387,7 @@ extends MasterProcedureManager

 FLUSH_PROC_POOL_THREADS_DEFAULT
-private static final int FLUSH_PROC_POOL_THREADS_DEFAULT
+private static final int FLUSH_PROC_POOL_THREADS_DEFAULT

 See Also:
 Constant Field Values
@@ -392,7 +400,7 @@ extends MasterProcedureManager

 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG

@@ -401,7 +409,7 @@ extends MasterProcedureManager

 master
[18/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

2018-03-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
index 7edabda..08add92 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
@@ -137,9 +137,7 @@

 CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots()
-List completed snapshots.
-
+AsyncHBaseAdmin.listSnapshots()

 List<SnapshotDescription>
@@ -148,22 +146,22 @@

-CompletableFuture<List<SnapshotDescription>>
-RawAsyncHBaseAdmin.listSnapshots()
-
 List<SnapshotDescription>
 HBaseAdmin.listSnapshots()

+CompletableFuture<List<SnapshotDescription>>
+AsyncAdmin.listSnapshots()
+List completed snapshots.
+
 CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots(Pattern pattern)
-List all the completed snapshots matching the given pattern.
-
+AsyncHBaseAdmin.listSnapshots(Pattern pattern)

 List<SnapshotDescription>
@@ -172,16 +170,18 @@

-CompletableFuture<List<SnapshotDescription>>
-RawAsyncHBaseAdmin.listSnapshots(Pattern pattern)
-
 List<SnapshotDescription>
 HBaseAdmin.listSnapshots(Pattern pattern)

+CompletableFuture<List<SnapshotDescription>>
+AsyncAdmin.listSnapshots(Pattern pattern)
+List all the completed snapshots matching the given pattern.
+
 CompletableFuture<
[18/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

2018-03-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
index a1d64fc..b1c22cf 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
@@ -292,7 +292,7 @@ service.

 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r,
+MetaTableAccessor.getRegionLocation(Result r,
                    RegionInfo regionInfo,
                    int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -301,7 +301,7 @@ service.

 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r,
+AsyncMetaTableAccessor.getRegionLocation(Result r,
                    RegionInfo regionInfo,
                    int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -309,14 +309,14 @@ service.

-private static Optional<RegionLocations>
-AsyncMetaTableAccessor.getRegionLocations(Result r)
+static RegionLocations
+MetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocation List extracted from the result.

-static RegionLocations
-MetaTableAccessor.getRegionLocations(Result r)
+private static Optional<RegionLocations>
+AsyncMetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocation List extracted from the result.

@@ -334,42 +334,42 @@ service.

 private static long
-AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r,
+MetaTableAccessor.getSeqNumDuringOpen(Result r,
                     int replicaId)
 The latest seqnum that the server writing to meta observed when opening the region.

 private static long
-MetaTableAccessor.getSeqNumDuringOpen(Result r,
+AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r,
                     int replicaId)
 The latest seqnum that the server writing to meta observed when opening the region.

-private static Optional<ServerName>
-AsyncMetaTableAccessor.getServerName(Result r,
+static ServerName
+MetaTableAccessor.getServerName(Result r,
               int replicaId)
 Returns a ServerName from catalog table Result.

-static ServerName
-MetaTableAccessor.getServerName(Result r,
+private static Optional<ServerName>
+AsyncMetaTableAccessor.getServerName(Result r,
               int replicaId)
 Returns a ServerName from catalog table Result.

-private static Optional<TableState>
-AsyncMetaTableAccessor.getTableState(Result r)
-
 static TableState
 MetaTableAccessor.getTableState(Result r)
 Decode table state from META Result.

+private static Optional<TableState>
+AsyncMetaTableAccessor.getTableState(Result r)
+
 void
 AsyncMetaTableAccessor.MetaTableScanResultConsumer.onNext(Result[] results,
@@ -465,13 +465,13 @@ service.
 ClientScanner.cache

-private List<Result>
-CompleteScanResultCache.partialResults
-
 private Deque<Result>
 BatchScanResultCache.partialResults

+private List<Result>
+CompleteScanResultCache.partialResults
+
 private Queue<Result>
 AsyncTableResultScanner.queue
@@ -494,7 +494,7 @@ service.

 Result[]
-AllowPartialScanResultCache.addAndGet(Result[] results,
+BatchScanResultCache.addAndGet(Result[] results,
               boolean isHeartbeatMessage)

@@ -504,24 +504,20 @@ service.

 Result[]
-BatchScanResultCache.addAndGet(Result[] results,
+AllowPartialScanResultCache.addAndGet(Result[] results,
               boolean isHeartbeatMessage)

 Result
-Table.append(Append append)
-Appends values to one or more columns within a single row.
-
+HTable.append(Append append)
 
 
 

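Nearly every row in this table produces or consumes a Result. For orientation, a minimal Get round trip (hedged: assumes hbase-client 2.x; table, family, and qualifier names invented):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ResultSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("demo"))) {
      // A Result is the per-row container handed back by gets and scans.
      Result result = table.get(new Get(Bytes.toBytes("row-1")));
      byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      System.out.println(value == null ? "(no cell)" : Bytes.toString(value));
    }
  }
}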
[18/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

2018-03-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
index df5fa53..8fffb89 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CloseableVisitor.html
@@ -42,1927 +42,2060 @@
 034import java.util.TreeMap;
 035import java.util.regex.Matcher;
 036import java.util.regex.Pattern;
-037import org.apache.hadoop.conf.Configuration;
-038import org.apache.hadoop.hbase.Cell.Type;
-039import org.apache.hadoop.hbase.client.Connection;
-040import org.apache.hadoop.hbase.client.ConnectionFactory;
-041import org.apache.hadoop.hbase.client.Consistency;
-042import org.apache.hadoop.hbase.client.Delete;
-043import org.apache.hadoop.hbase.client.Get;
-044import org.apache.hadoop.hbase.client.Mutation;
-045import org.apache.hadoop.hbase.client.Put;
-046import org.apache.hadoop.hbase.client.RegionInfo;
-047import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-048import org.apache.hadoop.hbase.client.RegionLocator;
-049import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-050import org.apache.hadoop.hbase.client.RegionServerCallable;
-051import org.apache.hadoop.hbase.client.Result;
-052import org.apache.hadoop.hbase.client.ResultScanner;
-053import org.apache.hadoop.hbase.client.Scan;
-054import org.apache.hadoop.hbase.client.Table;
-055import org.apache.hadoop.hbase.client.TableState;
-056import org.apache.hadoop.hbase.exceptions.DeserializationException;
-057import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-058import org.apache.hadoop.hbase.master.RegionState;
-059import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-060import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-061import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-062import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-063import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
-064import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-065import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
-066import org.apache.hadoop.hbase.util.Bytes;
-067import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-068import org.apache.hadoop.hbase.util.ExceptionUtil;
-069import org.apache.hadoop.hbase.util.Pair;
-070import org.apache.hadoop.hbase.util.PairOfSameType;
-071import org.apache.yetus.audience.InterfaceAudience;
-072import org.slf4j.Logger;
-073import org.slf4j.LoggerFactory;
-074
-075import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-076
-077/**
-078 * <p>
-079 * Read/write operations on region and assignment information store in <code>hbase:meta</code>.
-080 * </p>
+037import java.util.stream.Collectors;
+038import java.util.stream.Stream;
+039import org.apache.hadoop.conf.Configuration;
+040import org.apache.hadoop.hbase.Cell.Type;
+041import org.apache.hadoop.hbase.client.Connection;
+042import org.apache.hadoop.hbase.client.ConnectionFactory;
+043import org.apache.hadoop.hbase.client.Consistency;
+044import org.apache.hadoop.hbase.client.Delete;
+045import org.apache.hadoop.hbase.client.Get;
+046import org.apache.hadoop.hbase.client.Mutation;
+047import org.apache.hadoop.hbase.client.Put;
+048import org.apache.hadoop.hbase.client.RegionInfo;
+049import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+050import org.apache.hadoop.hbase.client.RegionLocator;
+051import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+052import org.apache.hadoop.hbase.client.RegionServerCallable;
+053import org.apache.hadoop.hbase.client.Result;
+054import org.apache.hadoop.hbase.client.ResultScanner;
+055import org.apache.hadoop.hbase.client.Scan;
+056import org.apache.hadoop.hbase.client.Table;
+057import org.apache.hadoop.hbase.client.TableState;
+058import org.apache.hadoop.hbase.exceptions.DeserializationException;
+059import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+060import org.apache.hadoop.hbase.master.RegionState;
+061import org.apache.hadoop.hbase.master.RegionState.State;
+062import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+063import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+064import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
+065import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+066import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+067import

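The page being diffed documents MetaTableAccessor.CloseableVisitor, a Visitor that is also Closeable. A hedged sketch of the visitor contract as it stood in this era (method signatures assumed from the 2.0 API):

import java.io.IOException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Result;

// visit() sees one hbase:meta row at a time and returns false to stop the scan;
// close() runs when the scan finishes.
public class CountingVisitor implements MetaTableAccessor.CloseableVisitor {
  private long rows;

  @Override
  public boolean visit(Result r) throws IOException {
    rows++;
    return true;   // keep scanning
  }

  @Override
  public void close() throws IOException {
    System.out.println("visited " + rows + " meta rows");
  }
}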
[18/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

2018-03-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
--
diff --git 
a/apidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html 
b/apidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
index 535d98b..49f5581 100644
--- a/apidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
+++ b/apidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
@@ -97,7 +97,7 @@ var activeTableTab = "activeTableTab";

-java.lang.Object (linked to http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)
+java.lang.Object (linked to https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)

 org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder
@@ -111,7 +111,7 @@ var activeTableTab = "activeTableTab";

 @InterfaceAudience.Public
 public class ColumnFamilyDescriptorBuilder
-extends Object (linked to http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)
+extends Object (linked to https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)

 Since:
 2.0.0
@@ -256,7 +256,7 @@ extends Object

-static String (linked to http://docs.oracle.com/javase/8/docs/api/java/lang/String.html)
+static String (linked to https://docs.oracle.com/javase/8/docs/api/java/lang/String.html)
 NEW_VERSION_BEHAVIOR

@@ -287,16 +287,16 @@ extends Object
 getCompressionType()

-static Map<String,String> (java.util types linked to http://docs.oracle.com)
+static Map<String,String> (java.util types linked to https://docs.oracle.com)
 getDefaultValues()

-String (linked to http://docs.oracle.com/javase/8/docs/api/java/lang/String.html)
+String (linked to https://docs.oracle.com/javase/8/docs/api/java/lang/String.html)
 getNameAsString()

 static org.apache.hadoop.hbase.util.PrettyPrinter.Unit
-getUnit(String key) (String linked to http://docs.oracle.com)
+getUnit(String key) (String linked to https://docs.oracle.com)

 static byte[]
@@ -316,7 +316,7 @@ extends Object

 static ColumnFamilyDescriptor
-of(String name) (String linked to http://docs.oracle.com)
+of(String name) (String linked to https://docs.oracle.com)

 static ColumnFamilyDescriptor
@@ -324,7 +324,7 @@ extends Object

 ColumnFamilyDescriptorBuilder
-removeConfiguration(String key) (String linked to http://docs.oracle.com)
+removeConfiguration(String key) (String linked to https://docs.oracle.com)

 ColumnFamilyDescriptorBuilder
@@ -364,8 +364,8 @@ extends Object

 ColumnFamilyDescriptorBuilder
-setConfiguration(String key, String value) (String linked to http://docs.oracle.com)
+setConfiguration(String key, String value) (String linked to https://docs.oracle.com)

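The of(String) and newBuilder(...) factories in the method table are the usual entry points. A small sketch (hedged: assumes hbase-client 2.x):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilySketch {
  public static void main(String[] args) {
    // of(String) gives an all-defaults family; newBuilder() customizes one.
    ColumnFamilyDescriptor plain = ColumnFamilyDescriptorBuilder.of("cf");
    ColumnFamilyDescriptor tuned = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(3)
        .build();
    System.out.println(plain.getNameAsString() + " / " + tuned.getNameAsString());
  }
}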
[18/51] [partial] hbase-site git commit: Published site at .

2018-02-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/client/ClusterConnection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ClusterConnection.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ClusterConnection.html
index 25fa087..c1bc8ac 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ClusterConnection.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ClusterConnection.html
@@ -29,332 +29,312 @@
 021
 022import java.io.IOException;
 023import java.util.List;
-024
-025import org.apache.hadoop.conf.Configuration;
-026import org.apache.hadoop.hbase.HRegionLocation;
-027import org.apache.hadoop.hbase.MasterNotRunningException;
-028import org.apache.hadoop.hbase.RegionLocations;
-029import org.apache.hadoop.hbase.ServerName;
-030import org.apache.hadoop.hbase.TableName;
-031import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-032import org.apache.yetus.audience.InterfaceAudience;
-033import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-034import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+024import org.apache.hadoop.conf.Configuration;
+025import org.apache.hadoop.hbase.HRegionLocation;
+026import org.apache.hadoop.hbase.MasterNotRunningException;
+027import org.apache.hadoop.hbase.RegionLocations;
+028import org.apache.hadoop.hbase.ServerName;
+029import org.apache.hadoop.hbase.TableName;
+030import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+031import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
+032import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+033import org.apache.yetus.audience.InterfaceAudience;
+034
 035import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 036import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-037import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
-038
-039/** Internal methods on Connection that should not be used by user code. */
-040@InterfaceAudience.Private
-041// NOTE: Although this class is public, this class is meant to be used directly from internal
-042// classes and unit tests only.
-043public interface ClusterConnection extends Connection {
-044
-045  /**
-046   * Key for configuration in Configuration whose value is the class we implement making a
-047   * new Connection instance.
-048   */
-049  String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl";
-050
-051  /**
-052   * @return - true if the master server is running
-053   * @deprecated this has been deprecated without a replacement
-054   */
-055  @Deprecated
-056  boolean isMasterRunning()
-057      throws MasterNotRunningException, ZooKeeperConnectionException;
-058
-059  /**
-060   * Use this api to check if the table has been created with the specified number of
-061   * splitkeys which was used while creating the given table.
-062   * Note : If this api is used after a table's region gets splitted, the api may return
-063   * false.
-064   * @param tableName
-065   *          tableName
-066   * @param splitKeys
-067   *          splitKeys used while creating table
-068   * @throws IOException
-069   *           if a remote or network exception occurs
-070   */
-071  boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws
-072      IOException;
-073
-074  /**
-075   * A table that isTableEnabled == false and isTableDisabled == false
-076   * is possible. This happens when a table has a lot of regions
-077   * that must be processed.
-078   * @param tableName table name
-079   * @return true if the table is enabled, false otherwise
-080   * @throws IOException if a remote or network exception occurs
-081   */
-082  boolean isTableEnabled(TableName tableName) throws IOException;
-083
-084  /**
-085   * @param tableName table name
-086   * @return true if the table is disabled, false otherwise
-087   * @throws IOException if a remote or network exception occurs
-088   */
-089  boolean isTableDisabled(TableName tableName) throws IOException;
-090
-091  /**
-092   * Retrieve TableState, represent current table state.
-093   * @param tableName table state for
-094   * @return state of the table
-095   */
-096  TableState getTableState(TableName tableName) throws IOException;
-097
-098  /**
-099   * Find the location of the region of <i>tableName</i> that <i>row</i>
-100   * lives in.
-101   * @param tableName name of the table <i>row</i> is in
-102   * @param row row key you're trying to find the region of
-103   * @return HRegionLocation that describes where to find the region in
-104   *   question
-105   * @throws IOException if a remote or network exception occurs
-106   */
-107  HRegionLocation locateRegion(final TableName tableName,
-108      final byte [] row) throws IOException;
-109
-110  /**

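A fragment making the interface above concrete; note the class's own comment marks it internal-only, so this is illustrative rather than recommended usage (user code should reach the same information through Admin and RegionLocator):

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.TableState;

public class ClusterConnectionSketch {
  static void describe(ClusterConnection conn, TableName table, byte[] row)
      throws IOException {
    TableState state = conn.getTableState(table);        // current table state from meta
    HRegionLocation loc = conn.locateRegion(table, row); // region hosting the row
    System.out.println(state + " / " + loc);
  }
}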
[18/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
index f6091f6..3ab770f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionPipeline.html
@@ -146,193 +146,208 @@
 138      if (segment != null) newDataSize = segment.keySize();
 139      long dataSizeDelta = suffixDataSize - newDataSize;
 140      long suffixHeapSize = getSegmentsHeapSize(suffix);
-141      long newHeapSize = 0;
-142      if (segment != null) newHeapSize = segment.heapSize();
-143      long heapSizeDelta = suffixHeapSize - newHeapSize;
-144      region.addMemStoreSize(new MemStoreSizing(-dataSizeDelta, -heapSizeDelta));
-145      LOG.debug("Suffix data size={}, new segment data size={}, suffix heap size={}," +
-146          "new segment heap size={}",
-147          suffixDataSize,
-148          newDataSize,
-149          suffixHeapSize,
-150          newHeapSize);
-151    }
-152    return true;
-153  }
-154
-155  private static long getSegmentsHeapSize(List<? extends Segment> list) {
-156    long res = 0;
-157    for (Segment segment : list) {
-158      res += segment.heapSize();
-159    }
-160    return res;
-161  }
-162
-163  private static long getSegmentsKeySize(List<? extends Segment> list) {
-164    long res = 0;
-165    for (Segment segment : list) {
-166      res += segment.keySize();
-167    }
-168    return res;
-169  }
-170
-171  /**
-172   * If the caller holds the current version, go over the the pipeline and try to flatten each
-173   * segment. Flattening is replacing the ConcurrentSkipListMap based CellSet to CellArrayMap based.
-174   * Flattening of the segment that initially is not based on ConcurrentSkipListMap has no effect.
-175   * Return after one segment is successfully flatten.
-176   *
-177   * @return true iff a segment was successfully flattened
-178   */
-179  public boolean flattenOneSegment(long requesterVersion,
-180      CompactingMemStore.IndexType idxType,
-181      MemStoreCompactionStrategy.Action action) {
-182
-183    if (requesterVersion != version) {
-184      LOG.warn("Segment flattening failed, because versions do not match. Requester version: "
-185          + requesterVersion + ", actual version: " + version);
-186      return false;
-187    }
-188
-189    synchronized (pipeline){
-190      if (requesterVersion != version) {
-191        LOG.warn("Segment flattening failed, because versions do not match");
-192        return false;
-193      }
-194      int i = 0;
-195      for (ImmutableSegment s : pipeline) {
-196        if ( s.canBeFlattened() ) {
-197          MemStoreSizing newMemstoreAccounting = new MemStoreSizing(); // the size to be updated
-198          ImmutableSegment newS = SegmentFactory.instance().createImmutableSegmentByFlattening(
-199              (CSLMImmutableSegment)s, idxType, newMemstoreAccounting, action);
-200          replaceAtIndex(i, newS);
-201          if (region != null) {
-202            // update the global memstore size counter
-203            // upon flattening there is no change in the data size
-204            region.addMemStoreSize(new MemStoreSize(0, newMemstoreAccounting.getHeapSize()));
-205          }
-206          LOG.debug("Compaction pipeline segment {} flattened", s);
-207          return true;
-208        }
-209        i++;
+141      long suffixOffHeapSize = getSegmentsOffHeapSize(suffix);
+142      long newHeapSize = 0;
+143      long newOffHeapSize = 0;
+144      if (segment != null) {
+145        newHeapSize = segment.heapSize();
+146        newOffHeapSize = segment.offHeapSize();
+147      }
+148      long offHeapSizeDelta = suffixOffHeapSize - newOffHeapSize;
+149      long heapSizeDelta = suffixHeapSize - newHeapSize;
+150      region.addMemStoreSize(new MemStoreSize(-dataSizeDelta, -heapSizeDelta, -offHeapSizeDelta));
+151      LOG.debug("Suffix data size={}, new segment data size={}, "
+152          + "suffix heap size={}," + "new segment heap size={}"
+153          + "suffix off heap size={}," + "new segment off heap size={}"
+154          , suffixDataSize
+155          , newDataSize
+156          , suffixHeapSize
+157          , newHeapSize
+158          , suffixOffHeapSize
+159          , newOffHeapSize);
+160    }
+161    return true;
+162  }
+163
+164  private static long getSegmentsHeapSize(List<? extends Segment> list) {
+165    long res = 0;
+166    for (Segment segment : list) {
+167      res += segment.heapSize();
+168    }
+169    return res;
+170  }
+171
+172  private static long getSegmentsOffHeapSize(List<? extends Segment> list) {
+173

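The arithmetic in the new hunk is easy to lose in the diff noise: swapping a pipeline suffix for one merged segment decrements the region counters by (suffix total - new segment total), now for data, heap, and off-heap sizes alike. A toy restatement (Java 16+ records; Seg and the counter array are invented stand-ins, not HBase classes):

import java.util.List;

public class SwapAccountingSketch {
  record Seg(long dataSize, long heapSize, long offHeapSize) {}

  static void onSwap(List<Seg> suffix, Seg merged, long[] counters) {
    long dataDelta = suffix.stream().mapToLong(Seg::dataSize).sum()
        - (merged == null ? 0 : merged.dataSize());
    long heapDelta = suffix.stream().mapToLong(Seg::heapSize).sum()
        - (merged == null ? 0 : merged.heapSize());
    long offHeapDelta = suffix.stream().mapToLong(Seg::offHeapSize).sum()
        - (merged == null ? 0 : merged.offHeapSize());
    // Mirrors region.addMemStoreSize(new MemStoreSize(-dataDelta, -heapDelta, -offHeapDelta)).
    counters[0] -= dataDelta;
    counters[1] -= heapDelta;
    counters[2] -= offHeapDelta;
  }
}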
[18/51] [partial] hbase-site git commit: Published site at .

2018-02-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
index bd13b53..802b925 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
@@ -900,7600 +900,7598 @@
 892if 
(this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
 893  status.setStatus("Writing region 
info on filesystem");
 894        fs.checkRegionInfoOnFilesystem();
-895      } else {
-896        if (LOG.isDebugEnabled()) {
-897          LOG.debug("Skipping creation of .regioninfo file for " + this.getRegionInfo());
-898        }
-899      }
-900
-901    // Initialize all the HStores
-902    status.setStatus("Initializing all the Stores");
-903    long maxSeqId = initializeStores(reporter, status);
-904    this.mvcc.advanceTo(maxSeqId);
-905    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-906      Collection<HStore> stores = this.stores.values();
-907      try {
-908        // update the stores that we are replaying
-909        stores.forEach(HStore::startReplayingFromWAL);
-910        // Recover any edits if available.
-911        maxSeqId = Math.max(maxSeqId,
-912          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-913        // Make sure mvcc is up to max.
-914        this.mvcc.advanceTo(maxSeqId);
-915      } finally {
-916        // update the stores that we are done replaying
-917        stores.forEach(HStore::stopReplayingFromWAL);
-918      }
-919    }
-920    this.lastReplayedOpenRegionSeqId = maxSeqId;
+895      }
+896
+897    // Initialize all the HStores
+898    status.setStatus("Initializing all the Stores");
+899    long maxSeqId = initializeStores(reporter, status);
+900    this.mvcc.advanceTo(maxSeqId);
+901    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+902      Collection<HStore> stores = this.stores.values();
+903      try {
+904        // update the stores that we are replaying
+905        stores.forEach(HStore::startReplayingFromWAL);
+906        // Recover any edits if available.
+907        maxSeqId = Math.max(maxSeqId,
+908          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+909        // Make sure mvcc is up to max.
+910        this.mvcc.advanceTo(maxSeqId);
+911      } finally {
+912        // update the stores that we are done replaying
+913        stores.forEach(HStore::stopReplayingFromWAL);
+914      }
+915    }
+916    this.lastReplayedOpenRegionSeqId = maxSeqId;
+917
+918    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+919    this.writestate.flushRequested = false;
+920    this.writestate.compacting.set(0);
 921
-922    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-923    this.writestate.flushRequested = false;
-924    this.writestate.compacting.set(0);
-925
-926    if (this.writestate.writesEnabled) {
-927      // Remove temporary data left over from old regions
-928      status.setStatus("Cleaning up temporary data from old regions");
-929      fs.cleanupTempDir();
-930    }
-931
-932    if (this.writestate.writesEnabled) {
-933      status.setStatus("Cleaning up detritus from prior splits");
-934      // Get rid of any splits or merges that were lost in-progress.  Clean out
-935      // these directories here on open.  We may be opening a region that was
-936      // being split but we crashed in the middle of it all.
-937      fs.cleanupAnySplitDetritus();
-938      fs.cleanupMergesDir();
-939    }
-940
-941    // Initialize split policy
-942    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-943
-944    // Initialize flush policy
-945    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-946
-947    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-948    for (HStore store: stores.values()) {
-949      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-950    }
-951
-952    // Use maximum of log sequenceid or that which was found in stores
-953    // (particularly if no recovered edits, seqid will be -1).
-954    long nextSeqid = maxSeqId;
-955    if (this.writestate.writesEnabled) {
-956      nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
-957          this.fs.getRegionDir(), nextSeqid, 1);
-958    } else {
-959      nextSeqid++;
-960    }
-961
-962    LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
-963      "; next sequenceid=" + nextSeqid);
+922    if (this.writestate.writesEnabled) {
+923
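The hunk above brackets recovered-edits replay between startReplayingFromWAL and stopReplayingFromWAL, with the stop call in a finally block so stores never stay in replay mode after a failure. A minimal, self-contained sketch of that bracket pattern; the Store type and the replay body here are stand-ins, not HBase API:

import java.util.List;

// Sketch of the replay bracket used in HRegion#initialize above.
// "Store" stands in for HStore; only the try/finally shape matters.
public class ReplayBracket {
  static class Store {
    private boolean replaying;
    void startReplayingFromWAL() { replaying = true; }
    void stopReplayingFromWAL() { replaying = false; }
    boolean isReplaying() { return replaying; }
  }

  static long replayRecoveredEdits(List<Store> stores, long maxSeqId) {
    try {
      // Put every store into replay mode before touching recovered edits.
      stores.forEach(Store::startReplayingFromWAL);
      // ... replay edits here; pretend we observed a higher sequence id ...
      return Math.max(maxSeqId, maxSeqId + 1);
    } finally {
      // Always leave replay mode, even if replay threw.
      stores.forEach(Store::stopReplayingFromWAL);
    }
  }

  public static void main(String[] args) {
    List<Store> stores = List.of(new Store(), new Store());
    long next = replayRecoveredEdits(stores, 41);
    System.out.println("next seq id = " + next
        + ", still replaying = " + stores.get(0).isReplaying()); // 42, false
  }
}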

[18/51] [partial] hbase-site git commit: Published site at .

2018-02-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
index 65795ae..463f4fa 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
@@ -488,15 +488,15 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 static Filter
-ColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+SingleColumnValueExcludeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-ColumnCountGetFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ValueFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-RowFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+FamilyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
@@ -506,63 +506,63 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 static Filter
-FirstKeyOnlyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-TimestampsFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+PageFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-ValueFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+RowFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-KeyOnlyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnRangeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-FamilyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnCountGetFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-QualifierFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+MultipleColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-ColumnRangeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnPaginationFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
[18/51] [partial] hbase-site git commit: Published site at .

2018-02-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
index fb9bdb3..4584cda 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
@@ -137,7 +137,9 @@

 CompletableFuture<List<SnapshotDescription>>
-AsyncHBaseAdmin.listSnapshots()
+AsyncAdmin.listSnapshots()
+List completed snapshots.
+

 List<SnapshotDescription>
@@ -146,22 +148,22 @@

-List<SnapshotDescription>
-HBaseAdmin.listSnapshots()
+CompletableFuture<List<SnapshotDescription>>
+RawAsyncHBaseAdmin.listSnapshots()

-CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots()
-List completed snapshots.
-
+List<SnapshotDescription>
+HBaseAdmin.listSnapshots()

 CompletableFuture<List<SnapshotDescription>>
-RawAsyncHBaseAdmin.listSnapshots()
+AsyncHBaseAdmin.listSnapshots()

 CompletableFuture<List<SnapshotDescription>>
-AsyncHBaseAdmin.listSnapshots(Pattern pattern)
+AsyncAdmin.listSnapshots(Pattern pattern)
+List all the completed snapshots matching the given pattern.
+

 List<SnapshotDescription>
@@ -170,18 +172,16 @@

-List<SnapshotDescription>
-HBaseAdmin.listSnapshots(Pattern pattern)
+CompletableFuture<List<SnapshotDescription>>
+RawAsyncHBaseAdmin.listSnapshots(Pattern pattern)

-CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots(Pattern pattern)
-List all the completed snapshots matching the given pattern.
-
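The reshuffled rows describe one pair of contracts: the blocking Admin.listSnapshots() returns List<SnapshotDescription>, while AsyncAdmin.listSnapshots() returns a CompletableFuture of the same list. A hedged sketch of both call shapes; it assumes an HBase 2.x client and a reachable cluster configured via hbase-site.xml:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Blocking flavor: the List<SnapshotDescription> comes back directly.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      List<SnapshotDescription> snaps = admin.listSnapshots();
      snaps.forEach(s -> System.out.println("sync: " + s.getName()));
    }

    // Async flavor: the same list, wrapped in a CompletableFuture.
    try (AsyncConnection conn =
             ConnectionFactory.createAsyncConnection(conf).get()) {
      conn.getAdmin().listSnapshots()
          .thenAccept(snaps ->
              snaps.forEach(s -> System.out.println("async: " + s.getName())))
          .join();
    }
  }
}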

[18/51] [partial] hbase-site git commit: Published site at .

2018-02-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
index 80108a2..a07a830 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
@@ -144,17 +144,15 @@

-static HColumnDescriptor
-HColumnDescriptor.parseFrom(byte[] bytes)
-Deprecated.
-
-
-
 static HTableDescriptor
 HTableDescriptor.parseFrom(byte[] bytes)
 Deprecated.

+
+static ClusterId
+ClusterId.parseFrom(byte[] bytes)
+

 static HRegionInfo
 HRegionInfo.parseFrom(byte[] bytes)
@@ -165,8 +163,10 @@

-static ClusterId
-ClusterId.parseFrom(byte[] bytes)
+static HColumnDescriptor
+HColumnDescriptor.parseFrom(byte[] bytes)
+Deprecated.
+

 static SplitLogTask
@@ -220,17 +220,17 @@
 TableDescriptorBuilder.ModifyableTableDescriptor.parseFrom(byte[] bytes)

+static RegionInfo
+RegionInfo.parseFrom(byte[] bytes)
+
+
 static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.parseFrom(byte[] pbBytes)

-
 private static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.parseFrom(byte[] bytes)

-
-static RegionInfo
-RegionInfo.parseFrom(byte[] bytes)
-

 static RegionInfo
 RegionInfo.parseFrom(byte[] bytes,
@@ -305,111 +305,111 @@
 ByteArrayComparable.parseFrom(byte[] pbBytes)

-static ColumnPrefixFilter
-ColumnPrefixFilter.parseFrom(byte[] pbBytes)
+static SingleColumnValueExcludeFilter
+SingleColumnValueExcludeFilter.parseFrom(byte[] pbBytes)

-static ColumnCountGetFilter
-ColumnCountGetFilter.parseFrom(byte[] pbBytes)
+static ValueFilter
+ValueFilter.parseFrom(byte[] pbBytes)

-static RowFilter
-RowFilter.parseFrom(byte[] pbBytes)
+static SkipFilter
+SkipFilter.parseFrom(byte[] pbBytes)

-static FuzzyRowFilter
-FuzzyRowFilter.parseFrom(byte[] pbBytes)
+static FamilyFilter
+FamilyFilter.parseFrom(byte[] pbBytes)

-static BinaryComparator
-BinaryComparator.parseFrom(byte[] pbBytes)
+static BinaryPrefixComparator
+BinaryPrefixComparator.parseFrom(byte[] pbBytes)

-static RegexStringComparator
-RegexStringComparator.parseFrom(byte[] pbBytes)
+static NullComparator
+NullComparator.parseFrom(byte[] pbBytes)

-static Filter
-Filter.parseFrom(byte[] pbBytes)
-Concrete implementers can signal a failure condition in their code by throwing an IOException.
-
+static BigDecimalComparator
+BigDecimalComparator.parseFrom(byte[] pbBytes)

-static RandomRowFilter
-RandomRowFilter.parseFrom(byte[] pbBytes)
+static ColumnPrefixFilter
+ColumnPrefixFilter.parseFrom(byte[] pbBytes)

-static FirstKeyOnlyFilter
-FirstKeyOnlyFilter.parseFrom(byte[] pbBytes)
+static PageFilter
+PageFilter.parseFrom(byte[] pbBytes)

-static SkipFilter
-SkipFilter.parseFrom(byte[] pbBytes)
+static BitComparator
+BitComparator.parseFrom(byte[] pbBytes)

-static BinaryPrefixComparator
-BinaryPrefixComparator.parseFrom(byte[] pbBytes)
+static RowFilter
+RowFilter.parseFrom(byte[] pbBytes)

-static TimestampsFilter
-TimestampsFilter.parseFrom(byte[] pbBytes)
+static ColumnRangeFilter
+ColumnRangeFilter.parseFrom(byte[] pbBytes)

-static ValueFilter
-ValueFilter.parseFrom(byte[] pbBytes)
+static ColumnCountGetFilter
+ColumnCountGetFilter.parseFrom(byte[] pbBytes)

-static KeyOnlyFilter
-KeyOnlyFilter.parseFrom(byte[] pbBytes)
+static SubstringComparator
+SubstringComparator.parseFrom(byte[] pbBytes)

-static FamilyFilter
-FamilyFilter.parseFrom(byte[] pbBytes)
+static MultipleColumnPrefixFilter
+MultipleColumnPrefixFilter.parseFrom(byte[] pbBytes)

-static QualifierFilter
-QualifierFilter.parseFrom(byte[] pbBytes)
+static ColumnPaginationFilter
+ColumnPaginationFilter.parseFrom(byte[] pbBytes)

-static FilterList
-FilterList.parseFrom(byte[] pbBytes)
+static DependentColumnFilter
+DependentColumnFilter.parseFrom(byte[] pbBytes)

-static BigDecimalComparator
-BigDecimalComparator.parseFrom(byte[] pbBytes)
+static BinaryComparator
+BinaryComparator.parseFrom(byte[] pbBytes)

-static ColumnRangeFilter
-ColumnRangeFilter.parseFrom(byte[] pbBytes)
+static InclusiveStopFilter
+InclusiveStopFilter.parseFrom(byte[] pbBytes)

-static ColumnPaginationFilter
-ColumnPaginationFilter.parseFrom(byte[] pbBytes)
+static KeyOnlyFilter
+KeyOnlyFilter.parseFrom(byte[] pbBytes)

-static SubstringComparator
-SubstringComparator.parseFrom(byte[] pbBytes)
+static MultiRowRangeFilter
+MultiRowRangeFilter.parseFrom(byte[] pbBytes)

-static WhileMatchFilter
-WhileMatchFilter.parseFrom(byte[] pbBytes)
+static Filter
+Filter.parseFrom(byte[] pbBytes)
[18/51] [partial] hbase-site git commit: Published site at .

2018-02-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
index e3d9f70..35f0e35 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
@@ -208,9 +208,9 @@ service.

-default ResultScanner
-AsyncTable.getScanner(byte[] family)
-Gets a scanner on the current table for the given family.
+ResultScanner
+HTable.getScanner(byte[] family)
+The underlying HTable must not be closed.

@@ -220,16 +220,16 @@ service.

-ResultScanner
-HTable.getScanner(byte[] family)
-The underlying HTable must not be closed.
+default ResultScanner
+AsyncTable.getScanner(byte[] family)
+Gets a scanner on the current table for the given family.

-default ResultScanner
-AsyncTable.getScanner(byte[] family,
+ResultScanner
+HTable.getScanner(byte[] family,
                   byte[] qualifier)
-Gets a scanner on the current table for the given family and qualifier.
+The underlying HTable must not be closed.

@@ -240,37 +240,37 @@ service.

-ResultScanner
-HTable.getScanner(byte[] family,
+default ResultScanner
+AsyncTable.getScanner(byte[] family,
                   byte[] qualifier)
-The underlying HTable must not be closed.
+Gets a scanner on the current table for the given family and qualifier.

 ResultScanner
-AsyncTable.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
-
+RawAsyncTableImpl.getScanner(Scan scan)

 ResultScanner
-Table.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
+HTable.getScanner(Scan scan)
+The underlying HTable must not be closed.

 ResultScanner
-AsyncTableImpl.getScanner(Scan scan)
+Table.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the Scan object.
+

 ResultScanner
-RawAsyncTableImpl.getScanner(Scan scan)
+AsyncTableImpl.getScanner(Scan scan)

 ResultScanner
-HTable.getScanner(Scan scan)
-The underlying HTable must not be closed.
+AsyncTable.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the Scan object.
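All of these overloads funnel into the same usage: build a Scan, obtain a ResultScanner, and close it when done. A hedged sketch of the blocking Table flavor; the table name 't1' and family 'cf' are hypothetical, and it assumes an HBase 2.x client with cluster settings in hbase-site.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("t1"));
         // ResultScanner is Closeable; try-with-resources releases it.
         ResultScanner scanner =
             table.getScanner(new Scan().addFamily(Bytes.toBytes("cf")))) {
      for (Result r : scanner) {
        System.out.println(Bytes.toString(r.getRow()));
      }
    }
  }
}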
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
index b1d1cef..d730879 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
@@ -106,11 +106,11 @@
 
 
 RetriesExhaustedWithDetailsException
-AsyncRequestFuture.getErrors()
+AsyncRequestFutureImpl.getErrors()
 
 
 RetriesExhaustedWithDetailsException
-AsyncRequestFutureImpl.getErrors()
+AsyncRequestFuture.getErrors()
 
 
 (package private) RetriesExhaustedWithDetailsException

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
index 0a290e1..9642faa 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
@@ -234,28 +234,36 @@

+T
+RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable<T> callable,
+                  int callTimeout)
+
 T
 RpcRetryingCaller.callWithoutRetries(RetryingCallable<T> callable,
                  int callTimeout)
 Call the server once only.

 T
-RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable<T> callable,
-                  int callTimeout)
+RpcRetryingCallerImpl.callWithRetries(RetryingCallable<T> callable,
+                  int callTimeout)

 T
 RpcRetryingCaller.callWithRetries(RetryingCallable<T> callable,
                  int callTimeout)
 Retries if invocation fails.

+RetryingCallerInterceptorContext
+NoOpRetryingInterceptorContext.prepare(RetryingCallable<?> callable)
+
-T
-RpcRetryingCallerImpl.callWithRetries(RetryingCallable<T> callable,
-                  int callTimeout)
+FastFailInterceptorContext
+FastFailInterceptorContext.prepare(RetryingCallable<?> callable)

 abstract RetryingCallerInterceptorContext
@@ -267,11 +275,13 @@
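callWithoutRetries makes exactly one attempt; callWithRetries re-invokes the callable until it succeeds or attempts are exhausted. A minimal, self-contained sketch of that contract; the Callable-based shape is illustrative, not the HBase implementation:

import java.io.IOException;
import java.util.concurrent.Callable;

public class RetrySketch {
  // Re-invoke the callable up to maxAttempts times, rethrowing the last failure.
  static <T> T callWithRetries(Callable<T> callable, int maxAttempts)
      throws Exception {
    Exception last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return callable.call();
      } catch (IOException e) {
        last = e; // treated as retriable; loop and try again
      }
    }
    throw last;
  }

  public static void main(String[] args) throws Exception {
    int[] calls = {0};
    // Fails twice, then succeeds: callWithRetries absorbs the first two errors.
    String v = callWithRetries(() -> {
      if (++calls[0] < 3) {
        throw new IOException("transient failure " + calls[0]);
      }
      return "ok after " + calls[0] + " attempts";
    }, 5);
    System.out.println(v); // ok after 3 attempts
  }
}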
 
 
 

[18/51] [partial] hbase-site git commit: Published site at .

2018-02-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
index 80108a2..a07a830 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
[18/51] [partial] hbase-site git commit: Published site at .

2018-02-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
index 0c342b2..bb2794a 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
@@ -151,115 +151,115 @@

 Filter.ReturnCode
-FilterListWithAND.filterCell(Cell c)
+ColumnPrefixFilter.filterCell(Cell cell)

 Filter.ReturnCode
-ValueFilter.filterCell(Cell c)
+ColumnCountGetFilter.filterCell(Cell c)

 Filter.ReturnCode
-SkipFilter.filterCell(Cell c)
+RowFilter.filterCell(Cell v)

 Filter.ReturnCode
-FamilyFilter.filterCell(Cell c)
+FuzzyRowFilter.filterCell(Cell c)

 Filter.ReturnCode
-ColumnPrefixFilter.filterCell(Cell cell)
+Filter.filterCell(Cell c)
+A way to filter based on the column family, column qualifier and/or the column value.
+

 Filter.ReturnCode
-PageFilter.filterCell(Cell ignored)
+RandomRowFilter.filterCell(Cell c)

 Filter.ReturnCode
-RowFilter.filterCell(Cell v)
+FirstKeyOnlyFilter.filterCell(Cell c)

 Filter.ReturnCode
-ColumnRangeFilter.filterCell(Cell c)
+SkipFilter.filterCell(Cell c)

 Filter.ReturnCode
-ColumnCountGetFilter.filterCell(Cell c)
+TimestampsFilter.filterCell(Cell c)

 Filter.ReturnCode
-MultipleColumnPrefixFilter.filterCell(Cell c)
+ValueFilter.filterCell(Cell c)

 Filter.ReturnCode
-ColumnPaginationFilter.filterCell(Cell c)
+KeyOnlyFilter.filterCell(Cell ignored)

 Filter.ReturnCode
-DependentColumnFilter.filterCell(Cell c)
+FamilyFilter.filterCell(Cell c)

 Filter.ReturnCode
-FilterListWithOR.filterCell(Cell c)
+QualifierFilter.filterCell(Cell c)

 Filter.ReturnCode
-InclusiveStopFilter.filterCell(Cell c)
+FilterList.filterCell(Cell c)

 Filter.ReturnCode
-KeyOnlyFilter.filterCell(Cell ignored)
+ColumnRangeFilter.filterCell(Cell c)

 Filter.ReturnCode
-MultiRowRangeFilter.filterCell(Cell ignored)
+ColumnPaginationFilter.filterCell(Cell c)

 Filter.ReturnCode
-Filter.filterCell(Cell c)
-A way to filter based on the column family, column qualifier and/or the column value.
-
+FilterListWithAND.filterCell(Cell c)

 Filter.ReturnCode
-FirstKeyOnlyFilter.filterCell(Cell c)
+WhileMatchFilter.filterCell(Cell c)

 Filter.ReturnCode
-WhileMatchFilter.filterCell(Cell c)
+MultiRowRangeFilter.filterCell(Cell ignored)

 Filter.ReturnCode
-FirstKeyValueMatchingQualifiersFilter.filterCell(Cell c)
-Deprecated.
-
+PrefixFilter.filterCell(Cell c)

 Filter.ReturnCode
-TimestampsFilter.filterCell(Cell c)
+DependentColumnFilter.filterCell(Cell c)

 Filter.ReturnCode
-FuzzyRowFilter.filterCell(Cell c)
+FirstKeyValueMatchingQualifiersFilter.filterCell(Cell c)
+Deprecated.
+

 Filter.ReturnCode
-FilterList.filterCell(Cell c)
+PageFilter.filterCell(Cell ignored)

 Filter.ReturnCode
-RandomRowFilter.filterCell(Cell c)
+FilterListWithOR.filterCell(Cell c)

 Filter.ReturnCode
-PrefixFilter.filterCell(Cell c)
+InclusiveStopFilter.filterCell(Cell c)

 Filter.ReturnCode
-SingleColumnValueFilter.filterCell(Cell c)
+MultipleColumnPrefixFilter.filterCell(Cell c)

 Filter.ReturnCode
-QualifierFilter.filterCell(Cell c)
+SingleColumnValueFilter.filterCell(Cell c)

 Filter.ReturnCode
@@ -275,158 +275,158 @@

 Filter.ReturnCode
-ValueFilter.filterKeyValue(Cell c)
+ColumnPrefixFilter.filterKeyValue(Cell c)
 Deprecated.

 Filter.ReturnCode
-SkipFilter.filterKeyValue(Cell c)
+ColumnCountGetFilter.filterKeyValue(Cell c)
 Deprecated.

 Filter.ReturnCode
-FilterListBase.filterKeyValue(Cell c)
+RowFilter.filterKeyValue(Cell c)
+Deprecated.
+

 Filter.ReturnCode
-FamilyFilter.filterKeyValue(Cell c)
+FuzzyRowFilter.filterKeyValue(Cell c)
 Deprecated.

 Filter.ReturnCode
-ColumnPrefixFilter.filterKeyValue(Cell c)
-Deprecated.
+Filter.filterKeyValue(Cell c)
+Deprecated.
+As of release 2.0.0, this will be removed in HBase 3.0.0. Instead use filterCell(Cell)
+

 Filter.ReturnCode
-PageFilter.filterKeyValue(Cell c)
+RandomRowFilter.filterKeyValue(Cell c)
 Deprecated.

 Filter.ReturnCode
-RowFilter.filterKeyValue(Cell c)
+FirstKeyOnlyFilter.filterKeyValue(Cell c)
 Deprecated.

 Filter.ReturnCode
-ColumnRangeFilter.filterKeyValue(Cell c)
+SkipFilter.filterKeyValue(Cell c)
 Deprecated.

 Filter.ReturnCode
-ColumnCountGetFilter.filterKeyValue(Cell c)
+TimestampsFilter.filterKeyValue(Cell c)
 Deprecated.

 Filter.ReturnCode
-MultipleColumnPrefixFilter.filterKeyValue(Cell c)
+ValueFilter.filterKeyValue(Cell c)
 Deprecated.

 Filter.ReturnCode
-ColumnPaginationFilter.filterKeyValue(Cell c)
+KeyOnlyFilter.filterKeyValue(Cell ignored)
 Deprecated.

 Filter.ReturnCode
-DependentColumnFilter.filterKeyValue(Cell c)
+FamilyFilter.filterKeyValue(Cell c)
 Deprecated.
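The churn above is the 2.0 rename: filterKeyValue(Cell) is deprecated in favor of filterCell(Cell), which each filter overrides to vote on every cell. A hedged sketch of a custom filter built on that hook; it assumes hbase-client on the classpath, and a real deployment would also need the class on the server classpath plus toByteArray/parseFrom serialization:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.filter.FilterBase;

// Skips any cell whose value is empty; keeps everything else.
public class NonEmptyValueFilter extends FilterBase {
  @Override
  public ReturnCode filterCell(Cell c) {
    // INCLUDE keeps the cell; SKIP drops just this cell and moves on.
    return c.getValueLength() == 0 ? ReturnCode.SKIP : ReturnCode.INCLUDE;
  }
}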
 
 
 
 

[18/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.TaskFinisher.Status.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.TaskFinisher.Status.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.TaskFinisher.Status.html
index 5844c3b..80259dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.TaskFinisher.Status.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.TaskFinisher.Status.html
@@ -159,7 +159,7 @@
 151    }
 152    Task task = findOrCreateOrphanTask(path);
 153    if (task.isOrphan() && (task.incarnation.get() == 0)) {
-154      LOG.info("resubmitting unassigned orphan task " + path);
+154      LOG.info("Resubmitting unassigned orphan task " + path);
 155      // ignore failure to resubmit. The timeout-monitor will handle it later
 156      // albeit in a more crude fashion
 157      resubmitTask(path, task, FORCE);
@@ -210,7 +210,7 @@
 202      SplitLogCounters.tot_mgr_resubmit_force.increment();
 203      version = -1;
 204    }
-205    LOG.info("resubmitting task " + path);
+205    LOG.info("Resubmitting task " + path);
 206    task.incarnation.incrementAndGet();
 207    boolean result = resubmit(path, version);
 208    if (!result) {
@@ -288,7 +288,7 @@
 280        SplitLogCounters.tot_mgr_rescan_deleted.increment();
 281      }
 282      SplitLogCounters.tot_mgr_missing_state_in_delete.increment();
-283      LOG.debug("deleted task without in memory state " + path);
+283      LOG.debug("Deleted task without in memory state " + path);
 284      return;
 285    }
 286    synchronized (task) {
@@ -336,13 +336,13 @@
 328    }
 329
 330  private void createNodeSuccess(String path) {
-331    LOG.debug("put up splitlog task at znode " + path);
+331    LOG.debug("Put up splitlog task at znode " + path);
 332    getDataSetWatch(path, zkretries);
 333  }
 334
 335  private void createNodeFailure(String path) {
 336    // TODO the Manager should split the log locally instead of giving up
-337    LOG.warn("failed to create task node" + path);
+337    LOG.warn("Failed to create task node " + path);
 338    setDone(path, FAILURE);
 339  }
 340
@@ -368,15 +368,15 @@
 360    data = ZKMetadata.removeMetaData(data);
 361    SplitLogTask slt = SplitLogTask.parseFrom(data);
 362    if (slt.isUnassigned()) {
-363      LOG.debug("task not yet acquired " + path + " ver = " + version);
+363      LOG.debug("Task not yet acquired " + path + ", ver=" + version);
 364      handleUnassignedTask(path);
 365    } else if (slt.isOwned()) {
 366      heartbeat(path, version, slt.getServerName());
 367    } else if (slt.isResigned()) {
-368      LOG.info("task " + path + " entered state: " + slt.toString());
+368      LOG.info("Task " + path + " entered state=" + slt.toString());
 369      resubmitOrFail(path, FORCE);
 370    } else if (slt.isDone()) {
-371      LOG.info("task " + path + " entered state: " + slt.toString());
+371      LOG.info("Task " + path + " entered state=" + slt.toString());
 372      if (taskFinisher != null && !ZKSplitLog.isRescanNode(watcher, path)) {
 373        if (taskFinisher.finish(slt.getServerName(), ZKSplitLog.getFileName(path)) == Status.DONE) {
 374          setDone(path, SUCCESS);
@@ -387,7 +387,7 @@
 379        setDone(path, SUCCESS);
 380      }
 381    } else if (slt.isErr()) {
-382      LOG.info("task " + path + " entered state: " + slt.toString());
+382      LOG.info("Task " + path + " entered state=" + slt.toString());
 383      resubmitOrFail(path, CHECK);
 384    } else {
 385      LOG.error(HBaseMarkers.FATAL, "logic error - unexpected zk state for path = "
@@ -403,7 +403,7 @@
 395  }
 396
 397  private void getDataSetWatchFailure(String path) {
-398    LOG.warn("failed to set data watch " + path);
+398    LOG.warn("Failed to set data watch " + path);
 399    setDone(path, FAILURE);
 400  }
 401
@@ -412,7 +412,7 @@
 404    if (task == null) {
 405      if (!ZKSplitLog.isRescanNode(watcher, path)) {
 406        SplitLogCounters.tot_mgr_unacquired_orphan_done.increment();
-407        LOG.debug("unacquired orphan task is done " + path);
+407        LOG.debug("Unacquired orphan task is done " + path);
 408      }
 409    } else {
 410      synchronized (task) {
@@ -449,7 +449,7 @@
 441
 442  private Task findOrCreateOrphanTask(String path) {
 443    return computeIfAbsent(details.getTasks(), path, Task::new, () -> {
-444      LOG.info("creating orphan task " + path);
+444      LOG.info("Creating orphan task " + path);
 445      SplitLogCounters.tot_mgr_orphan_task_acquired.increment();
 446    });
 447  }
@@ -458,7 +458,7 @@
 450    Task task = findOrCreateOrphanTask(path);
 451    if
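The findOrCreateOrphanTask hunk shows a useful idiom: create-if-absent in a concurrent map, with a side effect (logging, counters) that runs only when this call actually created the entry. A minimal, self-contained sketch of that idiom; HBase's computeIfAbsent helper is mirrored here by a small static method:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Supplier;

public class CreateIfAbsentSketch {
  static class Task {
    final long created = System.nanoTime();
  }

  // Like the helper in the hunk above: the supplier builds the value, and the
  // post-create action fires only when this call inserted a new entry.
  static <K, V> V computeIfAbsent(ConcurrentMap<K, V> map, K key,
      Supplier<V> supplier, Runnable postCreate) {
    V existing = map.get(key);
    if (existing != null) {
      return existing;
    }
    V candidate = supplier.get();
    existing = map.putIfAbsent(key, candidate);
    if (existing != null) {
      return existing; // someone else won the race; drop our candidate
    }
    postCreate.run(); // we created it: log, bump counters, etc.
    return candidate;
  }

  public static void main(String[] args) {
    ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<>();
    computeIfAbsent(tasks, "/hbase/splitWAL/wal-1", Task::new,
        () -> System.out.println("Creating orphan task /hbase/splitWAL/wal-1"));
    computeIfAbsent(tasks, "/hbase/splitWAL/wal-1", Task::new,
        () -> System.out.println("should not print"));
    System.out.println("tasks = " + tasks.size()); // 1
  }
}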

[18/51] [partial] hbase-site git commit: Published site at .

2018-02-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
index b7c24d7..eecd2f9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
@@ -44,792 +44,792 @@
 036import java.util.List;
 037import java.util.Map;
 038import java.util.NavigableSet;
-039import java.util.Objects;
-040import java.util.PriorityQueue;
-041import java.util.Set;
-042import java.util.concurrent.ArrayBlockingQueue;
-043import java.util.concurrent.BlockingQueue;
-044import java.util.concurrent.ConcurrentHashMap;
-045import java.util.concurrent.ConcurrentMap;
-046import java.util.concurrent.ConcurrentSkipListSet;
-047import java.util.concurrent.Executors;
-048import java.util.concurrent.ScheduledExecutorService;
-049import java.util.concurrent.TimeUnit;
-050import java.util.concurrent.atomic.AtomicInteger;
-051import java.util.concurrent.atomic.AtomicLong;
-052import java.util.concurrent.atomic.LongAdder;
-053import java.util.concurrent.locks.Lock;
-054import java.util.concurrent.locks.ReentrantLock;
-055import java.util.concurrent.locks.ReentrantReadWriteLock;
-056import org.apache.hadoop.conf.Configuration;
-057import org.apache.hadoop.hbase.HBaseConfiguration;
-058import org.apache.hadoop.hbase.io.HeapSize;
-059import org.apache.hadoop.hbase.io.hfile.BlockCache;
-060import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-061import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-062import org.apache.hadoop.hbase.io.hfile.BlockPriority;
-063import org.apache.hadoop.hbase.io.hfile.BlockType;
-064import org.apache.hadoop.hbase.io.hfile.CacheStats;
-065import org.apache.hadoop.hbase.io.hfile.Cacheable;
-066import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-067import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-068import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-069import org.apache.hadoop.hbase.io.hfile.CachedBlock;
-070import org.apache.hadoop.hbase.io.hfile.HFileBlock;
-071import org.apache.hadoop.hbase.nio.ByteBuff;
-072import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import org.apache.hadoop.hbase.util.HasThread;
-074import org.apache.hadoop.hbase.util.IdReadWriteLock;
-075import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-076import org.apache.hadoop.util.StringUtils;
-077import org.apache.yetus.audience.InterfaceAudience;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080
-081import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-082import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-083import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-084
-085/**
-086 * BucketCache uses {@link BucketAllocator} to allocate/free blocks, and uses
-087 * BucketCache#ramCache and BucketCache#backingMap in order to
-088 * determine if a given element is in the cache. The bucket cache can use on-heap or
-089 * off-heap memory {@link ByteBufferIOEngine} or in a file {@link FileIOEngine} to
-090 * store/read the block data.
-091 *
-092 * <p>Eviction is via a similar algorithm as used in
-093 * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
-094 *
-095 * <p>BucketCache can be used as mainly a block cache (see
-096 * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
-097 * LruBlockCache to decrease CMS GC and heap fragmentation.
-098 *
-099 * <p>It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
-100 * blocks) to enlarge cache space via
-101 * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache#setVictimCache}
-102 */
-103@InterfaceAudience.Private
-104public class BucketCache implements BlockCache, HeapSize {
-105  private static final Logger LOG = LoggerFactory.getLogger(BucketCache.class);
-106
-107  /** Priority buckets config */
-108  static final String SINGLE_FACTOR_CONFIG_NAME = "hbase.bucketcache.single.factor";
-109  static final String MULTI_FACTOR_CONFIG_NAME = "hbase.bucketcache.multi.factor";
-110  static final String MEMORY_FACTOR_CONFIG_NAME = "hbase.bucketcache.memory.factor";
-111  static final String EXTRA_FREE_FACTOR_CONFIG_NAME = "hbase.bucketcache.extrafreefactor";
-112  static final String ACCEPT_FACTOR_CONFIG_NAME = "hbase.bucketcache.acceptfactor";
-113  static final String MIN_FACTOR_CONFIG_NAME = "hbase.bucketcache.minfactor";
-114
-115  /** Priority buckets */
-116  @VisibleForTesting
-117  static final float
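The class comment spells out the design: a fast primary cache backed by a "victim" bucket cache that absorbs evictions and serves misses. A minimal, self-contained sketch of that two-level lookup, with plain maps standing in for LruBlockCache and BucketCache:

import java.util.HashMap;
import java.util.Map;

// Two-level cache: misses in the primary fall through to the victim cache,
// and evictions from the primary are demoted into it, mirroring
// LruBlockCache#setVictimCache + BucketCache in the javadoc above.
public class VictimCacheSketch {
  private final Map<String, byte[]> primary = new HashMap<>();
  private final Map<String, byte[]> victim = new HashMap<>();

  byte[] getBlock(String key) {
    byte[] block = primary.get(key);
    if (block != null) {
      return block; // primary hit
    }
    block = victim.remove(key);
    if (block != null) {
      primary.put(key, block); // promote back on a victim hit
    }
    return block;
  }

  void evictFromPrimary(String key) {
    byte[] block = primary.remove(key);
    if (block != null) {
      victim.put(key, block); // demote instead of dropping
    }
  }

  public static void main(String[] args) {
    VictimCacheSketch cache = new VictimCacheSketch();
    cache.primary.put("hfile-block-1", new byte[]{1});
    cache.evictFromPrimary("hfile-block-1");
    System.out.println(cache.getBlock("hfile-block-1") != null); // true, via victim
  }
}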

[18/51] [partial] hbase-site git commit: Published site at .

2018-01-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.html
index 6e03d49..702b888 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";

-public class TestAsyncNonMetaRegionLocatorConcurrenyLimit
+public class TestAsyncNonMetaRegionLocatorConcurrenyLimit
 extends Object

@@ -149,38 +149,42 @@ extends Object
 Field and Description

+static HBaseClassTestRule
+CLASS_RULE
+
 private static AtomicInteger
 CONCURRENCY

 private static org.apache.hadoop.hbase.client.AsyncConnectionImpl
 CONN

 private static byte[]
 FAMILY

 private static org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator
 LOCATOR

 private static int
 MAX_ALLOWED

 private static AtomicInteger
 MAX_CONCURRENCY

 private static byte[][]
 SPLIT_KEYS

 private static org.apache.hadoop.hbase.TableName
 TABLE_NAME

 private static HBaseTestingUtility
 TEST_UTIL

@@ -254,13 +258,22 @@ extends Object

 Field Detail

+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+

 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL

@@ -269,7 +282,7 @@ extends Object

 TABLE_NAME
-private static org.apache.hadoop.hbase.TableName TABLE_NAME
+private static org.apache.hadoop.hbase.TableName TABLE_NAME

@@ -278,7 +291,7 @@ extends Object

 FAMILY
-private static byte[] FAMILY
+private static byte[] FAMILY

@@ -287,7 +300,7 @@ extends Object

 CONN
-private static org.apache.hadoop.hbase.client.AsyncConnectionImpl CONN
+private static org.apache.hadoop.hbase.client.AsyncConnectionImpl CONN

@@ -296,7 +309,7 @@ extends Object

 LOCATOR
-private static org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator LOCATOR
+private static org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator LOCATOR

@@ -305,7 +318,7 @@ extends Object

 SPLIT_KEYS
-private static byte[][] SPLIT_KEYS
+private static byte[][] SPLIT_KEYS

@@ -314,7 +327,7 @@ extends Object

 MAX_ALLOWED
-private static int MAX_ALLOWED
+private static int MAX_ALLOWED

@@ -323,7 +336,7 @@ extends Object

 CONCURRENCY
-private static AtomicInteger CONCURRENCY
+private static AtomicInteger CONCURRENCY

@@ -332,7 +345,7 @@ extends Object

 MAX_CONCURRENCY
-private static AtomicInteger MAX_CONCURRENCY
+private static AtomicInteger MAX_CONCURRENCY

@@ -349,7 +362,7 @@ extends Object

 TestAsyncNonMetaRegionLocatorConcurrenyLimit
-public TestAsyncNonMetaRegionLocatorConcurrenyLimit()
+public TestAsyncNonMetaRegionLocatorConcurrenyLimit()

@@ -366,7 +379,7 @@ extends
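The CONCURRENCY/MAX_CONCURRENCY pair is a common test trick: every in-flight call bumps a counter, the high-water mark is folded into a second atomic, and the test asserts the mark never exceeded the allowed limit. A minimal, self-contained sketch of that bookkeeping (the simulated work and the limit formula here are illustrative):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;

public class ConcurrencyHighWaterMark {
  static final AtomicInteger CONCURRENCY = new AtomicInteger();
  static final AtomicInteger MAX_CONCURRENCY = new AtomicInteger();

  static void trackedCall(Runnable work) {
    int current = CONCURRENCY.incrementAndGet();
    // Fold the current in-flight count into the high-water mark.
    MAX_CONCURRENCY.accumulateAndGet(current, Math::max);
    try {
      work.run();
    } finally {
      CONCURRENCY.decrementAndGet();
    }
  }

  public static void main(String[] args) {
    IntStream.range(0, 16).parallel()
        .forEach(i -> trackedCall(() -> { /* simulated locate() call */ }));
    int maxAllowed = Runtime.getRuntime().availableProcessors() + 8;
    System.out.println("peak=" + MAX_CONCURRENCY.get()
        + " within limit=" + (MAX_CONCURRENCY.get() <= maxAllowed));
  }
}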

[18/51] [partial] hbase-site git commit: Published site at .

2018-01-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.BackupImage.Builder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.BackupImage.Builder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.BackupImage.Builder.html
index 7509dcf..ec2aa41 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.BackupImage.Builder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.BackupImage.Builder.html
@@ -64,152 +64,152 @@
 056 */
 057@InterfaceAudience.Private
 058public class BackupManifest {
-059
-060  private static final Logger LOG = LoggerFactory.getLogger(BackupManifest.class);
-061
-062  // manifest file name
-063  public static final String MANIFEST_FILE_NAME = ".backup.manifest";
-064
-065  /**
-066   * Backup image, the dependency graph is made up by series of backup images BackupImage contains
-067   * all the relevant information to restore the backup and is used during restore operation
-068   */
-069
-070  public static class BackupImage implements Comparable<BackupImage> {
+059  private static final Logger LOG = LoggerFactory.getLogger(BackupManifest.class);
+060
+061  // manifest file name
+062  public static final String MANIFEST_FILE_NAME = ".backup.manifest";
+063
+064  /**
+065   * Backup image, the dependency graph is made up by series of backup images BackupImage contains
+066   * all the relevant information to restore the backup and is used during restore operation
+067   */
+068  public static class BackupImage implements Comparable<BackupImage> {
+069    static class Builder {
+070      BackupImage image;
 071
-072    static class Builder {
-073      BackupImage image;
-074
-075      Builder() {
-076        image = new BackupImage();
-077      }
-078
-079      Builder withBackupId(String backupId) {
-080        image.setBackupId(backupId);
-081        return this;
-082      }
-083
-084      Builder withType(BackupType type) {
-085        image.setType(type);
-086        return this;
-087      }
-088
-089      Builder withRootDir(String rootDir) {
-090        image.setRootDir(rootDir);
-091        return this;
-092      }
-093
-094      Builder withTableList(List<TableName> tableList) {
-095        image.setTableList(tableList);
-096        return this;
-097      }
-098
-099      Builder withStartTime(long startTime) {
-100        image.setStartTs(startTime);
-101        return this;
-102      }
-103
-104      Builder withCompleteTime(long completeTime) {
-105        image.setCompleteTs(completeTime);
-106        return this;
-107      }
-108
-109      BackupImage build() {
-110        return image;
-111      }
-112
-113    }
-114
-115    private String backupId;
-116    private BackupType type;
-117    private String rootDir;
-118    private List<TableName> tableList;
-119    private long startTs;
-120    private long completeTs;
-121    private ArrayList<BackupImage> ancestors;
-122    private HashMap<TableName, HashMap<String, Long>> incrTimeRanges;
-123
-124    static Builder newBuilder() {
-125      return new Builder();
-126    }
-127
-128    public BackupImage() {
-129      super();
-130    }
-131
-132    private BackupImage(String backupId, BackupType type, String rootDir,
-133        List<TableName> tableList, long startTs, long completeTs) {
-134      this.backupId = backupId;
-135      this.type = type;
-136      this.rootDir = rootDir;
-137      this.tableList = tableList;
-138      this.startTs = startTs;
-139      this.completeTs = completeTs;
-140    }
-141
-142    static BackupImage fromProto(BackupProtos.BackupImage im) {
-143      String backupId = im.getBackupId();
-144      String rootDir = im.getBackupRootDir();
-145      long startTs = im.getStartTs();
-146      long completeTs = im.getCompleteTs();
-147      List<HBaseProtos.TableName> tableListList = im.getTableListList();
-148      List<TableName> tableList = new ArrayList<TableName>();
-149      for (HBaseProtos.TableName tn : tableListList) {
-150        tableList.add(ProtobufUtil.toTableName(tn));
-151      }
-152
-153      List<BackupProtos.BackupImage> ancestorList = im.getAncestorsList();
-154
-155      BackupType type =
-156          im.getBackupType() == BackupProtos.BackupType.FULL ? BackupType.FULL
-157              : BackupType.INCREMENTAL;
-158
-159      BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
-160      for (BackupProtos.BackupImage img : ancestorList) {
-161        image.addAncestor(fromProto(img));
-162      }
-163      image.setIncrTimeRanges(loadIncrementalTimestampMap(im));
-164      return image;
-165    }
-166
-167    BackupProtos.BackupImage toProto() {
-168      BackupProtos.BackupImage.Builder builder = BackupProtos.BackupImage.newBuilder();
-169
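The fluent Builder in the hunk above is meant to be chained. Since HBase's builder and its with* setters are package-private, here is a self-contained mirror of the same idiom on a tiny stand-in type rather than application code against the real class; the id and path values are hypothetical:

import java.util.List;

// Self-contained mirror of the fluent Builder idiom in the hunk above;
// field names follow BackupImage, but this is not the HBase class.
public class ImageBuilderSketch {
  static class Image {
    String backupId;
    String rootDir;
    List<String> tables;
    long startTs;
    long completeTs;

    static class Builder {
      private final Image image = new Image();

      Builder withBackupId(String backupId) { image.backupId = backupId; return this; }
      Builder withRootDir(String rootDir) { image.rootDir = rootDir; return this; }
      Builder withTableList(List<String> tables) { image.tables = tables; return this; }
      Builder withStartTime(long ts) { image.startTs = ts; return this; }
      Builder withCompleteTime(long ts) { image.completeTs = ts; return this; }
      Image build() { return image; }
    }
  }

  public static void main(String[] args) {
    Image img = new Image.Builder()
        .withBackupId("backup_1516059283475") // hypothetical id
        .withRootDir("hdfs://nn:8020/backup") // hypothetical path
        .withTableList(List.of("ns:t1", "ns:t2"))
        .withStartTime(1L)
        .withCompleteTime(2L)
        .build();
    System.out.println(img.backupId + " tables=" + img.tables);
  }
}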

[18/51] [partial] hbase-site git commit: Published site at .

2018-01-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/devapidocs/src-html/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.html
index 041b052..8ddc11a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.html
@@ -31,64 +31,64 @@
 023import java.io.IOException;
 024import java.io.InputStream;
 025import java.lang.annotation.Annotation;
-026import java.lang.reflect.Type;
-027
-028import javax.ws.rs.Consumes;
-029import javax.ws.rs.WebApplicationException;
-030import javax.ws.rs.core.MediaType;
-031import javax.ws.rs.core.MultivaluedMap;
-032import javax.ws.rs.ext.MessageBodyReader;
-033import javax.ws.rs.ext.Provider;
-034
-035import org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038import org.apache.hadoop.hbase.rest.Constants;
-039import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
-040
-041/**
-042 * Adapter for hooking up Jersey content processing dispatch to
-043 * ProtobufMessageHandler interface capable handlers for decoding protobuf input.
-044 */
-045@Provider
-046@Consumes({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF})
-047@InterfaceAudience.Private
-048public class ProtobufMessageBodyConsumer
-049    implements MessageBodyReader<ProtobufMessageHandler> {
-050  private static final Logger LOG =
-051      LoggerFactory.getLogger(ProtobufMessageBodyConsumer.class);
-052
-053  @Override
-054  public boolean isReadable(Class<?> type, Type genericType,
-055      Annotation[] annotations, MediaType mediaType) {
-056    return ProtobufMessageHandler.class.isAssignableFrom(type);
-057  }
-058
-059  @Override
-060  public ProtobufMessageHandler readFrom(Class<ProtobufMessageHandler> type, Type genericType,
-061      Annotation[] annotations, MediaType mediaType,
-062      MultivaluedMap<String, String> httpHeaders, InputStream inputStream)
-063      throws IOException, WebApplicationException {
-064    ProtobufMessageHandler obj = null;
-065    try {
-066      obj = type.newInstance();
-067      ByteArrayOutputStream baos = new ByteArrayOutputStream();
-068      byte[] buffer = new byte[4096];
-069      int read;
-070      do {
-071        read = inputStream.read(buffer, 0, buffer.length);
-072        if (read > 0) {
-073          baos.write(buffer, 0, read);
-074        }
-075      } while (read > 0);
-076      if (LOG.isTraceEnabled()) {
-077        LOG.trace(getClass() + ": read " + baos.size() + " bytes from " +
-078          inputStream);
-079      }
-080      obj = obj.getObjectFromMessage(baos.toByteArray());
-081    } catch (InstantiationException e) {
-082      throw new WebApplicationException(e);
-083    } catch (IllegalAccessException e) {
+026import java.lang.reflect.InvocationTargetException;
+027import java.lang.reflect.Type;
+028
+029import javax.ws.rs.Consumes;
+030import javax.ws.rs.WebApplicationException;
+031import javax.ws.rs.core.MediaType;
+032import javax.ws.rs.core.MultivaluedMap;
+033import javax.ws.rs.ext.MessageBodyReader;
+034import javax.ws.rs.ext.Provider;
+035
+036import org.apache.yetus.audience.InterfaceAudience;
+037import org.slf4j.Logger;
+038import org.slf4j.LoggerFactory;
+039import org.apache.hadoop.hbase.rest.Constants;
+040import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
+041
+042/**
+043 * Adapter for hooking up Jersey content processing dispatch to
+044 * ProtobufMessageHandler interface capable handlers for decoding protobuf input.
+045 */
+046@Provider
+047@Consumes({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF})
+048@InterfaceAudience.Private
+049public class ProtobufMessageBodyConsumer
+050    implements MessageBodyReader<ProtobufMessageHandler> {
+051  private static final Logger LOG =
+052      LoggerFactory.getLogger(ProtobufMessageBodyConsumer.class);
+053
+054  @Override
+055  public boolean isReadable(Class<?> type, Type genericType,
+056      Annotation[] annotations, MediaType mediaType) {
+057    return ProtobufMessageHandler.class.isAssignableFrom(type);
+058  }
+059
+060  @Override
+061  public ProtobufMessageHandler readFrom(Class<ProtobufMessageHandler> type, Type genericType,
+062      Annotation[] annotations, MediaType mediaType,
+063      MultivaluedMap<String, String> httpHeaders, InputStream inputStream)
+064      throws IOException, WebApplicationException {
+065    ProtobufMessageHandler obj = null;
+066    try {
+067      obj = type.getDeclaredConstructor().newInstance();
+068      ByteArrayOutputStream baos = new
[18/51] [partial] hbase-site git commit: Published site at .

2018-01-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
index c448105..4f6b995 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";



-public static class RegionStates.RegionStateNode
+public static class RegionStates.RegionStateNode
 extends Object
 implements Comparable<RegionStates.RegionStateNode>
 Current Region State.
@@ -360,7 +360,7 @@ implements Comparable


 regionInfo
-private final RegionInfo regionInfo
+private final RegionInfo regionInfo



@@ -369,7 +369,7 @@ implements Comparable


 event
-private final ProcedureEvent<?> event
+private final ProcedureEvent<?> event



@@ -378,7 +378,7 @@ implements Comparable


 procedure
-private volatile RegionTransitionProcedure procedure
+private volatile RegionTransitionProcedure procedure



@@ -387,7 +387,7 @@ implements Comparable


 regionLocation
-private volatile ServerName regionLocation
+private volatile ServerName regionLocation



@@ -396,7 +396,7 @@ implements Comparable


 lastHost
-private volatile ServerName lastHost
+private volatile ServerName lastHost



@@ -405,7 +405,7 @@ implements Comparable


 state
-private volatile RegionState.State state
+private volatile RegionState.State state
 A Region-in-Transition (RIT) moves through states.
 See RegionState.State for the complete list. A Region that
 is opened moves from OFFLINE => OPENING => OPENED.
@@ -417,7 +417,7 @@ implements Comparable


 lastUpdate
-private volatile long lastUpdate
+private volatile long lastUpdate
 Updated whenever setRegionLocation(ServerName) or setState(State, State...) is called.

@@ -428,7 +428,7 @@ implements Comparable


 openSeqNum
-private volatile long openSeqNum
+private volatile long openSeqNum



@@ -445,7 +445,7 @@ implements Comparable


 RegionStateNode
-public RegionStateNode(RegionInfo regionInfo)
+public RegionStateNode(RegionInfo regionInfo)



@@ -462,7 +462,7 @@ implements Comparable


 setState
-public boolean setState(RegionState.State update,
+public boolean setState(RegionState.State update,
                         RegionState.State... expected)

 Parameters:
@@ -479,7 +479,7 @@ implements Comparable


 offline
-public ServerName offline()
+public ServerName offline()
 Put region into OFFLINE mode (set state and clear location).

 Returns:
@@ -493,7 +493,7 @@ implements Comparable


 transitionState
-public void transitionState(RegionState.State update,
+public void transitionState(RegionState.State update,
                             RegionState.State... expected)
                      throws UnexpectedStateException
 Set a new RegionState.State, but only if currently in the expected State.
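For orientation, a minimal sketch of the compare-and-set pattern these two methods describe: a transition succeeds only when the node is currently in one of the expected states. This is a simplified model, not the HBase implementation; the enum values and exception name follow the javadoc above.

  // Simplified model of RegionStateNode's guarded state transition (sketch, not HBase code).
  enum State { OFFLINE, OPENING, OPEN, CLOSING, CLOSED }

  class UnexpectedStateException extends Exception {
    UnexpectedStateException(String msg) { super(msg); }
  }

  class RegionStateNodeSketch {
    private volatile State state = State.OFFLINE;
    private volatile long lastUpdate;

    /** Updates the state and returns true only if we are in one of the expected states. */
    synchronized boolean setState(State update, State... expected) {
      for (State s : expected) {
        if (state == s) {
          state = update;
          lastUpdate = System.currentTimeMillis();
          return true;
        }
      }
      return false;
    }

    /** Like setState, but failure is an error rather than a false return. */
    void transitionState(State update, State... expected) throws UnexpectedStateException {
      if (!setState(update, expected)) {
        throw new UnexpectedStateException("expected " + java.util.Arrays.toString(expected)
            + " but was " + state);
      }
    }
  }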
@@ -510,7 +510,7 @@ implements Comparable


 isInState
-public boolean isInState(RegionState.State... expected)
+public boolean isInState(RegionState.State... expected)



@@ -519,7 +519,7 @@ implements Comparable


 isStuck
-public boolean isStuck()
+public boolean isStuck()



@@ -528,7 +528,7 @@ implements Comparable


 isInTransition
-public boolean isInTransition()
+public boolean isInTransition()



@@ -537,7 +537,7 @@ implements Comparable


 getLastUpdate
-public long getLastUpdate()
+public long getLastUpdate()



@@ -546,7 +546,7 @@ implements Comparable


 setLastHost
-public void setLastHost(ServerName serverName)
+public void setLastHost(ServerName serverName)



@@ -555,7 +555,7 @@ implements

[18/51] [partial] hbase-site git commit: Published site at .

2018-01-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index b374b93..a67ab00 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -22085,6 +22085,10 @@
 
 Start a MiniHBaseCluster.
 
+MiniHBaseCluster(Configuration, int, int, Class<? extends HMaster>, Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer>) - Constructor for class org.apache.hadoop.hbase.MiniHBaseCluster
+
+Start a MiniHBaseCluster.
+
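A sketch of how this new constructor might be used from a test; passing null for the master and region server classes is assumed to select the defaults, and the counts are arbitrary:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.MiniHBaseCluster;

  public class MiniClusterExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // One master, two region servers; null class arguments fall back to the defaults.
      MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 1, 2, null, null);
      try {
        System.out.println("live region servers: " + cluster.getLiveRegionServerThreads().size());
      } finally {
        cluster.shutdown();
      }
    }
  }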
 MiniHBaseCluster(Configuration, int, int, List<Integer>, Class<? extends HMaster>, Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer>) - Constructor for class org.apache.hadoop.hbase.MiniHBaseCluster
 
 MiniHBaseCluster.MiniHBaseClusterRegionServer - 
Class in org.apache.hadoop.hbase

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
 
b/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
index 3d99ab6..b735807 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
@@ -131,7 +131,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class MiniHBaseCluster.MiniHBaseClusterRegionServer
+public static class MiniHBaseCluster.MiniHBaseClusterRegionServer
 extends org.apache.hadoop.hbase.regionserver.HRegionServer
 Subclass so we can get at protected methods (none at the moment). Also creates
 a FileSystem instance per instantiation. Adds a shutdown own FileSystem
@@ -298,7 +298,7 @@ extends 
org.apache.hadoop.hbase.regionserver.HRegionServer
 
 
 shutdownThread
-private Thread shutdownThread
+private Thread shutdownThread
 
 
 
@@ -307,7 +307,7 @@ extends 
org.apache.hadoop.hbase.regionserver.HRegionServer
 
 
 user
-private org.apache.hadoop.hbase.security.User user
+private org.apache.hadoop.hbase.security.User user
 
 
 
@@ -316,7 +316,7 @@ extends 
org.apache.hadoop.hbase.regionserver.HRegionServer
 
 
 killedServers
-static Set<org.apache.hadoop.hbase.ServerName> killedServers
+static Set<org.apache.hadoop.hbase.ServerName> killedServers
 List of RegionServers killed so far. ServerName also comprises the startCode of a server,
 so any restarted instance of the same server will have a different ServerName and will not
 coincide with past dead ones. So there's no need to clean up this list.
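That uniqueness argument is easy to demonstrate with ServerName directly; the host, port, and start codes below are made up:

  import org.apache.hadoop.hbase.ServerName;

  public class ServerNameExample {
    public static void main(String[] args) {
      // Same host and port, different start codes: a restarted server gets a new identity.
      ServerName first = ServerName.valueOf("rs1.example.com", 16020, 1514764800000L);
      ServerName restarted = ServerName.valueOf("rs1.example.com", 16020, 1514768400000L);
      System.out.println(first.equals(restarted)); // false: startCode differs
      System.out.println(first.getHostname().equals(restarted.getHostname())
          && first.getPort() == restarted.getPort()); // true: same address
    }
  }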
@@ -336,7 +336,7 @@ extends 
org.apache.hadoop.hbase.regionserver.HRegionServer
 
 
 MiniHBaseClusterRegionServer
-public MiniHBaseClusterRegionServer(org.apache.hadoop.conf.Configuration conf)
+public MiniHBaseClusterRegionServer(org.apache.hadoop.conf.Configuration conf)
                              throws IOException,
                                     InterruptedException
 
@@ -360,7 +360,7 @@ extends 
org.apache.hadoop.hbase.regionserver.HRegionServer
 
 
 handleReportForDutyResponse
-protected void handleReportForDutyResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse c)
+protected void handleReportForDutyResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse c)
                                     throws IOException
 
 Overrides:
@@ -376,7 +376,7 @@ extends 
org.apache.hadoop.hbase.regionserver.HRegionServer
 
 
 run
-public void run()
+public void run()

 Specified by:
 run in interface Runnable
@@ -391,7 +391,7 @@ extends 

[18/51] [partial] hbase-site git commit: Published site at .

2018-01-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionImplementation.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionImplementation.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionImplementation.html
index d79305e..197a966 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionImplementation.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionImplementation.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":9,"i1":9,"i2":10,"i3":10};
+var methods = 
{"i0":9,"i1":9,"i2":10,"i3":9,"i4":9,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -74,7 +74,7 @@ var activeTableTab = "activeTableTab";
 
 
 Summary:
-Nested|
+Nested|
 Field|
 Constr|
 Method
@@ -109,16 +109,34 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestConnectionImplementation
+public class TestConnectionImplementation
 extends Object
-Tests that we fail fast when hostname resolution is not working and do not cache
- unresolved InetSocketAddresses.
+This class is for testing HBaseConnectionManager features
 
 
 
 
 
 
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+static class
+TestConnectionImplementation.BlockingFilter
+
+
+
+
 
 
 
@@ -132,12 +150,56 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Field and Description
 
 
-private static 
org.apache.hadoop.hbase.client.ConnectionImplementation
-conn
+private static byte[]
+FAM_NAM
+
+
+private static org.slf4j.Logger
+LOG
+
+
+org.junit.rules.TestName
+name
+
+
+private static byte[]
+ROW
+
+
+private static byte[]
+ROW_X
+
+
+private static int
+RPC_RETRY
+
+
+protected static AtomicBoolean
+syncBlockingFilter
+
+
+private static 
org.apache.hadoop.hbase.TableName
+TABLE_NAME
+
+
+private static 
org.apache.hadoop.hbase.TableName
+TABLE_NAME1
+
+
+private static 
org.apache.hadoop.hbase.TableName
+TABLE_NAME2
+
+
+private static 
org.apache.hadoop.hbase.TableName
+TABLE_NAME3
 
 
 private static HBaseTestingUtility
-testUtil
+TEST_UTIL
+
+
+org.junit.rules.TestRule
+timeout
 
 
 
@@ -172,20 +234,118 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Method and Description
 
 
-static void
-setupBeforeClass()
+private static void
+assertEqualsWithJitter(long expected, long actual)


-static void
-teardownAfterClass()
+private static void
+assertEqualsWithJitter(long expected, long actual, long jitterBase)


-void
-testGetAdminBadHostname()
+private int
+setNumTries(org.apache.hadoop.hbase.client.ConnectionImplementation hci, int newVal)
 
 
+static void
+setUpBeforeClass()
+
+
+static void
+tearDownAfterClass()
+
+
+void
+testAdminFactory()
+Naive test to check that Connection#getAdmin returns a 
properly constructed HBaseAdmin object
+
+
+
+void
+testCacheSeqNums()
+Test that stale cache updates don't override newer cached 
values.
+
+
+
+void
+testClosing()
+
+
+void
+testClusterConnection()
+
+
+void
+testClusterStatus()
+
+
+void
+testConnection()
+This test checks that one can connect to the cluster with only the
+  ZooKeeper quorum set (see the sketch below).
+
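A minimal sketch of such a quorum-only client configuration (the quorum host is a placeholder):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class QuorumOnlyConnect {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // The only cluster-specific setting: where ZooKeeper lives.
      conf.set("hbase.zookeeper.quorum", "zk1.example.com");
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        System.out.println("connected: " + !connection.isClosed());
      }
    }
  }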
+
+
+private void
+testConnectionClose(boolean allowsInterrupt)
+
+
+void
+testConnectionCloseAllowsInterrupt()
+Test that we can handle connection close: it will trigger a 
retry, but the calls will finish.
+
+
+
+void
+testConnectionCut()
+Test that the connection to the dead server is cut 
immediately when we receive the
+  notification.
+
+
+
+void
+testConnectionIdle()
+Test that connection can become idle without breaking 
everything.
+
+
+
 void
-testGetClientBadHostname()
+testConnectionManagement()
+Test that Connection or Pool are not closed when managed 
externally
+
+
+
+void
+testConnectionNotAllowsInterrupt()
+
+
+void
+testConnectionRideOverClusterRestart()
+
+
+void
+testCreateConnection()
+Trivial test to verify that nobody messes with
+ ConnectionFactory.createConnection(Configuration)
+
+
+
+void
+testErrorBackoffTimeCalculation()
+
+
+void
+testMulti()
+
+
+void
+testRegionCaching()
+Test that when we delete a location 

[18/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 4430aed..e714bcf 100644
--- a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -4270,7 +4270,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 createRegionAndWAL
-public static HRegion createRegionAndWAL(RegionInfo info,
+public static HRegion createRegionAndWAL(RegionInfo info,
                                          org.apache.hadoop.fs.Path rootDir,
                                          org.apache.hadoop.conf.Configuration conf,
                                          TableDescriptor htd)
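A sketch of driving this helper from a test, assuming an HBase 2.x test classpath; the table name, family, and directory come from the testing utility and are otherwise arbitrary:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.RegionInfo;
  import org.apache.hadoop.hbase.client.RegionInfoBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.regionserver.HRegion;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateRegionExample {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtility util = new HBaseTestingUtility();
      Configuration conf = util.getConfiguration();
      TableName table = TableName.valueOf("t1");
      TableDescriptor htd = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("f")))
          .build();
      RegionInfo info = RegionInfoBuilder.newBuilder(table).build();
      Path rootDir = util.getDataTestDir();
      // Creates the region together with its write-ahead log.
      HRegion region = HBaseTestingUtility.createRegionAndWAL(info, rootDir, conf, htd);
      HBaseTestingUtility.closeRegionAndWAL(region);
    }
  }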
@@ -4289,7 +4289,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 createRegionAndWAL
-public static HRegion createRegionAndWAL(RegionInfo info,
+public static HRegion createRegionAndWAL(RegionInfo info,
                                          org.apache.hadoop.fs.Path rootDir,
                                          org.apache.hadoop.conf.Configuration conf,
                                          TableDescriptor htd,
@@ -4309,7 +4309,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getMetaTableRows
-public List<byte[]> getMetaTableRows()
+public List<byte[]> getMetaTableRows()
                              throws IOException
 Returns all rows from the hbase:meta table.
 
@@ -4324,7 +4324,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getMetaTableRows
-public List<byte[]> getMetaTableRows(TableName tableName)
+public List<byte[]> getMetaTableRows(TableName tableName)
                              throws IOException
 Returns all rows from the hbase:meta table for a given user table.
 
@@ -4339,7 +4339,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getOtherRegionServer
-public HRegionServer getOtherRegionServer(HRegionServer rs)
+public HRegionServer getOtherRegionServer(HRegionServer rs)
 
 
 
@@ -4348,7 +4348,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 getRSForFirstRegionInTable
-public HRegionServer getRSForFirstRegionInTable(TableName tableName)
+public HRegionServer getRSForFirstRegionInTable(TableName tableName)
                                          throws IOException,
                                                 InterruptedException
 Tool to get the reference to the region server object that holds the
@@ -4373,7 +4373,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 startMiniMapReduceCluster
-public org.apache.hadoop.mapred.MiniMRCluster startMiniMapReduceCluster()
+public org.apache.hadoop.mapred.MiniMRCluster startMiniMapReduceCluster()
                                                                  throws IOException
 Starts a MiniMRCluster with a default number of TaskTrackers.
@@ -4389,7 +4389,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 shutdownMiniMapReduceCluster
-public void shutdownMiniMapReduceCluster()
+public void shutdownMiniMapReduceCluster()
 Stops the previously started MiniMRCluster.
 
 
@@ -4399,7 +4399,7 @@ publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.htm
 
 
 createMockRegionServerService
-public RegionServerServices createMockRegionServerService()
+public RegionServerServices createMockRegionServerService()
                                                    throws IOException
 Create a stubbed out RegionServerServices, mainly for getting at the FS.
 
@@ -4414,7 

[18/51] [partial] hbase-site git commit: Published site at .

2018-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
index b4404ce..2b6bb7c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/ObserverContext.html
@@ -587,6 +587,12 @@
 
 
 
+default void
+RegionServerObserver.postExecuteProcedures(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
+This will be called after executing procedures (see the observer sketch below).
+
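A sketch of a region server coprocessor hooking the new callback; the class name and log line are invented for illustration:

  import java.io.IOException;
  import java.util.Optional;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;

  /** Example observer: reacts after the region server has executed procedures. */
  public class ProcedureAuditObserver implements RegionServerCoprocessor, RegionServerObserver {
    @Override
    public Optional<RegionServerObserver> getRegionServerObserver() {
      return Optional.of(this);
    }

    @Override
    public void postExecuteProcedures(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
        throws IOException {
      // The default implementation is a no-op; here we just note the event.
      System.out.println("procedures executed on " + ctx.getEnvironment().getServerName());
    }
  }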
+
+
 default boolean
 RegionObserver.postExists(ObserverContext<RegionCoprocessorEnvironment> c,
   Get get,
@@ -594,14 +600,14 @@
 Called after the client tests for existence using a Get.


-
+
 default void
 RegionObserver.postFlush(ObserverContext<RegionCoprocessorEnvironment> c,
   FlushLifeCycleTracker tracker)
 Called after the memstore is flushed to disk.


-
+
 default void
 RegionObserver.postFlush(ObserverContext<RegionCoprocessorEnvironment> c,
   Store store,
@@ -610,27 +616,27 @@
 Called after a Store's memstore is flushed to disk.


-
+
 default void
 MasterObserver.postGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx,
   ClusterMetrics status)
 Called after get cluster status.


-
+
 default void
 MasterObserver.postGetLocks(ObserverContext<MasterCoprocessorEnvironment> ctx)
 Called after a getLocks request has been processed.


-
+
 default void
 MasterObserver.postGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx,
   NamespaceDescriptor ns)
 Called after a getNamespaceDescriptor request has been processed.


-
+
 default void
 RegionObserver.postGetOp(ObserverContext<RegionCoprocessorEnvironment> c,
   Get get,
@@ -638,20 +644,20 @@
 Called after the client performs a Get


-
+
 default void
 MasterObserver.postGetProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx)
 Called after a getProcedures request has been processed.


-
+
 default void
 MasterObserver.postGetReplicationPeerConfig(ObserverContext<MasterCoprocessorEnvironment> ctx,
 String peerId)
 Called after get the configured ReplicationPeerConfig for the specified peer


-
+
 default void
 MasterObserver.postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
 List<TableName> tableNamesList,
@@ -660,7 +666,7 @@
 Called after a getTableDescriptors request has been processed.


-
+
 default void
 MasterObserver.postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
 List<TableDescriptor> descriptors,
@@ -668,7 +674,7 @@
 Called after a getTableNames request has been processed.


-
+
 default Result
 RegionObserver.postIncrement(ObserverContext<RegionCoprocessorEnvironment> c,
   Increment increment,
@@ -676,7 +682,7 @@
 Called after increment


-
+
 default DeleteTracker
 RegionObserver.postInstantiateDeleteTracker(ObserverContext<RegionCoprocessorEnvironment> ctx,
 DeleteTracker delTracker)
@@ -685,54 +691,54 @@


-
+
 default void
 MasterObserver.postListDecommissionedRegionServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
 Called after list decommissioned region servers.


-
+
 default void
 MasterObserver.postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
 List<NamespaceDescriptor> descriptors)
 Called after a listNamespaceDescriptors request has been processed.


-
+
 default void
 MasterObserver.postListReplicationPeers(ObserverContext<MasterCoprocessorEnvironment> ctx,
 String regex)
 Called after list replication peers.


-
+
 default void
 MasterObserver.postListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
 SnapshotDescription snapshot)
 Called after listSnapshots request has been processed.


-
+
 default void
 MasterObserver.postLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx)
 Called after heartbeat to a lock.


-
+
 default void

[18/51] [partial] hbase-site git commit: Published site at .

2018-01-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html 
b/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
index e44efee..ab927d5 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public interface LoadBalancer
+public interface LoadBalancer
 extends org.apache.hadoop.conf.Configurable, Stoppable, ConfigurationObserver
 Makes decisions about the placement and movement of Regions across RegionServers.
@@ -253,7 +253,7 @@ extends org.apache.hadoop.conf.Configurable, 
 void
-setClusterStatus(ClusterStatus st)
+setClusterMetrics(ClusterMetrics st)
 Set the current cluster status.
 
 
@@ -298,7 +298,7 @@ extends org.apache.hadoop.conf.Configurable, 
 
 TABLES_ON_MASTER
-static final String TABLES_ON_MASTER
+static final String TABLES_ON_MASTER
 Master can carry regions as of hbase-2.0.0.
 By default, it carries no tables.
 TODO: Add any | system as flags to indicate what it can do.
@@ -314,7 +314,7 @@ extends org.apache.hadoop.conf.Configurable, 
 
 SYSTEM_TABLES_ON_MASTER
-static final String SYSTEM_TABLES_ON_MASTER
+static final String SYSTEM_TABLES_ON_MASTER
 Master carries system tables.
 
 See Also:
@@ -328,7 +328,7 @@ extends org.apache.hadoop.conf.Configurable, 
 
 BOGUS_SERVER_NAME
-static final ServerName BOGUS_SERVER_NAME
+static final ServerName BOGUS_SERVER_NAME
 
 
 
@@ -339,13 +339,13 @@ extends org.apache.hadoop.conf.Configurable, 
+
 
 
 
 
-setClusterStatus
-void setClusterStatus(ClusterStatus st)
+setClusterMetrics
+void setClusterMetrics(ClusterMetrics st)
 Set the current cluster status. This allows a LoadBalancer to map host name to a server.
 
 Parameters:
@@ -359,7 +359,7 @@ extends org.apache.hadoop.conf.Configurable, 
 
 setClusterLoad
-void setClusterLoad(Map<TableName, Map<ServerName, List<RegionInfo>>> ClusterLoad)
+void setClusterLoad(Map<TableName, Map<ServerName, List<RegionInfo>>> ClusterLoad)
 Pass RegionStates and allow balancer to set the current cluster load.
 
 Parameters:
@@ -373,7 +373,7 @@ extends org.apache.hadoop.conf.Configurable, 
 
 setMasterServices
-void setMasterServices(MasterServices masterServices)
+void setMasterServices(MasterServices masterServices)
 Set the master service.
 
 Parameters:
@@ -387,7 +387,7 @@ extends org.apache.hadoop.conf.Configurable, 
 
 balanceCluster
-List<RegionPlan> balanceCluster(TableName tableName,
+List<RegionPlan> balanceCluster(TableName tableName,
                                 Map<ServerName, List<RegionInfo>> clusterState)
                          throws HBaseIOException
 Perform the major balance operation.
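To make the contract concrete, a sketch of a trivial balancer restricted to the two methods shown above; it records the metrics and never proposes a move (an illustration, not an HBase-provided balancer):

  // Sketch of a no-op balancer implementing the two methods discussed above.
  import java.util.Collections;
  import java.util.List;
  import java.util.Map;
  import org.apache.hadoop.hbase.ClusterMetrics;
  import org.apache.hadoop.hbase.HBaseIOException;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.RegionInfo;
  import org.apache.hadoop.hbase.master.RegionPlan;

  public abstract class NoOpBalancerSketch /* implements LoadBalancer */ {
    private ClusterMetrics clusterMetrics;

    public void setClusterMetrics(ClusterMetrics st) {
      // Remember the latest view of the cluster; a real balancer would use it
      // to map host names to servers when computing plans.
      this.clusterMetrics = st;
    }

    public List<RegionPlan> balanceCluster(TableName tableName,
        Map<ServerName, List<RegionInfo>> clusterState) throws HBaseIOException {
      // An empty plan list means "nothing to move".
      return Collections.emptyList();
    }
  }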
@@ -408,7 +408,7 @@ extends org.apache.hadoop.conf.Configurable, 
 
 balanceCluster
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionPlanbalanceCluster(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in 

[18/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134import 

[18/51] [partial] hbase-site git commit: Published site at .

2018-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/Filter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/Filter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/Filter.html
index 7a86a6c..d40260f 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/Filter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/Filter.html
@@ -834,6 +834,6 @@
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/FilterBase.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/FilterBase.html 
b/devapidocs/org/apache/hadoop/hbase/filter/FilterBase.html
index d9f1596..89d601a 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/FilterBase.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/FilterBase.html
@@ -744,6 +744,6 @@
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/FilterList.Operator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/FilterList.Operator.html 
b/devapidocs/org/apache/hadoop/hbase/filter/FilterList.Operator.html
index 3f5942a..0a6b2ac 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/FilterList.Operator.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/FilterList.Operator.html
@@ -349,6 +349,6 @@ not permitted.)
 
 
 
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/FilterList.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/FilterList.html 
b/devapidocs/org/apache/hadoop/hbase/filter/FilterList.html
index 8a831b7..9703e6a 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/FilterList.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/FilterList.html
@@ -1114,6 +1114,6 @@
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/FilterListBase.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/FilterListBase.html 
b/devapidocs/org/apache/hadoop/hbase/filter/FilterListBase.html
index 6bc26c0..61a8faa 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/FilterListBase.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/FilterListBase.html
@@ -710,6 +710,6 @@ extends 
 
 
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithAND.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithAND.html 
b/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithAND.html
index b2a4316..f336c56 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithAND.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithAND.html
@@ -702,6 +702,6 @@
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithOR.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithOR.html 
b/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithOR.html
index ac3b473..e274efa 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithOR.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithOR.html
@@ -790,6 +790,6 @@
-Copyright © 2007–2017 The Apache

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
index dd18eaa..ea0347e 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10};
+var methods = 
{"i0":9,"i1":9,"i2":10,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10,"i12":9,"i13":9,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestAsyncProcess
+public class TestAsyncProcess
 extends Object
 title="class or interface in java.lang">Object
 
 
@@ -362,221 +362,238 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
  longmaxHeapSizePerRequest)
 
 
+private void
+checkPeriodicFlushParameters(org.apache.hadoop.hbase.client.ClusterConnection conn,
+    TestAsyncProcess.MyAsyncProcess ap,
+    long setTO, long expectTO, long setTT, long expectTT)
+
+
 private static 
org.apache.hadoop.hbase.client.BufferedMutatorParams
 createBufferedMutatorParams(TestAsyncProcess.MyAsyncProcess ap,
     org.apache.hadoop.hbase.TableName name)
org.apache.hadoop.hbase.TableNamename)
 
-
+
 private static 
org.apache.hadoop.hbase.client.ClusterConnection
 createHConnection()
 
-
+
 private static 
org.apache.hadoop.hbase.client.ClusterConnection
 createHConnectionCommon()
 
-
+
 private static 
org.apache.hadoop.hbase.client.ClusterConnection
 createHConnectionWithReplicas()
 
-
+
 (package private) static 
org.apache.hadoop.hbase.client.MultiResponse
 createMultiResponse(org.apache.hadoop.hbase.client.MultiAction multi,
     AtomicInteger nbMultiResponse,
     AtomicInteger nbActions,
     TestAsyncProcess.ResponseGenerator gen)
 
-
+
 private org.apache.hadoop.hbase.client.Put
 createPut(int regCnt, boolean success)

-
+
 private TestAsyncProcess.MyAsyncProcessWithReplicas
 createReplicaAp(int replicaAfterMs, int primaryMs, int replicaMs)

-
+
 private TestAsyncProcess.MyAsyncProcessWithReplicas
 createReplicaAp(int replicaAfterMs, int primaryMs, int replicaMs, int retries)

-
+
 private void
 doSubmitRequest(long maxHeapSizePerRequest, long putsHeapSize)

-
+
 private static List<org.apache.hadoop.hbase.client.Get>
 makeTimelineGets(byte[]... rows)

-
+
 private static void
 setMockLocation(org.apache.hadoop.hbase.client.ClusterConnection hc, byte[] row,
     org.apache.hadoop.hbase.RegionLocations result)
 
-
+
 void
 testAction()
 
-
+
 void
 testBatch()
 
-
+
 void
 testBufferedMutatorImplWithSharedPool()
 
-
+
 void
 testCallQueueTooLarge()
 
-
+
 void
 testErrorsServers()
 
-
+
 void
 testFail()
 
-
+
 void
 testFailAndSuccess()
 
-
+
 void
 testFlush()
 
-
+
 void
 testGlobalErrors()
 
-
+
 void
 testHTableFailedPutAndNewPut()
 
-
+
 void
 testHTablePutSuccess()
 
-
+
 void
 testListRowAccess()
 
-
+
 void
 testMaxTask()
 
-
+
 void
 

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.html
index e0852c7..dc9906a 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcDuplexHandler.html
@@ -100,13 +100,13 @@ var activeTableTab = "activeTableTab";
 java.lang.Object
 
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerAdapter
+org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerAdapter
 
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAdapter
+org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter
 
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler
+org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler
 
 
 org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler
@@ -124,13 +124,13 @@ var activeTableTab = "activeTableTab";
 
 
 All Implemented Interfaces:
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler, 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandler, 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOutboundHandler
+org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler, 
org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandler, 
org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandler
 
 
 
 @InterfaceAudience.Private
 class NettyRpcDuplexHandler
-extends org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler
+extends org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler
 The netty rpc handler.
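The change above is purely a repackaging from the shaded to the thirdparty Netty namespace; the handler pattern itself is stock Netty. A minimal duplex handler of the same shape, written against the standard io.netty packages for illustration:

  import io.netty.buffer.ByteBuf;
  import io.netty.channel.ChannelDuplexHandler;
  import io.netty.channel.ChannelHandlerContext;
  import io.netty.channel.ChannelPromise;

  /** Minimal duplex handler: sees traffic in both directions on the pipeline. */
  public class LoggingDuplexHandler extends ChannelDuplexHandler {
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
      if (msg instanceof ByteBuf) {
        System.out.println("inbound " + ((ByteBuf) msg).readableBytes() + " bytes");
      }
      ctx.fireChannelRead(msg); // pass the message up the pipeline
    }

    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise)
        throws Exception {
      System.out.println("outbound message: " + msg.getClass().getSimpleName());
      ctx.write(msg, promise); // pass the message down the pipeline
    }
  }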
 
 Since:
@@ -149,11 +149,11 @@ extends 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler
 Nested Class Summary
 
-
+
 
 
-Nested classes/interfaces inherited from 
interfaceorg.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler.Sharable
+Nested classes/interfaces inherited from 
interfaceorg.apache.hbase.thirdparty.io.netty.channel.ChannelHandler
+org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler.Sharable
 
 
 
@@ -230,65 +230,65 @@ extends 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler
 
 void
-channelInactive(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContextctx)
+channelInactive(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx)
 
 
 void
-channelRead(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContextctx,
+channelRead(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx,
http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Objectmsg)
 
 
 private void
-cleanupCalls(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContextctx,
+cleanupCalls(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx,
 http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in 
java.io">IOExceptionerror)
 
 
 void
-exceptionCaught(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContextctx,
+exceptionCaught(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx,
http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in 
java.lang">Throwablecause)
 
 
 private void
-readResponse(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContextctx,
-
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufbuf)
+readResponse(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx,
+
org.apache.hbase.thirdparty.io.netty.buffer.ByteBufbuf)
 
 
 void
-userEventTriggered(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContextctx,
+userEventTriggered(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx,
   http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Objectevt)
 
 
 void
-write(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContextctx,
+write(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx,
  http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Objectmsg,
- 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPromisepromise)
+ 
org.apache.hbase.thirdparty.io.netty.channel.ChannelPromisepromise)
 
 
 private void

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index caa6c76..ff276f7 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class ConnectionImplementation
+class ConnectionImplementation
 extends Object
 implements ClusterConnection, Closeable
 Main implementation of Connection and ClusterConnection interfaces.
@@ -843,7 +843,7 @@ implements 
 
 RETRIES_BY_SERVER_KEY
-public static final String RETRIES_BY_SERVER_KEY
+public static final String RETRIES_BY_SERVER_KEY

 See Also:
 Constant Field Values
@@ -856,7 +856,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -865,7 +865,7 @@ implements 
 
 RESOLVE_HOSTNAME_ON_FAIL_KEY
-private static final String RESOLVE_HOSTNAME_ON_FAIL_KEY
+private static final String RESOLVE_HOSTNAME_ON_FAIL_KEY

 See Also:
 Constant Field Values
@@ -878,7 +878,7 @@ implements 
 
 hostnamesCanChange
-private final boolean hostnamesCanChange
+private final boolean hostnamesCanChange



@@ -887,7 +887,7 @@ implements

 pause
-private final long pause
+private final long pause



@@ -896,7 +896,7 @@ implements

 pauseForCQTBE
-private final long pauseForCQTBE
+private final long pauseForCQTBE



@@ -905,7 +905,7 @@ implements

 useMetaReplicas
-private boolean useMetaReplicas
+private boolean useMetaReplicas



@@ -914,7 +914,7 @@ implements

 metaReplicaCallTimeoutScanInMicroSecond
-private final int metaReplicaCallTimeoutScanInMicroSecond
+private final int metaReplicaCallTimeoutScanInMicroSecond



@@ -923,7 +923,7 @@ implements

 numTries
-private final int numTries
+private final int numTries



@@ -932,7 +932,7 @@ implements

 rpcTimeout
-final int rpcTimeout
+final int rpcTimeout
 
 
 
@@ -941,7 +941,7 @@ implements 
 
 nonceGenerator
-private static volatile NonceGenerator nonceGenerator
+private static volatile NonceGenerator nonceGenerator
 Global nonceGenerator shared per client. Currently there's no reason to limit its scope.
 Once it's set under nonceGeneratorCreateLock, it is never unset or changed.
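The two field comments here describe a lazy, lock-guarded singleton. A generic sketch of that initialization pattern (names invented; not the HBase code):

  /** Sketch of the "set once under a lock, never changed" pattern described above. */
  class NonceGeneratorHolder {
    interface NonceGenerator { long newNonce(); }

    private static volatile NonceGenerator nonceGenerator;
    private static final Object nonceGeneratorCreateLock = new Object();

    static NonceGenerator get() {
      NonceGenerator ng = nonceGenerator;        // one volatile read on the fast path
      if (ng == null) {
        synchronized (nonceGeneratorCreateLock) {
          if (nonceGenerator == null) {          // re-check while holding the lock
            nonceGenerator = () -> java.util.concurrent.ThreadLocalRandom.current().nextLong();
          }
          ng = nonceGenerator;
        }
      }
      return ng;
    }
  }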
 
@@ -952,7 +952,7 @@ implements 
 
 nonceGeneratorCreateLock
-private static final Object nonceGeneratorCreateLock
+private static final Object nonceGeneratorCreateLock
 The nonce generator lock. Only taken when creating Connection, which gets a private copy.
 
 
@@ -962,7 +962,7 @@ implements 
 
 asyncProcess
-private final AsyncProcess asyncProcess
+private final AsyncProcess asyncProcess



@@ -971,7 +971,7 @@ implements

 stats
-private final ServerStatisticTracker stats
+private final ServerStatisticTracker stats



@@ -980,7 +980,7 @@ implements

 closed
-private volatile boolean closed
+private volatile boolean closed



@@ -989,7 +989,7 @@ implements

 aborted
-private volatile boolean aborted
+private volatile boolean aborted



@@ -998,7 +998,7 @@ implements

 clusterStatusListener
-ClusterStatusListener clusterStatusListener
+ClusterStatusListener clusterStatusListener



@@ -1007,7 +1007,7 @@ implements

 metaRegionLock
-private final Object metaRegionLock
+private final Object metaRegionLock



@@ -1016,7 +1016,7 @@ implements

 masterLock
-private final Object masterLock

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KVComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KVComparator.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KVComparator.html
index f8eace7..66b6656 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KVComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KVComparator.html
@@ -27,2569 +27,2540 @@
 019 */
 020package org.apache.hadoop.hbase;
 021
-022import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
-023import static 
org.apache.hadoop.hbase.util.Bytes.len;
-024
-025import java.io.DataInput;
-026import java.io.DataOutput;
-027import java.io.IOException;
-028import java.io.OutputStream;
-029import java.nio.ByteBuffer;
-030import java.util.ArrayList;
-031import java.util.Arrays;
-032import java.util.HashMap;
-033import java.util.Iterator;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Optional;
-037
-038import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
-039import 
org.apache.hadoop.hbase.util.Bytes;
-040import 
org.apache.hadoop.hbase.util.ClassSize;
-041import 
org.apache.hadoop.io.RawComparator;
-042import 
org.apache.yetus.audience.InterfaceAudience;
-043import org.slf4j.Logger;
-044import org.slf4j.LoggerFactory;
-045
-046import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-047
-048/**
-049 * An HBase Key/Value. This is the 
fundamental HBase Type.
+022import static 
org.apache.hadoop.hbase.util.Bytes.len;
+023
+024import java.io.DataInput;
+025import java.io.DataOutput;
+026import java.io.IOException;
+027import java.io.OutputStream;
+028import java.nio.ByteBuffer;
+029import java.util.ArrayList;
+030import java.util.Arrays;
+031import java.util.HashMap;
+032import java.util.Iterator;
+033import java.util.List;
+034import java.util.Map;
+035import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
+036import 
org.apache.hadoop.hbase.util.Bytes;
+037import 
org.apache.hadoop.hbase.util.ClassSize;
+038import 
org.apache.hadoop.io.RawComparator;
+039import 
org.apache.yetus.audience.InterfaceAudience;
+040import org.slf4j.Logger;
+041import org.slf4j.LoggerFactory;
+042
+043import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+044
+045/**
+046 * An HBase Key/Value. This is the fundamental HBase Type.
+047 * <p>
+048 * HBase applications and users should use the Cell interface and avoid directly using KeyValue and
+049 * member functions not defined in Cell.
 050 * <p>
-051 * HBase applications and users should use the Cell interface and avoid directly using KeyValue and
-052 * member functions not defined in Cell.
-053 * <p>
-054 * If being used client-side, the primary methods to access individual fields are
-055 * {@link #getRowArray()}, {@link #getFamilyArray()}, {@link #getQualifierArray()},
-056 * {@link #getTimestamp()}, and {@link #getValueArray()}. These methods allocate new byte arrays
-057 * and return copies. Avoid their use server-side.
-058 * <p>
-059 * Instances of this class are immutable. They do not implement Comparable but Comparators are
-060 * provided. Comparators change with context, whether user table or a catalog table comparison. Its
-061 * critical you use the appropriate comparator. There are Comparators for normal HFiles, Meta's
-062 * Hfiles, and bloom filter keys.
-063 * <p>
-064 * KeyValue wraps a byte array and takes offsets and lengths into passed array at where to start
-065 * interpreting the content as KeyValue. The KeyValue format inside a byte array is:
-066 * <code><keylength> <valuelength> <key> <value></code> Key is further
-067 * decomposed as: <code><rowlength> <row> <columnfamilylength>
-068 * <columnfamily> <columnqualifier>
-069 * <timestamp> <keytype></code> The <code>rowlength</code> maximum is
-070 * <code>Short.MAX_SIZE</code>, column family length maximum is <code>Byte.MAX_SIZE</code>, and
-071 * column qualifier + key length must be < <code>Integer.MAX_SIZE</code>. The column does not
-072 * contain the family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER}<br>
-073 * KeyValue can optionally contain Tags. When it contains tags, it is added in the byte array after
-074 * the value part. The format for this part is: <code><tagslength><tagsbytes></code>.
-075 * <code>tagslength</code> maximum is <code>Short.MAX_SIZE</code>. The <code>tagsbytes</code>
-076 * contain one or more tags where as each tag is of the form
-077 * <code><taglength><tagtype><tagbytes></code>. <code>tagtype</code> is one byte
-078 * and <code>taglength</code> maximum is <code>Short.MAX_SIZE</code> and it includes 1 byte type
-079 * length and actual tag bytes length.
-080 */
-081@InterfaceAudience.Private
-082public class KeyValue implements ExtendedCell {
-083  private
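The byte layout in this javadoc can be poked at directly from client code; a small sketch using the public constructor and the Cell accessors the comment recommends:

  import org.apache.hadoop.hbase.CellUtil;
  import org.apache.hadoop.hbase.KeyValue;
  import org.apache.hadoop.hbase.util.Bytes;

  public class KeyValueLayoutExample {
    public static void main(String[] args) {
      // <keylength> <valuelength> <key> <value>, with the key itself holding
      // row, family, qualifier, timestamp and type, as the javadoc above describes.
      KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), 42L, Bytes.toBytes("v"));
      System.out.println("row       = " + Bytes.toString(CellUtil.cloneRow(kv)));
      System.out.println("family    = " + Bytes.toString(CellUtil.cloneFamily(kv)));
      System.out.println("qualifier = " + Bytes.toString(CellUtil.cloneQualifier(kv)));
      System.out.println("ts        = " + kv.getTimestamp());
      System.out.println("value     = " + Bytes.toString(CellUtil.cloneValue(kv)));
      // The whole thing is one contiguous byte[]:
      System.out.println("serialized length = " + kv.getLength());
    }
  }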

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/Bytes.html 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
index 0cbf18b..fc852d9 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class Bytes
+public class Bytes
 extends Object
 implements Comparable<Bytes>
 Utility class that handles byte arrays, conversions to/from other types,
@@ -1293,7 +1293,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 UTF8_CSN
-private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String UTF8_CSN
+private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String UTF8_CSN
 
 
 
@@ -1302,7 +1302,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 EMPTY_BYTE_ARRAY
-private static finalbyte[] EMPTY_BYTE_ARRAY
+private static finalbyte[] EMPTY_BYTE_ARRAY
 
 
 
@@ -1311,7 +1311,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -1320,7 +1320,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_BOOLEAN
-public static final int SIZEOF_BOOLEAN
+public static final int SIZEOF_BOOLEAN
 Size of boolean in bytes
 
 See Also:
@@ -1334,7 +1334,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_BYTE
-public static final int SIZEOF_BYTE
+public static final int SIZEOF_BYTE
 Size of byte in bytes
 
 See Also:
@@ -1348,7 +1348,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_CHAR
-public static final int SIZEOF_CHAR
+public static final int SIZEOF_CHAR
 Size of char in bytes
 
 See Also:
@@ -1362,7 +1362,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_DOUBLE
-public static final int SIZEOF_DOUBLE
+public static final int SIZEOF_DOUBLE
 Size of double in bytes
 
 See Also:
@@ -1376,7 +1376,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_FLOAT
-public static final int SIZEOF_FLOAT
+public static final int SIZEOF_FLOAT
 Size of float in bytes
 
 See Also:
@@ -1390,7 +1390,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_INT
-public static final int SIZEOF_INT
+public static final int SIZEOF_INT
 Size of int in bytes
 
 See Also:
@@ -1404,7 +1404,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_LONG
-public static final int SIZEOF_LONG
+public static final int SIZEOF_LONG
 Size of long in bytes
 
 See Also:
@@ -1418,7 +1418,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 SIZEOF_SHORT
-public static final int SIZEOF_SHORT
+public static final int SIZEOF_SHORT
 Size of short in bytes
 
 See Also:
@@ -1432,7 +1432,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 MASK_FOR_LOWER_INT_IN_LONG
-public static final long MASK_FOR_LOWER_INT_IN_LONG
+public static final long MASK_FOR_LOWER_INT_IN_LONG
 Mask to apply to a long to reveal the lower int only. Use like this:
  int i = (int)(0xFFFFFFFF00000000L ^ some_long_value);
 
@@ -1447,7 +1447,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 ESTIMATED_HEAP_TAX
-public static final int ESTIMATED_HEAP_TAX
+public static final int ESTIMATED_HEAP_TAX
 Estimate of size cost to pay beyond payload in jvm for 
instance of byte [].
  Estimate based on study of jhat and jprofiler numbers.
 
@@ -1462,7 +1462,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 UNSAFE_UNALIGNED
-private static final boolean UNSAFE_UNALIGNED
+private static final boolean UNSAFE_UNALIGNED
 
 
 
@@ -1471,7 +1471,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 bytes
-private byte[] bytes
+private byte[] bytes
 
 
 
@@ -1480,7 +1480,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 offset
-private int offset
+private int offset
 
 
 
@@ -1489,7 +1489,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 length
-private int length
+private int length
 
 
 
@@ -1498,7 +1498,7 @@ implements 
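
As an aside, the SIZEOF_* constants above are the usual way to compute offsets when packing several values into one array, and the mask javadoc's trick extracts the lower int of a long. A minimal sketch (the two-field buffer layout is invented for the example):

    import org.apache.hadoop.hbase.util.Bytes;

    public class BytesConstantsDemo {
      public static void main(String[] args) {
        // Pack an int followed by a long, using SIZEOF_* for the offsets.
        byte[] buf = new byte[Bytes.SIZEOF_INT + Bytes.SIZEOF_LONG];
        Bytes.putInt(buf, 0, 42);
        Bytes.putLong(buf, Bytes.SIZEOF_INT, 1234567890123L);

        int i = Bytes.toInt(buf, 0);
        long l = Bytes.toLong(buf, Bytes.SIZEOF_INT);

        // Lower-int extraction as described for MASK_FOR_LOWER_INT_IN_LONG.
        int lower = (int) (0xFFFFFFFF00000000L ^ l);
        System.out.println(i + " " + l + " lower=" + lower);
      }
    }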

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
index 6fecbc9..2accda0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
@@ -34,4140 +34,4141 @@
 026import 
java.nio.charset.StandardCharsets;
 027import java.util.ArrayList;
 028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.EnumSet;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.LinkedList;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Set;
-037import java.util.concurrent.Callable;
-038import 
java.util.concurrent.ExecutionException;
-039import java.util.concurrent.Future;
-040import java.util.concurrent.TimeUnit;
-041import 
java.util.concurrent.TimeoutException;
-042import 
java.util.concurrent.atomic.AtomicInteger;
-043import 
java.util.concurrent.atomic.AtomicReference;
-044import java.util.regex.Pattern;
-045import java.util.stream.Collectors;
-046import java.util.stream.Stream;
-047import 
org.apache.hadoop.conf.Configuration;
-048import 
org.apache.hadoop.hbase.Abortable;
-049import 
org.apache.hadoop.hbase.CacheEvictionStats;
-050import 
org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import 
org.apache.hadoop.hbase.ClusterStatus;
-053import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-054import 
org.apache.hadoop.hbase.HBaseConfiguration;
-055import 
org.apache.hadoop.hbase.HConstants;
-056import 
org.apache.hadoop.hbase.HRegionInfo;
-057import 
org.apache.hadoop.hbase.HRegionLocation;
-058import 
org.apache.hadoop.hbase.HTableDescriptor;
-059import 
org.apache.hadoop.hbase.MasterNotRunningException;
-060import 
org.apache.hadoop.hbase.MetaTableAccessor;
-061import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-062import 
org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import 
org.apache.hadoop.hbase.NotServingRegionException;
-064import 
org.apache.hadoop.hbase.RegionLoad;
-065import 
org.apache.hadoop.hbase.RegionLocations;
-066import 
org.apache.hadoop.hbase.ServerName;
-067import 
org.apache.hadoop.hbase.TableExistsException;
-068import 
org.apache.hadoop.hbase.TableName;
-069import 
org.apache.hadoop.hbase.TableNotDisabledException;
-070import 
org.apache.hadoop.hbase.TableNotFoundException;
-071import 
org.apache.hadoop.hbase.UnknownRegionException;
-072import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-073import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-074import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-075import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-076import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-077import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-078import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-079import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-080import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-081import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-082import 
org.apache.hadoop.hbase.quotas.QuotaRetriever;
-083import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-084import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-085import 
org.apache.hadoop.hbase.replication.ReplicationException;
-086import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-087import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-088import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-090import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-091import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-092import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-093import 
org.apache.hadoop.hbase.util.Addressing;
-094import 
org.apache.hadoop.hbase.util.Bytes;
-095import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-096import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-097import 
org.apache.hadoop.hbase.util.Pair;
-098import 
org.apache.hadoop.ipc.RemoteException;
-099import 
org.apache.hadoop.util.StringUtils;
-100import 
org.apache.yetus.audience.InterfaceAudience;
-101import 
org.apache.yetus.audience.InterfaceStability;
-102import org.slf4j.Logger;
-103import org.slf4j.LoggerFactory;
-104
-105import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-106import 

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html 
b/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
index cc11564..1143409 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
@@ -49,968 +49,967 @@
 041import java.util.concurrent.Future;
 042import java.util.concurrent.TimeUnit;
 043import 
java.util.concurrent.TimeoutException;
-044
-045import 
org.apache.commons.cli.CommandLine;
-046import 
org.apache.hadoop.conf.Configuration;
-047import 
org.apache.hadoop.hbase.ClusterStatus.Option;
-048import 
org.apache.hadoop.hbase.HBaseConfiguration;
-049import 
org.apache.hadoop.hbase.HConstants;
-050import 
org.apache.hadoop.hbase.ServerName;
-051import 
org.apache.hadoop.hbase.TableName;
-052import 
org.apache.hadoop.hbase.client.Admin;
-053import 
org.apache.hadoop.hbase.client.Connection;
-054import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-055import 
org.apache.hadoop.hbase.client.Get;
-056import 
org.apache.hadoop.hbase.client.RegionInfo;
-057import 
org.apache.hadoop.hbase.client.Result;
-058import 
org.apache.hadoop.hbase.client.ResultScanner;
-059import 
org.apache.hadoop.hbase.client.Scan;
-060import 
org.apache.hadoop.hbase.client.Table;
-061import 
org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
-062import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-063import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-064import 
org.apache.yetus.audience.InterfaceAudience;
-065import org.slf4j.Logger;
-066import org.slf4j.LoggerFactory;
-067
-068/**
-069 * Tool for loading/unloading regions to/from a given regionserver. This tool can be run from the
-070 * command line directly as a utility. Supports Ack/NoAck modes for loading/unloading operations. Ack
-071 * mode acknowledges that regions are online after movement, while NoAck mode is a best-effort mode
-072 * that improves performance but will still move on if a region is stuck/not moved. The motivation
-073 * behind NoAck mode is RS shutdown: even if a region is stuck, upon shutdown the master will move it
-074 * anyway. This can also be used by constructing an object using the builder and then calling
-075 * {@link #load()} or {@link #unload()} methods for the desired operations.
-076 */
-077@InterfaceAudience.Public
-078public class RegionMover extends 
AbstractHBaseTool {
-079  public static final String 
MOVE_RETRIES_MAX_KEY = "hbase.move.retries.max";
-080  public static final String 
MOVE_WAIT_MAX_KEY = "hbase.move.wait.max";
-081  public static final String 
SERVERSTART_WAIT_MAX_KEY = "hbase.serverstart.wait.max";
-082  public static final int 
DEFAULT_MOVE_RETRIES_MAX = 5;
-083  public static final int 
DEFAULT_MOVE_WAIT_MAX = 60;
-084  public static final int 
DEFAULT_SERVERSTART_WAIT_MAX = 180;
-085  static final Logger LOG = 
LoggerFactory.getLogger(RegionMover.class);
-086  private RegionMoverBuilder rmbuilder;
-087  private boolean ack = true;
-088  private int maxthreads = 1;
-089  private int timeout;
-090  private String loadUnload;
-091  private String hostname;
-092  private String filename;
-093  private String excludeFile;
-094  private int port;
-095
-096  private RegionMover(RegionMoverBuilder 
builder) {
-097this.hostname = builder.hostname;
-098this.filename = builder.filename;
-099this.excludeFile = 
builder.excludeFile;
-100this.maxthreads = 
builder.maxthreads;
-101this.ack = builder.ack;
-102this.port = builder.port;
-103this.timeout = builder.timeout;
-104  }
-105
-106  private RegionMover() {
-107  }
-108
-109  /**
-110   * Builder for RegionMover. Use the {@link #build()} method to create a RegionMover object. Has
-111   * {@link #filename(String)}, {@link #excludeFile(String)}, {@link #maxthreads(int)},
-112   * {@link #ack(boolean)}, and {@link #timeout(int)} methods to set the corresponding options.
-113   */
-114  public static class RegionMoverBuilder 
{
-115private boolean ack = true;
-116private int maxthreads = 1;
-117private int timeout = 
Integer.MAX_VALUE;
-118private String hostname;
-119private String filename;
-120private String excludeFile = null;
-121String defaultDir = 
System.getProperty("java.io.tmpdir");
-122
-123private int port = 
HConstants.DEFAULT_REGIONSERVER_PORT;
-124
-125/**
-126 * @param hostname Hostname to unload 
regions from or load regions to. Can be either hostname
-127 * or hostname:port.
-128 */
-129public RegionMoverBuilder(String 
hostname) {
-130  String[] splitHostname = 
hostname.toLowerCase().split(":");
-131  this.hostname = splitHostname[0];
-132  if (splitHostname.length == 2) {
-133this.port = 
Integer.parseInt(splitHostname[1]);
-134  }
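
Per the javadoc above, the builder is the programmatic entry point. A hedged sketch of draining and then reloading one server's regions (the host name and file path are placeholders, not values from this commit):

    import org.apache.hadoop.hbase.util.RegionMover;
    import org.apache.hadoop.hbase.util.RegionMover.RegionMoverBuilder;

    public class RegionMoverExample {
      public static void main(String[] args) throws Exception {
        RegionMover mover = new RegionMoverBuilder("rs1.example.com:16020")
            .filename("/tmp/rs1-regions") // unloaded region names are parked here
            .maxthreads(4)
            .ack(true)                    // wait until each region is confirmed online
            .timeout(300)
            .build();
        boolean unloaded = mover.unload(); // drain regions off the server
        // ... perform maintenance on the regionserver ...
        boolean loaded = mover.load();     // and move them back
        System.out.println("unloaded=" + unloaded + " loaded=" + loaded);
      }
    }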

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index b06fe09..e8c7328 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -25,8 +25,8 @@ under the License.
 en-us
 2007 - 2017 The Apache Software Foundation
 
-  File: 3457,
- Errors: 19536,
+  File: 3458,
+ Errors: 19378,
  Warnings: 0,
  Infos: 0
   
@@ -167,7 +167,7 @@ under the License.
   0
 
 
-  2
+  0
 
   
   
@@ -1525,7 +1525,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -2897,7 +2897,7 @@ under the License.
   0
 
 
-  3
+  2
 
   
   
@@ -4199,7 +4199,7 @@ under the License.
   0
 
 
-  4
+  3
 
   
   
@@ -5151,7 +5151,7 @@ under the License.
   0
 
 
-  4
+  0
 
   
   
@@ -5277,7 +5277,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -5366,6 +5366,20 @@ under the License.
   
   
 
+  http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.log.HBaseMarkers.java;>org/apache/hadoop/hbase/log/HBaseMarkers.java
+
+
+  0
+
+
+  0
+
+
+  1
+
+  
+  
+
   http://hbase.apache.org/checkstyle.html#org.apache.hadoop.hbase.RawCellBuilder.java;>org/apache/hadoop/hbase/RawCellBuilder.java
 
 
@@ -5627,7 +5641,7 @@ under the License.
   0
 
 
-  7
+  4
 
   
   
@@ -5767,7 +5781,7 @@ under the License.
   0
 
 
-  5
+  4
 
   
   
@@ -9379,7 +9393,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -9519,7 +9533,7 @@ under the License.
   0
 
 
-  2
+  3
 
   
   
@@ -9589,7 +9603,7 @@ under the License.
   0
 
 
-  9
+  8
 
   
   
@@ -9715,7 +9729,7 @@ under the License.
   0
 
 
-  4
+  2
 
   
   
@@ -9771,7 +9785,7 @@ under the License.
   0
 
 
-  2
+  0
 
   
   
@@ -10527,7 +10541,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -10807,7 +10821,7 @@ under the License.
   0
 
 
-  5
+  1
 
   
   
@@ -11521,7 +11535,7 @@ under the License.
   0
 
 
-  4
+  0
 
   
   
@@ -12151,7 +12165,7 @@ under the License.
   0
 
 
-  3
+  1
 
   
   
@@ -12361,7 +12375,7 @@ under the License.
   0
 
 
-  22
+  21
 
   
   
@@ -12837,7 +12851,7 @@ under the License.
   0
 
 
-  22
+

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/filter/PrefixFilter.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/PrefixFilter.html 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/PrefixFilter.html
index 00cb205..c585e32 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/PrefixFilter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/PrefixFilter.html
@@ -58,107 +58,113 @@
 050return prefix;
 051  }
 052
-053  public boolean filterRowKey(Cell firstRowCell) {
-054    if (firstRowCell == null || this.prefix == null)
-055      return true;
-056    if (filterAllRemaining()) return true;
-057    int length = firstRowCell.getRowLength();
-058    if (length < prefix.length) return true;
-059    // if they are equal, return false => pass row
-060    // else return true, filter row
-061    // if we are passed the prefix, set flag
-062    int cmp;
-063    if (firstRowCell instanceof ByteBufferCell) {
-064      cmp = ByteBufferUtils.compareTo(((ByteBufferCell) firstRowCell).getRowByteBuffer(),
-065          ((ByteBufferCell) firstRowCell).getRowPosition(), this.prefix.length,
-066          this.prefix, 0, this.prefix.length);
-067    } else {
-068      cmp = Bytes.compareTo(firstRowCell.getRowArray(), firstRowCell.getRowOffset(),
-069          this.prefix.length, this.prefix, 0, this.prefix.length);
-070    }
-071    if ((!isReversed() && cmp > 0) || (isReversed() && cmp < 0)) {
-072      passedPrefix = true;
-073    }
-074    filterRow = (cmp != 0);
-075    return filterRow;
-076  }
-077
-078  @Deprecated
-079  @Override
-080  public ReturnCode filterKeyValue(final 
Cell c) {
-081return filterCell(c);
-082  }
-083
-084  @Override
-085  public ReturnCode filterCell(final Cell 
c) {
-086if (filterRow) return 
ReturnCode.NEXT_ROW;
-087return ReturnCode.INCLUDE;
-088  }
-089
-090  public boolean filterRow() {
-091return filterRow;
-092  }
-093
-094  public void reset() {
-095filterRow = true;
-096  }
-097
-098  public boolean filterAllRemaining() {
-099return passedPrefix;
-100  }
-101
-102  public static Filter createFilterFromArguments(ArrayList<byte []> filterArguments) {
-103
Preconditions.checkArgument(filterArguments.size() == 1,
-104"Expected 
1 but got: %s", filterArguments.size());
-105byte [] prefix = 
ParseFilter.removeQuotesFromByteArray(filterArguments.get(0));
-106return new PrefixFilter(prefix);
-107  }
-108
-109  /**
-110   * @return The filter serialized using 
pb
-111   */
-112  public byte [] toByteArray() {
-113FilterProtos.PrefixFilter.Builder 
builder =
-114  
FilterProtos.PrefixFilter.newBuilder();
-115if (this.prefix != null) 
builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix));
-116return 
builder.build().toByteArray();
-117  }
-118
-119  /**
-120   * @param pbBytes A pb serialized 
{@link PrefixFilter} instance
-121   * @return An instance of {@link PrefixFilter} made from <code>bytes</code>
-122   * @throws 
org.apache.hadoop.hbase.exceptions.DeserializationException
-123   * @see #toByteArray
-124   */
-125  public static PrefixFilter 
parseFrom(final byte [] pbBytes)
-126  throws DeserializationException {
-127FilterProtos.PrefixFilter proto;
-128try {
-129  proto = 
FilterProtos.PrefixFilter.parseFrom(pbBytes);
-130} catch 
(InvalidProtocolBufferException e) {
-131  throw new 
DeserializationException(e);
-132}
-133return new 
PrefixFilter(proto.hasPrefix()?proto.getPrefix().toByteArray():null);
-134  }
-135
-136  /**
-137   * @param o the other filter to compare 
with
-138   * @return true if and only if the 
fields of the filter that are serialized
-139   * are equal to the corresponding 
fields in other.  Used for testing.
-140   */
-141  boolean areSerializedFieldsEqual(Filter 
o) {
-142if (o == this) return true;
-143if (!(o instanceof PrefixFilter)) 
return false;
-144
-145PrefixFilter other = 
(PrefixFilter)o;
-146return Bytes.equals(this.getPrefix(), 
other.getPrefix());
-147  }
-148
-149  @Override
-150  public String toString() {
-151return 
this.getClass().getSimpleName() + " " + Bytes.toStringBinary(this.prefix);
-152  }
-153}
+053  @Override
+054  public boolean filterRowKey(Cell firstRowCell) {
+055    if (firstRowCell == null || this.prefix == null)
+056      return true;
+057    if (filterAllRemaining()) return true;
+058    int length = firstRowCell.getRowLength();
+059    if (length < prefix.length) return true;
+060    // if they are equal, return false => pass row
+061    // else return true, filter row
+062    // if we are passed the prefix, set flag
+063    int cmp;
+064    if (firstRowCell instanceof ByteBufferCell) {
+065      cmp = ByteBufferUtils.compareTo(((ByteBufferCell) firstRowCell).getRowByteBuffer(),
+066          ((ByteBufferCell) 
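
Client-side, the filter above is typically attached to a Scan so that only rows whose key begins with the prefix are returned, and the scan stops early once the prefix range is passed. A sketch (table name and prefix invented for the example):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.filter.PrefixFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PrefixScanExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("t1"))) { // hypothetical table
          Scan scan = new Scan();
          scan.setFilter(new PrefixFilter(Bytes.toBytes("user123|"))); // rows starting with the prefix
          try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result r : scanner) {
              System.out.println(Bytes.toStringBinary(r.getRow()));
            }
          }
        }
      }
    }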

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
index f1a2443..a469e93 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
@@ -1350,415 +1350,415 @@
 1342return delete;
 1343  }
 1344
-1345  public static Put 
makeBarrierPut(byte[] encodedRegionName, long seq, byte[] tableName) {
-1346byte[] seqBytes = 
Bytes.toBytes(seq);
-1347return new Put(encodedRegionName)
-1348
.addImmutable(HConstants.REPLICATION_BARRIER_FAMILY, seqBytes, seqBytes)
-1349
.addImmutable(HConstants.REPLICATION_META_FAMILY, tableNameCq, tableName);
-1350  }
-1351
-1352
-1353  public static Put 
makeDaughterPut(byte[] encodedRegionName, byte[] value) {
-1354return new 
Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1355daughterNameCq, value);
-1356  }
-1357
-1358  public static Put makeParentPut(byte[] 
encodedRegionName, byte[] value) {
-1359return new 
Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1360parentNameCq, value);
-1361  }
-1362
-1363  /**
-1364   * Adds split daughters to the Put
-1365   */
-1366  public static Put 
addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB) {
-1367if (splitA != null) {
-1368  put.addImmutable(
-1369HConstants.CATALOG_FAMILY, 
HConstants.SPLITA_QUALIFIER, RegionInfo.toByteArray(splitA));
-1370}
-1371if (splitB != null) {
-1372  put.addImmutable(
-1373HConstants.CATALOG_FAMILY, 
HConstants.SPLITB_QUALIFIER, RegionInfo.toByteArray(splitB));
-1374}
-1375return put;
-1376  }
-1377
-1378  /**
-1379   * Put the passed <code>puts</code> to the <code>hbase:meta</code> table.
-1380   * Non-atomic for multi puts.
-1381   * @param connection connection we're 
using
-1382   * @param puts Put to add to 
hbase:meta
-1383   * @throws IOException
-1384   */
-1385  public static void 
putToMetaTable(final Connection connection, final Put... puts)
-1386throws IOException {
-1387put(getMetaHTable(connection), 
Arrays.asList(puts));
-1388  }
-1389
-1390  /**
-1391   * @param t Table to use (will be 
closed when done).
-1392   * @param puts puts to make
-1393   * @throws IOException
-1394   */
-1395  private static void put(final Table t, final List<Put> puts) throws IOException {
-1396try {
-1397  if (METALOG.isDebugEnabled()) {
-1398
METALOG.debug(mutationsToString(puts));
-1399  }
-1400  t.put(puts);
-1401} finally {
-1402  t.close();
-1403}
-1404  }
-1405
-1406  /**
-1407   * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
-1408   * @param connection connection we're 
using
-1409   * @param ps Put to add to 
hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void putsToMetaTable(final Connection connection, final List<Put> ps)
-1413throws IOException {
-1414Table t = 
getMetaHTable(connection);
-1415try {
-1416  if (METALOG.isDebugEnabled()) {
-1417
METALOG.debug(mutationsToString(ps));
-1418  }
-1419  t.put(ps);
-1420} finally {
-1421  t.close();
-1422}
-1423  }
-1424
-1425  /**
-1426   * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
-1427   * @param connection connection we're 
using
-1428   * @param d Delete to add to 
hbase:meta
-1429   * @throws IOException
-1430   */
-1431  static void deleteFromMetaTable(final 
Connection connection, final Delete d)
-1432throws IOException {
-1433    List<Delete> dels = new ArrayList<>(1);
-1434dels.add(d);
-1435deleteFromMetaTable(connection, 
dels);
-1436  }
-1437
-1438  /**
-1439   * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
-1440   * @param connection connection we're 
using
-1441   * @param deletes Deletes to add to 
hbase:meta  This list should support #remove.
-1442   * @throws IOException
-1443   */
-1444  public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
-1445throws IOException {
-1446Table t = 
getMetaHTable(connection);
-1447try {
-1448  if (METALOG.isDebugEnabled()) {
-1449
METALOG.debug(mutationsToString(deletes));
-1450  }
-1451  t.delete(deletes);
-1452} finally {
-1453  t.close();
-1454}
-1455  }
-1456
-1457  /**
-1458   * Deletes some replica columns 
corresponding to replicas for the passed rows
-1459   * @param metaRows rows in 
hbase:meta
-1460   * @param replicaIndexToDeleteFrom the 
replica ID we would start deleting from
-1461   * @param numReplicasToRemove how many 
replicas to remove
-1462   * 

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
index 7c59e27..c904c56 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
@@ -119,4048 +119,4054 @@
 111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
-156import 

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
index 13eae48..eb95695 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -407,12 +407,18 @@
 
 
 
+Table.CheckAndMutateBuilder
+
+A helper class for sending checkAndMutate request.
+
+
+
 TableBuilder
 
 For creating Table 
instance.
 
 
-
+
 TableDescriptor
 
 TableDescriptor contains the details about an HBase table 
such as the descriptors of
@@ -421,7 +427,7 @@
  when the region split should occur, coprocessors associated with it 
etc...
 
 
-
+
ZKAsyncRegistry.Converter<T>
 
 
@@ -943,203 +949,203 @@
 An implementation of Table.
 
 
-
+
 HTableMultiplexer
 
 HTableMultiplexer provides a thread-safe non blocking PUT 
API across all the tables.
 
 
-
+
 HTableMultiplexer.AtomicAverageCounter
 
 Helper to count the average over an interval until 
reset.
 
 
-
+
 HTableMultiplexer.FlushWorker
 
 
-
+
 HTableMultiplexer.HTableMultiplexerStatus
 
 HTableMultiplexerStatus keeps track of the current status 
of the HTableMultiplexer.
 
 
-
+
 HTableMultiplexer.PutStatus
 
 
-
+
 ImmutableHColumnDescriptor
 Deprecated
 
-
+
 ImmutableHRegionInfo
 Deprecated
 
-
+
 ImmutableHTableDescriptor
 Deprecated
 
-
+
 Increment
 
 Used to perform Increment operations on a single row.
 
 
-
+
MasterCallable<V>
 
 A RetryingCallable for Master RPC operations.
 
 
-
+
 MasterCoprocessorRpcChannelImpl
 
 The implementation of a master based coprocessor rpc 
channel.
 
 
-
+
 MetaCache
 
 A cache implementation for region locations from meta.
 
 
-
+
 MetricsConnection
 
 This class is for maintaining the various connection 
statistics and publishing them through
  the metrics interfaces.
 
 
-
+
 MetricsConnection.CallStats
 
 A container class for collecting details about the RPC call 
as it percolates.
 
 
-
+
 MetricsConnection.CallTracker
 
 
-
+
 MetricsConnection.RegionStats
 
 
-
+
 MetricsConnection.RunnerStats
 
 
-
+
 MultiAction
 
 Container for Actions (i.e.
 
 
-
+
 MultiResponse
 
 A container for Result objects, grouped by regionName.
 
 
-
+
 MultiResponse.RegionResult
 
 
-
+
 MultiServerCallable
 
 Callable that handles the multi method call 
going against a single
  regionserver; i.e.
 
 
-
+
 Mutation
 
 
-
+
NoncedRegionServerCallable<T>
 
 Implementations make an rpc call against a RegionService 
via a protobuf Service.
 
 
-
+
 NoOpRetryableCallerInterceptor
 
 Class that acts as a NoOpInterceptor.
 
 
-
+
 NoOpRetryingInterceptorContext
 
 
-
+
 Operation
 
 Superclass for any type that maps to a potentially 
application-level query.
 
 
-
+
 OperationWithAttributes
 
 
-
+
 PackagePrivateFieldAccessor
 
 A helper class used to access the package private field in 
o.a.h.h.client package.
 
 
-
+
 PerClientRandomNonceGenerator
 
 NonceGenerator implementation that uses client ID hash + 
random int as nonce group, and random
  numbers as nonces.
 
 
-
+
 PreemptiveFastFailInterceptor
 
 The concrete RetryingCallerInterceptor 
class that implements the preemptive fast fail
  feature.
 
 
-
+
 Put
 
 Used to perform Put operations for a single row.
 
 
-
+
 Query
 
 Base class for HBase read operations; e.g.
 
 
-
+
 QuotaStatusCalls
 
 Client class to wrap RPCs to HBase servers for space quota 
status information.
 
 
-
+
 RawAsyncHBaseAdmin
 
 The implementation of AsyncAdmin.
 
 
-
+
 RawAsyncTableImpl
 
 The implementation of RawAsyncTable.
 
 
-
+
RegionAdminServiceCallable<T>
 
 Similar to RegionServerCallable but for the AdminService 
interface.
 
 
-
+
 RegionCoprocessorRpcChannel
 
 Provides clients with an RPC connection to call Coprocessor 
Endpoint
@@ -1147,103 +1153,103 @@
  against a given table region.
 
 
-
+
 RegionCoprocessorRpcChannelImpl
 
 The implementation of a region based coprocessor rpc 
channel.
 
 
-
+
 RegionCoprocessorServiceExec
 
 Represents a coprocessor service method execution against a 
single region.
 
 
-
+
 RegionInfoBuilder
 
 
-
+
 RegionInfoBuilder.MutableRegionInfo
 
 An implementation of RegionInfo that adds mutable methods 
so can build a RegionInfo instance.
 
 
-
+
 RegionInfoDisplay
 
 Utility used composing RegionInfo for 'display'; e.g.
 
 
-
+
 RegionLoadStats
 
 POJO representing region server load
 
 
-
+
 RegionReplicaUtil
 
 Utility methods which contain the logic for regions and 
replicas.
 
 
-
+
RegionServerCallable<T,S>
 
 Implementations make a RPC call against a RegionService via 
a protobuf Service.
 
 
-
+
 RegionServerCoprocessorRpcChannelImpl
 
 The implementation of a region server based coprocessor rpc 
channel.
 
 
-
+
 RequestControllerFactory
 
 A factory class that constructs an RequestController.
 
 
-
+
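
The new Table.CheckAndMutateBuilder listed above gives checkAndMutate a fluent form in place of the older positional overloads. A sketch of typical use (table, row and values invented):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndMutateExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("t1"))) { // hypothetical table
          byte[] row = Bytes.toBytes("row1");
          byte[] cf = Bytes.toBytes("cf");
          Put put = new Put(row).addColumn(cf, Bytes.toBytes("q"), Bytes.toBytes("new"));
          // Apply the Put only if cf:q currently equals "old".
          boolean applied = table.checkAndMutate(row, cf)
              .qualifier(Bytes.toBytes("q"))
              .ifEquals(Bytes.toBytes("old"))
              .thenPut(put);
          System.out.println("applied=" + applied);
        }
      }
    }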
 

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/RawCell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/RawCell.html 
b/devapidocs/org/apache/hadoop/hbase/RawCell.html
index b7bbbc8..a6ac9dc 100644
--- a/devapidocs/org/apache/hadoop/hbase/RawCell.html
+++ b/devapidocs/org/apache/hadoop/hbase/RawCell.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Known Implementing Classes:
-BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 ByteBufferChunkCell, 
ByteBufferKeyValue, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, MapReduceCell, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, PrivateCellUtil.ValueAndTagRewriteByteBufferCell, 
PrivateCellUtil.ValueAndTagRewriteCell, SizeCachedKeyValue, SizeCachedNoTagsKeyValue
+BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 ByteBufferChunkCell, 
ByteBufferKeyValue, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, MapReduceCell, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.EmptyByteBufferCell, PrivateCellUtil.EmptyCell, PrivateCellUtil.FirstOnRowByteBufferCell, PrivateCellUtil.FirstOnRowCell, PrivateCellUtil.FirstOnRowColByteBufferCell, PrivateCellUtil.FirstOnRowColCell, PrivateCellUtil.FirstOnRowColTSByteBufferCell, PrivateCellUtil.FirstOnRowColTSCell, PrivateCellUtil.FirstOnRowDeleteFamilyCell, PrivateCellUtil.LastOnRowByteBufferCell, PrivateCellUtil.LastOnRowCell, PrivateCellUtil.LastOnRowColByteBufferCell, PrivateCellUtil.LastOnRowColCell, 
PrivateCellUtil.TagRewriteByteBufferCell, PrivateCellUtil.TagRewriteCell, PrivateCellUtil.ValueAndTagRewriteByteBufferCell, 
PrivateCellUtil.ValueAndTagRewriteCell, SizeCachedKeyValue, SizeCachedNoTagsKeyValue
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/ServiceNotRunningException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ServiceNotRunningException.html 
b/devapidocs/org/apache/hadoop/hbase/ServiceNotRunningException.html
index c0390a3..1921bf6 100644
--- a/devapidocs/org/apache/hadoop/hbase/ServiceNotRunningException.html
+++ b/devapidocs/org/apache/hadoop/hbase/ServiceNotRunningException.html
@@ -44,7 +44,7 @@
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -267,7 +267,7 @@ extends 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/SettableSequenceId.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/SettableSequenceId.html 
b/devapidocs/org/apache/hadoop/hbase/SettableSequenceId.html
deleted file mode 100644
index 64513b9..000
--- a/devapidocs/org/apache/hadoop/hbase/SettableSequenceId.html
+++ /dev/null
@@ -1,249 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-SettableSequenceId (Apache HBase 3.0.0-SNAPSHOT API)
-
-
-
-
-
-var methods = {"i0":38};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],32:["t6","Deprecated Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-PrevClass
-NextClass
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-Summary:
-Nested|
-Field|
-Constr|
-Method
-
-
-Detail:
-Field|
-Constr|
-Method
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase
-Interface SettableSequenceId
-
-
-
-
-
-
-All Known Subinterfaces:
-ExtendedCell
-
-
-All Known Implementing Classes:
-BufferedDataBlockEncoder.OffheapDecodedCell,
 BufferedDataBlockEncoder.OnheapDecodedCell,
 ByteBufferChunkCell, 
ByteBufferKeyValue, IndividualBytesFieldCell, KeyValue, KeyValue.KeyOnlyKeyValue, MapReduceCell, NoTagByteBufferChunkCell, NoTagsByteBufferKeyValue, NoTagsKeyValue, PrivateCellUtil.EmptyByteBufferCell, PrivateCellUtil.EmptyCell, PrivateCellUtil.FirstOnRowByteBufferCell, PrivateCellUtil.FirstOnRowCell, PrivateCellUtil.FirstOnRowColByteBufferCell, PrivateCellUtil.FirstOnRowColCell, 

[18/51] [partial] hbase-site git commit: Published site at .

2017-12-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/export_control.html
--
diff --git a/export_control.html b/export_control.html
index c3e1d56..e4942e0 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Export Control
@@ -336,7 +336,7 @@ for more details.
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-05
+  Last Published: 
2017-12-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-annotations/checkstyle.html
--
diff --git a/hbase-annotations/checkstyle.html 
b/hbase-annotations/checkstyle.html
index f1b188d..af35154 100644
--- a/hbase-annotations/checkstyle.html
+++ b/hbase-annotations/checkstyle.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations  Checkstyle Results
 
@@ -178,7 +178,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-05
+  Last Published: 
2017-12-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-annotations/dependencies.html
--
diff --git a/hbase-annotations/dependencies.html 
b/hbase-annotations/dependencies.html
index faa7977..23b379a 100644
--- a/hbase-annotations/dependencies.html
+++ b/hbase-annotations/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations  Project Dependencies
 
@@ -272,7 +272,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-05
+  Last Published: 
2017-12-06
 
 
 



[18/51] [partial] hbase-site git commit: Published site at .

2017-12-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 3edfbef..9707b2c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -2459,5936 +2459,5935 @@
 2451  }
 2452
 2453  for (HStore s : storesToFlush) {
-2454MemStoreSize flushableSize = 
s.getFlushableSize();
-2455
totalSizeOfFlushableStores.incMemStoreSize(flushableSize);
-2456
storeFlushCtxs.put(s.getColumnFamilyDescriptor().getName(),
-2457  
s.createFlushContext(flushOpSeqId, tracker));
-2458// for writing stores to WAL
-2459
committedFiles.put(s.getColumnFamilyDescriptor().getName(), null);
-2460
storeFlushableSize.put(s.getColumnFamilyDescriptor().getName(), 
flushableSize);
-2461  }
-2462
-2463  // write the snapshot start to 
WAL
-2464      if (wal != null && !writestate.readOnly) {
-2465FlushDescriptor desc = 
ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
-2466getRegionInfo(), 
flushOpSeqId, committedFiles);
-2467// No sync. Sync is below where 
no updates lock and we do FlushAction.COMMIT_FLUSH
-2468WALUtil.writeFlushMarker(wal, 
this.getReplicationScope(), getRegionInfo(), desc, false,
-2469mvcc);
-2470  }
-2471
-2472  // Prepare flush (take a 
snapshot)
-2473  for (StoreFlushContext flush : 
storeFlushCtxs.values()) {
-2474flush.prepare();
-2475  }
-2476} catch (IOException ex) {
-2477  doAbortFlushToWAL(wal, 
flushOpSeqId, committedFiles);
-2478  throw ex;
-2479} finally {
-2480  
this.updatesLock.writeLock().unlock();
-2481}
-2482String s = "Finished memstore 
snapshotting " + this + ", syncing WAL and waiting on mvcc, " +
-2483"flushsize=" + 
totalSizeOfFlushableStores;
-2484status.setStatus(s);
-2485doSyncOfUnflushedWALChanges(wal, 
getRegionInfo());
-2486return new 
PrepareFlushResult(storeFlushCtxs, committedFiles, storeFlushableSize, 
startTime,
-2487flushOpSeqId, flushedSeqId, 
totalSizeOfFlushableStores);
-2488  }
-2489
-2490  /**
-2491   * Utility method broken out of 
internalPrepareFlushCache so that method is smaller.
-2492   */
-2493  private void logFatLineOnFlush(Collection<HStore> storesToFlush, long sequenceId) {
-2494if (!LOG.isInfoEnabled()) {
-2495  return;
-2496}
-2497// Log a fat line detailing what is 
being flushed.
-2498StringBuilder perCfExtras = null;
-2499if (!isAllFamilies(storesToFlush)) 
{
-2500  perCfExtras = new 
StringBuilder();
-2501  for (HStore store: storesToFlush) 
{
-2502perCfExtras.append("; 
").append(store.getColumnFamilyName());
-2503perCfExtras.append("=")
-2504
.append(StringUtils.byteDesc(store.getFlushableSize().getDataSize()));
-2505  }
-2506}
-2507LOG.info("Flushing " + + 
storesToFlush.size() + "/" + stores.size() +
-2508" column families, memstore=" + 
StringUtils.byteDesc(this.memstoreDataSize.get()) +
-2509        ((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") +
-2510((wal != null) ? "" : "; WAL is 
null, using passed sequenceid=" + sequenceId));
-2511  }
-2512
-2513  private void doAbortFlushToWAL(final WAL wal, final long flushOpSeqId,
-2514      final Map<byte[], List<Path>> committedFiles) {
-2515if (wal == null) return;
-2516try {
-2517  FlushDescriptor desc = 
ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
-2518  getRegionInfo(), flushOpSeqId, 
committedFiles);
-2519  WALUtil.writeFlushMarker(wal, 
this.getReplicationScope(), getRegionInfo(), desc, false,
-2520  mvcc);
-2521} catch (Throwable t) {
-2522  LOG.warn("Received unexpected 
exception trying to write ABORT_FLUSH marker to WAL:" +
-2523  
StringUtils.stringifyException(t));
-2524  // ignore this since we will be 
aborting the RS with DSE.
-2525}
-2526// we have called 
wal.startCacheFlush(), now we have to abort it
-2527
wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
-2528  }
-2529
-2530  /**
-2531   * Sync unflushed WAL changes. See 
HBASE-8208 for details
-2532   */
-2533  private static void 
doSyncOfUnflushedWALChanges(final WAL wal, final RegionInfo hri)
-2534  throws IOException {
-2535if (wal == null) {
-2536  return;
-2537}
-2538try {
-2539  wal.sync(); // ensure that flush 
marker is sync'ed
-2540} catch (IOException ioe) {
-2541  
wal.abortCacheFlush(hri.getEncodedNameAsBytes());
-2542  throw ioe;

[18/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
index de6cb11..dd54dd2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
@@ -25,563 +25,558 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
-022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
-023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
-026import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+020import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
+021import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
+024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
+026import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 028
-029
-030import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-031import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-032import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-033import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler.Sharable;
-034import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-035import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseCombiner;
-044
-045import java.io.IOException;
-046import java.nio.ByteBuffer;
-047import java.util.ArrayDeque;
-048import java.util.Collection;
-049import java.util.Collections;
-050import java.util.Deque;
-051import java.util.IdentityHashMap;
-052import java.util.List;
-053import java.util.Set;
-054import 
java.util.concurrent.CompletableFuture;
-055import java.util.concurrent.TimeUnit;
-056import java.util.function.Supplier;
-057
-058import 
org.apache.hadoop.conf.Configuration;
-059import 
org.apache.hadoop.crypto.Encryptor;
-060import org.apache.hadoop.fs.Path;
+029import java.io.IOException;
+030import java.io.InterruptedIOException;
+031import java.nio.ByteBuffer;
+032import java.util.Collection;
+033import java.util.Collections;
+034import java.util.Iterator;
+035import java.util.List;
+036import java.util.Set;
+037import 
java.util.concurrent.CompletableFuture;
+038import 
java.util.concurrent.ConcurrentHashMap;
+039import 
java.util.concurrent.ConcurrentLinkedDeque;
+040import 
java.util.concurrent.ExecutionException;
+041import java.util.concurrent.TimeUnit;
+042import java.util.function.Supplier;
+043
+044import 
org.apache.hadoop.conf.Configuration;
+045import 
org.apache.hadoop.crypto.Encryptor;
+046import org.apache.hadoop.fs.Path;
+047import 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose;
+048import 
org.apache.hadoop.hbase.util.CancelableProgressable;
+049import 
org.apache.hadoop.hbase.util.FSUtils;
+050import 
org.apache.hadoop.hdfs.DFSClient;
+051import 

[18/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
index 8ba8dc9..f973938 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.html
@@ -37,36 +37,36 @@
 029import java.io.IOException;
 030import java.util.ArrayList;
 031import java.util.Collections;
-032import java.util.IdentityHashMap;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.Optional;
-036import 
java.util.concurrent.CompletableFuture;
-037import 
java.util.concurrent.ConcurrentHashMap;
-038import 
java.util.concurrent.ConcurrentLinkedQueue;
-039import 
java.util.concurrent.ConcurrentMap;
-040import 
java.util.concurrent.ConcurrentSkipListMap;
-041import java.util.concurrent.TimeUnit;
-042import java.util.function.Supplier;
-043import java.util.stream.Collectors;
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.logging.Log;
-047import 
org.apache.commons.logging.LogFactory;
-048import 
org.apache.hadoop.hbase.CellScannable;
-049import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-050import 
org.apache.hadoop.hbase.HRegionLocation;
-051import 
org.apache.hadoop.hbase.ServerName;
-052import 
org.apache.hadoop.hbase.TableName;
-053import 
org.apache.yetus.audience.InterfaceAudience;
-054import 
org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
-055import 
org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
-056import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-057import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+032import java.util.HashMap;
+033import java.util.IdentityHashMap;
+034import java.util.List;
+035import java.util.Map;
+036import java.util.Optional;
+037import 
java.util.concurrent.CompletableFuture;
+038import 
java.util.concurrent.ConcurrentHashMap;
+039import 
java.util.concurrent.ConcurrentLinkedQueue;
+040import 
java.util.concurrent.ConcurrentMap;
+041import 
java.util.concurrent.ConcurrentSkipListMap;
+042import java.util.concurrent.TimeUnit;
+043import java.util.function.Supplier;
+044import java.util.stream.Collectors;
+045import java.util.stream.Stream;
+046
+047import org.apache.commons.logging.Log;
+048import 
org.apache.commons.logging.LogFactory;
+049import 
org.apache.hadoop.hbase.CellScannable;
+050import 
org.apache.hadoop.hbase.DoNotRetryIOException;
+051import 
org.apache.hadoop.hbase.HRegionLocation;
+052import 
org.apache.hadoop.hbase.ServerName;
+053import 
org.apache.hadoop.hbase.TableName;
+054import 
org.apache.yetus.audience.InterfaceAudience;
+055import 
org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
+056import 
org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
+057import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
+058import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+059import 
org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
+060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 062import 
org.apache.hadoop.hbase.util.Bytes;
 063import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 064
@@ -240,212 +240,208 @@
 232  }
 233
 234  private ClientProtos.MultiRequest buildReq(Map<byte[], RegionRequest> actionsByRegion,
-235      List<CellScannable> cells) throws IOException {
+235      List<CellScannable> cells, Map<Integer, Integer> rowMutationsIndexMap) throws IOException {
 236ClientProtos.MultiRequest.Builder 
multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
 237ClientProtos.RegionAction.Builder 
regionActionBuilder = ClientProtos.RegionAction.newBuilder();
 238ClientProtos.Action.Builder 
actionBuilder = ClientProtos.Action.newBuilder();
 239ClientProtos.MutationProto.Builder 
mutationBuilder = ClientProtos.MutationProto.newBuilder();
 240    for (Map.Entry<byte[], RegionRequest> entry : actionsByRegion.entrySet()) {
-241  // TODO: remove the extra for loop 
as we will iterate it in mutationBuilder.
-242  if 
(!multiRequestBuilder.hasNonceGroup()) {
-243for (Action action : 
entry.getValue().actions) {
-244  if (action.hasNonce()) {
-245 

[18/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
index 10da76f..432de3f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.html
@@ -26,135 +26,109 @@
 018
 019package 
org.apache.hadoop.hbase.procedure2;
 020
-021import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-022
+021import 
com.google.common.annotations.VisibleForTesting;
+022import java.util.Iterator;
 023import java.util.List;
 024import java.util.concurrent.TimeUnit;
-025
-026import 
org.apache.yetus.audience.InterfaceAudience;
-027
-028/**
-029 * Keep track of the runnable 
procedures
-030 */
-031@InterfaceAudience.Private
-032public interface ProcedureScheduler {
-033  /**
-034   * Start the scheduler
-035   */
-036  void start();
-037
-038  /**
-039   * Stop the scheduler
-040   */
-041  void stop();
-042
-043  /**
-044   * If the scheduler is blocked in poll() waiting for items to be added,
-045   * this method should wake poll() so that it returns.
-046   */
-047  void signalAll();
-048
-049  /**
-050   * Inserts the specified element at the 
front of this queue.
-051   * @param proc the Procedure to add
-052   */
-053  void addFront(Procedure proc);
-054
-055  /**
-056   * Inserts the specified element at the 
end of this queue.
-057   * @param proc the Procedure to add
-058   */
-059  void addBack(Procedure proc);
-060
-061  /**
-062   * The procedure can't run at the moment;
-063   * add it back to the queue, giving priority to someone else.
-064   * @param proc the Procedure to add 
back to the list
-065   */
-066  void yield(Procedure proc);
-067
-068  /**
-069   * The procedure in execution 
completed.
-070   * This can be implemented to perform 
cleanups.
-071   * @param proc the Procedure that 
completed the execution.
-072   */
-073  void completionCleanup(Procedure 
proc);
-074
-075  /**
-076   * @return true if there are procedures 
available to process, otherwise false.
-077   */
-078  boolean hasRunnables();
-079
-080  /**
-081   * Fetch one Procedure from the queue
-082   * @return the Procedure to execute, or 
null if nothing present.
-083   */
-084  Procedure poll();
-085
-086  /**
-087   * Fetch one Procedure from the queue
-088   * @param timeout how long to wait 
before giving up, in units of unit
-089   * @param unit a TimeUnit determining 
how to interpret the timeout parameter
-090   * @return the Procedure to execute, or 
null if nothing present.
-091   */
-092  Procedure poll(long timeout, TimeUnit 
unit);
-093
-094  /**
-095   * Mark the event as not ready.
-096   * Procedures calling waitEvent() will 
be suspended.
-097   * @param event the event to mark as 
suspended/not ready
-098   */
-099  void suspendEvent(ProcedureEvent 
event);
-100
-101  /**
-102   * Wake every procedure waiting for the 
specified event
-103   * (By design each event has only one 
"wake" caller)
-104   * @param event the event to wake
-105   */
-106  void wakeEvent(ProcedureEvent event);
-107
-108  /**
-109   * Wake every procedure waiting for the 
specified events.
-110   * (By design each event has only one 
"wake" caller)
-111   * @param count the number of events in 
the array to wake
-112   * @param events the list of events to 
wake
-113   */
-114  void wakeEvents(int count, 
ProcedureEvent... events);
+025import 
org.apache.yetus.audience.InterfaceAudience;
+026
+027/**
+028 * Keep track of the runnable 
procedures
+029 */
+030@InterfaceAudience.Private
+031public interface ProcedureScheduler {
+032  /**
+033   * Start the scheduler
+034   */
+035  void start();
+036
+037  /**
+038   * Stop the scheduler
+039   */
+040  void stop();
+041
+042  /**
+043   * If the scheduler is blocked in poll() waiting for items to be added,
+044   * this method should wake poll() so that it returns.
+045   */
+046  void signalAll();
+047
+048  /**
+049   * Inserts the specified element at the 
front of this queue.
+050   * @param proc the Procedure to add
+051   */
+052  void addFront(Procedure proc);
+053
+054  /**
+055   * Inserts all elements in the iterator 
at the front of this queue.
+056   */
+057  void addFront(Iterator<Procedure> procedureIterator);
+058
+059  /**
+060   * Inserts the specified element at the 
end of this queue.
+061   * @param proc the Procedure to add
+062   */
+063  void addBack(Procedure proc);
+064
+065  /**
+066   * The procedure can't run at the moment;
+067   * add it back to the queue, giving priority to someone else.
+068   * @param proc the Procedure to add 
back to the list
+069   */
+070  void 

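Taken together, the interface above is a producer/consumer queue: addFront/addBack enqueue procedures, poll dequeues them (optionally with a timeout), and completionCleanup runs after execution. A minimal worker loop over this contract might look as follows; this is an illustrative sketch only, with the running flag and execute(proc) standing in for hypothetical application code:

    // Sketch of a worker thread draining a ProcedureScheduler (illustrative only).
    void workerLoop(ProcedureScheduler scheduler) {
      while (running) {
        // Block for up to one second waiting for a runnable procedure; a null
        // return means the wait timed out or signalAll() woke us up.
        Procedure proc = scheduler.poll(1, TimeUnit.SECONDS);
        if (proc == null) {
          continue;
        }
        try {
          execute(proc);                     // hypothetical execution step
        } finally {
          scheduler.completionCleanup(proc); // let the scheduler do cleanups
        }
      }
    }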
[18/51] [partial] hbase-site git commit: Published site at .

2017-11-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
index 4b176d0..c2b4126 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.LimitedPrivate(value="Configuration")
-public class AccessController
+public class AccessController
 extends Object
 implements MasterCoprocessor, RegionCoprocessor, RegionServerCoprocessor, 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService.Interface,
 MasterObserver, 
RegionObserver, 
RegionServerObserver, EndpointObserver, 
BulkLoadObserver
 Provides basic authorization checks for data access and
 administrative operations.
@@ -1492,7 +1492,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -1501,7 +1501,7 @@ implements 
 
 AUDITLOG
-private static final org.apache.commons.logging.Log AUDITLOG
+private static final org.apache.commons.logging.Log AUDITLOG
 
 
 
@@ -1510,7 +1510,7 @@ implements 
 
 CHECK_COVERING_PERM
-private static final String CHECK_COVERING_PERM
+private static final String CHECK_COVERING_PERM
 
 See Also:
 Constant Field Values
@@ -1523,7 +1523,7 @@ implements 
 
 TAG_CHECK_PASSED
-private static final String TAG_CHECK_PASSED
+private static final String TAG_CHECK_PASSED
 
 See Also:
 Constant Field Values
@@ -1536,7 +1536,7 @@ implements 
 
 TRUE
-private static final byte[] TRUE
+private static final byte[] TRUE
 
 
 
@@ -1545,7 +1545,7 @@ implements 
 
 authManager
-TableAuthManager authManager
+TableAuthManager authManager
 
 
 
@@ -1554,7 +1554,7 @@ implements 
 
 aclRegion
-boolean aclRegion
+boolean aclRegion
 flags if we are running on a region of the _acl_ table
 
 
@@ -1564,7 +1564,7 @@ implements 
 
 regionEnv
-private RegionCoprocessorEnvironment regionEnv
+private RegionCoprocessorEnvironment regionEnv
 defined only for Endpoint implementation, so it has a way to
 access region services
 
@@ -1575,7 +1575,7 @@ implements 
 
 scannerOwners
-private Map<InternalScanner,String> scannerOwners
+private Map<InternalScanner,String> scannerOwners
 Mapping of scanner instances to the user who created 
them
 
 
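The scannerOwners map above backs an ownership check on scanner operations: record the creating user when a scanner is opened, then verify later callers against it. A sketch of that pattern with hypothetical helper names (recordOwner, requireScannerOwner), not the class's actual methods:

    // Illustrative only: remember who opened a scanner and verify later access.
    private final Map<InternalScanner, String> owners = new ConcurrentHashMap<>();

    void recordOwner(InternalScanner scanner, String user) {
      owners.put(scanner, user);
    }

    void requireScannerOwner(InternalScanner scanner, String currentUser)
        throws AccessDeniedException {
      String owner = owners.get(scanner);
      if (owner != null && !owner.equals(currentUser)) {
        throw new AccessDeniedException(
            "User '" + currentUser + "' is not the owner of this scanner");
      }
    }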
@@ -1585,7 +1585,7 @@ implements 
 
 tableAcls
-private Map<TableName,List<UserPermission>> tableAcls
+private Map<TableName,List<UserPermission>> tableAcls
 
 
 
@@ -1594,7 +1594,7 @@ implements 
 
 userProvider
-private UserProvider userProvider
+private UserProvider userProvider
 Provider for mapping principal names to Users
 
 
@@ -1604,7 +1604,7 @@ implements 
 
 authorizationEnabled
-boolean authorizationEnabled
+boolean authorizationEnabled
 if we are active; usually true, and false only when "hbase.security.authorization"
 has been set to false in site configuration
 
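That flag is read straight from site configuration; "hbase.security.authorization" is the real configuration key, while the helper below is only an assumed shape for illustration:

    // Sketch: read the authorization switch, which defaults to enabled.
    static boolean isAuthorizationEnabled(org.apache.hadoop.conf.Configuration conf) {
      return conf.getBoolean("hbase.security.authorization", true);
    }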
@@ -1615,7 +1615,7 @@ implements 
 
 cellFeaturesEnabled
-boolean cellFeaturesEnabled
+boolean cellFeaturesEnabled
 if we are able to support cell ACLs
 
 
@@ -1625,7 +1625,7 @@ implements 
 
 shouldCheckExecPermission
-boolean 
