[21/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 68302bf..a5a8905 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -2197,1768 +2197,1775 @@
 2189        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2190      }
 2191
-2192      for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
-2193        if (hcd.getTimeToLive() <= 0) {
-2194          String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
-2195          warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2196        }
-2197
-2198        // check blockSize
-2199        if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
-2200          String message = "Block size for column family " + hcd.getNameAsString()
-2201              + "  must be between 1K and 16MB.";
+2192      // check that we have minimum 1 region replicas
+2193      int regionReplicas = htd.getRegionReplication();
+2194      if (regionReplicas < 1) {
+2195        String message = "Table region replication should be at least one.";
+2196        warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+2197      }
+2198
+2199      for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+2200        if (hcd.getTimeToLive() <= 0) {
+2201          String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
 2202          warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2203        }
 2204
-2205        // check versions
-2206        if (hcd.getMinVersions() < 0) {
-2207          String message = "Min versions for column family " + hcd.getNameAsString()
-2208            + "  must be positive.";
+2205        // check blockSize
+2206        if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) {
+2207          String message = "Block size for column family " + hcd.getNameAsString()
+2208              + "  must be between 1K and 16MB.";
 2209          warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
 2210        }
-2211        // max versions already being checked
-2212
-2213        // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
-2214        //  does not throw IllegalArgumentException
-2215        // check minVersions <= maxVersions
-2216        if (hcd.getMinVersions() > hcd.getMaxVersions()) {
-2217          String message = "Min versions for column family " + hcd.getNameAsString()
-2218            + " must be less than the Max versions.";
-2219          warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2220        }
-2221
-2222        // check replication scope
-2223        checkReplicationScope(hcd);
-2224        // check bloom filter type
-2225        checkBloomFilterType(hcd);
-2226
-2227        // check data replication factor, it can be 0(default value) when user has not explicitly
-2228        // set the value, in this case we use default replication factor set in the file system.
-2229        if (hcd.getDFSReplication() < 0) {
-2230          String message = "HFile Replication for column family " + hcd.getNameAsString()
-2231              + "  must be greater than zero.";
-2232          warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
-2233        }
-2234
-2235        // TODO: should we check coprocessors and encryption ?
-2236      }
-2237    }
-2238
-2239    private void checkReplicationScope(ColumnFamilyDescriptor hcd) throws IOException {
-2240      // check replication scope
-2241      WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(hcd.getScope());
-2242      if (scop == null) {
-2243        String message = "Replication scope for column family "
-2244            + hcd.getNameAsString() + " is " + hcd.getScope() + " which is invalid.";
+2211
+2212        // check versions
+2213        if (hcd.getMinVersions() < 0) {
+2214          String message = "Min versions for column family " + hcd.getNameAsString()
+2215            + "  must be positive.";
+2216          warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+2217        }
+2218        // max versions already being checked
+2219
+2220        // HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
+2221        //  does not throw IllegalArgumentException
+2222        // check minVersions <= maxVersions
+2223        if (hcd.getMinVersions() > hcd.getMaxVersions()) {
+2224          String message = "Min versions for column family " + hcd.getNameAsString()
+2225            + " must be less than the Max versions.";
+2226          warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
+2227        }
+2228
+2229        // check replication scope
+2230

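To make the checks in the hunk above concrete, here is a minimal, self-contained sketch of the same validation rules. The validate helper and its plain parameters are illustrative stand-ins, not the HBase API; in HMaster these failures are routed through warnOrThrowExceptionForFailure rather than thrown directly.

// Hypothetical, self-contained sketch of the sanity rules shown above.
public class DescriptorSanityCheckSketch {
  static void validate(int regionReplicas, String family, int ttl, int blockSize,
      int minVersions, int maxVersions) {
    // New check from the hunk: at least one region replica.
    if (regionReplicas < 1) {
      throw new IllegalArgumentException("Table region replication should be at least one.");
    }
    if (ttl <= 0) {
      throw new IllegalArgumentException("TTL for column family " + family + " must be positive.");
    }
    // Block size must fall within [1K, 16MB].
    if (blockSize < 1024 || blockSize > 16 * 1024 * 1024) {
      throw new IllegalArgumentException("Block size for column family " + family
          + " must be between 1K and 16MB.");
    }
    // Min versions must be non-negative and must not exceed max versions.
    if (minVersions < 0 || minVersions > maxVersions) {
      throw new IllegalArgumentException("Min versions for column family " + family
          + " must be positive and not exceed max versions.");
    }
  }

  public static void main(String[] args) {
    validate(1, "cf", 86400, 65536, 0, 3); // passes all checks
  }
}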
[21/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
index bd7effb..9a61b35 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,"i110":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,"i110":10,"i111":10,"i112":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MasterRpcServices
+public class MasterRpcServices
 extends RSRpcServices
 implements org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface,
 org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService.BlockingInterface,
 org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService.BlockingInterface,
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService.BlockingInterface
 Implements the master RPC services.
@@ -559,142 +559,147 @@ implements org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master

+org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledResponse
+isRpcThrottleEnabled(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledRequest request)
+
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse
 isSnapshotDone(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
     org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request)
 Checks if the specified snapshot is done.

-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse
 isSplitOrMergeEnabled(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
     org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request)

-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersResponse
 listDecommissionedRegionServers(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
     org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDecommissionedRegionServersRequest request)

-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse
 

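The new isRpcThrottleEnabled entry (i111/i112 in the method table above) follows the same blocking request/response shape as the other master RPCs: take a controller and a request message, return a response message. A hedged sketch of that shape, using stand-in classes rather than the generated MasterProtos types:

// Stand-in request/response classes; the real ones are generated protobufs.
final class IsRpcThrottleEnabledRequest {
}

final class IsRpcThrottleEnabledResponse {
  final boolean enabled;

  IsRpcThrottleEnabledResponse(boolean enabled) {
    this.enabled = enabled;
  }
}

// Mirrors the blocking-interface handler shape: request in, response out.
class MasterRpcSketch {
  private volatile boolean rpcThrottleEnabled = true;

  IsRpcThrottleEnabledResponse isRpcThrottleEnabled(IsRpcThrottleEnabledRequest request) {
    return new IsRpcThrottleEnabledResponse(rpcThrottleEnabled);
  }

  void setRpcThrottleEnabled(boolean enabled) {
    rpcThrottleEnabled = enabled;
  }
}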
[21/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder.html
index 2e150bc..0b315b8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder.html
@@ -25,22 +25,22 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
-021import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
-022import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+020import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+021import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
+022import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
 023
-024import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-025
-026import java.util.List;
-027import java.util.concurrent.CompletableFuture;
-028import java.util.concurrent.TimeUnit;
-029
-030import org.apache.hadoop.hbase.HRegionLocation;
-031import org.apache.hadoop.hbase.ServerName;
-032import org.apache.hadoop.hbase.TableName;
-033import org.apache.yetus.audience.InterfaceAudience;
-034import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-035import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+024import java.util.List;
+025import java.util.concurrent.CompletableFuture;
+026import java.util.concurrent.TimeUnit;
+027import org.apache.hadoop.hbase.HRegionLocation;
+028import org.apache.hadoop.hbase.ServerName;
+029import org.apache.hadoop.hbase.TableName;
+030import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+031import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+032import org.apache.yetus.audience.InterfaceAudience;
+033
+034import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+035
 036import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 037import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
 038
@@ -83,432 +83,441 @@
 075
 076    private RegionLocateType locateType = RegionLocateType.CURRENT;
 077
-078    public SingleRequestCallerBuilder<T> table(TableName tableName) {
-079      this.tableName = tableName;
-080      return this;
-081    }
-082
-083    public SingleRequestCallerBuilder<T> row(byte[] row) {
-084      this.row = row;
-085      return this;
-086    }
-087
-088    public SingleRequestCallerBuilder<T> action(
-089        AsyncSingleRequestRpcRetryingCaller.Callable<T> callable) {
-090      this.callable = callable;
-091      return this;
-092    }
-093
-094    public SingleRequestCallerBuilder<T> operationTimeout(long operationTimeout, TimeUnit unit) {
-095      this.operationTimeoutNs = unit.toNanos(operationTimeout);
-096      return this;
-097    }
-098
-099    public SingleRequestCallerBuilder<T> rpcTimeout(long rpcTimeout, TimeUnit unit) {
-100      this.rpcTimeoutNs = unit.toNanos(rpcTimeout);
-101      return this;
-102    }
-103
-104    public SingleRequestCallerBuilder<T> locateType(RegionLocateType locateType) {
-105      this.locateType = locateType;
-106      return this;
-107    }
-108
-109    public SingleRequestCallerBuilder<T> pause(long pause, TimeUnit unit) {
-110      this.pauseNs = unit.toNanos(pause);
-111      return this;
-112    }
-113
-114    public SingleRequestCallerBuilder<T> maxAttempts(int maxAttempts) {
-115      this.maxAttempts = maxAttempts;
-116      return this;
-117    }
-118
-119    public SingleRequestCallerBuilder<T> startLogErrorsCnt(int startLogErrorsCnt) {
-120      this.startLogErrorsCnt = startLogErrorsCnt;
-121      return this;
-122    }
-123
-124    public AsyncSingleRequestRpcRetryingCaller<T> build() {
-125      return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn,
-126          checkNotNull(tableName, "tableName is null"), checkNotNull(row, "row is null"),
-127          checkNotNull(locateType, "locateType is null"), checkNotNull(callable, "action is null"),
-128          pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
+078    private int replicaId = RegionReplicaUtil.DEFAULT_REPLICA_ID;
+079
+080    public SingleRequestCallerBuilder<T> table(TableName tableName) {
+081      this.tableName = tableName;
+082      return this;
+083    }
+084
+085    public SingleRequestCallerBuilder<T> row(byte[] row)

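The hunk above shows the fluent-builder convention these caller builders use, plus the new replicaId field for region-replica reads. A minimal sketch of the pattern under stand-in types (CallerBuilder is hypothetical, and the real SingleRequestCallerBuilder<T> is additionally generic in the result type): each setter stores a value and returns this so calls chain, and build() is where required fields are validated.

import java.util.Objects;
import java.util.concurrent.TimeUnit;

class CallerBuilder {
  private String tableName;
  private byte[] row;
  private long operationTimeoutNs;
  private int replicaId = 0; // counterpart of the replicaId field added above

  CallerBuilder table(String tableName) {
    this.tableName = tableName;
    return this; // returning this is what lets calls chain
  }

  CallerBuilder row(byte[] row) {
    this.row = row;
    return this;
  }

  CallerBuilder operationTimeout(long timeout, TimeUnit unit) {
    this.operationTimeoutNs = unit.toNanos(timeout);
    return this;
  }

  CallerBuilder replicaId(int replicaId) {
    this.replicaId = replicaId;
    return this;
  }

  String build() {
    // Required fields are checked here, as with checkNotNull in the real build().
    Objects.requireNonNull(tableName, "tableName is null");
    Objects.requireNonNull(row, "row is null");
    return tableName + "#" + replicaId; // stand-in for constructing the caller
  }
}

Usage reads as one chained expression: new CallerBuilder().table("t").row(new byte[] {1}).replicaId(1).build().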
[21/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.IOErrorWithCause.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.IOErrorWithCause.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.IOErrorWithCause.html
new file mode 100644
index 000..5b5b199
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.IOErrorWithCause.html
@@ -0,0 +1,1419 @@
+001/**
+002 *
+003 * Licensed to the Apache Software Foundation (ASF) under one
+004 * or more contributor license agreements.  See the NOTICE file
+005 * distributed with this work for additional information
+006 * regarding copyright ownership.  The ASF licenses this file
+007 * to you under the Apache License, Version 2.0 (the
+008 * "License"); you may not use this file except in compliance
+009 * with the License.  You may obtain a copy of the License at
+010 *
+011 *     http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or agreed to in writing, software
+014 * distributed under the License is distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+016 * See the License for the specific language governing permissions and
+017 * limitations under the License.
+018 */
+019
+020package org.apache.hadoop.hbase.thrift;
+021
+022import static org.apache.hadoop.hbase.thrift.Constants.COALESCE_INC_KEY;
+023import static org.apache.hadoop.hbase.util.Bytes.getBytes;
+024
+025import java.io.IOException;
+026import java.nio.ByteBuffer;
+027import java.util.ArrayList;
+028import java.util.Collections;
+029import java.util.HashMap;
+030import java.util.List;
+031import java.util.Map;
+032import java.util.TreeMap;
+033
+034import org.apache.hadoop.conf.Configuration;
+035import org.apache.hadoop.hbase.Cell;
+036import org.apache.hadoop.hbase.CellBuilder;
+037import org.apache.hadoop.hbase.CellBuilderFactory;
+038import org.apache.hadoop.hbase.CellBuilderType;
+039import org.apache.hadoop.hbase.CellUtil;
+040import org.apache.hadoop.hbase.HColumnDescriptor;
+041import org.apache.hadoop.hbase.HConstants;
+042import org.apache.hadoop.hbase.HRegionLocation;
+043import org.apache.hadoop.hbase.HTableDescriptor;
+044import org.apache.hadoop.hbase.KeyValue;
+045import org.apache.hadoop.hbase.MetaTableAccessor;
+046import org.apache.hadoop.hbase.ServerName;
+047import org.apache.hadoop.hbase.TableName;
+048import org.apache.hadoop.hbase.TableNotFoundException;
+049import org.apache.hadoop.hbase.client.Append;
+050import org.apache.hadoop.hbase.client.Delete;
+051import org.apache.hadoop.hbase.client.Durability;
+052import org.apache.hadoop.hbase.client.Get;
+053import org.apache.hadoop.hbase.client.Increment;
+054import org.apache.hadoop.hbase.client.OperationWithAttributes;
+055import org.apache.hadoop.hbase.client.Put;
+056import org.apache.hadoop.hbase.client.RegionInfo;
+057import org.apache.hadoop.hbase.client.RegionLocator;
+058import org.apache.hadoop.hbase.client.Result;
+059import org.apache.hadoop.hbase.client.ResultScanner;
+060import org.apache.hadoop.hbase.client.Scan;
+061import org.apache.hadoop.hbase.client.Table;
+062import org.apache.hadoop.hbase.filter.Filter;
+063import org.apache.hadoop.hbase.filter.ParseFilter;
+064import org.apache.hadoop.hbase.filter.PrefixFilter;
+065import org.apache.hadoop.hbase.filter.WhileMatchFilter;
+066import org.apache.hadoop.hbase.security.UserProvider;
+067import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
+068import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
+069import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
+070import org.apache.hadoop.hbase.thrift.generated.Hbase;
+071import org.apache.hadoop.hbase.thrift.generated.IOError;
+072import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
+073import org.apache.hadoop.hbase.thrift.generated.Mutation;
+074import org.apache.hadoop.hbase.thrift.generated.TAppend;
+075import org.apache.hadoop.hbase.thrift.generated.TCell;
+076import org.apache.hadoop.hbase.thrift.generated.TIncrement;
+077import org.apache.hadoop.hbase.thrift.generated.TRegionInfo;
+078import org.apache.hadoop.hbase.thrift.generated.TRowResult;
+079import org.apache.hadoop.hbase.thrift.generated.TScan;
+080import org.apache.hadoop.hbase.util.Bytes;
+081import org.apache.thrift.TException;
+082import org.apache.yetus.audience.InterfaceAudience;
+083import org.slf4j.Logger;
+084import org.slf4j.LoggerFactory;
+085
+086import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
+087
+088/**
+089 * The HBaseServiceHandler is a glue object that connects Thrift

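The page above is the source view for ThriftHBaseServiceHandler.IOErrorWithCause. A hedged sketch of that idea, with ThriftError standing in for the generated IOError class: a Thrift-visible exception that also retains the original Throwable, so server-side logs keep the full stack trace.

// Stand-in for the generated Thrift IOError type.
class ThriftError extends Exception {
  ThriftError(String message) {
    super(message);
  }
}

// Keeps the underlying cause alongside the Thrift-serializable message.
class IOErrorWithCause extends ThriftError {
  private final Throwable cause;

  IOErrorWithCause(Throwable cause) {
    super(cause == null ? "unknown I/O error" : String.valueOf(cause.getMessage()));
    this.cause = cause;
  }

  @Override
  public synchronized Throwable getCause() {
    return cause; // exposed so logging frameworks can print the original stack
  }
}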
[21/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.html
index cb536db..261e004 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.html
@@ -479,6 +479,6 @@ implements
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ScanModifyingObserver.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ScanModifyingObserver.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ScanModifyingObserver.html
index 17f90e2..47cd2d5 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ScanModifyingObserver.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ScanModifyingObserver.html
@@ -470,6 +470,6 @@ implements
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.html
index da0a020..b7f2a4f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.html
@@ -511,6 +511,6 @@ implements
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
index c7c92c6..edd3ed4 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
@@ -788,6 +788,6 @@ implements
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.ZKDataHolder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.ZKDataHolder.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.ZKDataHolder.html
index b7d8b2f..c136a98 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.ZKDataHolder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.ZKDataHolder.html
@@ -411,6 +411,6 @@ extends java.lang.Object

-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
index a8d8e27..cd7ee0e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
+++ 

[21/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html 
b/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
index 62da309..c7ea445 100644
--- a/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
+++ b/devapidocs/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9};
+var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9,"i36":9,"i37":9,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -75,13 +75,13 @@ var activeTableTab = "activeTableTab";
 
 Summary:
 Nested|
-Field|
+Field|
 Constr|
 Method
 
 
 Detail:
-Field|
+Field|
 Constr|
 Method
 
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";

 @InterfaceAudience.Private
-public final class ThriftUtilities
+public final class ThriftUtilities
 extends java.lang.Object

@@ -118,6 +118,33 @@ extends java.lang.Object

+Field Summary
+
+Fields
+Modifier and Type        Field and Description
+
+private static Cell[]    EMPTY_CELL_ARRAY
+private static Result    EMPTY_RESULT
+private static Result    EMPTY_RESULT_STALE
 
 
 
@@ -161,95 +188,191 @@ extends java.lang.Object
 appendFromThrift(org.apache.hadoop.hbase.thrift2.generated.TAppend append)

+static org.apache.hadoop.hbase.thrift2.generated.TBloomFilterType
+bloomFilterFromHBase(BloomType in)
+
+static BloomType
+bloomFilterFromThrift(org.apache.hadoop.hbase.thrift2.generated.TBloomFilterType in)
+
+static org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor
+columnFamilyDescriptorFromHBase(ColumnFamilyDescriptor in)
+
+static ColumnFamilyDescriptor
+columnFamilyDescriptorFromThrift(org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor in)
+
 static CompareOperator
 compareOpFromThrift(org.apache.hadoop.hbase.thrift2.generated.TCompareOp tCompareOp)

-
+
+static org.apache.hadoop.hbase.thrift2.generated.TCompressionAlgorithm
+compressionAlgorithmFromHBase(Compression.Algorithm in)
+
+static Compression.Algorithm
+compressionAlgorithmFromThrift(org.apache.hadoop.hbase.thrift2.generated.TCompressionAlgorithm in)
+
 private static Consistency
 consistencyFromThrift(org.apache.hadoop.hbase.thrift2.generated.TConsistency tConsistency)

-
+
+static org.apache.hadoop.hbase.thrift2.generated.TDataBlockEncoding
+dataBlockEncodingFromHBase(DataBlockEncoding in)
+
+static DataBlockEncoding
+dataBlockEncodingFromThrift(org.apache.hadoop.hbase.thrift2.generated.TDataBlockEncoding in)
+
 static org.apache.hadoop.hbase.thrift2.generated.TDelete
 deleteFromHBase(Delete in)

-
+
 static Delete
 deleteFromThrift(org.apache.hadoop.hbase.thrift2.generated.TDelete in)
 Creates a Delete (HBase) from a TDelete (Thrift).

-
+
 static List<Delete>
 deletesFromThrift(List<org.apache.hadoop.hbase.thrift2.generated.TDelete> in)
 Converts multiple TDeletes (Thrift) into a list of Deletes (HBase).

-
+
+static org.apache.hadoop.hbase.thrift2.generated.TDeleteType
+deleteTypeFromHBase(Cell.Type type)
+
+private static org.apache.hadoop.hbase.thrift2.generated.TDurability
+durabilityFromHBase(Durability durability)
+
 private static Durability
 durabilityFromThrift(org.apache.hadoop.hbase.thrift2.generated.TDurability tDurability)

-
+
 static Get
 getFromThrift(org.apache.hadoop.hbase.thrift2.generated.TGet in)
 Creates a Get (HBase) from a TGet (Thrift).

-
+
 static List<Get>
 getsFromThrift(List<org.apache.hadoop.hbase.thrift2.generated.TGet> in)
 Converts multiple TGets (Thrift) into a list of Gets (HBase).

-
+
 static

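The new methods above come in symmetric fromHBase/fromThrift pairs, one per descriptor attribute. A self-contained sketch of one such pair, with stand-in enums in place of the generated Thrift types:

// Stand-in enums; the Thrift side is really a generated class.
enum BloomType { NONE, ROW, ROWCOL }
enum TBloomFilterType { NONE, ROW, ROWCOL }

final class ConverterSketch {
  private ConverterSketch() {
  }

  // HBase -> Thrift direction.
  static TBloomFilterType bloomFilterFromHBase(BloomType in) {
    switch (in) {
      case ROW: return TBloomFilterType.ROW;
      case ROWCOL: return TBloomFilterType.ROWCOL;
      default: return TBloomFilterType.NONE;
    }
  }

  // Thrift -> HBase direction, the exact mirror of the method above.
  static BloomType bloomFilterFromThrift(TBloomFilterType in) {
    switch (in) {
      case ROW: return BloomType.ROW;
      case ROWCOL: return BloomType.ROWCOL;
      default: return BloomType.NONE;
    }
  }
}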
[21/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.PostOpenDeployContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.PostOpenDeployContext.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.PostOpenDeployContext.html
index bfe700b..bd404ad 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.PostOpenDeployContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.PostOpenDeployContext.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class RegionServerServices.PostOpenDeployContext
+public static class RegionServerServices.PostOpenDeployContext
 extends java.lang.Object
 Context for postOpenDeployTasks().

@@ -211,7 +211,7 @@ extends java.lang.Object

 region
-private final HRegion region
+private final HRegion region

@@ -220,7 +220,7 @@ extends java.lang.Object

 masterSystemTime
-private final long masterSystemTime
+private final long masterSystemTime

@@ -238,7 +238,7 @@ extends java.lang.Object

 PostOpenDeployContext
 @InterfaceAudience.Private
-public PostOpenDeployContext(HRegion region,
+public PostOpenDeployContext(HRegion region,
     long masterSystemTime)

@@ -256,7 +256,7 @@ public

 getRegion
-public HRegion getRegion()
+public HRegion getRegion()

@@ -265,7 +265,7 @@ public

 getMasterSystemTime
-public long getMasterSystemTime()
+public long getMasterSystemTime()
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html
index c642a6b..84b5eca 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class RegionServerServices.RegionStateTransitionContext
+public static class RegionServerServices.RegionStateTransitionContext
 extends java.lang.Object

@@ -228,7 +228,7 @@ extends java.lang.Object

 code
-private final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode code
+private final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode code

@@ -237,7 +237,7 @@ extends java.lang.Object

 openSeqNum
-private final long openSeqNum
+private final long openSeqNum

@@ -246,7 +246,7 @@ extends java.lang.Object

 masterSystemTime
-private final long masterSystemTime
+private final long masterSystemTime

@@ -255,7 +255,7 @@ extends java.lang.Object

 hris
-private final RegionInfo[] hris
+private final RegionInfo[] hris

@@ -273,7 +273,7 @@ extends java.lang.Object

 RegionStateTransitionContext
 @InterfaceAudience.Private
-public RegionStateTransitionContext(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode code,
+public RegionStateTransitionContext(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode code,
     long openSeqNum,
     long masterSystemTime,
     RegionInfo... hris)
@@ -293,7 +293,7 @@ public

 getCode
-public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode getCode()
+public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode getCode()

@@ -302,7 +302,7 @@ public

 getOpenSeqNum
-public long getOpenSeqNum()

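Both nested classes follow the same immutable-context pattern: final fields set once in the constructor and exposed only through getters. A minimal stand-in sketch (Object replaces HRegion):

// Hypothetical sketch of the immutable context shape used above.
final class PostOpenDeployContextSketch {
  private final Object region;        // stand-in for HRegion
  private final long masterSystemTime;

  PostOpenDeployContextSketch(Object region, long masterSystemTime) {
    this.region = region;
    this.masterSystemTime = masterSystemTime;
  }

  Object getRegion() {
    return region;
  }

  long getMasterSystemTime() {
    return masterSystemTime;
  }
}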
[21/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
index 809f66f..9b60dd0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.RSGroupStartupWorker.html
@@ -765,146 +765,145 @@
 757            found.set(true);
 758            try {
 759              boolean rootMetaFound =
-760                  masterServices.getMetaTableLocator().verifyMetaRegionLocation(
-761                      conn, masterServices.getZooKeeper(), 1);
-762              if (rootMetaFound) {
-763                MetaTableAccessor.Visitor visitor = new DefaultVisitorBase() {
-764                  @Override
-765                  public boolean visitInternal(Result row) throws IOException {
-766                    RegionInfo info = MetaTableAccessor.getRegionInfo(row);
-767                    if (info != null) {
-768                      Cell serverCell =
-769                          row.getColumnLatestCell(HConstants.CATALOG_FAMILY,
-770                              HConstants.SERVER_QUALIFIER);
-771                      if (RSGROUP_TABLE_NAME.equals(info.getTable()) && serverCell != null) {
-772                        ServerName sn =
-773                            ServerName.parseVersionedServerName(CellUtil.cloneValue(serverCell));
-774                        if (sn == null) {
-775                          found.set(false);
-776                        } else if (tsm.isTableState(RSGROUP_TABLE_NAME, TableState.State.ENABLED)) {
-777                          try {
-778                            ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
-779                            ClientProtos.GetRequest request =
-780                                RequestConverter.buildGetRequest(info.getRegionName(),
-781                                    new Get(ROW_KEY));
-782                            rs.get(null, request);
-783                            assignedRegions.add(info);
-784                          } catch (Exception ex) {
-785                            LOG.debug("Caught exception while verifying group region", ex);
-786                          }
-787                        }
-788                        foundRegions.add(info);
-789                      }
-790                    }
-791                    return true;
-792                  }
-793                };
-794                MetaTableAccessor.fullScanRegions(conn, visitor);
-795                // if no regions in meta then we have to create the table
-796                if (foundRegions.size() < 1 && rootMetaFound && !createSent) {
-797                  createRSGroupTable();
-798                  createSent = true;
-799                }
-800                LOG.info("RSGroup table=" + RSGROUP_TABLE_NAME + " isOnline=" + found.get()
-801                    + ", regionCount=" + foundRegions.size() + ", assignCount="
-802                    + assignedRegions.size() + ", rootMetaFound=" + rootMetaFound);
-803                found.set(found.get() && assignedRegions.size() == foundRegions.size()
-804                    && foundRegions.size() > 0);
-805              } else {
-806                LOG.info("Waiting for catalog tables to come online");
-807                found.set(false);
-808              }
-809              if (found.get()) {
-810                LOG.debug("With group table online, refreshing cached information.");
-811                RSGroupInfoManagerImpl.this.refresh(true);
-812                online = true;
-813                //flush any inconsistencies between ZK and HTable
-814                RSGroupInfoManagerImpl.this.flushConfig();
-815              }
-816            } catch (RuntimeException e) {
-817              throw e;
-818            } catch (Exception e) {
-819              found.set(false);
-820              LOG.warn("Failed to perform check", e);
-821            }
-822            try {
-823              Thread.sleep(100);
-824            } catch (InterruptedException e) {
-825              LOG.info("Sleep interrupted", e);
-826            }
-827          }
-828          return found.get();
-829        }
-830
-831        private void createRSGroupTable() throws IOException {
-832          Long procId = masterServices.createSystemTable(RSGROUP_TABLE_DESC);
-833          // wait for region to be online
-834          int tries = 600;
-835          while (!(masterServices.getMasterProcedureExecutor().isFinished(procId))
-836              && masterServices.getMasterProcedureExecutor().isRunning()
-837              && tries > 0) {
-838            try {
-839              Thread.sleep(100);
-840            } catch (InterruptedException e) {
-841              throw new IOException("Wait interrupted ", e);
-842            }
-843            tries--;
-844          }
-845          if (tries <= 0) {
-846            throw new

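createRSGroupTable above waits for the new system table with a bounded polling loop: sleep in short intervals until the procedure finishes or the retry budget runs out. A hedged, self-contained sketch of that pattern (isDone stands in for the procedure-executor check; WaitUtil is hypothetical):

import java.io.IOException;
import java.util.function.BooleanSupplier;

final class WaitUtil {
  static void waitFor(BooleanSupplier isDone, int maxTries, long sleepMs) throws IOException {
    int tries = maxTries;
    while (!isDone.getAsBoolean() && tries > 0) {
      try {
        Thread.sleep(sleepMs); // poll in small increments, as the hunk does with 100 ms
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("Wait interrupted", e);
      }
      tries--;
    }
    if (tries <= 0) {
      throw new IOException("Timed out waiting for condition");
    }
  }
}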
[21/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index 883e8b2..b6da844 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -1172,6 +1172,13 @@
 InitMetaProcedure.acquireLock(MasterProcedureEnv env)

+protected static void
+AbstractStateMachineNamespaceProcedure.addOrUpdateNamespace(MasterProcedureEnv env,
+    NamespaceDescriptor ns)
+Insert/update the row into the ns family of meta table.
+
+
 private void
 RestoreSnapshotProcedure.addRegionsToInMemoryStates(List<RegionInfo> regionInfos,
     MasterProcedureEnv env,
@@ -1179,13 +1186,13 @@
 Add regions to in-memory states

-
+
 private void
 CloneSnapshotProcedure.addRegionsToMeta(MasterProcedureEnv env)
 Add regions to hbase:meta table.

-
+
 private static void
 CreateTableProcedure.addRegionsToMeta(MasterProcedureEnv env,
     TableDescriptor tableDescriptor,
@@ -1193,60 +1200,60 @@
 Add the specified set of regions to the hbase:meta table.

-
+
 private static void
 ModifyTableProcedure.addRegionsToMeta(MasterProcedureEnv env,
     TableDescriptor tableDescriptor,
     List<RegionInfo> regionInfos)

-
+
 protected static List<RegionInfo>
 CreateTableProcedure.addTableToMeta(MasterProcedureEnv env,
     TableDescriptor tableDescriptor,
     List<RegionInfo> regions)

-
+
 private void
 ServerCrashProcedure.assignRegions(MasterProcedureEnv env,
     List<RegionInfo> regions)
 Assign the regions on the crashed RS to other Rses.

-
+
 private static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest
 RSProcedureDispatcher.buildOpenRegionRequest(MasterProcedureEnv env,
     ServerName serverName,
     List<RSProcedureDispatcher.RegionOpenOperation> operations)

-
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo
 RSProcedureDispatcher.RegionOpenOperation.buildRegionOpenInfoRequest(MasterProcedureEnv env)

-
+
 private boolean
 ReopenTableRegionsProcedure.canSchedule(MasterProcedureEnv env,
     HRegionLocation loc)

-
+
 protected static void
 AbstractStateMachineTableProcedure.checkOnline(MasterProcedureEnv env,
     RegionInfo ri)
 Check region is online.

-
+
 protected void
 AbstractStateMachineRegionProcedure.checkTableModifiable(MasterProcedureEnv env)
 Check whether a table is modifiable - exists and either offline or online with config set

-
+
 protected void
 AbstractStateMachineTableProcedure.checkTableModifiable(MasterProcedureEnv env)
 Check whether a table is modifiable - exists and either offline or online with config set

-
+
 private static void
 DeleteTableProcedure.cleanAnyRemainingRows(MasterProcedureEnv env,
     TableName tableName)
@@ -1254,26 +1261,26 @@
  info:regioninfo column was empty because of some write error.

-
+
 protected void
 TruncateTableProcedure.completionCleanup(MasterProcedureEnv env)

-
+
 protected void
 ModifyTableProcedure.completionCleanup(MasterProcedureEnv env)

-
+
 protected void
 InitMetaProcedure.completionCleanup(MasterProcedureEnv env)

-
+
 protected static void
-CreateNamespaceProcedure.createDirectory(MasterProcedureEnv env,
+AbstractStateMachineNamespaceProcedure.createDirectory(MasterProcedureEnv env,
     NamespaceDescriptor nsDescriptor)
 Create the namespace directory

-
+
 private List<RegionInfo>
 CloneSnapshotProcedure.createFilesystemLayout(MasterProcedureEnv env,
     TableDescriptor tableDescriptor,
@@ -1281,20 +1288,20 @@
 Create regions in file system.

-
+
 protected static

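The change above hoists helpers such as addOrUpdateNamespace and createDirectory from CreateNamespaceProcedure into AbstractStateMachineNamespaceProcedure, so sibling namespace procedures can reuse them. A hedged sketch of that refactoring shape, with hypothetical stand-in types (the Map plays the role of the ns family in hbase:meta):

import java.util.Map;

// Helper hoisted into the abstract base so every namespace procedure shares it.
abstract class AbstractNamespaceProcedureSketch {
  protected static void addOrUpdateNamespace(Map<String, String> nsFamily, String namespace,
      String descriptor) {
    nsFamily.put(namespace, descriptor); // insert or update the row for this namespace
  }
}

class CreateNamespaceProcedureSketch extends AbstractNamespaceProcedureSketch {
  void execute(Map<String, String> nsFamily) {
    addOrUpdateNamespace(nsFamily, "ns1", "descriptor-bytes"); // reuses the hoisted helper
  }
}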
[21/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AuthManager.PermissionCache.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AuthManager.PermissionCache.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AuthManager.PermissionCache.html
new file mode 100644
index 000..4d5cbc9
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AuthManager.PermissionCache.html
@@ -0,0 +1,680 @@
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 *     http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018
+019package org.apache.hadoop.hbase.security.access;
+020
+021import java.io.Closeable;
+022import java.io.IOException;
+023import java.util.HashMap;
+024import java.util.HashSet;
+025import java.util.List;
+026import java.util.Map;
+027import java.util.Set;
+028import java.util.concurrent.ConcurrentHashMap;
+029import java.util.concurrent.atomic.AtomicLong;
+030
+031import org.apache.hadoop.conf.Configuration;
+032import org.apache.hadoop.hbase.AuthUtil;
+033import org.apache.hadoop.hbase.Cell;
+034import org.apache.hadoop.hbase.TableName;
+035import org.apache.hadoop.hbase.exceptions.DeserializationException;
+036import org.apache.hadoop.hbase.log.HBaseMarkers;
+037import org.apache.hadoop.hbase.security.Superusers;
+038import org.apache.hadoop.hbase.security.User;
+039import org.apache.hadoop.hbase.security.UserProvider;
+040import org.apache.hadoop.hbase.util.Bytes;
+041import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+042import org.apache.yetus.audience.InterfaceAudience;
+043import org.apache.zookeeper.KeeperException;
+044import org.slf4j.Logger;
+045import org.slf4j.LoggerFactory;
+046
+047import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+048import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
+049import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+050
+051/**
+052 * Performs authorization checks for a given user's assigned permissions.
+053 * <p>
+054 *   There're following scopes: <b>Global</b>, <b>Namespace</b>, <b>Table</b>, <b>Family</b>,
+055 *   <b>Qualifier</b>, <b>Cell</b>.
+056 *   Generally speaking, higher scopes can overrides lower scopes,
+057 *   except for Cell permission can be granted even a user has not permission on specified table,
+058 *   which means the user can get/scan only those granted cells parts.
+059 * </p>
+060 * e.g, if user A has global permission R(ead), he can
+061 * read table T without checking table scope permission, so authorization checks alway starts from
+062 * Global scope.
+063 * <p>
+064 *   For each scope, not only user but also groups he belongs to will be checked.
+065 * </p>
+066 */
+067@InterfaceAudience.Private
+068public final class AuthManager implements Closeable {
+069
+070  /**
+071   * Cache of permissions, it is thread safe.
+072   * @param <T> T extends Permission
+073   */
+074  private static class PermissionCache<T extends Permission> {
+075    private final Object mutex = new Object();
+076    private Map<String, Set<T>> cache = new HashMap<>();
+077
+078    void put(String name, T perm) {
+079      synchronized (mutex) {
+080        Set<T> perms = cache.getOrDefault(name, new HashSet<>());
+081        perms.add(perm);
+082        cache.put(name, perms);
+083      }
+084    }
+085
+086    Set<T> get(String name) {
+087      synchronized (mutex) {
+088        return cache.get(name);
+089      }
+090    }
+091
+092    void clear() {
+093      synchronized (mutex) {
+094        for (Map.Entry<String, Set<T>> entry : cache.entrySet()) {
+095          entry.getValue().clear();
+096        }
+097        cache.clear();
+098      }
+099    }
+100  }
+101  PermissionCache<NamespacePermission> NS_NO_PERMISSION = new PermissionCache<>();
+102  PermissionCache<TablePermission> TBL_NO_PERMISSION = new PermissionCache<>();
+103
+104  /**
+105   * Cache for global permission.
+106   * Since every user/group can only have one global permission, no need to user

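PermissionCache above guards a name-to-set multimap with a single mutex so put, get and clear stay consistent. An alternative, hedged sketch of the same structure built on ConcurrentHashMap, trading the explicit mutex for lock-free reads (not what AuthManager does, just a design variant):

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class ConcurrentPermissionCache<T> {
  private final Map<String, Set<T>> cache = new ConcurrentHashMap<>();

  void put(String name, T perm) {
    // computeIfAbsent atomically creates the per-name set on first insert.
    cache.computeIfAbsent(name, k -> ConcurrentHashMap.newKeySet()).add(perm);
  }

  Set<T> get(String name) {
    return cache.get(name); // no lock needed for reads
  }

  void clear() {
    cache.clear();
  }
}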
[21/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index b2a9771..bf81ebb 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -46,3768 +46,3806 @@
 038import java.util.Objects;
 039import java.util.Set;
 040import java.util.SortedMap;
-041import java.util.TreeMap;
-042import java.util.TreeSet;
-043import java.util.concurrent.ConcurrentHashMap;
-044import java.util.concurrent.ConcurrentMap;
-045import java.util.concurrent.ConcurrentSkipListMap;
-046import java.util.concurrent.atomic.AtomicBoolean;
-047import java.util.concurrent.locks.ReentrantReadWriteLock;
-048import java.util.function.Function;
-049import javax.management.MalformedObjectNameException;
-050import javax.servlet.http.HttpServlet;
-051import org.apache.commons.lang3.RandomUtils;
-052import org.apache.commons.lang3.StringUtils;
-053import org.apache.commons.lang3.SystemUtils;
-054import org.apache.hadoop.conf.Configuration;
-055import org.apache.hadoop.fs.FileSystem;
-056import org.apache.hadoop.fs.Path;
-057import org.apache.hadoop.hbase.Abortable;
-058import org.apache.hadoop.hbase.CacheEvictionStats;
-059import org.apache.hadoop.hbase.ChoreService;
-060import org.apache.hadoop.hbase.ClockOutOfSyncException;
-061import org.apache.hadoop.hbase.CoordinatedStateManager;
-062import org.apache.hadoop.hbase.DoNotRetryIOException;
-063import org.apache.hadoop.hbase.HBaseConfiguration;
-064import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-065import org.apache.hadoop.hbase.HConstants;
-066import org.apache.hadoop.hbase.HealthCheckChore;
-067import org.apache.hadoop.hbase.MetaTableAccessor;
-068import org.apache.hadoop.hbase.NotServingRegionException;
-069import org.apache.hadoop.hbase.PleaseHoldException;
-070import org.apache.hadoop.hbase.ScheduledChore;
-071import org.apache.hadoop.hbase.ServerName;
-072import org.apache.hadoop.hbase.Stoppable;
-073import org.apache.hadoop.hbase.TableDescriptors;
-074import org.apache.hadoop.hbase.TableName;
-075import org.apache.hadoop.hbase.YouAreDeadException;
-076import org.apache.hadoop.hbase.ZNodeClearer;
-077import org.apache.hadoop.hbase.client.ClusterConnection;
-078import org.apache.hadoop.hbase.client.Connection;
-079import org.apache.hadoop.hbase.client.ConnectionUtils;
-080import org.apache.hadoop.hbase.client.RegionInfo;
-081import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-082import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-083import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-084import org.apache.hadoop.hbase.client.locking.EntityLock;
-085import org.apache.hadoop.hbase.client.locking.LockServiceClient;
-086import org.apache.hadoop.hbase.conf.ConfigurationManager;
-087import org.apache.hadoop.hbase.conf.ConfigurationObserver;
-088import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination;
-089import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
-090import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-091import org.apache.hadoop.hbase.exceptions.RegionMovedException;
-092import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
-093import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-094import org.apache.hadoop.hbase.executor.ExecutorService;
-095import org.apache.hadoop.hbase.executor.ExecutorType;
-096import org.apache.hadoop.hbase.fs.HFileSystem;
-097import org.apache.hadoop.hbase.http.InfoServer;
-098import org.apache.hadoop.hbase.io.hfile.BlockCache;
-099import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-100import org.apache.hadoop.hbase.io.hfile.HFile;
-101import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
-102import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-103import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper;
-104import org.apache.hadoop.hbase.ipc.RpcClient;
-105import org.apache.hadoop.hbase.ipc.RpcClientFactory;
-106import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-107import org.apache.hadoop.hbase.ipc.RpcServer;
-108import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-109import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-110import org.apache.hadoop.hbase.ipc.ServerRpcController;
-111import org.apache.hadoop.hbase.log.HBaseMarkers;
-112import org.apache.hadoop.hbase.master.HMaster;
-113import org.apache.hadoop.hbase.master.LoadBalancer;
-114import org.apache.hadoop.hbase.master.RegionState.State;
-115import
[21/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
index c7d99b2..9d1542c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
@@ -382,1357 +382,1365 @@
 374    for (int i = 0; i < this.curFunctionCosts.length; i++) {
 375      curFunctionCosts[i] = tempFunctionCosts[i];
 376    }
-377    LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-378        + functionCost());
+377    double initCost = currentCost;
+378    double newCost = currentCost;
 379
-380    double initCost = currentCost;
-381    double newCost = currentCost;
-382
-383    long computedMaxSteps;
-384    if (runMaxSteps) {
-385      computedMaxSteps = Math.max(this.maxSteps,
-386          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-387    } else {
-388      computedMaxSteps = Math.min(this.maxSteps,
-389          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-390    }
-391    // Perform a stochastic walk to see if we can get a good fit.
-392    long step;
-393
-394    for (step = 0; step < computedMaxSteps; step++) {
-395      Cluster.Action action = nextAction(cluster);
-396
-397      if (action.type == Type.NULL) {
-398        continue;
-399      }
-400
-401      cluster.doAction(action);
-402      updateCostsWithAction(cluster, action);
-403
-404      newCost = computeCost(cluster, currentCost);
-405
-406      // Should this be kept?
-407      if (newCost < currentCost) {
-408        currentCost = newCost;
-409
-410        // save for JMX
-411        curOverallCost = currentCost;
-412        for (int i = 0; i < this.curFunctionCosts.length; i++) {
-413          curFunctionCosts[i] = tempFunctionCosts[i];
-414        }
-415      } else {
-416        // Put things back the way they were before.
-417        // TODO: undo by remembering old values
-418        Action undoAction = action.undoAction();
-419        cluster.doAction(undoAction);
-420        updateCostsWithAction(cluster, undoAction);
-421      }
-422
-423      if (EnvironmentEdgeManager.currentTime() - startTime >
-424          maxRunningTime) {
-425        break;
-426      }
-427    }
-428    long endTime = EnvironmentEdgeManager.currentTime();
-429
-430    metricsBalancer.balanceCluster(endTime - startTime);
-431
-432    // update costs metrics
-433    updateStochasticCosts(tableName, curOverallCost, curFunctionCosts);
-434    if (initCost > currentCost) {
-435      plans = createRegionPlans(cluster);
-436      LOG.info("Finished computing new load balance plan. Computation took {}" +
-437        " to try {} different iterations.  Found a solution that moves " +
-438        "{} regions; Going from a computed cost of {}" +
-439        " to a new cost of {}", java.time.Duration.ofMillis(endTime - startTime),
-440        step, plans.size(), initCost, currentCost);
-441      return plans;
-442    }
-443    LOG.info("Could not find a better load balance plan.  Tried {} different configurations in " +
-444      "{}, and did not find anything with a computed cost less than {}", step,
-445      java.time.Duration.ofMillis(endTime - startTime), initCost);
-446    return null;
-447  }
-448
-449  /**
-450   * update costs to JMX
-451   */
-452  private void updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) {
-453    if (tableName == null) return;
-454
-455    // check if the metricsBalancer is MetricsStochasticBalancer before casting
-456    if (metricsBalancer instanceof MetricsStochasticBalancer) {
-457      MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer;
-458      // overall cost
-459      balancer.updateStochasticCost(tableName.getNameAsString(),
-460        "Overall", "Overall cost", overall);
-461
-462      // each cost function
-463      for (int i = 0; i < costFunctions.length; i++) {
-464        CostFunction costFunction = costFunctions[i];
-465        String costFunctionName = costFunction.getClass().getSimpleName();
-466        Double costPercent = (overall == 0) ? 0 : (subCosts[i] / overall);
-467        // TODO: cost function may need a specific description
-468        balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-469          "The percent of " + costFunctionName,

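Taken together, the hunk above is the balancer's stochastic walk: propose a random action, keep it when the computed cost drops, undo it otherwise, and stop on a step or time budget. A minimal self-contained sketch of that accept-or-undo loop (plain Java; Move, proposeMove and cost are illustrative stand-ins, not the HBase classes):

import java.util.function.Supplier;

/** Greedy accept-or-undo walk, mirroring the shape of the balancer loop. */
final class StochasticWalk {
  interface Move { void apply(); void undo(); }

  static double walk(Supplier<Move> proposeMove, Supplier<Double> cost,
      long maxSteps, long maxRunningTimeMs) {
    long start = System.currentTimeMillis();
    double current = cost.get();
    for (long step = 0; step < maxSteps; step++) {
      Move move = proposeMove.get();   // random candidate action
      move.apply();
      double next = cost.get();
      if (next < current) {
        current = next;                // keep the improvement
      } else {
        move.undo();                   // put things back the way they were
      }
      if (System.currentTimeMillis() - start > maxRunningTimeMs) {
        break;                         // time budget exhausted
      }
    }
    return current;
  }
}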
[21/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 9f9d0e1..b2fc784 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -124,7 +124,7 @@ var activeTableTab = "activeTableTab";


 @InterfaceAudience.Public
-public class HBaseTestingUtility
+public class HBaseTestingUtility
 extends HBaseZKTestingUtility
 Facility for testing HBase. Replacement for
  old HBaseTestCase and HBaseClusterTestCase functionality.
@@ -140,7 +140,8 @@ extends
 To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
- setting it to true.
+ setting it to true.
+ Trigger pre commit.


@@ -2018,7 +2019,7 @@ extends

 TEST_DIRECTORY_KEY
 @Deprecated
-private static final String TEST_DIRECTORY_KEY
+private static final String TEST_DIRECTORY_KEY
 Deprecated. can be used only with mini dfs
 System property key to get test directory value. Name is as it is because mini dfs has
  hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property
@@ -2035,7 +2036,7 @@

 REGIONS_PER_SERVER_KEY
-public static final String REGIONS_PER_SERVER_KEY
+public static final String REGIONS_PER_SERVER_KEY

 See Also:
 Constant Field Values
@@ -2048,7 +2049,7 @@

 DEFAULT_REGIONS_PER_SERVER
-public static final int DEFAULT_REGIONS_PER_SERVER
+public static final int DEFAULT_REGIONS_PER_SERVER
 The default number of regions per regionserver when creating a pre-split
  table.
@@ -2063,7 +2064,7 @@

 PRESPLIT_TEST_TABLE_KEY
-public static final String PRESPLIT_TEST_TABLE_KEY
+public static final String PRESPLIT_TEST_TABLE_KEY

 See Also:
 Constant Field Values
@@ -2076,7 +2077,7 @@

 PRESPLIT_TEST_TABLE
-public static final boolean PRESPLIT_TEST_TABLE
+public static final boolean PRESPLIT_TEST_TABLE

 See Also:
 Constant Field Values
@@ -2089,7 +2090,7 @@

 dfsCluster
-private org.apache.hadoop.hdfs.MiniDFSCluster dfsCluster
+private org.apache.hadoop.hdfs.MiniDFSCluster dfsCluster
@@ -2098,7 +2099,7 @@

 hbaseCluster
-private volatile HBaseCluster hbaseCluster
+private volatile HBaseCluster hbaseCluster
@@ -2107,7 +2108,7 @@

 mrCluster
-private org.apache.hadoop.mapred.MiniMRCluster mrCluster
+private org.apache.hadoop.mapred.MiniMRCluster mrCluster
@@ -2116,7 +2117,7 @@

 miniClusterRunning
-private volatile boolean miniClusterRunning
+private volatile boolean miniClusterRunning
 If there is a mini cluster running for this testing utility instance.
@@ -2126,7 +2127,7 @@

 hadoopLogDir
-private String hadoopLogDir
+private String hadoopLogDir
@@ -2135,7 +2136,7 @@

 dataTestDirOnTestFS
-private org.apache.hadoop.fs.Path dataTestDirOnTestFS
+private org.apache.hadoop.fs.Path dataTestDirOnTestFS
 Directory on test filesystem where we put the data for this instance of
  HBaseTestingUtility
@@ -2146,7 +2147,7 @@

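For orientation, the usual shape of a test built on this utility — one mini cluster per test class — looks roughly like the sketch below (assumes JUnit 4 and the HBase 2.x test API; table and family names are illustrative):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class ExampleMiniClusterTest {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    TEST_UTIL.startMiniCluster();    // spins up ZK, DFS and HBase in-process
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // test dirs are preserved if hbase.testing.preserve.testdir=true
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testPut() throws Exception {
    Table table = TEST_UTIL.createTable(TableName.valueOf("t"), Bytes.toBytes("f"));
    // ... issue puts/gets against the mini cluster ...
  }
}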
[21/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
index 093bc8e..7744c8c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.html
@@ -377,7 +377,7 @@ extends Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, doExecute, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, incChildrenLatch, isBypass, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, skipPersistence, toString, toStringClass, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
+addStackIndex, afterReplay, beforeReplay, bypass, compareTo, doExecute, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, incChildrenLatch, isBypass, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, skipPersistence, toString, toStringClass, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
 
 
 



[21/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterSplittingRegions.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterSplittingRegions.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterSplittingRegions.html
new file mode 100644
index 000..cb4690a
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientAfterSplittingRegions.html
@@ -0,0 +1,369 @@
+TestRestoreSnapshotFromClientAfterSplittingRegions (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase.client
+Class TestRestoreSnapshotFromClientAfterSplittingRegions
+
+java.lang.Object
+  org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+    org.apache.hadoop.hbase.client.RestoreSnapshotFromClientAfterSplittingRegionsTestBase
+      org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientAfterSplittingRegions
+
+public class TestRestoreSnapshotFromClientAfterSplittingRegions
+extends RestoreSnapshotFromClientAfterSplittingRegionsTestBase
+
+Field Summary
+
+Fields
+Modifier and Type    Field and Description
+static HBaseClassTestRule    CLASS_RULE
+int    numReplicas
+
+Fields inherited from class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+admin, emptySnapshot, FAMILY, name, snapshot0Rows, snapshot1Rows, snapshotName0, snapshotName1, snapshotName2, tableName, TEST_FAMILY2, TEST_UTIL
+
+Constructor Summary
+
+Constructors
+Constructor and Description
+TestRestoreSnapshotFromClientAfterSplittingRegions()
+
+Method Summary
+
+All Methods  Static Methods  Instance Methods  Concrete Methods
+Modifier and Type    Method and Description
+protected int    getNumReplicas()
+static java.util.List<Object[]>    params()
+
+Methods inherited from class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientAfterSplittingRegionsTestBase
+testRestoreSnapshotAfterSplittingRegions
+
+Methods inherited from class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+countRows, createTable, getValidMethodName, setup, setupCluster, setupConf, splitRegion, tearDown, tearDownAfterClass, verifyRowCount
+
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, hashCode, notify,

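The new test class is a thin parameterized shell: params() supplies the run matrix and getNumReplicas() feeds region replication into table creation. A rough JUnit 4 sketch of that pattern (an illustrative stand-in, not the real HBase base classes):

import java.util.Arrays;
import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class ExampleReplicaParameterizedTest {
  @Parameter
  public int numReplicas;                 // injected per run, like the field above

  @Parameters
  public static List<Object[]> params() { // one run with 1 replica, one with 3
    return Arrays.asList(new Object[] { 1 }, new Object[] { 3 });
  }

  protected int getNumReplicas() {
    return numReplicas;
  }

  @Test
  public void testSomethingReplicaAware() {
    // the real test would create a table with getNumReplicas() region replicas
  }
}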
[21/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureIterator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureIterator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureIterator.html
index ff29160..e4dc134 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureIterator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureIterator.html
@@ -42,190 +42,208 @@
 034 public interface ProcedureStore {
 035   /**
 036    * Store listener interface.
-037    * The main process should register a listener and respond to the store events.
-038    */
-039   public interface ProcedureStoreListener {
-040     /**
-041      * triggered when the store sync is completed.
-042      */
-043     void postSync();
-044
-045     /**
-046      * triggered when the store is not able to write out data.
-047      * the main process should abort.
-048      */
-049     void abortProcess();
-050   }
-051
-052   /**
-053    * An Iterator over a collection of Procedure
-054    */
-055   public interface ProcedureIterator {
-056     /**
-057      * Reset the Iterator by seeking to the beginning of the list.
-058      */
-059     void reset();
-060
-061     /**
-062      * Returns true if the iterator has more elements.
-063      * (In other words, returns true if next() would return a Procedure
-064      * rather than throwing an exception.)
-065      * @return true if the iterator has more procedures
-066      */
-067     boolean hasNext();
-068
-069     /**
-070      * @return true if the iterator next element is a completed procedure.
-071      */
-072     boolean isNextFinished();
-073
+037    * <p/>
+038    * The main process should register a listener and respond to the store events.
+039    */
+040   public interface ProcedureStoreListener {
+041
+042     /**
+043      * triggered when the store sync is completed.
+044      */
+045     default void postSync() {
+046     }
+047
+048     /**
+049      * triggered when the store is not able to write out data. the main process should abort.
+050      */
+051     default void abortProcess() {
+052     }
+053
+054     /**
+055      * Suggest that the upper layer should update the state of some procedures. Ignore this call
+056      * will not effect correctness but performance.
+057      * <p/>
+058      * For a WAL based ProcedureStore implementation, if all the procedures stored in a WAL file
+059      * have been deleted, or updated later in another WAL file, then we can delete the WAL file. If
+060      * there are old procedures in a WAL file which are never deleted or updated, then we can not
+061      * delete the WAL file and this will cause we hold lots of WAL file and slow down the master
+062      * restarts. So here we introduce this method to tell the upper layer that please update the
+063      * states of these procedures so that we can delete the old WAL file.
+064      * @param procIds the id for the procedures
+065      */
+066     default void forceUpdate(long[] procIds) {
+067     }
+068   }
+069
+070   /**
+071    * An Iterator over a collection of Procedure
+072    */
+073   public interface ProcedureIterator {
 074     /**
-075      * Skip the next procedure
+075      * Reset the Iterator by seeking to the beginning of the list.
 076      */
-077     void skipNext();
+077     void reset();
 078
 079     /**
-080      * Returns the next procedure in the iteration.
-081      * @throws IOException if there was an error fetching/deserializing the procedure
-082      * @return the next procedure in the iteration.
-083      */
-084     @SuppressWarnings("rawtypes")
-085     Procedure next() throws IOException;
-086   }
-087
-088   /**
-089    * Interface passed to the ProcedureStore.load() method to handle the store-load events.
-090    */
-091   public interface ProcedureLoader {
+080      * Returns true if the iterator has more elements.
+081      * (In other words, returns true if next() would return a Procedure
+082      * rather than throwing an exception.)
+083      * @return true if the iterator has more procedures
+084      */
+085     boolean hasNext();
+086
+087     /**
+088      * @return true if the iterator next element is a completed procedure.
+089      */
+090     boolean isNextFinished();
+091
 092     /**
-093      * Called by ProcedureStore.load() to notify about the maximum proc-id in the store.
-094      * @param maxProcId the highest proc-id in the store
-095      */
-096     void setMaxProcId(long maxProcId);
-097
-098     /**
-099      * Called by the ProcedureStore.load() every time a set of procedures are ready to be executed.
-100      * The ProcedureIterator passed to the method, has the procedure sorted in replay-order.
-101      * @param procIter iterator over

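Because postSync(), abortProcess() and the new forceUpdate(long[]) are now default methods, a listener can override only what it cares about. A minimal sketch against the interface exactly as shown above (the println body is a placeholder for the real re-persist logic):

import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;

// Only forceUpdate is overridden; postSync()/abortProcess() fall back to the
// empty default bodies introduced by this change.
public class WalCleanupListener implements ProcedureStoreListener {
  @Override
  public void forceUpdate(long[] procIds) {
    // Re-persist the state of these procedures so the store can retire
    // the old WAL files that still reference them.
    for (long procId : procIds) {
      System.out.println("re-persist state of procedure " + procId);
    }
  }
}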
[21/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
index 43c66a8..061ce80 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
@@ -23,2136 +23,2142 @@
 015  * See the License for the specific language governing permissions and
 016  * limitations under the License.
 017  */
-018
-019 package org.apache.hadoop.hbase.procedure2;
-020
-021 import java.io.IOException;
-022 import java.util.ArrayDeque;
-023 import java.util.ArrayList;
-024 import java.util.Arrays;
-025 import java.util.Collection;
-026 import java.util.Deque;
-027 import java.util.HashSet;
-028 import java.util.Iterator;
-029 import java.util.List;
-030 import java.util.Map;
-031 import java.util.Objects;
-032 import java.util.Set;
-033 import java.util.concurrent.ConcurrentHashMap;
-034 import java.util.concurrent.CopyOnWriteArrayList;
-035 import java.util.concurrent.TimeUnit;
-036 import java.util.concurrent.atomic.AtomicBoolean;
-037 import java.util.concurrent.atomic.AtomicInteger;
-038 import java.util.concurrent.atomic.AtomicLong;
-039 import java.util.stream.Collectors;
-040 import java.util.stream.Stream;
-041
-042 import org.apache.hadoop.conf.Configuration;
-043 import org.apache.hadoop.hbase.HConstants;
-044 import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-045 import org.apache.hadoop.hbase.log.HBaseMarkers;
-046 import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-047 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-048 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-049 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-050 import org.apache.hadoop.hbase.security.User;
-051 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052 import org.apache.hadoop.hbase.util.IdLock;
-053 import org.apache.hadoop.hbase.util.NonceKey;
-054 import org.apache.hadoop.hbase.util.Threads;
-055 import org.apache.yetus.audience.InterfaceAudience;
-056 import org.slf4j.Logger;
-057 import org.slf4j.LoggerFactory;
-058
-059 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-060 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+018 package org.apache.hadoop.hbase.procedure2;
+019
+020 import java.io.IOException;
+021 import java.util.ArrayDeque;
+022 import java.util.ArrayList;
+023 import java.util.Arrays;
+024 import java.util.Collection;
+025 import java.util.Deque;
+026 import java.util.HashSet;
+027 import java.util.Iterator;
+028 import java.util.List;
+029 import java.util.Map;
+030 import java.util.Objects;
+031 import java.util.Set;
+032 import java.util.concurrent.ConcurrentHashMap;
+033 import java.util.concurrent.CopyOnWriteArrayList;
+034 import java.util.concurrent.TimeUnit;
+035 import java.util.concurrent.atomic.AtomicBoolean;
+036 import java.util.concurrent.atomic.AtomicInteger;
+037 import java.util.concurrent.atomic.AtomicLong;
+038 import java.util.stream.Collectors;
+039 import java.util.stream.Stream;
+040 import org.apache.hadoop.conf.Configuration;
+041 import org.apache.hadoop.hbase.HConstants;
+042 import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+043 import org.apache.hadoop.hbase.log.HBaseMarkers;
+044 import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+045 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+046 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+047 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+048 import org.apache.hadoop.hbase.security.User;
+049 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+050 import org.apache.hadoop.hbase.util.IdLock;
+051 import org.apache.hadoop.hbase.util.NonceKey;
+052 import org.apache.hadoop.hbase.util.Threads;
+053 import org.apache.yetus.audience.InterfaceAudience;
+054 import org.slf4j.Logger;
+055 import org.slf4j.LoggerFactory;
+056
+057 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+058 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+059
+060 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 061
-062 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-063
-064 /**
-065  * Thread Pool that executes the submitted procedures.
-066  * The executor has a ProcedureStore associated.
-067  * Each operation is logged and on restart the pending procedures are resumed.
-068  *
-069  * Unless the Procedure code throws an error
[21/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 77bb7f2..0019de7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -885,414 +885,417 @@
 877       Map<String, String> props);
 878
 879   /**
-880    * abort a procedure
-881    * @param procId ID of the procedure to abort
-882    * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
-883    * @return true if aborted, false if procedure already completed or does not exist. the value is
-884    *         wrapped by {@link CompletableFuture}
-885    */
-886   CompletableFuture<Boolean> abortProcedure(long procId, boolean mayInterruptIfRunning);
-887
-888   /**
-889    * List procedures
-890    * @return procedure list JSON wrapped by {@link CompletableFuture}
-891    */
-892   CompletableFuture<String> getProcedures();
-893
-894   /**
-895    * List locks.
-896    * @return lock list JSON wrapped by {@link CompletableFuture}
-897    */
-898   CompletableFuture<String> getLocks();
-899
-900   /**
-901    * Mark region server(s) as decommissioned to prevent additional regions from getting
-902    * assigned to them. Optionally unload the regions on the servers. If there are multiple servers
-903    * to be decommissioned, decommissioning them at the same time can prevent wasteful region
-904    * movements. Region unloading is asynchronous.
-905    * @param servers The list of servers to decommission.
-906    * @param offload True to offload the regions from the decommissioned servers
-907    */
-908   CompletableFuture<Void> decommissionRegionServers(List<ServerName> servers, boolean offload);
-909
-910   /**
-911    * List region servers marked as decommissioned, which can not be assigned regions.
-912    * @return List of decommissioned region servers wrapped by {@link CompletableFuture}
-913    */
-914   CompletableFuture<List<ServerName>> listDecommissionedRegionServers();
-915
-916   /**
-917    * Remove decommission marker from a region server to allow regions assignments. Load regions onto
-918    * the server if a list of regions is given. Region loading is asynchronous.
-919    * @param server The server to recommission.
-920    * @param encodedRegionNames Regions to load onto the server.
-921    */
-922   CompletableFuture<Void> recommissionRegionServer(ServerName server,
-923       List<byte[]> encodedRegionNames);
-924
-925   /**
-926    * @return cluster status wrapped by {@link CompletableFuture}
-927    */
-928   CompletableFuture<ClusterMetrics> getClusterMetrics();
-929
-930   /**
-931    * @return cluster status wrapped by {@link CompletableFuture}
-932    */
-933   CompletableFuture<ClusterMetrics> getClusterMetrics(EnumSet<Option> options);
-934
-935   /**
-936    * @return current master server name wrapped by {@link CompletableFuture}
-937    */
-938   default CompletableFuture<ServerName> getMaster() {
-939     return getClusterMetrics(EnumSet.of(Option.MASTER)).thenApply(ClusterMetrics::getMasterName);
-940   }
-941
-942   /**
-943    * @return current backup master list wrapped by {@link CompletableFuture}
-944    */
-945   default CompletableFuture<Collection<ServerName>> getBackupMasters() {
-946     return getClusterMetrics(EnumSet.of(Option.BACKUP_MASTERS))
-947         .thenApply(ClusterMetrics::getBackupMasterNames);
-948   }
-949
-950   /**
-951    * @return current live region servers list wrapped by {@link CompletableFuture}
-952    */
-953   default CompletableFuture<Collection<ServerName>> getRegionServers() {
-954     return getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS))
-955         .thenApply(cm -> cm.getLiveServerMetrics().keySet());
-956   }
-957
-958   /**
-959    * @return a list of master coprocessors wrapped by {@link CompletableFuture}
-960    */
-961   default CompletableFuture<List<String>> getMasterCoprocessorNames() {
-962     return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS))
-963         .thenApply(ClusterMetrics::getMasterCoprocessorNames);
-964   }
-965
-966   /**
-967    * Get the info port of the current master if one is available.
-968    * @return master info port
-969    */
-970   default CompletableFuture<Integer> getMasterInfoPort() {
-971     return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)).thenApply(
-972         ClusterMetrics::getMasterInfoPort);
-973   }
-974
-975   /**
-976    * Shuts down the HBase cluster.
-977    */
-978   CompletableFuture<Void> shutdown();
-979
-980   /**
-981    * Shuts down the current HBase master only.
-982    */
-983   CompletableFuture<Void> stopMaster();
-984
-985   /**
-986    * Stop the designated regionserver.
-987    * @param serverName
-988    */
-989   CompletableFuture<Void>

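The default methods above are thin projections over getClusterMetrics(EnumSet), so client code can compose them without blocking. A small sketch (assumes an already-open AsyncConnection; printing stands in for real handling):

import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;

public class ClusterPeek {
  // Prints the active master and the live region server count without blocking.
  static void peek(AsyncConnection conn) {
    AsyncAdmin admin = conn.getAdmin();
    admin.getMaster()
        .thenAccept(master -> System.out.println("master: " + master));
    admin.getRegionServers()
        .thenAccept(rs -> System.out.println("live region servers: " + rs.size()));
  }
}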
[21/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
index 0cf012a..976894f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
@@ -63,3884 +63,3883 @@
 055 import javax.servlet.http.HttpServlet;
 056 import javax.servlet.http.HttpServletRequest;
 057 import javax.servlet.http.HttpServletResponse;
-058
-059 import org.apache.commons.lang3.StringUtils;
-060 import org.apache.hadoop.conf.Configuration;
-061 import org.apache.hadoop.fs.Path;
-062 import org.apache.hadoop.hbase.ChoreService;
-063 import org.apache.hadoop.hbase.ClusterId;
-064 import org.apache.hadoop.hbase.ClusterMetrics;
-065 import org.apache.hadoop.hbase.ClusterMetrics.Option;
-066 import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-067 import org.apache.hadoop.hbase.CompoundConfiguration;
-068 import org.apache.hadoop.hbase.DoNotRetryIOException;
-069 import org.apache.hadoop.hbase.HBaseIOException;
-070 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-071 import org.apache.hadoop.hbase.HConstants;
-072 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
-073 import org.apache.hadoop.hbase.MasterNotRunningException;
-074 import org.apache.hadoop.hbase.MetaTableAccessor;
-075 import org.apache.hadoop.hbase.NamespaceDescriptor;
-076 import org.apache.hadoop.hbase.PleaseHoldException;
-077 import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-078 import org.apache.hadoop.hbase.ServerName;
-079 import org.apache.hadoop.hbase.TableDescriptors;
-080 import org.apache.hadoop.hbase.TableName;
-081 import org.apache.hadoop.hbase.TableNotDisabledException;
-082 import org.apache.hadoop.hbase.TableNotFoundException;
-083 import org.apache.hadoop.hbase.UnknownRegionException;
-084 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-085 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-086 import org.apache.hadoop.hbase.client.MasterSwitchType;
-087 import org.apache.hadoop.hbase.client.RegionInfo;
-088 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-089 import org.apache.hadoop.hbase.client.Result;
-090 import org.apache.hadoop.hbase.client.TableDescriptor;
-091 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-092 import org.apache.hadoop.hbase.client.TableState;
-093 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-094 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-095 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-096 import org.apache.hadoop.hbase.executor.ExecutorType;
-097 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-098 import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-099 import org.apache.hadoop.hbase.http.InfoServer;
-100 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-101 import org.apache.hadoop.hbase.ipc.RpcServer;
-102 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-103 import org.apache.hadoop.hbase.log.HBaseMarkers;
-104 import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-105 import org.apache.hadoop.hbase.master.assignment.AssignProcedure;
-106 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-107 import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-108 import org.apache.hadoop.hbase.master.assignment.MoveRegionProcedure;
-109 import org.apache.hadoop.hbase.master.assignment.RegionStateNode;
-110 import org.apache.hadoop.hbase.master.assignment.RegionStates;
-111 import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
-112 import org.apache.hadoop.hbase.master.assignment.UnassignProcedure;
-113 import org.apache.hadoop.hbase.master.balancer.BalancerChore;
-114 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-115 import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-116 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-117 import org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-118 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-119 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-120 import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-121 import org.apache.hadoop.hbase.master.locking.LockManager;
-122 import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-123 import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-124 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-125 import
[21/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
index a5789e0..93a57cb 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
@@ -238,4120 +238,4119 @@
 230  * @see Admin
 231  */
 232 @InterfaceAudience.Private
-233 @InterfaceStability.Evolving
-234 public class HBaseAdmin implements Admin {
-235   private static final Logger LOG = LoggerFactory.getLogger(HBaseAdmin.class);
-236
-237   private ClusterConnection connection;
-238
-239   private final Configuration conf;
-240   private final long pause;
-241   private final int numRetries;
-242   private final int syncWaitTimeout;
-243   private boolean aborted;
-244   private int operationTimeout;
-245   private int rpcTimeout;
-246
-247   private RpcRetryingCallerFactory rpcCallerFactory;
-248   private RpcControllerFactory rpcControllerFactory;
-249
-250   private NonceGenerator ng;
-251
-252   @Override
-253   public int getOperationTimeout() {
-254     return operationTimeout;
-255   }
-256
-257   HBaseAdmin(ClusterConnection connection) throws IOException {
-258     this.conf = connection.getConfiguration();
-259     this.connection = connection;
-260
-261     // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time.
-262     this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-263         HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-264     this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-265         HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-266     this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-267         HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-268     this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-269         HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-270     this.syncWaitTimeout = this.conf.getInt(
-271       "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
-272
-273     this.rpcCallerFactory = connection.getRpcRetryingCallerFactory();
-274     this.rpcControllerFactory = connection.getRpcControllerFactory();
-275
-276     this.ng = this.connection.getNonceGenerator();
-277   }
-278
-279   @Override
-280   public void abort(String why, Throwable e) {
-281     // Currently does nothing but throw the passed message and exception
-282     this.aborted = true;
-283     throw new RuntimeException(why, e);
-284   }
-285
-286   @Override
-287   public boolean isAborted() {
-288     return this.aborted;
-289   }
-290
-291   @Override
-292   public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
-293       throws IOException {
-294     return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
-295       TimeUnit.MILLISECONDS);
-296   }
-297
-298   @Override
-299   public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
-300       throws IOException {
-301     Boolean abortProcResponse =
-302         executeCallable(new MasterCallable<AbortProcedureResponse>(getConnection(),
-303             getRpcControllerFactory()) {
-304       @Override
-305       protected AbortProcedureResponse rpcCall() throws Exception {
-306         AbortProcedureRequest abortProcRequest =
-307             AbortProcedureRequest.newBuilder().setProcId(procId).build();
-308         return master.abortProcedure(getRpcController(), abortProcRequest);
-309       }
-310     }).getIsProcedureAborted();
-311     return new AbortProcedureFuture(this, procId, abortProcResponse);
-312   }
-313
-314   @Override
-315   public List<TableDescriptor> listTableDescriptors() throws IOException {
-316     return listTableDescriptors((Pattern) null, false);
-317   }
-318
-319   @Override
-320   public List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
-321     return listTableDescriptors(pattern, false);
-322   }
-323
-324   @Override
-325   public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
-326       throws IOException {
-327     return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-328         getRpcControllerFactory()) {
-329       @Override
-330       protected List<TableDescriptor> rpcCall() throws Exception {
-331         GetTableDescriptorsRequest req =
-332             RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
-333         return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-334             req));
-335       }
-336     });
-337   }
-338
-339

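As the snippet shows, the synchronous abortProcedure is just a bounded wait over abortProcedureAsync. A hedged usage sketch against the public Admin API (the procedure id would come from an earlier admin call):

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.client.Admin;

public class AbortExample {
  // Ask the master to abort a procedure and wait for the boolean outcome.
  static boolean abort(Admin admin, long procId) throws Exception {
    Future<Boolean> pending = admin.abortProcedureAsync(procId, true);
    return pending.get(); // true if aborted, false if already done or unknown
  }
}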
[21/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/filter/ValueFilter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ValueFilter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ValueFilter.html
index 2f19834..9b23394 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ValueFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ValueFilter.html
@@ -29,140 +29,135 @@
 021
 022 import java.io.IOException;
 023 import java.util.ArrayList;
-024 import java.util.Objects;
-025
-026 import org.apache.hadoop.hbase.Cell;
-027 import org.apache.hadoop.hbase.CompareOperator;
-028 import org.apache.yetus.audience.InterfaceAudience;
-029 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-030 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-031 import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-032 import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-033
-034 /**
-035  * This filter is used to filter based on column value. It takes an
-036  * operator (equal, greater, not equal, etc) and a byte [] comparator for the
-037  * cell value.
-038  * <p>
-039  * This filter can be wrapped with {@link WhileMatchFilter} and {@link SkipFilter}
-040  * to add more control.
-041  * <p>
-042  * Multiple filters can be combined using {@link FilterList}.
-043  * <p>
-044  * To test the value of a single qualifier when scanning multiple qualifiers,
-045  * use {@link SingleColumnValueFilter}.
-046  */
-047 @InterfaceAudience.Public
-048 public class ValueFilter extends CompareFilter {
-049
-050   /**
-051    * Constructor.
-052    * @param valueCompareOp the compare op for value matching
-053    * @param valueComparator the comparator for value matching
-054    * @deprecated Since 2.0.0. Will be removed in 3.0.0.
-055    * Use {@link #ValueFilter(CompareOperator, ByteArrayComparable)}
-056    */
-057   public ValueFilter(final CompareOp valueCompareOp,
-058       final ByteArrayComparable valueComparator) {
-059     super(valueCompareOp, valueComparator);
-060   }
-061
-062   /**
-063    * Constructor.
-064    * @param valueCompareOp the compare op for value matching
-065    * @param valueComparator the comparator for value matching
-066    */
-067   public ValueFilter(final CompareOperator valueCompareOp,
-068       final ByteArrayComparable valueComparator) {
-069     super(valueCompareOp, valueComparator);
-070   }
-071
-072   @Deprecated
-073   @Override
-074   public ReturnCode filterKeyValue(final Cell c) {
-075     return filterCell(c);
-076   }
-077
-078   @Override
-079   public ReturnCode filterCell(final Cell c) {
-080     if (compareValue(getCompareOperator(), this.comparator, c)) {
-081       return ReturnCode.SKIP;
-082     }
-083     return ReturnCode.INCLUDE;
-084   }
-085
-086   public static Filter createFilterFromArguments(ArrayList<byte[]> filterArguments) {
-087     @SuppressWarnings("rawtypes")  // for arguments
-088     ArrayList arguments = CompareFilter.extractArguments(filterArguments);
-089     CompareOperator compareOp = (CompareOperator) arguments.get(0);
-090     ByteArrayComparable comparator = (ByteArrayComparable) arguments.get(1);
-091     return new ValueFilter(compareOp, comparator);
-092   }
-093
-094   /**
-095    * @return The filter serialized using pb
-096    */
-097   @Override
-098   public byte[] toByteArray() {
-099     FilterProtos.ValueFilter.Builder builder =
-100         FilterProtos.ValueFilter.newBuilder();
-101     builder.setCompareFilter(super.convert());
-102     return builder.build().toByteArray();
-103   }
-104
-105   /**
-106    * @param pbBytes A pb serialized {@link ValueFilter} instance
-107    * @return An instance of {@link ValueFilter} made from <code>bytes</code>
-108    * @throws DeserializationException
-109    * @see #toByteArray
-110    */
-111   public static ValueFilter parseFrom(final byte[] pbBytes)
-112       throws DeserializationException {
-113     FilterProtos.ValueFilter proto;
-114     try {
-115       proto = FilterProtos.ValueFilter.parseFrom(pbBytes);
-116     } catch (InvalidProtocolBufferException e) {
-117       throw new DeserializationException(e);
-118     }
-119     final CompareOperator valueCompareOp =
-120         CompareOperator.valueOf(proto.getCompareFilter().getCompareOp().name());
-121     ByteArrayComparable valueComparator = null;
-122     try {
-123       if (proto.getCompareFilter().hasComparator()) {
-124         valueComparator = ProtobufUtil.toComparator(proto.getCompareFilter().getComparator());
-125       }
-126     } catch (IOException ioe) {
-127       throw new DeserializationException(ioe);
-128     }
-129     return new ValueFilter(valueCompareOp, valueComparator);
-130   }
-131
-132   /**
-133    * @return true if and only if the fields of the filter that are serialized
-134    * are equal to the

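A short usage sketch for the non-deprecated constructor above, attaching the filter to a scan (the expected value is illustrative):

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ValueFilterExample {
  // Keep only cells whose value equals "expected"; everything else is skipped
  // cell-by-cell (ReturnCode.SKIP), matching filterCell() above.
  static Scan scanForValue() {
    Scan scan = new Scan();
    scan.setFilter(new ValueFilter(CompareOperator.EQUAL,
        new BinaryComparator(Bytes.toBytes("expected"))));
    return scan;
  }
}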
[21/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
index 92ec202..99b030e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6};
+var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -129,44 +129,90 @@ public interface

+long    getAvgRegionSize(String table)
+    Get the average region size for this table
+
+long    getAvgStoreFileAge(String table)
+
 long    getCpRequestsCount(String table)
     Get the number of CoprocessorService requests that have been issued against this table

+long    getFilteredReadRequestCount(String table)
+    Get the total number of filtered read requests that have been issued against this table
+
+long    getMaxStoreFileAge(String table)
+
-long    getMemStoresSize(String table)
+long    getMemStoreSize(String table)
     Get the memory store size against this table

+long    getMinStoreFileAge(String table)
+
+long    getNumReferenceFiles(String table)
+
+long    getNumRegions(String table)
+    Get the number of regions hosted on for this table
+
+long    getNumStoreFiles(String table)
+    Get the number of store files hosted for this table
+
+long    getNumStores(String table)
+    Get the number of stores hosted on for this table
+
-long    getReadRequestsCount(String table)
+long    getReadRequestCount(String table)
     Get the number of read requests that have been issued against this table

-long    getStoreFilesSize(String table)
+long    getStoreFileSize(String table)
     Get the store file size against this table

 long    getTableSize(String table)
     Get the table region size against this table

 long    getTotalRequestsCount(String table)
-    Get the total number of requests that have been issued against this table
+    Get the total number of requests that have been issued for this table

 long    getWriteRequestsCount(String table)
-    Get the number of write requests that have been issued

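A minimal sketch of the kind of per-table aggregate this interface describes, backed by a map of counters (a hypothetical SimpleTableMetrics, not the real region-server implementation, which aggregates per-region metrics):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical in-memory aggregate keyed by table name.
public class SimpleTableMetrics {
  private final Map<String, AtomicLong> readRequests = new ConcurrentHashMap<>();

  public void countRead(String table) {
    readRequests.computeIfAbsent(table, t -> new AtomicLong()).incrementAndGet();
  }

  public long getReadRequestCount(String table) {
    AtomicLong count = readRequests.get(table);
    return count == null ? 0L : count.get();
  }
}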
[21/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
index bd7445a..3504442 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -1720,459 +1720,459 @@
 1712       LOG.error("Error trying to determine if store has references, assuming references exists",
 1713         ioe);
 1714       return true;
-1715     }
-1716   }
-1717
-1718   /**
-1719    * getter for CompactionProgress object
-1720    * @return CompactionProgress object; can be null
-1721    */
-1722   public CompactionProgress getCompactionProgress() {
-1723     return this.storeEngine.getCompactor().getProgress();
-1724   }
-1725
-1726   @Override
-1727   public boolean shouldPerformMajorCompaction() throws IOException {
-1728     for (HStoreFile sf : this.storeEngine.getStoreFileManager().getStorefiles()) {
-1729       // TODO: what are these reader checks all over the place?
-1730       if (sf.getReader() == null) {
-1731         LOG.debug("StoreFile {} has null Reader", sf);
-1732         return false;
-1733       }
-1734     }
-1735     return storeEngine.getCompactionPolicy().shouldPerformMajorCompaction(
-1736         this.storeEngine.getStoreFileManager().getStorefiles());
-1737   }
-1738
-1739   public Optional<CompactionContext> requestCompaction() throws IOException {
-1740     return requestCompaction(NO_PRIORITY, CompactionLifeCycleTracker.DUMMY, null);
-1741   }
-1742
-1743   public Optional<CompactionContext> requestCompaction(int priority,
-1744       CompactionLifeCycleTracker tracker, User user) throws IOException {
-1745     // don't even select for compaction if writes are disabled
-1746     if (!this.areWritesEnabled()) {
-1747       return Optional.empty();
-1748     }
-1749     // Before we do compaction, try to get rid of unneeded files to simplify things.
-1750     removeUnneededFiles();
-1751
-1752     final CompactionContext compaction = storeEngine.createCompaction();
-1753     CompactionRequestImpl request = null;
-1754     this.lock.readLock().lock();
-1755     try {
-1756       synchronized (filesCompacting) {
-1757         // First, see if coprocessor would want to override selection.
-1758         if (this.getCoprocessorHost() != null) {
-1759           final List<HStoreFile> candidatesForCoproc = compaction.preSelect(this.filesCompacting);
-1760           boolean override = getCoprocessorHost().preCompactSelection(this,
-1761               candidatesForCoproc, tracker, user);
-1762           if (override) {
-1763             // Coprocessor is overriding normal file selection.
-1764             compaction.forceSelect(new CompactionRequestImpl(candidatesForCoproc));
-1765           }
-1766         }
-1767
-1768         // Normal case - coprocessor is not overriding file selection.
-1769         if (!compaction.hasSelection()) {
-1770           boolean isUserCompaction = priority == Store.PRIORITY_USER;
-1771           boolean mayUseOffPeak = offPeakHours.isOffPeakHour() &&
-1772               offPeakCompactionTracker.compareAndSet(false, true);
-1773           try {
-1774             compaction.select(this.filesCompacting, isUserCompaction,
-1775               mayUseOffPeak, forceMajor && filesCompacting.isEmpty());
-1776           } catch (IOException e) {
-1777             if (mayUseOffPeak) {
-1778               offPeakCompactionTracker.set(false);
-1779             }
-1780             throw e;
-1781           }
-1782           assert compaction.hasSelection();
-1783           if (mayUseOffPeak && !compaction.getRequest().isOffPeak()) {
-1784             // Compaction policy doesn't want to take advantage of off-peak.
-1785             offPeakCompactionTracker.set(false);
-1786           }
-1787         }
-1788         if (this.getCoprocessorHost() != null) {
-1789           this.getCoprocessorHost().postCompactSelection(
-1790               this, ImmutableList.copyOf(compaction.getRequest().getFiles()), tracker,
-1791               compaction.getRequest(), user);
-1792         }
-1793         // Finally, we have the resulting files list. Check if we have any files at all.
-1794         request = compaction.getRequest();
-1795         Collection<HStoreFile> selectedFiles = request.getFiles();
-1796         if (selectedFiles.isEmpty()) {
-1797           return Optional.empty();
+1715     } finally {
+1716       if (reloadedStoreFiles != null) {
+1717         for (HStoreFile storeFile : reloadedStoreFiles) {
+1718           try {
+1719             storeFile.closeStoreFile(false);
+1720           } catch (IOException ioe) {
+1721             LOG.warn("Encountered exception closing " + storeFile + ": " + ioe.getMessage());
+1722             // continue with

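The Optional<CompactionContext> return type above is the notable design choice: callers get an empty Optional instead of null when writes are disabled or nothing was selected. A generic sketch of the same pattern (plain Java, illustrative names):

import java.util.Arrays;
import java.util.List;
import java.util.Optional;

public class SelectionExample {
  // Returns a selection only when there is real work to do; callers chain
  // with ifPresent() instead of null-checking.
  static Optional<List<String>> selectFiles(boolean writesEnabled, List<String> candidates) {
    if (!writesEnabled || candidates.isEmpty()) {
      return Optional.empty();
    }
    return Optional.of(candidates);
  }

  public static void main(String[] args) {
    selectFiles(true, Arrays.asList("hfile-1", "hfile-2"))
        .ifPresent(files -> System.out.println("compacting " + files));
  }
}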
[21/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.html
index ac54f87..368fdb4 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/ColumnPaginationFilter.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":10,"i3":42,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":9,"i10":10,"i11":10,"i12":10};
+var methods = 
{"i0":10,"i1":9,"i2":10,"i3":10,"i4":42,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":10,"i13":10,"i14":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class ColumnPaginationFilter
+public class ColumnPaginationFilter
 extends FilterBase
 A filter, based on the ColumnCountGetFilter, takes two 
arguments: limit and offset.
  This filter can be used for row-based indexing, where references to other 
tables are stored across many columns,
@@ -234,60 +234,68 @@ extends createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
 title="class or interface in 
java.util">ArrayListbyte[]filterArguments)
 
 
+boolean
+equals(https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Objectobj)
+
+
 Filter.ReturnCode
 filterCell(Cellc)
 A way to filter based on the column family, column 
qualifier and/or the column value.
 
 
-
+
 Filter.ReturnCode
 filterKeyValue(Cellc)
 Deprecated.
 
 
-
+
 boolean
 filterRowKey(Cellcell)
 Filters a row based on the row key.
 
 
-
+
 byte[]
 getColumnOffset()
 
-
+
 int
 getLimit()
 
-
+
 Cell
 getNextCellHint(Cellcell)
 Filters that are not sure which key must be next seeked to, 
can inherit
  this implementation that, by default, returns a null Cell.
 
 
-
+
 int
 getOffset()
 
-
+
+int
+hashCode()
+
+
 static ColumnPaginationFilter
 parseFrom(byte[]pbBytes)
 
-
+
 void
 reset()
 Filters that are purely stateless and do nothing in their 
reset() methods can inherit
  this null/empty implementation.
 
 
-
+
 byte[]
 toByteArray()
 Return length 0 byte array for Filters that don't require 
special serialization
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 toString()
 Return filter's info for debugging and logging 
purpose.
@@ -313,7 +321,7 @@ extends 
 
 Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
+clone, finalize, getClass, notify, notifyAll, wait, wait, wait
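
For context, a hedged client-side sketch of the pagination use case described above. ColumnPaginationFilter(limit, offset) and the Scan/Table calls are the standard public HBase client API; the "links" table name and the page values are hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;

public class ColumnPageScan {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("links"))) {
      // Page through wide rows: return at most 10 columns per row,
      // starting at column index 20 within each row.
      Scan scan = new Scan();
      scan.setFilter(new ColumnPaginationFilter(10, 20));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(r);
        }
      }
    }
  }
}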

[21/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index db8431b..a8cb7c4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -885,7766 +885,7797 @@
 877   * @return What the next sequence 
(edit) id should be.
 878   * @throws IOException e
 879   */
-880  private long initialize(final 
CancelableProgressable reporter) throws IOException {
-881
-882//Refuse to open the region if there 
is no column family in the table
-883if 
(htableDescriptor.getColumnFamilyCount() == 0) {
-884  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
-885  " should have at least one 
column family.");
-886}
-887
-888MonitoredTask status = 
TaskMonitor.get().createStatus("Initializing region " + this);
-889long nextSeqId = -1;
-890try {
-891  nextSeqId = 
initializeRegionInternals(reporter, status);
-892  return nextSeqId;
-893} finally {
-894  // nextSeqid will be -1 if the 
initialization fails.
-895  // At least it will be 0 
otherwise.
-896  if (nextSeqId == -1) {
-897status.abort("Exception during 
region " + getRegionInfo().getRegionNameAsString() +
-898  " initialization.");
-899  }
-900}
-901  }
-902
-903  private long 
initializeRegionInternals(final CancelableProgressable reporter,
-904  final MonitoredTask status) throws 
IOException {
-905if (coprocessorHost != null) {
-906  status.setStatus("Running 
coprocessor pre-open hook");
-907  coprocessorHost.preOpen();
-908}
-909
-910// Write HRI to a file in case we 
need to recover hbase:meta
-911// Only the primary replica should 
write .regioninfo
-912if 
(this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-913  status.setStatus("Writing region 
info on filesystem");
-914  fs.checkRegionInfoOnFilesystem();
-915}
-916
-917// Initialize all the HStores
-918status.setStatus("Initializing all 
the Stores");
-919long maxSeqId = 
initializeStores(reporter, status);
-920this.mvcc.advanceTo(maxSeqId);
-921if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-922  Collection<HStore> stores = this.stores.values();
-923  try {
-924// update the stores that we are 
replaying
-925LOG.debug("replaying wal for " + 
this.getRegionInfo().getEncodedName());
-926
stores.forEach(HStore::startReplayingFromWAL);
-927// Recover any edits if 
available.
-928maxSeqId = Math.max(maxSeqId,
-929  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-930// Make sure mvcc is up to max.
-931this.mvcc.advanceTo(maxSeqId);
-932  } finally {
-933LOG.debug("stopping wal replay 
for " + this.getRegionInfo().getEncodedName());
-934// update the stores that we are 
done replaying
-935
stores.forEach(HStore::stopReplayingFromWAL);
-936  }
-937}
-938this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-939
-940
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-941this.writestate.flushRequested = 
false;
-942this.writestate.compacting.set(0);
-943
-944if (this.writestate.writesEnabled) 
{
-945  LOG.debug("Cleaning up temporary 
data for " + this.getRegionInfo().getEncodedName());
-946  // Remove temporary data left over 
from old regions
-947  status.setStatus("Cleaning up 
temporary data from old regions");
-948  fs.cleanupTempDir();
-949}
-950
-951if (this.writestate.writesEnabled) 
{
-952  status.setStatus("Cleaning up 
detritus from prior splits");
-953  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-954  // these directories here on open.  
We may be opening a region that was
-955  // being split but we crashed in 
the middle of it all.
-956  LOG.debug("Cleaning up detritus for 
" + this.getRegionInfo().getEncodedName());
-957  fs.cleanupAnySplitDetritus();
-958  fs.cleanupMergesDir();
-959}
+880  @VisibleForTesting
+881  long initialize(final 
CancelableProgressable reporter) throws IOException {
+882
+883//Refuse to open the region if there 
is no column family in the table
+884if 
(htableDescriptor.getColumnFamilyCount() == 0) {
+885  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
+886  " should have at least one 
column family.");
+887}
+888

[21/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStateNode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStateNode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStateNode.html
new file mode 100644
index 000..e31a01d
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStateNode.html
@@ -0,0 +1,607 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+Uses of Class org.apache.hadoop.hbase.master.assignment.RegionStateNode 
(Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+
+
+
+Uses 
of Classorg.apache.hadoop.hbase.master.assignment.RegionStateNode
+
+
+
+
+
+Packages that use RegionStateNode
+
+Package
+Description
+
+
+
+org.apache.hadoop.hbase.master.assignment
+
+
+
+
+
+
+
+
+
+
+Uses of RegionStateNode 
in org.apache.hadoop.hbase.master.assignment
+
+Fields in org.apache.hadoop.hbase.master.assignment
 declared as RegionStateNode
+
+Modifier and Type
+Field and Description
+
+
+
+private RegionStateNode
+RegionStates.RegionFailedOpen.regionNode
+
+
+
+
+Fields in org.apache.hadoop.hbase.master.assignment
 with type parameters of type RegionStateNode
+
+Modifier and Type
+Field and Description
+
+
+
+private ArrayList<RegionStateNode>
+AssignmentManager.pendingAssignQueue
+
+
+private ConcurrentSkipListMap<RegionInfo, RegionStateNode>
+RegionStates.regionInTransition
+
+
+private ConcurrentSkipListMap<RegionInfo, RegionStateNode>
+RegionStates.regionOffline
+Regions marked as offline on a read of hbase:meta.
+
+
+
+private Set<RegionStateNode>
+ServerStateNode.regions
+
+
+private ConcurrentSkipListMap<byte[], RegionStateNode>
+RegionStates.regionsMap
+RegionName -- i.e.
+
+
+
+private ConcurrentMap<RegionInfo, RegionStateNode>
+RegionStateNode.ritMap
+
+
+
+
+Methods in org.apache.hadoop.hbase.master.assignment
 that return RegionStateNode
+
+Modifier and Type
+Method and Description
+
+
+
+(package private) RegionStateNode
+RegionStates.createRegionStateNode(RegionInfo regionInfo)
+
+
+RegionStateNode
+RegionStates.getOrCreateRegionStateNode(RegionInfo regionInfo)
+
+
+RegionStateNode
+RegionTransitionProcedure.getRegionState(MasterProcedureEnv env)
+Deprecated.
+
+
+
+RegionStateNode
+RegionStates.RegionFailedOpen.getRegionStateNode()
+
+
+private RegionStateNode
+TransitRegionStateProcedure.getRegionStateNode(MasterProcedureEnv env)
+
+
+RegionStateNode
+RegionStates.getRegionStateNode(RegionInfo regionInfo)
+
+
+(package private) RegionStateNode
+RegionStates.getRegionStateNodeFromName(byte[] regionName)
+
+
+
+
+Methods in org.apache.hadoop.hbase.master.assignment
 that return types with arguments of type RegionStateNode
+
+Modifier and Type
+Method and Description
+
+
+
+Set<RegionStateNode>
+ServerStateNode.getRegions()
+
+
+List<RegionStateNode>
+RegionStates.getRegionsInTransition()
+
+
+List<RegionStateNode>

[21/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
index 9501e97..a10ddfe 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
@@ -131,277 +131,279 @@
 123  }
 124}
 125  } catch (InterruptedException e) 
{
-126e.printStackTrace();
-127  }
-128}
-129
-130@Override
-131public void setup(Context context) 
throws IOException {
-132  Configuration conf = 
context.getConfiguration();
-133  String[] tables = 
conf.getStrings(TABLES_KEY);
-134  this.multiTableSupport = 
conf.getBoolean(MULTI_TABLES_SUPPORT, false);
-135  for (String table : tables) {
-136tableSet.add(table);
-137  }
-138}
-139  }
-140
-141  /**
-142   * A mapper that writes out {@link 
Mutation} to be directly applied to a running HBase instance.
-143   */
-144  protected static class WALMapper
-145  extends Mapper<WALKey, WALEdit, ImmutableBytesWritable, Mutation> {
-146private Map<TableName, TableName> tables = new TreeMap<>();
-147
-148@Override
-149public void map(WALKey key, WALEdit 
value, Context context) throws IOException {
-150  try {
-151if (tables.isEmpty() || 
tables.containsKey(key.getTableName())) {
-152  TableName targetTable =
-153  tables.isEmpty() ? 
key.getTableName() : tables.get(key.getTableName());
-154  ImmutableBytesWritable tableOut 
= new ImmutableBytesWritable(targetTable.getName());
-155  Put put = null;
-156  Delete del = null;
-157  Cell lastCell = null;
-158  for (Cell cell : 
value.getCells()) {
-159// filtering WAL meta 
entries
-160if 
(WALEdit.isMetaEditFamily(cell)) {
-161  continue;
-162}
-163
-164// Allow a subclass filter 
out this cell.
-165if (filter(context, cell)) 
{
-166  // A WALEdit may contain 
multiple operations (HBASE-3584) and/or
-167  // multiple rows 
(HBASE-5229).
-168  // Aggregate as much as 
possible into a single Put/Delete
-169  // operation before writing 
to the context.
-170  if (lastCell == null || 
lastCell.getTypeByte() != cell.getTypeByte()
-171  || 
!CellUtil.matchingRows(lastCell, cell)) {
-172// row or type changed, 
write out aggregate KVs.
-173if (put != null) {
-174  context.write(tableOut, 
put);
-175}
-176if (del != null) {
-177  context.write(tableOut, 
del);
-178}
-179if 
(CellUtil.isDelete(cell)) {
-180  del = new 
Delete(CellUtil.cloneRow(cell));
-181} else {
-182  put = new 
Put(CellUtil.cloneRow(cell));
-183}
-184  }
-185  if 
(CellUtil.isDelete(cell)) {
-186del.add(cell);
-187  } else {
-188put.add(cell);
-189  }
-190}
-191lastCell = cell;
-192  }
-193  // write residual KVs
-194  if (put != null) {
-195context.write(tableOut, 
put);
-196  }
-197  if (del != null) {
-198context.write(tableOut, 
del);
-199  }
-200}
-201  } catch (InterruptedException e) 
{
-202e.printStackTrace();
-203  }
-204}
-205
-206protected boolean filter(Context 
context, final Cell cell) {
-207  return true;
-208}
-209
-210@Override
-211protected void
-212cleanup(Mapper<WALKey, WALEdit, ImmutableBytesWritable, Mutation>.Context context)
-213throws IOException, 
InterruptedException {
-214  super.cleanup(context);
-215}
-216
-217@Override
-218public void setup(Context context) 
throws IOException {
-219  String[] tableMap = 
context.getConfiguration().getStrings(TABLE_MAP_KEY);
-220  String[] tablesToUse = 
context.getConfiguration().getStrings(TABLES_KEY);
-221  if (tableMap == null) {
-222tableMap = tablesToUse;
-223  }
-224  if (tablesToUse == null) {
-225// Then user wants all tables.
-226  } else if (tablesToUse.length != 
tableMap.length) {
-227// this can only happen when 
WALMapper is used directly by a class other than WALPlayer
-228throw new IOException("Incorrect 
table mapping specified .");
-229  }
-230  int i = 0;
-231  if (tablesToUse != null) {
-232for (String table : tablesToUse) 
{
-233  
tables.put(TableName.valueOf(table), 
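
The filter(Context, Cell) hook above returns true for every cell by default; a subclass can override it to drop cells before they are aggregated into Puts and Deletes. A minimal sketch under the assumption that the subclass lives where the protected nested WALPlayer.WALMapper class is visible (here, the same package); the "audit" family name is hypothetical.

package org.apache.hadoop.hbase.mapreduce;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class AuditSkippingWALMapper extends WALPlayer.WALMapper {
  // Hypothetical column family to exclude from replay.
  private static final byte[] SKIPPED_FAMILY = Bytes.toBytes("audit");

  @Override
  protected boolean filter(Context context, final Cell cell) {
    // Returning false excludes this cell from the replayed mutation.
    return !CellUtil.matchingFamily(cell, SKIPPED_FAMILY);
  }
}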

[21/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
index b7b4236..3d1edb3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.html
@@ -259,1863 +259,1867 @@
 251   * + Metadata!  + = See note on 
BLOCK_METADATA_SPACE above.
 252   * ++
 253   * /code
-254   * @see #serialize(ByteBuffer)
+254   * @see #serialize(ByteBuffer, 
boolean)
 255   */
-256  static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER =
-257  new CacheableDeserializer<Cacheable>() {
-258@Override
-259public HFileBlock 
deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
-260throws IOException {
-261  // The buf has the file block 
followed by block metadata.
-262  // Set limit to just before the 
BLOCK_METADATA_SPACE then rewind.
-263  buf.limit(buf.limit() - 
BLOCK_METADATA_SPACE).rewind();
-264  // Get a new buffer to pass the 
HFileBlock for it to 'own'.
-265  ByteBuff newByteBuff;
-266  if (reuse) {
-267newByteBuff = buf.slice();
-268  } else {
-269int len = buf.limit();
-270newByteBuff = new 
SingleByteBuff(ByteBuffer.allocate(len));
-271newByteBuff.put(0, buf, 
buf.position(), len);
-272  }
-273  // Read out the 
BLOCK_METADATA_SPACE content and shove into our HFileBlock.
-274  buf.position(buf.limit());
-275  buf.limit(buf.limit() + 
HFileBlock.BLOCK_METADATA_SPACE);
-276  boolean usesChecksum = buf.get() == 
(byte) 1;
-277  long offset = buf.getLong();
-278  int nextBlockOnDiskSize = 
buf.getInt();
-279  HFileBlock hFileBlock =
-280  new HFileBlock(newByteBuff, 
usesChecksum, memType, offset, nextBlockOnDiskSize, null);
-281  return hFileBlock;
-282}
-283
-284@Override
-285public int 
getDeserialiserIdentifier() {
-286  return DESERIALIZER_IDENTIFIER;
-287}
-288
-289@Override
-290public HFileBlock 
deserialize(ByteBuff b) throws IOException {
-291  // Used only in tests
-292  return deserialize(b, false, 
MemoryType.EXCLUSIVE);
-293}
-294  };
-295
-296  private static final int 
DESERIALIZER_IDENTIFIER;
-297  static {
-298DESERIALIZER_IDENTIFIER =
-299
CacheableDeserializerIdManager.registerDeserializer(BLOCK_DESERIALIZER);
-300  }
-301
-302  /**
-303   * Copy constructor. Creates a shallow 
copy of {@code that}'s buffer.
-304   */
-305  private HFileBlock(HFileBlock that) {
-306this(that, false);
-307  }
-308
-309  /**
-310   * Copy constructor. Creates a 
shallow/deep copy of {@code that}'s buffer as per the boolean
-311   * param.
-312   */
-313  private HFileBlock(HFileBlock that, 
boolean bufCopy) {
-314init(that.blockType, 
that.onDiskSizeWithoutHeader,
-315
that.uncompressedSizeWithoutHeader, that.prevBlockOffset,
-316that.offset, 
that.onDiskDataSizeWithHeader, that.nextBlockOnDiskSize, that.fileContext);
-317if (bufCopy) {
-318  this.buf = new 
SingleByteBuff(ByteBuffer.wrap(that.buf.toBytes(0, that.buf.limit(;
-319} else {
-320  this.buf = that.buf.duplicate();
-321}
-322  }
-323
-324  /**
-325   * Creates a new {@link HFile} block 
from the given fields. This constructor
-326   * is used only while writing blocks 
and caching,
-327   * and is sitting in a byte buffer and 
we want to stuff the block into cache.
-328   *
-329   * <p>TODO: The caller presumes no checksumming
-330   * required of this block instance since going into cache; checksum already verified on
-331   * underlying block data pulled in from filesystem. Is that correct? What if cache is SSD?
+256  public static final CacheableDeserializer<Cacheable> BLOCK_DESERIALIZER = new BlockDeserializer();
+257
+258  public static final class BlockDeserializer implements CacheableDeserializer<Cacheable> {
+259private BlockDeserializer() {
+260}
+261
+262@Override
+263public HFileBlock 
deserialize(ByteBuff buf, boolean reuse, MemoryType memType)
+264throws IOException {
+265  // The buf has the file block 
followed by block metadata.
+266  // Set limit to just before the 
BLOCK_METADATA_SPACE then rewind.
+267  buf.limit(buf.limit() - 
BLOCK_METADATA_SPACE).rewind();
+268  // Get a new buffer to pass the 
HFileBlock for it to 'own'.
+269  ByteBuff newByteBuff;
+270  if (reuse) {
+271newByteBuff = buf.slice();
+272  } else {
+273int len = buf.limit();
+274newByteBuff = new 
SingleByteBuff(ByteBuffer.allocate(len));
+275newByteBuff.put(0, buf, 
buf.position(), len);
+276  }
+277  // Read out the 
BLOCK_METADATA_SPACE content 

[21/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/procedure2/Procedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/Procedure.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/Procedure.html
index 4c6f707..19a9ffb 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/Procedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/Procedure.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":6,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":6,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":9,"i24":10,"i25":10,"i26":9,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":9,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":6,"i52":6,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10};
+var methods = 
{"i0":6,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":6,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":6,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":9,"i24":10,"i25":10,"i26":9,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":9,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":6,"i54":6,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],4:["t3","Abstract 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -118,60 +118,78 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
- @InterfaceStability.Evolving
-public abstract class Procedure<TEnvironment>
+public abstract class Procedure<TEnvironment>
 extends Object
 implements Comparable<Procedure<TEnvironment>>
-Base Procedure class responsible for Procedure Metadata;
- e.g. state, submittedTime, lastUpdate, stack-indexes, etc.
-
- Procedures are run by a ProcedureExecutor 
instance. They are submitted and then
- the ProcedureExecutor keeps calling execute(Object)
 until the Procedure is done.
- Execute may be called multiple times in the case of failure or a restart, so 
code must be
- idempotent. The return from an execute call is either: null to indicate we 
are done;
- ourself if there is more to do; or, a set of sub-procedures that need to
- be run to completion before the framework resumes our execution.
-
- The ProcedureExecutor keeps its
- notion of Procedure State in the Procedure itself; e.g. it stamps the 
Procedure as INITIALIZING,
- RUNNABLE, SUCCESS, etc. Here are some of the States defined in the 
ProcedureState enum from
- protos:
-
- isFailed()
 A procedure has executed at least once and has failed. The procedure
- may or may not have rolled back yet. Any procedure in FAILED state will be 
eventually moved
- to ROLLEDBACK state.
-
+Base Procedure class responsible for Procedure Metadata; 
e.g. state, submittedTime, lastUpdate,
+ stack-indexes, etc.
+ 
+ Procedures are run by a ProcedureExecutor 
instance. They are submitted and then the
+ ProcedureExecutor keeps calling execute(Object)
 until the Procedure is done. Execute may
+ be called multiple times in the case of failure or a restart, so code must be 
idempotent. The
+ return from an execute call is either: null to indicate we are done; ourself 
if there is more to
+ do; or, a set of sub-procedures that need to be run to completion before the 
framework resumes
+ our execution.
+ 
+ The ProcedureExecutor keeps its notion of Procedure State in the Procedure 
itself; e.g. it stamps
+ the Procedure as INITIALIZING, RUNNABLE, SUCCESS, etc. Here are some of the 
States defined in the
+ ProcedureState enum from protos:
+ 
+ isFailed()
 A procedure has executed at least once and has failed. The procedure may
+ or may not have rolled back yet. Any procedure in FAILED state will be 
eventually moved to
+ ROLLEDBACK state.
  isSuccess()
 A procedure is completed successfully without exception.
-
  isFinished()
 As a 
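
The execute() contract described above (return null when done, yourself to be run again, or sub-procedures that must finish first) can be modeled with a small self-contained sketch. This illustrates the state machine only; it is not the real org.apache.hadoop.hbase.procedure2 API.

import java.util.ArrayDeque;
import java.util.Deque;

// Simplified model of the execute contract from the javadoc above.
interface ToyProcedure {
  // Returns null when done, {this} to be re-run, or sub-procedures
  // that must complete before this procedure resumes.
  ToyProcedure[] execute();
}

class ToyExecutor {
  void run(ToyProcedure root) {
    Deque<ToyProcedure> stack = new ArrayDeque<>();
    stack.push(root);
    while (!stack.isEmpty()) {
      ToyProcedure p = stack.peek();
      // Must be idempotent: may be re-invoked after a failure or restart.
      ToyProcedure[] next = p.execute();
      if (next == null) {
        stack.pop(); // done; the parent resumes on a later iteration
      } else if (next.length == 1 && next[0] == p) {
        // more to do: leave it on the stack and call execute() again
      } else {
        for (ToyProcedure child : next) {
          stack.push(child); // children run to completion before the parent resumes
        }
      }
    }
  }
}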

[21/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MemStoreSizeCostFunction.html
@@ -540,1205 +540,1204 @@
 532  
sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
 534if (rLoads == null) {
-535  // There was nothing there
-536  rLoads = new ArrayDeque<>();
-537} else if (rLoads.size() >= numRegionLoadsToRemember) {
-538  rLoads.remove();
-539}
-540rLoads.add(new 
BalancerRegionLoad(rm));
-541
loads.put(Bytes.toString(regionName), rLoads);
-542  });
-543});
-544
-545for(CostFromRegionLoadFunction cost : 
regionLoadFunctions) {
-546  cost.setLoads(loads);
-547}
-548  }
-549
-550  protected void initCosts(Cluster 
cluster) {
-551for (CostFunction c:costFunctions) 
{
-552  c.init(cluster);
-553}
-554  }
-555
-556  protected void 
updateCostsWithAction(Cluster cluster, Action action) {
-557for (CostFunction c : costFunctions) 
{
-558  c.postAction(action);
-559}
-560  }
-561
-562  /**
-563   * Get the names of the cost 
functions
-564   */
-565  public String[] getCostFunctionNames() 
{
-566if (costFunctions == null) return 
null;
-567String[] ret = new 
String[costFunctions.length];
-568for (int i = 0; i < 
costFunctions.length; i++) {
-569  CostFunction c = 
costFunctions[i];
-570  ret[i] = 
c.getClass().getSimpleName();
-571}
-572
-573return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It 
will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be 
combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the 
cluster
-581   * @param previousCost the previous 
cost. This is used as an early out.
-582   * @return a double of a cost 
associated with the proposed cluster state.  This cost is an
-583   * aggregate of all individual 
cost functions.
-584   */
-585  protected double computeCost(Cluster 
cluster, double previousCost) {
-586double total = 0;
-587
-588for (int i = 0; i < 
costFunctions.length; i++) {
-589  CostFunction c = 
costFunctions[i];
-590  this.tempFunctionCosts[i] = 0.0;
-591
-592  if (c.getMultiplier() <= 0) {
-593continue;
-594  }
-595
-596  Float multiplier = 
c.getMultiplier();
-597  Double cost = c.cost();
-598
-599  this.tempFunctionCosts[i] = 
multiplier*cost;
-600  total += 
this.tempFunctionCosts[i];
-601
-602  if (total > previousCost) {
-603break;
-604  }
-605}
-606
-607return total;
-608  }
-609
-610  /** Generates a candidate action to be 
applied to the cluster for cost function search */
-611  abstract static class 
CandidateGenerator {
-612abstract Cluster.Action 
generate(Cluster cluster);
-613
-614/**
-615 * From a list of regions pick a 
random one. Null can be returned which
-616 * {@link 
StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region 
move
-617 * rather than swap.
-618 *
-619 * @param clusterThe state of 
the cluster
-620 * @param server index of the 
server
-621 * @param chanceOfNoSwap Chance that 
this will decide to try a move rather
-622 *   than a 
swap.
-623 * @return a random {@link 
RegionInfo} or null if an asymmetrical move is
-624 * suggested.
-625 */
-626protected int 
pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627  // Check to see if this is just a 
move.
-628  if 
(cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < 
chanceOfNoSwap) {
-629// signal a move only.
-630return -1;
-631  }
-632  int rand = 
RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633  return 
cluster.regionsPerServer[server][rand];
-634
-635}
-636protected int 
pickRandomServer(Cluster cluster) {
-637  if (cluster.numServers < 1) {
-638return -1;
-639  }
-640
-641  return 
RANDOM.nextInt(cluster.numServers);
-642}
-643
-644protected int pickRandomRack(Cluster 
cluster) {
-645  if (cluster.numRacks < 1) {
-646return -1;
-647  
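
A self-contained sketch of the weighted-sum-with-early-out pattern that computeCost(...) above implements. CostFn is a stand-in for the balancer's CostFunction and is not the HBase API.

// Each cost function returns a scaled cost; disabled functions have multiplier <= 0.
interface CostFn {
  float multiplier();
  double cost();
}

final class CostAggregator {
  // Combines individual costs with their multipliers. Bails out early once the
  // running total exceeds previousCost, since such a candidate state cannot win.
  static double computeCost(CostFn[] fns, double previousCost) {
    double total = 0;
    for (CostFn fn : fns) {
      if (fn.multiplier() <= 0) {
        continue; // disabled cost functions contribute nothing
      }
      total += fn.multiplier() * fn.cost();
      if (total > previousCost) {
        break; // early out: already worse than the best known state
      }
    }
    return total;
  }
}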

[21/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/RegionMetrics.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/RegionMetrics.html 
b/apidocs/org/apache/hadoop/hbase/RegionMetrics.html
index e4dc75c..c745b96 100644
--- a/apidocs/org/apache/hadoop/hbase/RegionMetrics.html
+++ b/apidocs/org/apache/hadoop/hbase/RegionMetrics.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":18,"i10":6,"i11":6,"i12":18,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6};
-var tabs = 
{65535:["t0","所有方法"],2:["t2","实例方法"],4:["t3","抽象方法"],16:["t5","默认方法"]};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+PrevClass
+NextClass
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 
 
org.apache.hadoop.hbase
-

接口 RegionMetrics

+

Interface RegionMetrics

  • -
    所有已知实现类:
    -
    RegionLoad
    +
    All Known Implementing Classes:
    +
    RegionLoad


    @@ -119,15 +119,15 @@ public interface -

    方法概要

    - - +

    Method Summary

    +
    所有方法 å®žä¾‹æ–¹æ³• æŠ½è±¡æ–¹æ³• é»˜è®¤æ–¹æ³• 
    + - - + + -
    All Methods Instance Methods Abstract Methods Default Methods 
    限定符和类型方法和说明Modifier and TypeMethod and Description

    [21/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/RegionLoad.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/RegionLoad.html 
    b/apidocs/org/apache/hadoop/hbase/RegionLoad.html
    index ac7c39e..793255f 100644
    --- a/apidocs/org/apache/hadoop/hbase/RegionLoad.html
    +++ b/apidocs/org/apache/hadoop/hbase/RegionLoad.html
    @@ -1,6 +1,6 @@
     http://www.w3.org/TR/html4/loose.dtd;>
     
    -
    +
     
     
     
    @@ -19,45 +19,45 @@
     }
     //-->
     var methods = 
    {"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42,"i40":42};
    -var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
    +var tabs = {65535:["t0","所有方法"],2:["t2","实例方法"],8:["t4","å…
    ·ä½“方法"],32:["t6","已过时的方法"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
     var tableTab = "tableTab";
     var activeTableTab = "activeTableTab";
     
     
    -JavaScript is disabled on your browser.
    +您的浏览器已禁用 JavaScript。
     
     
     
     
     
    -Skip navigation links
    +跳过导航链接
     
     
     
    -
    -Overview
    -Package
    -Class
    -Use
    -Tree
    -Deprecated
    -Index
    -Help
    +
    +概览
    +程序包
+类
    +使用
    +树
    +已过时
    +索引
    +帮助
     
     
     
     
    -PrevClass
    -NextClass
    +上一个类
    +下一个类
     
     
    -Frames
    -NoFrames
    +框架
    +无框架
     
     
    -AllClasses
    +所有类
     
     
     

    [21/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.ReplicationStatusBuilder.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.ReplicationStatusBuilder.html
     
    b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.ReplicationStatusBuilder.html
    new file mode 100644
    index 000..516307c
    --- /dev/null
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/ReplicationStatus.ReplicationStatusBuilder.html
    @@ -0,0 +1,507 @@
    +http://www.w3.org/TR/html4/loose.dtd;>
    +
    +
    +
    +
    +
    +ReplicationStatus.ReplicationStatusBuilder (Apache HBase 3.0.0-SNAPSHOT 
    API)
    +
    +
    +
    +
    +
    +var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10};
    +var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],8:["t4","Concrete Methods"]};
    +var altColor = "altColor";
    +var rowColor = "rowColor";
    +var tableTab = "tableTab";
    +var activeTableTab = "activeTableTab";
    +
    +
    +JavaScript is disabled on your browser.
    +
    +
    +
    +
    +
    +Skip navigation links
    +
    +
    +
    +
    +Overview
    +Package
    +Class
    +Use
    +Tree
    +Deprecated
    +Index
    +Help
    +
    +
    +
    +
    +PrevClass
    +NextClass
    +
    +
    +Frames
    +NoFrames
    +
    +
    +AllClasses
    +
    +
    +
    +
    +
    +
    +
    +Summary:
    +Nested|
    +Field|
    +Constr|
    +Method
    +
    +
    +Detail:
    +Field|
    +Constr|
    +Method
    +
    +
    +
    +
    +
    +
    +
    +
    +org.apache.hadoop.hbase.replication.regionserver
    +Class ReplicationStatus.ReplicationStatusBuilder
    +
    +
    +
    +https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">java.lang.Object
    +
    +
    +org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus.ReplicationStatusBuilder
    +
    +
    +
    +
    +
    +
    +
    +Enclosing class:
    +ReplicationStatus
    +
    +
    +
    +public static class ReplicationStatus.ReplicationStatusBuilder
+extends Object
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Field Summary
    +
    +Fields
    +
    +Modifier and Type
    +Field and Description
    +
    +
    +private long
    +ageOfLastShippedOp
    +
    +
    +private org.apache.hadoop.fs.Path
    +currentPath
    +
    +
    +private long
    +currentPosition
    +
    +
    +private long
    +fileSize
    +
    +
+private String
    +peerId
    +
    +
    +private int
    +queueSize
    +
    +
    +private long
    +replicationDelay
    +
    +
+private String
    +walGroup
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Constructor Summary
    +
    +Constructors
    +
    +Constructor and Description
    +
    +
    +ReplicationStatusBuilder()
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Method Summary
    +
    +All MethodsInstance MethodsConcrete Methods
    +
    +Modifier and Type
    +Method and Description
    +
    +
    +ReplicationStatus
    +build()
    +
    +
    +ReplicationStatus.ReplicationStatusBuilder
    +withAgeOfLastShippedOp(longageOfLastShippedOp)
    +
    +
    +ReplicationStatus.ReplicationStatusBuilder
    +withCurrentPath(org.apache.hadoop.fs.PathcurrentPath)
    +
    +
    +ReplicationStatus.ReplicationStatusBuilder
    +withCurrentPosition(longcurrentPosition)
    +
    +
    +ReplicationStatus.ReplicationStatusBuilder
    +withFileSize(longfileSize)
    +
    +
    +ReplicationStatus.ReplicationStatusBuilder
+withPeerId(String peerId)
    +
    +
    +ReplicationStatus.ReplicationStatusBuilder
    +withQueueSize(intqueueSize)
    +
    +
    +ReplicationStatus.ReplicationStatusBuilder
    +withReplicationDelay(longreplicationDelay)
    +
    +
    +ReplicationStatus.ReplicationStatusBuilder
+withWalGroup(String walGroup)
    +
    +
    +
    +
    +
    +
+Methods inherited from class java.lang.Object
+clone, equals, finalize, 
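
A sketch of how the builder's fluent methods chain together, based only on the method summary above. All values are hypothetical, and the class is internal: in practice the region server assembles this from its replication source metrics rather than client code.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus;

public class ReplicationStatusExample {
  static ReplicationStatus build() {
    // Every with* method returns the builder, so the calls chain.
    return new ReplicationStatus.ReplicationStatusBuilder()
        .withPeerId("1")
        .withWalGroup("default-wal-group")
        .withCurrentPath(new Path("/hbase/oldWALs/example.1.wal"))
        .withCurrentPosition(1024L)
        .withFileSize(4096L)
        .withQueueSize(3)
        .withAgeOfLastShippedOp(250L)
        .withReplicationDelay(500L)
        .build();
  }
}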

    [21/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-summary.html 
    b/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
    index b393140..baa7669 100644
    --- a/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
    +++ b/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
    @@ -903,48 +903,55 @@
     
     
     
    +LossyCounting
    +
    +LossyCounting utility, bounded data structure that 
    maintains approximate high frequency
    + elements in data stream.
    +
    +
    +
     ManualEnvironmentEdge
     
     An environment edge that uses a manually set value.
     
     
    -
    +
     MapreduceDependencyClasspathTool
     
     Generate a classpath string containing any jars required by 
    mapreduce jobs.
     
     
    -
    +
     MapReduceExtendedCell
     
     A wrapper for a cell to be used with mapreduce, as the 
    output value class for mappers/reducers.
     
     
    -
    +
     MD5Hash
     
     Utility class for MD5
      MD5 hash produces a 128-bit digest.
     
     
    -
    +
     Methods
     
     
    -
    +
     ModifyRegionUtils
     
     Utility methods for interacting with the regions.
     
     
    -
    +
     MultiHConnection
     
Provides ability to create multiple Connection instances and allows to process a batch of
 actions using HTable.doBatchWithCallback()
     
     
    -
    +
     MunkresAssignment
     
     Computes the optimal (minimal cost) assignment of jobs to 
    workers (or other
    @@ -955,126 +962,126 @@
      Problem: An Improved Version of Munkres' Algorithm".
     
     
    -
    +
     MurmurHash
     
     This is a very fast, non-cryptographic hash suitable for 
    general hash-based
      lookup.
     
     
    -
    +
     MurmurHash3
     
     This is a very fast, non-cryptographic hash suitable for 
    general hash-based
      lookup.
     
     
    -
    +
     NettyEventLoopGroupConfig
     
     Event loop group related config.
     
     
    -
    +
     NonceKey
     
     This implementation is not smart and just treats nonce 
    group and nonce as random bits.
     
     
    -
    +
 ObjectIntPair<T>
     
 A generic class for pair of an Object and a primitive int value.
     
     
    -
    +
 ObjectPool<K,V>
     
     A thread-safe shared object pool in which object creation 
    is expected to be lightweight, and the
      objects may be excessively created and discarded.
     
     
    -
    +
     OrderedBytes
     
     Utility class that handles ordered byte arrays.
     
     
    -
    +
 Pair<T1,T2>
     
     A generic class for pairs.
     
     
    -
    +
 PairOfSameType<T>
     
     A generic, immutable class for pairs of objects both of 
    type T.
     
     
    -
    +
 PoolMap<K,V>
     
     The PoolMap maps a key to a collection of 
    values, the elements
      of which are managed by a pool.
     
     
    -
    +
 PoolMap.ReusablePool<R>
     
     The ReusablePool represents a PoolMap.Pool 
    that builds
  on the LinkedList class.
     
     
    -
    +
 PoolMap.RoundRobinPool<R>
     
     The RoundRobinPool represents a PoolMap.Pool, which
  stores its resources in an ArrayList.
     
     
    -
    +
 PoolMap.ThreadLocalPool<R>
     
     The ThreadLocalPool represents a PoolMap.Pool 
    that
  builds on the ThreadLocal class.
     
     
    -
    +
     PrettyPrinter
     
     
    -
    +
     ReflectionUtils
     
     
    -
    +
     RegionMover
     
     Tool for loading/unloading regions to/from given 
    regionserver This tool can be run from Command
      line directly as a utility.
     
     
    -
    +
     RegionMover.MoveWithoutAck
     
 Move Regions without Acknowledging. Useful in case of RS shutdown as we might want to shut the
  RS down anyway and not abort on a stuck region.
     
     
    -
    +
     RegionMover.RegionMoverBuilder
     
     Builder for Region mover.
     
     
    -
    +
 RegionSplitCalculator<R extends KeyRange>
     
     This is a generic region split calculator.
     
     
    -
    +
     RegionSplitter
     
     The RegionSplitter 
    class provides several utilities to help in the
    @@ -1082,221 +1089,221 @@
      instead of having HBase handle that automatically.
     
     
    -
    +
     RegionSplitter.DecimalStringSplit
     
     The format of a DecimalStringSplit region boundary is the 
    ASCII representation of
      reversed sequential number, or any other uniformly distributed decimal 
    value.
     
     
    -
    +
     RegionSplitter.HexStringSplit
     
     HexStringSplit is a well-known RegionSplitter.SplitAlgorithm 
    for choosing region
      boundaries.
     
     
    -
    +
     RegionSplitter.NumberStringSplit
     
     
    -
    +
     RegionSplitter.UniformSplit
     
     A SplitAlgorithm that divides the space of possible keys 
    evenly.
     
     
    -
    +
     RetryCounter
     
     Operation retry accounting.
     
     
    -
    +
     RetryCounter.BackoffPolicy
     
     Policy for calculating sleeping intervals between retry 
    attempts
     
     
    -
    +
     RetryCounter.ExponentialBackoffPolicy
     
     
    -
    +
     RetryCounter.ExponentialBackoffPolicyWithLimit
     
     
    -
    +
     RetryCounter.RetryConfig
     
     Configuration for a retry counter
     
     
    -
    +
     RetryCounterFactory
     
     
    -
    +
     RollingStatCalculator
     
     This class maintains mean and variation for any sequence of 
    input provided to it.
     
     
    -
    +
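
The LossyCounting entry above describes a bounded structure that tracks approximate heavy hitters in a stream. A simplified, self-contained sketch of the underlying lossy-counting idea (sweep out low counts at each bucket boundary); the real HBase utility has a different API and is used internally for hot-key tracking.

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public final class LossyCounter {
  private final long bucketSize; // ceil(1 / errorRate)
  private final Map<String, Long> counts = new HashMap<>();
  private long totalSeen = 0;
  private long currentBucket = 1;

  public LossyCounter(double errorRate) {
    this.bucketSize = (long) Math.ceil(1.0 / errorRate);
  }

  public void add(String key) {
    counts.merge(key, 1L, Long::sum);
    totalSeen++;
    if (totalSeen % bucketSize == 0) {
      // Bucket boundary: drop keys whose count cannot be "high frequency".
      long threshold = currentBucket++;
      Iterator<Map.Entry<String, Long>> it = counts.entrySet().iterator();
      while (it.hasNext()) {
        if (it.next().getValue() < threshold) {
          it.remove();
        }
      }
    }
  }

  public Map<String, Long> elements() {
    return counts; // approximate high-frequency elements, bounded in size
  }
}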
     

    [21/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    index 14bfa72..acb9403 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    @@ -2309,7 +2309,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     private Connection
    -ReplicationSink.sharedHtableCon
    +ReplicationSink.sharedConn
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
    index e06fa7e..13962f1 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Mutation.html
    @@ -1115,7 +1115,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
  long timestamp,
  List<Region.RowLock> acquiredRowLocks)
     If necessary, calls preBatchMutate() CP hook for a 
    mini-batch and updates metrics, cell
    -  count, tags and timestamp for all cells of all operations in a 
    mini-batch.
    + count, tags and timestamp for all cells of all operations in a 
    mini-batch.
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
    index b202432..fd46b3f 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
    @@ -6937,10 +6937,14 @@ Input/OutputFormats, a table indexing MapReduce job, 
    and utility methods.
     RegionGroupingProvider.getWAL(RegionInforegion)
     
     
    +WAL
    +SyncReplicationWALProvider.getWAL(RegionInforegion)
    +
    +
     T
     AbstractFSWALProvider.getWAL(RegionInforegion)
     
    -
    +
     WAL
     WALFactory.getWAL(RegionInforegion)
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/client/class-use/Row.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Row.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/Row.html
    index 3f909a0..08ea0fe 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Row.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Row.html
    @@ -532,7 +532,7 @@
     
     
     
    -protected void
    +private void
 ReplicationSink.batch(TableName tableName,
  Collection<List<Row>> allRows)
     Do the changes and handle the pool
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    index 46f5222..697e918 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    @@ -553,23 +553,23 @@
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
     
     org.apache.hadoop.hbase.client.Consistency
    -org.apache.hadoop.hbase.client.SnapshotType
    -org.apache.hadoop.hbase.client.RequestController.ReturnCode
    -org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
    -org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
    -org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
    +org.apache.hadoop.hbase.client.Durability
    

    [21/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/package-use.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/package-use.html 
    b/devapidocs/org/apache/hadoop/hbase/package-use.html
    index 81b9b88..7798d7c 100644
    --- a/devapidocs/org/apache/hadoop/hbase/package-use.html
    +++ b/devapidocs/org/apache/hadoop/hbase/package-use.html
    @@ -2134,6 +2134,12 @@ service.
     
     
     
    +HRegionLocation
    +Data structure to hold RegionInfo and the address for the 
    hosting
    + HRegionServer.
    +
    +
    +
     PleaseHoldException
     This exception is thrown by the master when a region server 
    was shut down and
      restarted so fast that the master still hasn't processed the server shutdown
    @@ -2141,17 +2147,17 @@ service.
      operations, or when an operation is performed on a region server that is 
    still starting.
     
     
    -
    +
     ServerName
     Name of a particular incarnation of an HBase Server.
     
     
    -
    +
     TableName
     Immutable POJO class for representing a table name.
     
     
    -
    +
     YouAreDeadException
     This exception is thrown by the master when a region server 
    reports and is
      already being processed as dead.
    @@ -2306,16 +2312,22 @@ service.
     
     
     
    +HRegionLocation
    +Data structure to hold RegionInfo and the address for the 
    hosting
    + HRegionServer.
    +
    +
    +
     NamespaceDescriptor
     Namespace POJO class.
     
     
    -
    +
     ServerName
     Name of a particular incarnation of an HBase Server.
     
     
    -
    +
     TableName
     Immutable POJO class for representing a table name.
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    index 07a74d8..7ffb74f 100644
    --- a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    +++ b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    @@ -400,7 +400,7 @@
     
     class
     ReopenTableRegionsProcedure
    -Used for non table procedures to reopen the regions for a 
    table.
    +Used for reopening the regions for a table.
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
     
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
    index c7740ba..1e4cfaa 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
    @@ -233,7 +233,7 @@
     
     class
     ReopenTableRegionsProcedure
    -Used for non table procedures to reopen the regions for a 
    table.
    +Used for reopening the regions for a table.
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
    index 682ad29..1cb9a94 100644
    --- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
    @@ -219,10 +219,10 @@
     
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
     
    -org.apache.hadoop.hbase.procedure2.LockedResourceType
    -org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
     org.apache.hadoop.hbase.procedure2.Procedure.LockState
    +org.apache.hadoop.hbase.procedure2.LockedResourceType
     org.apache.hadoop.hbase.procedure2.RootProcedureState.State
    +org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
     org.apache.hadoop.hbase.procedure2.LockType
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
    index 33c10de..41fbe18 100644
    --- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
    @@ -229,13 +229,13 @@
     
     

    [21/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
    index b6e7636..592c2cc 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
    @@ -356,3901 +356,3924 @@
 348  public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
     349ModifyTableResponse response = 
    executeCallable(
     350  new 
MasterCallable<ModifyTableResponse>(getConnection(), 
    getRpcControllerFactory()) {
    -351@Override
    -352protected ModifyTableResponse 
    rpcCall() throws Exception {
    -353  
    setPriority(td.getTableName());
    -354  ModifyTableRequest request = 
    RequestConverter.buildModifyTableRequest(
    -355td.getTableName(), td, 
    ng.getNonceGroup(), ng.newNonce());
    -356  return 
    master.modifyTable(getRpcController(), request);
    -357}
    -358  });
    -359return new ModifyTableFuture(this, 
    td.getTableName(), response);
    -360  }
    -361
    -362  @Override
    -363  public ListTableDescriptor 
    listTableDescriptorsByNamespace(byte[] name) throws IOException {
    -364return executeCallable(new 
    MasterCallableListTableDescriptor(getConnection(),
    -365getRpcControllerFactory()) {
    -366  @Override
    -367  protected 
    ListTableDescriptor rpcCall() throws Exception {
    -368return 
    master.listTableDescriptorsByNamespace(getRpcController(),
    -369
    ListTableDescriptorsByNamespaceRequest.newBuilder()
    -370  
    .setNamespaceName(Bytes.toString(name)).build())
    -371.getTableSchemaList()
    -372.stream()
    -373
    .map(ProtobufUtil::toTableDescriptor)
    -374
    .collect(Collectors.toList());
    -375  }
    -376});
    -377  }
    -378
    -379  @Override
    -380  public ListTableDescriptor 
    listTableDescriptors(ListTableName tableNames) throws IOException {
    -381return executeCallable(new 
    MasterCallableListTableDescriptor(getConnection(),
    -382getRpcControllerFactory()) {
    -383  @Override
    -384  protected 
    ListTableDescriptor rpcCall() throws Exception {
    -385GetTableDescriptorsRequest req 
    =
    -386
    RequestConverter.buildGetTableDescriptorsRequest(tableNames);
    -387  return 
    ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
    -388  req));
    -389  }
    -390});
    -391  }
    -392
    -393  @Override
    -394  public ListRegionInfo 
    getRegions(final ServerName sn) throws IOException {
    -395AdminService.BlockingInterface admin 
    = this.connection.getAdmin(sn);
    -396// TODO: There is no timeout on this 
    controller. Set one!
    -397HBaseRpcController controller = 
    rpcControllerFactory.newController();
    -398return 
    ProtobufUtil.getOnlineRegions(controller, admin);
    -399  }
    -400
    -401  @Override
    -402  public ListRegionInfo 
    getRegions(TableName tableName) throws IOException {
    -403if 
    (TableName.isMetaTableName(tableName)) {
    -404  return 
    Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
    -405} else {
    -406  return 
    MetaTableAccessor.getTableRegions(connection, tableName, true);
    -407}
    -408  }
    -409
    -410  private static class 
    AbortProcedureFuture extends ProcedureFutureBoolean {
    -411private boolean isAbortInProgress;
    -412
    -413public AbortProcedureFuture(
    -414final HBaseAdmin admin,
    -415final Long procId,
    -416final Boolean abortProcResponse) 
    {
    -417  super(admin, procId);
    -418  this.isAbortInProgress = 
    abortProcResponse;
    -419}
    -420
    -421@Override
    -422public Boolean get(long timeout, 
    TimeUnit unit)
    -423throws InterruptedException, 
    ExecutionException, TimeoutException {
    -424  if (!this.isAbortInProgress) {
    -425return false;
    -426  }
    -427  super.get(timeout, unit);
    -428  return true;
    -429}
    -430  }
    -431
    -432  /** @return Connection used by this 
    object. */
    -433  @Override
    -434  public Connection getConnection() {
    -435return connection;
    -436  }
    -437
    -438  @Override
    -439  public boolean tableExists(final 
    TableName tableName) throws IOException {
    -440return executeCallable(new 
    RpcRetryingCallableBoolean() {
    -441  @Override
    -442  protected Boolean rpcCall(int 
    callTimeout) throws Exception {
    -443return 
    MetaTableAccessor.tableExists(connection, tableName);
    -444  }
    -445});
    -446  }
    -447
    -448  @Override
    -449  public HTableDescriptor[] listTables() 
    throws IOException {
    -450return listTables((Pattern)null, 
    false);
    -451  }
    -452
    -453  
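
For context, a minimal sketch of driving the asynchronous modify-table path shown above from client code. The table name, column family, and wait timeout are illustrative assumptions, not part of the published diff:

  import java.util.concurrent.Future;
  import java.util.concurrent.TimeUnit;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.*;

  // Assumes a reachable cluster and the Admin API of this HBase generation.
  try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
       Admin admin = conn.getAdmin()) {
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))                   // hypothetical table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // hypothetical family
        .build();
    Future<Void> f = admin.modifyTableAsync(td); // wraps the ModifyTableFuture built above
    f.get(5, TimeUnit.MINUTES);                  // block until the master procedure completes
  }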

    [21/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
index fea2b5a..c7a6cc4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
@@ -1354,816 +1354,824 @@
 1346   */
 1347  public static void putsToMetaTable(final Connection connection, final List<Put> ps)
 1348      throws IOException {
-1349    try (Table t = getMetaHTable(connection)) {
-1350      debugLogMutations(ps);
-1351      t.put(ps);
-1352    }
-1353  }
-1354
-1355  /**
-1356   * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
-1357   * @param connection connection we're using
-1358   * @param d Delete to add to hbase:meta
-1359   */
-1360  private static void deleteFromMetaTable(final Connection connection, final Delete d)
-1361      throws IOException {
-1362    List<Delete> dels = new ArrayList<>(1);
-1363    dels.add(d);
-1364    deleteFromMetaTable(connection, dels);
-1365  }
-1366
-1367  /**
-1368   * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
-1369   * @param connection connection we're using
-1370   * @param deletes Deletes to add to hbase:meta  This list should support #remove.
-1371   */
-1372  private static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
-1373      throws IOException {
-1374    try (Table t = getMetaHTable(connection)) {
-1375      debugLogMutations(deletes);
-1376      t.delete(deletes);
-1377    }
-1378  }
-1379
-1380  /**
-1381   * Deletes some replica columns corresponding to replicas for the passed rows
-1382   * @param metaRows rows in hbase:meta
-1383   * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
-1384   * @param numReplicasToRemove how many replicas to remove
-1385   * @param connection connection we're using to access meta table
-1386   */
-1387  public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
-1388    int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
-1389      throws IOException {
-1390    int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
-1391    for (byte[] row : metaRows) {
-1392      long now = EnvironmentEdgeManager.currentTime();
-1393      Delete deleteReplicaLocations = new Delete(row);
-1394      for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
-1395        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1396          getServerColumn(i), now);
-1397        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1398          getSeqNumColumn(i), now);
-1399        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1400          getStartCodeColumn(i), now);
-1401      }
-1402      deleteFromMetaTable(connection, deleteReplicaLocations);
-1403    }
-1404  }
-1405
-1406  /**
-1407   * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
-1408   * @param connection connection we're using
-1409   * @param mutations Puts and Deletes to execute on hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void mutateMetaTable(final Connection connection,
-1413      final List<Mutation> mutations)
-1414    throws IOException {
-1415    Table t = getMetaHTable(connection);
-1416    try {
-1417      debugLogMutations(mutations);
-1418      t.batch(mutations, null);
-1419    } catch (InterruptedException e) {
-1420      InterruptedIOException ie = new InterruptedIOException(e.getMessage());
-1421      ie.initCause(e);
-1422      throw ie;
-1423    } finally {
-1424      t.close();
-1425    }
-1426  }
-1427
-1428  private static void addRegionStateToPut(Put put, RegionState.State state) throws IOException {
-1429    put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-1430        .setRow(put.getRow())
-1431        .setFamily(HConstants.CATALOG_FAMILY)
-1432        .setQualifier(getRegionStateColumn())
-1433        .setTimestamp(put.getTimestamp())
-1434        .setType(Cell.Type.Put)
-1435        .setValue(Bytes.toBytes(state.name()))
-1436        .build());
-1437  }
-1438
-1439  /**
-1440   * Adds daughter region infos to hbase:meta row for the specified region. Note that this does not
-1441   * add its daughter's as different rows, but adds information about the daughters in the same row
-1442   * as the parent. Use
-1443   * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)}
-1444   * if you want to do that.
-1445   * @param connection connection we're using
-1446 
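
A minimal sketch of the Delete pattern removeRegionReplicasFromMeta builds above; the meta row key, replica range, and literal qualifier strings are illustrative assumptions (the real code derives qualifiers via getServerColumn(i), getSeqNumColumn(i) and getStartCodeColumn(i)):

  import org.apache.hadoop.hbase.HConstants;
  import org.apache.hadoop.hbase.client.Delete;
  import org.apache.hadoop.hbase.util.Bytes;

  byte[] metaRow = Bytes.toBytes("demo,,1546300800000.d41d8cd9"); // hypothetical hbase:meta row
  long now = System.currentTimeMillis();
  Delete d = new Delete(metaRow);
  // Remove location columns for replicas 1 and 2 (replicaIndexToDeleteFrom=1, numReplicasToRemove=2).
  for (int i = 1; i < 1 + 2; i++) {
    d.addColumns(HConstants.CATALOG_FAMILY, Bytes.toBytes("server_000" + i), now);           // assumed format
    d.addColumns(HConstants.CATALOG_FAMILY, Bytes.toBytes("seqnumDuringOpen_000" + i), now); // assumed format
    d.addColumns(HConstants.CATALOG_FAMILY, Bytes.toBytes("serverstartcode_000" + i), now);  // assumed format
  }
  // d would then be applied via deleteFromMetaTable(connection, d), as in the hunk above.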

    [21/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 42d0637..eb16038 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -80,21 +80,21 @@
 072import org.apache.hadoop.hbase.PleaseHoldException;
 073import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 074import org.apache.hadoop.hbase.ScheduledChore;
-075import org.apache.hadoop.hbase.ServerMetricsBuilder;
-076import org.apache.hadoop.hbase.ServerName;
-077import org.apache.hadoop.hbase.TableDescriptors;
-078import org.apache.hadoop.hbase.TableName;
-079import org.apache.hadoop.hbase.TableNotDisabledException;
-080import org.apache.hadoop.hbase.TableNotFoundException;
-081import org.apache.hadoop.hbase.UnknownRegionException;
-082import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-083import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-084import org.apache.hadoop.hbase.client.MasterSwitchType;
-085import org.apache.hadoop.hbase.client.RegionInfo;
-086import org.apache.hadoop.hbase.client.Result;
-087import org.apache.hadoop.hbase.client.TableDescriptor;
-088import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-089import org.apache.hadoop.hbase.client.TableState;
+075import org.apache.hadoop.hbase.ServerName;
+076import org.apache.hadoop.hbase.TableDescriptors;
+077import org.apache.hadoop.hbase.TableName;
+078import org.apache.hadoop.hbase.TableNotDisabledException;
+079import org.apache.hadoop.hbase.TableNotFoundException;
+080import org.apache.hadoop.hbase.UnknownRegionException;
+081import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+082import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+083import org.apache.hadoop.hbase.client.MasterSwitchType;
+084import org.apache.hadoop.hbase.client.RegionInfo;
+085import org.apache.hadoop.hbase.client.Result;
+086import org.apache.hadoop.hbase.client.TableDescriptor;
+087import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+088import org.apache.hadoop.hbase.client.TableState;
+089import org.apache.hadoop.hbase.client.VersionInfoUtil;
 090import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 091import org.apache.hadoop.hbase.exceptions.DeserializationException;
 092import org.apache.hadoop.hbase.exceptions.MergeRegionException;
@@ -220,3477 +220,3481 @@
 212
 213import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 214import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-215import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-216import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-217import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-218import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-219import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-220
-221/**
-222 * HMaster is the "master server" for HBase. An HBase cluster has one active
-223 * master.  If many masters are started, all compete.  Whichever wins goes on to
-224 * run the cluster.  All others park themselves in their constructor until
-225 * master or cluster shutdown or until the active master loses its lease in
-226 * zookeeper.  Thereafter, all running master jostle to take over master role.
-227 *
-228 * <p>The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In
-229 * this case it will tell all regionservers to go down and then wait on them
-230 * all reporting in that they are down.  This master will then shut itself down.
-231 *
-232 * <p>You can also shutdown just this master.  Call {@link #stopMaster()}.
-233 *
-234 * @see org.apache.zookeeper.Watcher
-235 */
-236@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-237@SuppressWarnings("deprecation")
-238public class HMaster extends HRegionServer implements MasterServices {
-239  private static Logger LOG = LoggerFactory.getLogger(HMaster.class.getName());
-240
-241  /**
-242   * Protection against zombie master. Started once Master accepts active responsibility and
-243   * starts taking over responsibilities. Allows a finite time window before giving up ownership.
-244   */
-245  private static class InitializationMonitor extends HasThread {
-246    /** The amount of time in milliseconds to sleep before checking initialization status. */
-247    public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";
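
A minimal sketch of reading the zombie-master timeout declared above; the 15-minute fallback is an assumption for illustration, not a value taken from the diff:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  Configuration conf = HBaseConfiguration.create();
  // Same key as InitializationMonitor.TIMEOUT_KEY in the hunk above.
  long timeoutMs = conf.getLong("hbase.master.initializationmonitor.timeout",
      15L * 60L * 1000L); // hypothetical default
  // The monitor thread sleeps this long between checks of master initialization status.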
    

    [21/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index fe1e077..90c31f4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -1072,894 +1072,913 @@
 1064
 1065  protected boolean waitServerReportEvent(final ServerName serverName, final Procedure proc) {
 1066    final ServerStateNode serverNode = regionStates.getOrCreateServer(serverName);
-1067    return serverNode.getReportEvent().suspendIfNotReady(proc);
-1068  }
-1069
-1070  protected void wakeServerReportEvent(final ServerStateNode serverNode) {
-1071    serverNode.getReportEvent().wake(getProcedureScheduler());
-1072  }
-1073
-1074  // ============================================================================================
-1075  //  RIT chore
-1076  // ============================================================================================
-1077  private static class RegionInTransitionChore extends ProcedureInMemoryChore<MasterProcedureEnv> {
-1078    public RegionInTransitionChore(final int timeoutMsec) {
-1079      super(timeoutMsec);
-1080    }
-1081
-1082    @Override
-1083    protected void periodicExecute(final MasterProcedureEnv env) {
-1084      final AssignmentManager am = env.getAssignmentManager();
-1085
-1086      final RegionInTransitionStat ritStat = am.computeRegionInTransitionStat();
-1087      if (ritStat.hasRegionsOverThreshold()) {
-1088        for (RegionState hri: ritStat.getRegionOverThreshold()) {
-1089          am.handleRegionOverStuckWarningThreshold(hri.getRegion());
-1090        }
-1091      }
-1092
-1093      // update metrics
-1094      am.updateRegionsInTransitionMetrics(ritStat);
-1095    }
-1096  }
-1097
-1098  public RegionInTransitionStat computeRegionInTransitionStat() {
-1099    final RegionInTransitionStat rit = new RegionInTransitionStat(getConfiguration());
-1100    rit.update(this);
-1101    return rit;
-1102  }
-1103
-1104  public static class RegionInTransitionStat {
-1105    private final int ritThreshold;
+1067    if (serverNode == null) {
+1068      LOG.warn("serverName=null; {}", proc);
+1069    }
+1070    return serverNode.getReportEvent().suspendIfNotReady(proc);
+1071  }
+1072
+1073  protected void wakeServerReportEvent(final ServerStateNode serverNode) {
+1074    serverNode.getReportEvent().wake(getProcedureScheduler());
+1075  }
+1076
+1077  // ============================================================================================
+1078  //  RIT chore
+1079  // ============================================================================================
+1080  private static class RegionInTransitionChore extends ProcedureInMemoryChore<MasterProcedureEnv> {
+1081    public RegionInTransitionChore(final int timeoutMsec) {
+1082      super(timeoutMsec);
+1083    }
+1084
+1085    @Override
+1086    protected void periodicExecute(final MasterProcedureEnv env) {
+1087      final AssignmentManager am = env.getAssignmentManager();
+1088
+1089      final RegionInTransitionStat ritStat = am.computeRegionInTransitionStat();
+1090      if (ritStat.hasRegionsOverThreshold()) {
+1091        for (RegionState hri: ritStat.getRegionOverThreshold()) {
+1092          am.handleRegionOverStuckWarningThreshold(hri.getRegion());
+1093        }
+1094      }
+1095
+1096      // update metrics
+1097      am.updateRegionsInTransitionMetrics(ritStat);
+1098    }
+1099  }
+1100
+1101  public RegionInTransitionStat computeRegionInTransitionStat() {
+1102    final RegionInTransitionStat rit = new RegionInTransitionStat(getConfiguration());
+1103    rit.update(this);
+1104    return rit;
+1105  }
 1106
-1107    private HashMap<String, RegionState> ritsOverThreshold = null;
-1108    private long statTimestamp;
-1109    private long oldestRITTime = 0;
-1110    private int totalRITsTwiceThreshold = 0;
-1111    private int totalRITs = 0;
-1112
-1113    @VisibleForTesting
-1114    public RegionInTransitionStat(final Configuration conf) {
-1115      this.ritThreshold =
-1116        conf.getInt(METRICS_RIT_STUCK_WARNING_THRESHOLD, DEFAULT_RIT_STUCK_WARNING_THRESHOLD);
-1117    }
-1118
-1119    public int getRITThreshold() {
-1120      return ritThreshold;
-1121    }
-1122
-1123    public long getTimestamp() {
-1124      return statTimestamp;
-1125    }
-1126
-1127    public int getTotalRITs() {
-1128      return totalRITs;
-1129    }
-1130
-1131    public long getOldestRITTime() {
-1132      return oldestRITTime;
-1133    }
-1134
-1135    public int getTotalRITsOverThreshold() {
-1136      
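
A minimal sketch of the over-threshold test that RegionInTransitionChore and RegionInTransitionStat cooperate on above; the method and parameter names are illustrative, standing in for the real RegionStates plumbing:

  /** True if a region in transition, last updated at stampMs, has been stuck
   *  longer than ritThresholdMs (the METRICS_RIT_STUCK_WARNING_THRESHOLD role). */
  static boolean isOverThreshold(long stampMs, long nowMs, int ritThresholdMs) {
    return (nowMs - stampMs) > ritThresholdMs;
  }

  // e.g. isOverThreshold(t0, t0 + 61_000, 60_000) == true, so the chore would call
  // handleRegionOverStuckWarningThreshold(...) for that region.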

    [21/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 3da432b..d30fa8f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -928,7690 +928,7698 @@
 920      Collection<HStore> stores = this.stores.values();
 921      try {
 922        // update the stores that we are replaying
-923        stores.forEach(HStore::startReplayingFromWAL);
-924        // Recover any edits if available.
-925        maxSeqId = Math.max(maxSeqId,
-926          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-927        // Make sure mvcc is up to max.
-928        this.mvcc.advanceTo(maxSeqId);
-929      } finally {
-930        // update the stores that we are done replaying
-931        stores.forEach(HStore::stopReplayingFromWAL);
-932      }
-933    }
-934    this.lastReplayedOpenRegionSeqId = maxSeqId;
-935
-936    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-937    this.writestate.flushRequested = false;
-938    this.writestate.compacting.set(0);
-939
-940    if (this.writestate.writesEnabled) {
-941      // Remove temporary data left over from old regions
-942      status.setStatus("Cleaning up temporary data from old regions");
-943      fs.cleanupTempDir();
-944    }
-945
-946    if (this.writestate.writesEnabled) {
-947      status.setStatus("Cleaning up detritus from prior splits");
-948      // Get rid of any splits or merges that were lost in-progress.  Clean out
-949      // these directories here on open.  We may be opening a region that was
-950      // being split but we crashed in the middle of it all.
-951      fs.cleanupAnySplitDetritus();
-952      fs.cleanupMergesDir();
-953    }
-954
-955    // Initialize split policy
-956    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-957
-958    // Initialize flush policy
-959    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-960
-961    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-962    for (HStore store: stores.values()) {
-963      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-964    }
-965
-966    // Use maximum of log sequenceid or that which was found in stores
-967    // (particularly if no recovered edits, seqid will be -1).
-968    long maxSeqIdFromFile =
-969      WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
-970    long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1;
-971    if (writestate.writesEnabled) {
-972      WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), nextSeqId - 1);
-973    }
-974
-975    LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
-976
-977    // A region can be reopened if failed a split; reset flags
-978    this.closing.set(false);
-979    this.closed.set(false);
-980
-981    if (coprocessorHost != null) {
-982      status.setStatus("Running coprocessor post-open hooks");
-983      coprocessorHost.postOpen();
-984    }
+923        LOG.debug("replaying wal for " + this.getRegionInfo().getEncodedName());
+924        stores.forEach(HStore::startReplayingFromWAL);
+925        // Recover any edits if available.
+926        maxSeqId = Math.max(maxSeqId,
+927          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+928        // Make sure mvcc is up to max.
+929        this.mvcc.advanceTo(maxSeqId);
+930      } finally {
+931        LOG.debug("stopping wal replay for " + this.getRegionInfo().getEncodedName());
+932        // update the stores that we are done replaying
+933        stores.forEach(HStore::stopReplayingFromWAL);
+934      }
+935    }
+936    this.lastReplayedOpenRegionSeqId = maxSeqId;
+937
+938    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+939    this.writestate.flushRequested = false;
+940    this.writestate.compacting.set(0);
+941
+942    if (this.writestate.writesEnabled) {
+943      LOG.debug("Cleaning up temporary data for " + this.getRegionInfo().getEncodedName());
+944      // Remove temporary data left over from old regions
+945      status.setStatus("Cleaning up temporary data from old regions");
+946      fs.cleanupTempDir();
+947    }
+948
+949    if (this.writestate.writesEnabled) {
+950      status.setStatus("Cleaning up detritus from prior splits");
+951      // Get rid of any splits or merges that were lost in-progress.  Clean out
+952      // these directories here on 
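
A minimal sketch of the sequence-id arithmetic in the hunk above, with made-up values; -1 models "no recovered edits", so the value persisted in the region directory wins:

  long maxSeqId = -1L;          // hypothetical: nothing replayed from the WAL
  long maxSeqIdFromFile = 41L;  // hypothetical: as read via WALSplitter.getMaxRegionSequenceId
  long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1; // 42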

    [21/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
index 7290d52..f71c930 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
@@ -128,244 +128,245 @@
 120   */
 121  @Override
 122  public Cell forceCopyOfBigCellInto(Cell cell) {
-123    int size = KeyValueUtil.length(cell) + ChunkCreator.SIZEOF_CHUNK_HEADER;
-124    Preconditions.checkArgument(size >= 0, "negative size");
-125    if (size <= dataChunkSize) {
-126      // Using copyCellInto for cells which are bigger than the original maxAlloc
-127      Cell newCell = copyCellInto(cell, dataChunkSize);
-128      return newCell;
-129    } else {
-130      Chunk c = getNewExternalChunk(size);
-131      int allocOffset = c.alloc(size);
-132      return copyToChunkCell(cell, c.getData(), allocOffset, size);
-133    }
-134  }
-135
-136  private Cell copyCellInto(Cell cell, int maxAlloc) {
-137    int size = KeyValueUtil.length(cell);
-138    Preconditions.checkArgument(size >= 0, "negative size");
-139    // Callers should satisfy large allocations directly from JVM since they
-140    // don't cause fragmentation as badly.
-141    if (size > maxAlloc) {
-142      return null;
-143    }
-144    Chunk c = null;
-145    int allocOffset = 0;
-146    while (true) {
-147      // Try to get the chunk
-148      c = getOrMakeChunk();
-149      // we may get null because the some other thread succeeded in getting the lock
-150      // and so the current thread has to try again to make its chunk or grab the chunk
-151      // that the other thread created
-152      // Try to allocate from this chunk
-153      if (c != null) {
-154        allocOffset = c.alloc(size);
-155        if (allocOffset != -1) {
-156          // We succeeded - this is the common case - small alloc
-157          // from a big buffer
-158          break;
-159        }
-160        // not enough space!
-161        // try to retire this chunk
-162        tryRetireChunk(c);
-163      }
-164    }
-165    return copyToChunkCell(cell, c.getData(), allocOffset, size);
-166  }
-167
-168  /**
-169   * Clone the passed cell by copying its data into the passed buf and create a cell with a chunkid
-170   * out of it
-171   */
-172  private Cell copyToChunkCell(Cell cell, ByteBuffer buf, int offset, int len) {
-173    int tagsLen = cell.getTagsLength();
-174    if (cell instanceof ExtendedCell) {
-175      ((ExtendedCell) cell).write(buf, offset);
-176    } else {
-177      // Normally all Cell impls within Server will be of type ExtendedCell. Just considering the
-178      // other case also. The data fragments within Cell is copied into buf as in KeyValue
-179      // serialization format only.
-180      KeyValueUtil.appendTo(cell, buf, offset, true);
-181    }
-182    // TODO : write the seqid here. For writing seqId we should create a new cell type so
-183    // that seqId is not used as the state
-184    if (tagsLen == 0) {
-185      // When tagsLen is 0, make a NoTagsByteBufferKeyValue version. This is an optimized class
-186      // which directly return tagsLen as 0. So we avoid parsing many length components in
-187      // reading the tagLength stored in the backing buffer. The Memstore addition of every Cell
-188      // call getTagsLength().
-189      return new NoTagByteBufferChunkKeyValue(buf, offset, len, cell.getSequenceId());
-190    } else {
-191      return new ByteBufferChunkKeyValue(buf, offset, len, cell.getSequenceId());
-192    }
-193  }
-194
-195  /**
-196   * Close this instance since it won't be used any more, try to put the chunks
-197   * back to pool
-198   */
-199  @Override
-200  public void close() {
-201    this.closed = true;
-202    // We could put back the chunks to pool for reusing only when there is no
-203    // opening scanner which will read their data
-204    int count = openScannerCount.get();
-205    if(count == 0) {
-206      recycleChunks();
-207    }
-208  }
-209
-210  /**
-211   * Called when opening a scanner on the data of this MemStoreLAB
-212   */
-213  @Override
-214  public void incScannerCount() {
-215    this.openScannerCount.incrementAndGet();
-216  }
-217
-218  /**
-219   * Called when closing a scanner on the data of this MemStoreLAB
-220   */
-221  @Override
-222  public void decScannerCount() {
-223    int count = this.openScannerCount.decrementAndGet();
-224    if (this.closed && count == 0) {
-225      recycleChunks();
-226    }
-227  }
-228
-229  private void recycleChunks() {
-230    if (reclaimed.compareAndSet(false, true)) {
-231      
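
A minimal, self-contained sketch of the scanner ref-count close idiom used above (close() defers chunk recycling until the last open scanner goes away); the names here are illustrative, not the MemStoreLABImpl fields themselves:

  import java.util.concurrent.atomic.AtomicBoolean;
  import java.util.concurrent.atomic.AtomicInteger;

  class RefCountedResource {
    private final AtomicInteger openScanners = new AtomicInteger();
    private final AtomicBoolean closed = new AtomicBoolean();
    private final AtomicBoolean reclaimed = new AtomicBoolean();

    void incScannerCount() { openScanners.incrementAndGet(); }

    void decScannerCount() {
      if (openScanners.decrementAndGet() == 0 && closed.get()) {
        recycle();
      }
    }

    void close() {
      closed.set(true);
      if (openScanners.get() == 0) {
        recycle();
      }
    }

    private void recycle() {
      // compareAndSet guards against recycling twice when close() and the last
      // decScannerCount() race, mirroring the reclaimed flag in the hunk above.
      if (reclaimed.compareAndSet(false, true)) {
        // return chunks to the pool here
      }
    }
  }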
    

    [21/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
    --
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
index 4639252..f54d04b 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTest.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
-abstract static class PerformanceEvaluation.AsyncTest
+abstract static class PerformanceEvaluation.AsyncTest
 extends PerformanceEvaluation.TestBase
 
@@ -230,7 +230,7 @@ extends 
 
 connection
-protected org.apache.hadoop.hbase.client.AsyncConnection connection
+protected org.apache.hadoop.hbase.client.AsyncConnection connection
 
@@ -247,7 +247,7 @@ extends 
 
 AsyncTest
-AsyncTest(org.apache.hadoop.hbase.client.AsyncConnection con,
+AsyncTest(org.apache.hadoop.hbase.client.AsyncConnection con,
   PerformanceEvaluation.TestOptions options,
   PerformanceEvaluation.Status status)
 
@@ -266,7 +266,7 @@ extends 
 
 createConnection
-void createConnection()
+void createConnection()
 
 Specified by:
 createConnection in class PerformanceEvaluation.TestBase
@@ -279,7 +279,7 @@ extends 
 
 closeConnection
-void closeConnection()
+void closeConnection()
   throws IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
index a75ee34..ee238e0 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
-abstract static class PerformanceEvaluation.BufferedMutatorTest
+abstract static class PerformanceEvaluation.BufferedMutatorTest
 extends PerformanceEvaluation.Test
 
@@ -253,7 +253,7 @@ extends 
 
 mutator
-protected org.apache.hadoop.hbase.client.BufferedMutator mutator
+protected org.apache.hadoop.hbase.client.BufferedMutator mutator
 
@@ -262,7 +262,7 @@ extends 
 
 table
-protected org.apache.hadoop.hbase.client.Table table
+protected org.apache.hadoop.hbase.client.Table table
 
@@ -279,7 +279,7 @@ extends 
 
 BufferedMutatorTest
-BufferedMutatorTest(org.apache.hadoop.hbase.client.Connection con,
+BufferedMutatorTest(org.apache.hadoop.hbase.client.Connection con,
     PerformanceEvaluation.TestOptions options,
     PerformanceEvaluation.Status status)
 
@@ -298,7 +298,7 @@ extends 
 
 onStartup
-void onStartup()
+void onStartup()
     throws IOException
 
 Specified by:
@@ -314,7 +314,7 @@ extends 
 
 onTakedown
-void onTakedown()
+void onTakedown()
   throws IOException
 
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
index 217e0da..ae27398 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
@@ -132,7 +132,7 @@ var activeTableTab = "activeTableTab";
 
-abstract static class PerformanceEvaluation.CASTableTest
+abstract static class PerformanceEvaluation.CASTableTest
 extends PerformanceEvaluation.TableTest
 Base class for operations that are CAS-like; that read a value and then set it based off what
  they read. In this category is increment, append, checkAndPut, etc.
@@ -278,7 +278,7 @@ extends 
 
 qualifier
-private final byte[] qualifier
+private final byte[] qualifier
 
@@ -295,7 +295,7 @@ extends 
 
 CASTableTest
-CASTableTest(org.apache.hadoop.hbase.client.Connection con,
+CASTableTest(org.apache.hadoop.hbase.client.Connection con,
   PerformanceEvaluation.TestOptions options,
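
A minimal sketch of the connection lifecycle these abstract test bases describe (create on startup, close on takedown); the class name and method bodies here are assumptions for illustration, not PerformanceEvaluation internals:

  import java.io.IOException;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.AsyncConnection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  class DemoAsyncLifecycle {
    private AsyncConnection connection;

    void createConnection() throws Exception {
      // ConnectionFactory.createAsyncConnection returns a CompletableFuture.
      connection = ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get();
    }

    void closeConnection() throws IOException {
      connection.close();
    }
  }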
    

    [21/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
index 4a879bb..7d27402 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
@@ -300,7 +300,7 @@
 292  private Map<String, com.google.protobuf.Service> coprocessorServiceHandlers = Maps.newHashMap();
 293
 294  // Track data size in all memstores
-295  private final MemStoreSizing memStoreSize = new MemStoreSizing();
+295  private final MemStoreSizing memStoreSizing = new ThreadSafeMemStoreSizing();
 296  private final RegionServicesForStores regionServicesForStores = new RegionServicesForStores(this);
 297
 298  // Debug possible data loss due to WAL off
@@ -1218,7389 +1218,7399 @@
 1210   * Increase the size of mem store in this region and the size of global mem
 1211   * store
 1212   */
-1213  public void incMemStoreSize(MemStoreSize memStoreSize) {
-1214    if (this.rsAccounting != null) {
-1215      rsAccounting.incGlobalMemStoreSize(memStoreSize);
-1216    }
-1217    long dataSize;
-1218    synchronized (this.memStoreSize) {
-1219      this.memStoreSize.incMemStoreSize(memStoreSize);
-1220      dataSize = this.memStoreSize.getDataSize();
-1221    }
-1222    checkNegativeMemStoreDataSize(dataSize, memStoreSize.getDataSize());
-1223  }
-1224
-1225  public void decrMemStoreSize(MemStoreSize memStoreSize) {
-1226    if (this.rsAccounting != null) {
-1227      rsAccounting.decGlobalMemStoreSize(memStoreSize);
-1228    }
-1229    long size;
-1230    synchronized (this.memStoreSize) {
-1231      this.memStoreSize.decMemStoreSize(memStoreSize);
-1232      size = this.memStoreSize.getDataSize();
+1213  void incMemStoreSize(MemStoreSize mss) {
+1214    incMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1215  }
+1216
+1217  void incMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1218    if (this.rsAccounting != null) {
+1219      rsAccounting.incGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1220    }
+1221    long dataSize =
+1222      this.memStoreSizing.incMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1223    checkNegativeMemStoreDataSize(dataSize, dataSizeDelta);
+1224  }
+1225
+1226  void decrMemStoreSize(MemStoreSize mss) {
+1227    decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1228  }
+1229
+1230  void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1231    if (this.rsAccounting != null) {
+1232      rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
 1233    }
-1234    checkNegativeMemStoreDataSize(size, -memStoreSize.getDataSize());
-1235  }
-1236
-1237  private void checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) {
-1238    // This is extremely bad if we make memStoreSize negative. Log as much info on the offending
-1239    // caller as possible. (memStoreSize might be a negative value already -- freeing memory)
-1240    if (memStoreDataSize < 0) {
-1241      LOG.error("Asked to modify this region's (" + this.toString()
-1242          + ") memStoreSize to a negative value which is incorrect. Current memStoreSize="
-1243          + (memStoreDataSize - delta) + ", delta=" + delta, new Exception());
-1244    }
-1245  }
-1246
-1247  @Override
-1248  public RegionInfo getRegionInfo() {
-1249    return this.fs.getRegionInfo();
-1250  }
-1251
-1252  /**
-1253   * @return Instance of {@link RegionServerServices} used by this HRegion.
-1254   * Can be null.
-1255   */
-1256  RegionServerServices getRegionServerServices() {
-1257    return this.rsServices;
-1258  }
-1259
-1260  @Override
-1261  public long getReadRequestsCount() {
-1262    return readRequestsCount.sum();
-1263  }
-1264
-1265  @Override
-1266  public long getFilteredReadRequestsCount() {
-1267    return filteredReadRequestsCount.sum();
-1268  }
-1269
-1270  @Override
-1271  public long getWriteRequestsCount() {
-1272    return writeRequestsCount.sum();
-1273  }
-1274
-1275  @Override
-1276  public long getMemStoreDataSize() {
-1277    return memStoreSize.getDataSize();
-1278  }
-1279
-1280  @Override
-1281  public long getMemStoreHeapSize() {
-1282    return memStoreSize.getHeapSize();
-1283  }
-1284
-1285  @Override
-1286  public long getMemStoreOffHeapSize() {
-1287    return memStoreSize.getOffHeapSize();
-1288  }
-1289
-1290  /** @return store services for this region, to access services required by store level 
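
The new-side code above swaps a synchronized block for a ThreadSafeMemStoreSizing that returns the updated data size from the increment itself. A minimal sketch of that design, with illustrative names:

  import java.util.concurrent.atomic.AtomicLong;

  class SizingSketch {
    private final AtomicLong dataSize = new AtomicLong();
    private final AtomicLong heapSize = new AtomicLong();
    private final AtomicLong offHeapSize = new AtomicLong();

    /** Applies the deltas and returns the new data size, so the caller can run
     *  its negative-size sanity check without taking a lock or re-reading. */
    long incMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta) {
      heapSize.addAndGet(heapDelta);
      offHeapSize.addAndGet(offHeapDelta);
      return dataSize.addAndGet(dataDelta);
    }
  }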

    [21/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
    --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
index 2510283..418c60c 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
@@ -77,77 +77,77 @@
 069import org.apache.hadoop.hbase.client.RowMutations;
 070import org.apache.hadoop.hbase.client.Scan;
 071import org.apache.hadoop.hbase.client.Table;
-072import org.apache.hadoop.hbase.filter.BinaryComparator;
-073import org.apache.hadoop.hbase.filter.Filter;
-074import org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import org.apache.hadoop.hbase.filter.FilterList;
-076import org.apache.hadoop.hbase.filter.PageFilter;
-077import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import org.apache.hadoop.hbase.io.compress.Compression;
-080import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import org.apache.hadoop.hbase.regionserver.BloomType;
-084import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import org.apache.hadoop.hbase.trace.TraceUtil;
-088import org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import org.apache.hadoop.hbase.util.Bytes;
-090import org.apache.hadoop.hbase.util.Hash;
-091import org.apache.hadoop.hbase.util.MurmurHash;
-092import org.apache.hadoop.hbase.util.Pair;
-093import org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import org.apache.hadoop.mapreduce.Mapper;
-098import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import org.apache.hadoop.util.ToolRunner;
-103import org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import org.apache.htrace.core.TraceScope;
-106import org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase performance and scalability.  Runs a HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the <a
-121 * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132  static final String RANDOM_READ = "randomRead";
-133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper MAPPER = new ObjectMapper();
-135  static {
-136    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = "TestTable";
-140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import org.apache.hadoop.hbase.filter.Filter;
+075import 
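
A minimal sketch of invoking the tool whose javadoc appears above; the client count and test name are illustrative arguments, and the single-Configuration constructor is an assumption about this class's API rather than something shown in the diff:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.util.ToolRunner;

  // Run the randomRead experiment with one client, in-process (--nomapred = no MapReduce).
  int exit = ToolRunner.run(HBaseConfiguration.create(),
      new PerformanceEvaluation(HBaseConfiguration.create()),
      new String[] { "--nomapred", "randomRead", "1" });
  System.exit(exit);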
    

    [21/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
index e1bc325..63e7421 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
@@ -66,5125 +66,5224 @@
 058import java.util.concurrent.TimeoutException;
 059import java.util.concurrent.atomic.AtomicBoolean;
 060import java.util.concurrent.atomic.AtomicInteger;
-061import org.apache.commons.io.IOUtils;
-062import org.apache.commons.lang3.RandomStringUtils;
-063import org.apache.commons.lang3.StringUtils;
-064import org.apache.hadoop.conf.Configuration;
-065import org.apache.hadoop.conf.Configured;
-066import org.apache.hadoop.fs.FSDataOutputStream;
-067import org.apache.hadoop.fs.FileStatus;
-068import org.apache.hadoop.fs.FileSystem;
-069import org.apache.hadoop.fs.Path;
-070import org.apache.hadoop.fs.permission.FsAction;
-071import org.apache.hadoop.fs.permission.FsPermission;
-072import org.apache.hadoop.hbase.Abortable;
-073import org.apache.hadoop.hbase.Cell;
-074import org.apache.hadoop.hbase.CellUtil;
-075import org.apache.hadoop.hbase.ClusterMetrics;
-076import org.apache.hadoop.hbase.ClusterMetrics.Option;
-077import org.apache.hadoop.hbase.HBaseConfiguration;
-078import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-079import org.apache.hadoop.hbase.HConstants;
-080import org.apache.hadoop.hbase.HRegionInfo;
-081import org.apache.hadoop.hbase.HRegionLocation;
-082import org.apache.hadoop.hbase.KeyValue;
-083import org.apache.hadoop.hbase.MasterNotRunningException;
-084import org.apache.hadoop.hbase.MetaTableAccessor;
-085import org.apache.hadoop.hbase.RegionLocations;
-086import org.apache.hadoop.hbase.ServerName;
-087import org.apache.hadoop.hbase.TableName;
-088import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-089import org.apache.hadoop.hbase.client.Admin;
-090import org.apache.hadoop.hbase.client.ClusterConnection;
-091import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-092import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-093import org.apache.hadoop.hbase.client.Connection;
-094import org.apache.hadoop.hbase.client.ConnectionFactory;
-095import org.apache.hadoop.hbase.client.Delete;
-096import org.apache.hadoop.hbase.client.Get;
-097import org.apache.hadoop.hbase.client.Put;
-098import org.apache.hadoop.hbase.client.RegionInfo;
-099import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-100import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-101import org.apache.hadoop.hbase.client.Result;
-102import org.apache.hadoop.hbase.client.RowMutations;
-103import org.apache.hadoop.hbase.client.Table;
-104import org.apache.hadoop.hbase.client.TableDescriptor;
-105import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-106import org.apache.hadoop.hbase.client.TableState;
-107import org.apache.hadoop.hbase.io.FileLink;
-108import org.apache.hadoop.hbase.io.HFileLink;
-109import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-110import org.apache.hadoop.hbase.io.hfile.HFile;
-111import org.apache.hadoop.hbase.log.HBaseMarkers;
-112import org.apache.hadoop.hbase.master.MasterFileSystem;
-113import org.apache.hadoop.hbase.master.RegionState;
-114import org.apache.hadoop.hbase.regionserver.HRegion;
-115import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-116import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-117import org.apache.hadoop.hbase.replication.ReplicationException;
-118import org.apache.hadoop.hbase.security.AccessDeniedException;
-119import org.apache.hadoop.hbase.security.UserProvider;
-120import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-121import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
-122import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
-123import org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
-124import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
-125import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-126import org.apache.hadoop.hbase.wal.WAL;
-127import org.apache.hadoop.hbase.wal.WALFactory;
-128import org.apache.hadoop.hbase.wal.WALSplitter;
-129import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-130import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-131import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-132import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-133import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-134import 
    

    [21/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
index 3cafa7c..05ee7d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
@@ -232,8 +232,8 @@
 224      this.current.close();
 225    }
 226    if (this.heap != null) {
-227      KeyValueScanner scanner;
-228      while ((scanner = this.heap.poll()) != null) {
+227      // Order of closing the scanners shouldn't matter here, so simply iterate and close them.
+228      for (KeyValueScanner scanner : heap) {
 229        scanner.close();
 230      }
 231    }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
index 3cafa7c..05ee7d2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
@@ -232,8 +232,8 @@
 224      this.current.close();
 225    }
 226    if (this.heap != null) {
-227      KeyValueScanner scanner;
-228      while ((scanner = this.heap.poll()) != null) {
+227      // Order of closing the scanners shouldn't matter here, so simply iterate and close them.
+228      for (KeyValueScanner scanner : heap) {
 229        scanner.close();
 230      }
 231    }
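
The change in both hunks above replaces destructive poll()-draining with plain iteration, which still visits every scanner; as the added comment notes, close order does not matter here. A minimal stand-alone illustration of the same idiom, using a hypothetical queue of AutoCloseable scanners:

  import java.util.Comparator;
  import java.util.PriorityQueue;

  PriorityQueue<AutoCloseable> heap =
      new PriorityQueue<>(Comparator.comparingInt(System::identityHashCode)); // illustrative ordering
  // ... scanners would be added elsewhere ...
  for (AutoCloseable scanner : heap) { // iterate; no need to poll() the heap empty
    try {
      scanner.close();
    } catch (Exception e) {
      // log and continue closing the rest
    }
  }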
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.html
index 47c27f1..d7d35b7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.html
@@ -30,13 +30,13 @@
 022import java.util.List;
 023
 024import org.apache.hadoop.hbase.Cell;
-025import org.apache.hadoop.hbase.CellUtil;
+025import org.apache.hadoop.hbase.DoNotRetryIOException;
 026import org.apache.hadoop.hbase.HConstants;
 027import org.apache.hadoop.hbase.PrivateCellUtil;
-028import org.apache.yetus.audience.InterfaceAudience;
-029import org.apache.hadoop.hbase.client.Scan;
-030import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-031import org.apache.hadoop.hbase.util.Bytes;
+028import org.apache.hadoop.hbase.client.Scan;
+029import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
+030import org.apache.hadoop.hbase.util.Bytes;
+031import org.apache.yetus.audience.InterfaceAudience;
 032
 033/**
 034 * ReversibleRegionScannerImpl extends from RegionScannerImpl, and is used to
@@ -61,37 +61,36 @@
 053      List<KeyValueScanner> joinedScanners, HRegion region) throws IOException {
 054    this.storeHeap = new ReversedKeyValueHeap(scanners, comparator);
 055    if (!joinedScanners.isEmpty()) {
-056      this.joinedHeap = new ReversedKeyValueHeap(joinedScanners,
-057          comparator);
-058    }
-059  }
-060
-061  @Override
-062  protected boolean shouldStop(Cell currentRowCell) {
-063    if (currentRowCell == null) {
-064      return true;
-065    }
-066    if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_START_ROW)) {
-067      return false;
-068    }
-069    int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length);
-070    return c < 0 || (c == 0 && !includeStopRow);
-071  }
-072
-073  @Override
-074  protected boolean nextRow(ScannerContext scannerContext, Cell curRowCell)
-075      throws IOException {
-076    assert super.joinedContinuationRow == null : "Trying to go to next row during joinedHeap read.";
-077    this.storeHeap.seekToPreviousRow(PrivateCellUtil.createFirstOnRow(curRowCell));
-078    resetFilters();
-079    // Calling the hook in CP which allows it to do a fast forward
-080    if (this.region.getCoprocessorHost() != null) {
-081      return this.region.getCoprocessorHost().postScannerFilterRow(this, curRowCell);
-082    }
-083    return true;
-084  }
-085
-086}
+056      throw new DoNotRetryIOException("Reverse scan with loading CFs on demand is not supported");
+057
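
The new-side constructor above fails fast with DoNotRetryIOException when a reverse scan is combined with loading column families on demand. A minimal client-side sketch of the reverse-scan setup that remains supported; the table rows and bounds are illustrative:

  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.util.Bytes;

  Scan scan = new Scan()
      .withStartRow(Bytes.toBytes("row-999")) // reversed: start from the high row
      .withStopRow(Bytes.toBytes("row-000"))
      .setReversed(true);
  // Per the diff above, additionally calling scan.setLoadColumnFamiliesOnDemand(true)
  // would now be rejected server-side rather than silently misbehaving.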

    [21/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.Command.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.Command.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.Command.html
index a12ad81..f236300 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.Command.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.Command.html
@@ -49,31 +49,31 @@
 041import java.net.URI;
 042import java.util.List;
 043
-044import org.apache.commons.cli.CommandLine;
-045import org.apache.commons.cli.HelpFormatter;
-046import org.apache.commons.cli.Options;
-047import org.apache.commons.lang3.StringUtils;
-048import org.apache.hadoop.conf.Configuration;
-049import org.apache.hadoop.conf.Configured;
-050import org.apache.hadoop.fs.FileSystem;
-051import org.apache.hadoop.fs.Path;
-052import org.apache.hadoop.hbase.HBaseConfiguration;
-053import org.apache.hadoop.hbase.TableName;
-054import org.apache.hadoop.hbase.backup.BackupAdmin;
-055import org.apache.hadoop.hbase.backup.BackupInfo;
-056import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-057import org.apache.hadoop.hbase.backup.BackupRequest;
-058import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
-059import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
-060import org.apache.hadoop.hbase.backup.BackupType;
-061import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-062import org.apache.hadoop.hbase.backup.util.BackupSet;
-063import org.apache.hadoop.hbase.backup.util.BackupUtils;
-064import org.apache.hadoop.hbase.client.Connection;
-065import org.apache.hadoop.hbase.client.ConnectionFactory;
-066import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-067import org.apache.yetus.audience.InterfaceAudience;
-068import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+044import org.apache.commons.lang3.StringUtils;
+045import org.apache.hadoop.conf.Configuration;
+046import org.apache.hadoop.conf.Configured;
+047import org.apache.hadoop.fs.FileSystem;
+048import org.apache.hadoop.fs.Path;
+049import org.apache.hadoop.hbase.HBaseConfiguration;
+050import org.apache.hadoop.hbase.TableName;
+051import org.apache.hadoop.hbase.backup.BackupAdmin;
+052import org.apache.hadoop.hbase.backup.BackupInfo;
+053import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+054import org.apache.hadoop.hbase.backup.BackupRequest;
+055import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+056import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
+057import org.apache.hadoop.hbase.backup.BackupType;
+058import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+059import org.apache.hadoop.hbase.backup.util.BackupSet;
+060import org.apache.hadoop.hbase.backup.util.BackupUtils;
+061import org.apache.hadoop.hbase.client.Connection;
+062import org.apache.hadoop.hbase.client.ConnectionFactory;
+063import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+064import org.apache.yetus.audience.InterfaceAudience;
+065import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+066import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
+067import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
+068import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
 069
 070/**
 071 * General backup commands, options and usage messages
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
index a12ad81..f236300 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.CreateCommand.html
@@ -49,31 +49,31 @@
 041import java.net.URI;
 042import java.util.List;
 043
-044import org.apache.commons.cli.CommandLine;
-045import org.apache.commons.cli.HelpFormatter;
-046import org.apache.commons.cli.Options;
-047import org.apache.commons.lang3.StringUtils;
-048import org.apache.hadoop.conf.Configuration;
-049import org.apache.hadoop.conf.Configured;
-050import org.apache.hadoop.fs.FileSystem;
-051import org.apache.hadoop.fs.Path;
-052import org.apache.hadoop.hbase.HBaseConfiguration;
-053import org.apache.hadoop.hbase.TableName;
-054import org.apache.hadoop.hbase.backup.BackupAdmin;
-055import org.apache.hadoop.hbase.backup.BackupInfo;
-056import

    [21/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/deprecated-list.html
    --
    diff --git a/devapidocs/deprecated-list.html b/devapidocs/deprecated-list.html
    index 3876dcf..92ff3b6 100644
    --- a/devapidocs/deprecated-list.html
    +++ b/devapidocs/deprecated-list.html
    @@ -743,19 +743,19 @@
     
     
     
-org.apache.hadoop.hbase.client.Mutation.compareTo(Row)
+org.apache.hadoop.hbase.client.Row.compareTo(Row)
 As of release 2.0.0, this will be removed in HBase 3.0.0.
  Use Row.COMPARATOR instead


-org.apache.hadoop.hbase.client.RowMutations.compareTo(Row)
+org.apache.hadoop.hbase.client.Mutation.compareTo(Row)
 As of release 2.0.0, this will be removed in HBase 3.0.0.
  Use Row.COMPARATOR instead


-org.apache.hadoop.hbase.client.Row.compareTo(Row)
+org.apache.hadoop.hbase.client.RowMutations.compareTo(Row)
 As of release 2.0.0, this will be removed in HBase 3.0.0.
  Use Row.COMPARATOR instead

@@ -794,13 +794,13 @@
 org.apache.hadoop.hbase.mapreduce.CellCreator.create(byte[], int, int, byte[], int, int, byte[], int, int, long, byte[], int, int, String)

-org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory.create(Configuration, PriorityFunction)
+org.apache.hadoop.hbase.regionserver.FifoRpcSchedulerFactory.create(Configuration, PriorityFunction)

 org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory.create(Configuration, PriorityFunction)

-org.apache.hadoop.hbase.regionserver.FifoRpcSchedulerFactory.create(Configuration, PriorityFunction)
+org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory.create(Configuration, PriorityFunction)

 org.apache.hadoop.hbase.coprocessor.ObserverContextImpl.createAndPrepare(E)
@@ -1027,15 +1027,15 @@
 org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValues(List<Cell>)

-org.apache.hadoop.hbase.client.RowMutations.equals(Object)
+org.apache.hadoop.hbase.client.Increment.equals(Object)
 As of release 2.0.0, this will be removed in HBase 3.0.0.
- No replacement
+ Use Row.COMPARATOR instead


-org.apache.hadoop.hbase.client.Increment.equals(Object)
+org.apache.hadoop.hbase.client.RowMutations.equals(Object)
 As of release 2.0.0, this will be removed in HBase 3.0.0.
- Use Row.COMPARATOR instead
+ No replacement

@@ -1096,82 +1096,82 @@

-org.apache.hadoop.hbase.filter.ValueFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.DependentColumnFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.SkipFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.WhileMatchFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.FamilyFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.QualifierFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.ColumnPrefixFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.PrefixFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.PageFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.MultiRowRangeFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.RowFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.FamilyFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.ColumnRangeFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.SingleColumnValueFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.ColumnCountGetFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.Filter.filterKeyValue(Cell)
+As of release 2.0.0, this will be removed in HBase 3.0.0.
+ Instead use filterCell(Cell)
+

-org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.TimestampsFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.ColumnPaginationFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.FuzzyRowFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.DependentColumnFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.FilterList.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.InclusiveStopFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.ValueFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.KeyOnlyFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.RowFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.MultiRowRangeFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.InclusiveStopFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.Filter.filterKeyValue(Cell)
-As of release 2.0.0, this will be removed in HBase 3.0.0.
- Instead use filterCell(Cell)
-
+org.apache.hadoop.hbase.filter.ColumnPaginationFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter.filterKeyValue(Cell)
+org.apache.hadoop.hbase.filter.SkipFilter.filterKeyValue(Cell)

-org.apache.hadoop.hbase.filter.WhileMatchFilter.filterKeyValue(Cell)
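The block of filterKeyValue(Cell) rows above documents the HBase 2.0 filter migration: Filter.filterKeyValue(Cell) is deprecated and filterCell(Cell) is the named replacement. A minimal sketch of a custom filter written against the new method (the filter itself is hypothetical, and a real deployable filter would also need the serialization hooks such as toByteArray):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.filter.FilterBase;

// Hypothetical filter that keeps only cells with a non-empty value,
// overriding filterCell(Cell) rather than the deprecated filterKeyValue(Cell).
public class NonEmptyValueFilter extends FilterBase {
  @Override
  public ReturnCode filterCell(Cell c) {
    return CellUtil.cloneValue(c).length > 0 ? ReturnCode.INCLUDE : ReturnCode.SKIP;
  }
}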
    

    [21/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
index 2f94372..882bef5 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
@@ -611,16 +611,6 @@


 boolean
-HTable.checkAndPut(byte[] row,
-    byte[] family,
-    byte[] qualifier,
-    byte[] value,
-    Put put)
-Deprecated.
-
-
-
-boolean
 Table.checkAndPut(byte[] row,
     byte[] family,
     byte[] qualifier,
@@ -631,18 +621,17 @@



-
+
 boolean
-HTable.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
     byte[] family,
     byte[] qualifier,
-    CompareFilter.CompareOp compareOp,
     byte[] value,
     Put put)
 Deprecated.


-
+
 boolean
 Table.checkAndPut(byte[] row,
     byte[] family,
@@ -655,18 +644,18 @@



-
+
 boolean
-HTable.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
     byte[] family,
     byte[] qualifier,
-    CompareOperator op,
+    CompareFilter.CompareOp compareOp,
     byte[] value,
     Put put)
 Deprecated.


-
+
 boolean
 Table.checkAndPut(byte[] row,
     byte[] family,
@@ -679,6 +668,17 @@



+
+boolean
+HTable.checkAndPut(byte[] row,
+    byte[] family,
+    byte[] qualifier,
+    CompareOperator op,
+    byte[] value,
+    Put put)
+Deprecated.
+
+

 private boolean
 HTable.doCheckAndPut(byte[] row,
@@ -709,27 +709,27 @@


 CompletableFuture<Void>
-RawAsyncTableImpl.put(Put put)
+AsyncTable.put(Put put)
+Puts some data to the table.
+


 void
-HTable.put(Put put)
-
-
-void
 Table.put(Put put)
 Puts some data in the table.


-
+
 CompletableFuture<Void>
 AsyncTableImpl.put(Put put)

-
+
 CompletableFuture<Void>
-AsyncTable.put(Put put)
-Puts some data to the table.
-
+RawAsyncTableImpl.put(Put put)
+
+
+void
+HTable.put(Put put)


 boolean
@@ -748,19 +748,19 @@


 CompletableFuture<Boolean>
-RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)
+AsyncTable.CheckAndMutateBuilder.thenPut(Put put)


 boolean
-HTable.CheckAndMutateBuilderImpl.thenPut(Put put)
+Table.CheckAndMutateBuilder.thenPut(Put put)


-boolean
-Table.CheckAndMutateBuilder.thenPut(Put put)
+CompletableFuture<Boolean>
+RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)


-CompletableFuture<Boolean>
-AsyncTable.CheckAndMutateBuilder.thenPut(Put put)
+boolean
+HTable.CheckAndMutateBuilderImpl.thenPut(Put put)


 void
@@ -791,27 +791,27 @@


 List<CompletableFuture<Void>>
    

    [21/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
index d8c0033..14acfc0 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
@@ -399,10 +399,8 @@



-default CompletableFuture<Boolean>
-AsyncTable.exists(Get get)
-Test for the existence of columns in the table, as specified by the Get.
-
+boolean
+HTable.exists(Get get)


 boolean
@@ -411,32 +409,34 @@



-boolean
-HTable.exists(Get get)
+default CompletableFuture<Boolean>
+AsyncTable.exists(Get get)
+Test for the existence of columns in the table, as specified by the Get.
+


 CompletableFuture<Result>
-AsyncTable.get(Get get)
-Extracts certain cells from a given row.
-
+RawAsyncTableImpl.get(Get get)


 Result
+HTable.get(Get get)
+
+
+Result
 Table.get(Get get)
 Extracts certain cells from a given row.


-
-CompletableFuture<Result>
-AsyncTableImpl.get(Get get)
-

 CompletableFuture<Result>
-RawAsyncTableImpl.get(Get get)
+AsyncTableImpl.get(Get get)


-Result
-HTable.get(Get get)
+CompletableFuture<Result>
+AsyncTable.get(Get get)
+Extracts certain cells from a given row.
+


 private Result
@@ -457,10 +457,8 @@



-default List<CompletableFuture<Boolean>>
-AsyncTable.exists(List<Get> gets)
-Test for the existence of columns in the table, as specified by the Gets.
-
+boolean[]
+HTable.exists(List<Get> gets)


 boolean[]
@@ -469,16 +467,12 @@



-boolean[]
-HTable.exists(List<Get> gets)
-
-
-default CompletableFuture<List<Boolean>>
-AsyncTable.existsAll(List<Get> gets)
-A simple version for batch exists.
+default List<CompletableFuture<Boolean>>
+AsyncTable.exists(List<Get> gets)
+Test for the existence of columns in the table, as

    [21/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index ecf500c..0cd5a4e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -238,8355 +238,8368 @@
 230  public static final String HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize";
 231  public static final int DEFAULT_MAX_CELL_SIZE = 10485760;
 232
-233  public static final String HBASE_REGIONSERVER_MINIBATCH_SIZE =
-234      "hbase.regionserver.minibatch.size";
-235  public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
-236
-237  /**
-238   * This is the global default value for durability. All tables/mutations not
-239   * defining a durability or using USE_DEFAULT will default to this value.
-240   */
-241  private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL;
+233  /**
+234   * This is the global default value for durability. All tables/mutations not
+235   * defining a durability or using USE_DEFAULT will default to this value.
+236   */
+237  private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL;
+238
+239  public static final String HBASE_REGIONSERVER_MINIBATCH_SIZE =
+240      "hbase.regionserver.minibatch.size";
+241  public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
 242
-243  final AtomicBoolean closed = new AtomicBoolean(false);
-244
-245  /* Closing can take some time; use the closing flag if there is stuff we don't
-246   * want to do while in closing state; e.g. like offer this region up to the
-247   * master as a region to close if the carrying regionserver is overloaded.
-248   * Once set, it is never cleared.
-249   */
-250  final AtomicBoolean closing = new AtomicBoolean(false);
-251
-252  /**
-253   * The max sequence id of flushed data on this region. There is no edit in memory that is
-254   * less that this sequence id.
-255   */
-256  private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM;
-257
-258  /**
-259   * Record the sequence id of last flush operation. Can be in advance of
-260   * {@link #maxFlushedSeqId} when flushing a single column family. In this case,
-261   * {@link #maxFlushedSeqId} will be older than the oldest edit in memory.
-262   */
-263  private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM;
-264
-265  /**
-266   * The sequence id of the last replayed open region event from the primary region. This is used
-267   * to skip entries before this due to the possibility of replay edits coming out of order from
-268   * replication.
-269   */
-270  protected volatile long lastReplayedOpenRegionSeqId = -1L;
-271  protected volatile long lastReplayedCompactionSeqId = -1L;
-272
-273  ////////////////////////////////////////////////////////////////////////////////
-274  // Members
-275  ////////////////////////////////////////////////////////////////////////////////
-276
-277  // map from a locked row to the context for that lock including:
-278  // - CountDownLatch for threads waiting on that row
-279  // - the thread that owns the lock (allow reentrancy)
-280  // - reference count of (reentrant) locks held by the thread
-281  // - the row itself
-282  private final ConcurrentHashMap<HashedBytes, RowLockContext> lockedRows =
-283      new ConcurrentHashMap<>();
-284
-285  protected final Map<byte[], HStore> stores =
-286      new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR);
+243  public static final String WAL_HSYNC_CONF_KEY = "hbase.wal.hsync";
+244  public static final boolean DEFAULT_WAL_HSYNC = false;
+245
+246  final AtomicBoolean closed = new AtomicBoolean(false);
+247
+248  /* Closing can take some time; use the closing flag if there is stuff we don't
+249   * want to do while in closing state; e.g. like offer this region up to the
+250   * master as a region to close if the carrying regionserver is overloaded.
+251   * Once set, it is never cleared.
+252   */
+253  final AtomicBoolean closing = new AtomicBoolean(false);
+254
+255  /**
+256   * The max sequence id of flushed data on this region. There is no edit in memory that is
+257   * less that this sequence id.
+258   */
+259  private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM;
+260
+261  /**
+262   * Record the sequence id of last flush operation. Can be in advance of
+263   * {@link #maxFlushedSeqId} when flushing a single column family. In this case,
+264   * {@link #maxFlushedSeqId} will be older than the oldest edit in memory.
+265   */
+266  private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM;
+267
+268
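Besides reordering the durability and minibatch constants, the hunk introduces WAL_HSYNC_CONF_KEY ("hbase.wal.hsync", default false). A minimal sketch of reading these keys from a Configuration, with fallback values exactly as they appear in the hunk (the region server reads them internally; doing it by hand here is purely illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    boolean hsync = conf.getBoolean("hbase.wal.hsync", false);           // DEFAULT_WAL_HSYNC
    int miniBatch = conf.getInt("hbase.regionserver.minibatch.size", 2); // default from the hunk
    System.out.println("hbase.wal.hsync=" + hsync
        + " hbase.regionserver.minibatch.size=" + miniBatch);
  }
}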

    [21/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
index c27b109..4160a88 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
@@ -105,7 +105,7 @@
 097    try {
 098      done = waitUntilDone(startTime * 1000L + asyncProcess.primaryCallTimeoutMicroseconds);
 099    } catch (InterruptedException ex) {
-100      LOG.error("Replica thread was interrupted - no replica calls: " + ex.getMessage());
+100      LOG.error("Replica thread interrupted - no replica calls {}", ex.getMessage());
 101      return;
 102    }
 103  }
@@ -149,7 +149,7 @@
 141    if (loc == null) return;
 142    HRegionLocation[] locs = loc.getRegionLocations();
 143    if (locs.length == 1) {
-144      LOG.warn("No replicas found for " + action.getAction());
+144      LOG.warn("No replicas found for {}", action.getAction());
 145      return;
 146    }
 147    synchronized (replicaResultLock) {
@@ -230,8 +230,8 @@
 222      return;
 223    } catch (Throwable t) {
 224      // This should not happen. Let's log & retry anyway.
-225      LOG.error("#" + asyncProcess.id + ", Caught throwable while calling. This is unexpected." +
-226          " Retrying. Server is " + server + ", tableName=" + tableName, t);
+225      LOG.error("id=" + asyncProcess.id + ", caught throwable. Unexpected." +
+226          " Retrying. Server=" + server + ", tableName=" + tableName, t);
 227      receiveGlobalFailure(multiAction, server, numAttempt, t);
 228      return;
 229    }
@@ -247,1036 +247,1035 @@
 239    }
 240  } catch (Throwable t) {
 241    // Something really bad happened. We are on the send thread that will now die.
-242    LOG.error("Internal AsyncProcess #" + asyncProcess.id + " error for "
-243        + tableName + " processing for " + server, t);
-244    throw new RuntimeException(t);
-245  } finally {
-246    asyncProcess.decTaskCounters(multiAction.getRegions(), server);
-247    if (callsInProgress != null && callable != null && res != null) {
-248      callsInProgress.remove(callable);
-249    }
-250  }
-251}
-252  }
-253
-254  private final Batch.Callback<CResult> callback;
-255  private final BatchErrors errors;
-256  private final ConnectionImplementation.ServerErrorTracker errorsByServer;
-257  private final ExecutorService pool;
-258  private final Set<CancellableRegionServerCallable> callsInProgress;
+242    LOG.error("id=" + asyncProcess.id + " error for " + tableName + " processing " + server, t);
+243    throw new RuntimeException(t);
+244  } finally {
+245    asyncProcess.decTaskCounters(multiAction.getRegions(), server);
+246    if (callsInProgress != null && callable != null && res != null) {
+247      callsInProgress.remove(callable);
+248    }
+249  }
+250}
+251  }
+252
+253  private final Batch.Callback<CResult> callback;
+254  private final BatchErrors errors;
+255  private final ConnectionImplementation.ServerErrorTracker errorsByServer;
+256  private final ExecutorService pool;
+257  private final Set<CancellableRegionServerCallable> callsInProgress;
+258
 259
-260
-261  private final TableName tableName;
-262  private final AtomicLong actionsInProgress = new AtomicLong(-1);
-263  /**
-264   * The lock controls access to results. It is only held when populating results where
-265   * there might be several callers (eventual consistency gets). For other requests,
-266   * there's one unique call going on per result index.
-267   */
-268  private final Object replicaResultLock = new Object();
-269  /**
-270   * Result array.  Null if results are not needed. Otherwise, each index corresponds to
-271   * the action index in initial actions submitted. For most request types, has null-s for
-272   * requests that are not done, and result/exception for those that are done.
-273   * For eventual-consistency gets, initially the same applies; at some point, replica calls
-274   * might be started, and ReplicaResultState is put at the corresponding indices. The
-275   * returning calls check the type to detect when this is the case. After all calls are done,
-276   * ReplicaResultState-s are replaced with results for the user.
-277   */
-278  private final Object[] results;
-279  /**
-280   * Indices of replica gets in results. If null, all or no actions are replica-gets.
-281   */
-282  private final int[] replicaGetIndices;
-283  private final boolean hasAnyReplicaGets;
-284
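The substantive change in the first hunks is the switch from string concatenation to SLF4J's {} placeholders, which defers message formatting until the log level is known to be enabled. A standalone sketch of the pattern (class and values are hypothetical):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

  public static void main(String[] args) {
    String server = "rs1.example.com,16020,1";  // illustrative value
    // Old style: the message string is built even when WARN is disabled.
    LOG.warn("No replicas found for " + server);
    // New style: formatting happens only if WARN is enabled.
    LOG.warn("No replicas found for {}", server);
  }
}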

    [21/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html b/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
index cbbd63f..19efd1a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -28,873 +28,886 @@
 020
 021import java.io.IOException;
 022import java.util.Collection;
-023import java.util.List;
-024import java.util.Map;
-025import java.util.Set;
-026import java.util.stream.Collectors;
-027import java.util.stream.Stream;
-028import org.apache.hadoop.fs.Path;
-029import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-030import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor;
-031import org.apache.hadoop.hbase.client.Durability;
-032import org.apache.hadoop.hbase.client.TableDescriptor;
-033import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-034import org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor;
-035import org.apache.hadoop.hbase.exceptions.DeserializationException;
-036import org.apache.hadoop.hbase.security.User;
-037import org.apache.hadoop.hbase.util.Bytes;
-038import org.apache.yetus.audience.InterfaceAudience;
-039
-040/**
-041 * HTableDescriptor contains the details about an HBase table such as the descriptors of
-042 * all the column families, is the table a catalog table, <code>hbase:meta</code>,
-043 * if the table is read only, the maximum size of the memstore,
-044 * when the region split should occur, coprocessors associated with it etc...
-045 * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
-046 *             Use {@link TableDescriptorBuilder} to build {@link HTableDescriptor}.
-047 */
-048@Deprecated
-049@InterfaceAudience.Public
-050public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescriptor> {
-051  public static final String SPLIT_POLICY = TableDescriptorBuilder.SPLIT_POLICY;
-052  public static final String MAX_FILESIZE = TableDescriptorBuilder.MAX_FILESIZE;
-053  public static final String OWNER = TableDescriptorBuilder.OWNER;
-054  public static final Bytes OWNER_KEY = TableDescriptorBuilder.OWNER_KEY;
-055  public static final String READONLY = TableDescriptorBuilder.READONLY;
-056  public static final String COMPACTION_ENABLED = TableDescriptorBuilder.COMPACTION_ENABLED;
-057  public static final String MEMSTORE_FLUSHSIZE = TableDescriptorBuilder.MEMSTORE_FLUSHSIZE;
-058  public static final String FLUSH_POLICY = TableDescriptorBuilder.FLUSH_POLICY;
-059  public static final String IS_ROOT = "IS_ROOT";
-060  public static final String IS_META = TableDescriptorBuilder.IS_META;
-061  public static final String DURABILITY = TableDescriptorBuilder.DURABILITY;
-062  public static final String REGION_REPLICATION = TableDescriptorBuilder.REGION_REPLICATION;
-063  public static final String REGION_MEMSTORE_REPLICATION = TableDescriptorBuilder.REGION_MEMSTORE_REPLICATION;
-064  public static final String NORMALIZATION_ENABLED = TableDescriptorBuilder.NORMALIZATION_ENABLED;
-065  public static final String PRIORITY = TableDescriptorBuilder.PRIORITY;
-066  public static final boolean DEFAULT_READONLY = TableDescriptorBuilder.DEFAULT_READONLY;
-067  public static final boolean DEFAULT_COMPACTION_ENABLED = TableDescriptorBuilder.DEFAULT_COMPACTION_ENABLED;
-068  public static final boolean DEFAULT_NORMALIZATION_ENABLED = TableDescriptorBuilder.DEFAULT_NORMALIZATION_ENABLED;
-069  public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE;
-070  public static final int DEFAULT_REGION_REPLICATION = TableDescriptorBuilder.DEFAULT_REGION_REPLICATION;
-071  public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = TableDescriptorBuilder.DEFAULT_REGION_MEMSTORE_REPLICATION;
-072  protected final ModifyableTableDescriptor delegatee;
-073
-074  /**
-075   * Construct a table descriptor specifying a TableName object
-076   * @param name Table name.
-077   * @see <a href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug</a>
-078   */
-079  public HTableDescriptor(final TableName name) {
-080    this(new ModifyableTableDescriptor(name));
-081  }
-082
-083  /**
-084   * Construct a table descriptor by cloning the descriptor passed as a parameter.
-085   * <p>
-086   * Makes a deep copy of the supplied descriptor.
-087   * Can make a modifiable descriptor from an ImmutableHTableDescriptor.
-088   * @param desc The descriptor.
-089   */
-090  public HTableDescriptor(final HTableDescriptor desc) {
-091    this(desc, true);
-092  }
-093
-094  protected
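The deprecation notice in the hunk points at TableDescriptorBuilder as the replacement for constructing HTableDescriptor directly. A minimal sketch of the builder path (table and family names are placeholders):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Immutable descriptor built fluently, instead of new HTableDescriptor(...).
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(3)
            .build())
        .build();
    System.out.println(desc);
  }
}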

    [21/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index b75fbbe..159fa7e 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -1379,344 +1379,351 @@


+protected void
+AbstractStateMachineTableProcedure.preflightChecks(MasterProcedureEnv env, Boolean enabled)
+Check that cluster is up and master is running.
+
+
+
 private void
 ModifyTableProcedure.preModify(MasterProcedureEnv env,
   org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyTableState state)
 Action before modifying table.

-
+
 private void
 RecoverMetaProcedure.prepare(MasterProcedureEnv env)
 Prepare for execution

-
+
 private void
 CloneSnapshotProcedure.prepareClone(MasterProcedureEnv env)
 Action before any real action of cloning from snapshot.

-
+
 private boolean
 CreateNamespaceProcedure.prepareCreate(MasterProcedureEnv env)
 Action before any real action of creating namespace.

-
+
 private boolean
 CreateTableProcedure.prepareCreate(MasterProcedureEnv env)

-
+
 private boolean
 DeleteTableProcedure.prepareDelete(MasterProcedureEnv env)

-
+
 private boolean
 DeleteNamespaceProcedure.prepareDelete(MasterProcedureEnv env)
 Action before any real action of deleting namespace.

-
+
 private boolean
 DisableTableProcedure.prepareDisable(MasterProcedureEnv env)
 Action before any real action of disabling table.

-
+
 private boolean
 EnableTableProcedure.prepareEnable(MasterProcedureEnv env)
 Action before any real action of enabling table.

-
+
 private boolean
 ModifyNamespaceProcedure.prepareModify(MasterProcedureEnv env)
 Action before any real action of adding namespace.

-
+
 private void
 ModifyTableProcedure.prepareModify(MasterProcedureEnv env)
 Check conditions before any real action of modifying a table.

-
+
 private void
 RestoreSnapshotProcedure.prepareRestore(MasterProcedureEnv env)
 Action before any real action of restoring from snapshot.

-
+
 private boolean
 TruncateTableProcedure.prepareTruncate(MasterProcedureEnv env)

-
+
 private boolean
 TruncateTableProcedure.preTruncate(MasterProcedureEnv env)

-
+
 private void
 ServerCrashProcedure.processMeta(MasterProcedureEnv env)

-
+
 protected void
 AbstractStateMachineNamespaceProcedure.releaseLock(MasterProcedureEnv env)

-
+
 protected void
 AbstractStateMachineRegionProcedure.releaseLock(MasterProcedureEnv env)

-
+
 protected void
 AbstractStateMachineTableProcedure.releaseLock(MasterProcedureEnv env)

-
+
 protected void
 ServerCrashProcedure.releaseLock(MasterProcedureEnv env)

-
+
 protected void
 RecoverMetaProcedure.releaseLock(MasterProcedureEnv env)

-
+
 private void
 RSProcedureDispatcher.CloseRegionRemoteCall.remoteCallCompleted(MasterProcedureEnv env,
   org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse response)

-
+
 private void
 RSProcedureDispatcher.ExecuteProceduresRemoteCall.remoteCallFailed(MasterProcedureEnv env, IOException e)

-
+
 private void
 RSProcedureDispatcher.OpenRegionRemoteCall.remoteCallFailed(MasterProcedureEnv env, IOException e)

-
+
 private void
 RSProcedureDispatcher.CloseRegionRemoteCall.remoteCallFailed(MasterProcedureEnv env, IOException e)

-
+
 protected static void
 DeleteNamespaceProcedure.removeFromZKNamespaceManager(MasterProcedureEnv env, String namespaceName)
 remove from ZooKeeper.

-
+
 protected static void
 DeleteNamespaceProcedure.removeNamespaceQuota(MasterProcedureEnv env, String namespaceName)
 remove quota for the namespace

-
+
 private void
 RestoreSnapshotProcedure.restoreSnapshot(MasterProcedureEnv env)
 Execute the on-disk Restore

-
+
 private void

    [21/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
index b1c22cf..1eb3f35 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
@@ -292,7 +292,7 @@


 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r,
+AsyncMetaTableAccessor.getRegionLocation(Result r,
   RegionInfo regionInfo,
   int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -301,7 +301,7 @@


 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r,
+MetaTableAccessor.getRegionLocation(Result r,
   RegionInfo regionInfo,
   int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result


-static RegionLocations
-MetaTableAccessor.getRegionLocations(Result r)
+private static Optional<RegionLocations>
+AsyncMetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocationList extracted from the result.


-private static Optional<RegionLocations>
-AsyncMetaTableAccessor.getRegionLocations(Result r)
+static RegionLocations
+MetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocationList extracted from the result.

@@ -334,42 +334,42 @@


 private static long
-MetaTableAccessor.getSeqNumDuringOpen(Result r, int replicaId)
+AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r, int replicaId)
 The latest seqnum that the server writing to meta observed when opening the region.


 private static long
-AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r, int replicaId)
+MetaTableAccessor.getSeqNumDuringOpen(Result r, int replicaId)
 The latest seqnum that the server writing to meta observed when opening the region.


-static ServerName
-MetaTableAccessor.getServerName(Result r, int replicaId)
+private static Optional<ServerName>
+AsyncMetaTableAccessor.getServerName(Result r, int replicaId)
 Returns a ServerName from catalog table Result.


-private static Optional<ServerName>
-AsyncMetaTableAccessor.getServerName(Result r, int replicaId)
+static ServerName
+MetaTableAccessor.getServerName(Result r, int replicaId)
 Returns a ServerName from catalog table Result.


+private static Optional<TableState>
+AsyncMetaTableAccessor.getTableState(Result r)
+
+
 static TableState
 MetaTableAccessor.getTableState(Result r)
 Decode table state from META Result.

-
-private static Optional<TableState>
-AsyncMetaTableAccessor.getTableState(Result r)
-

 void
 AsyncMetaTableAccessor.MetaTableScanResultConsumer.onNext(Result[] results,
@@ -465,13 +465,13 @@
 ClientScanner.cache


-private Deque<Result>
-BatchScanResultCache.partialResults
-
-
 private List<Result>
 CompleteScanResultCache.partialResults

+
+private Deque<Result>
+BatchScanResultCache.partialResults
+

 private Queue<Result>
 AsyncTableResultScanner.queue
@@ -494,7 +494,7 @@


 Result[]
-BatchScanResultCache.addAndGet(Result[] results, boolean isHeartbeatMessage)
+AllowPartialScanResultCache.addAndGet(Result[] results, boolean isHeartbeatMessage)

@@ -504,20 +504,24 @@


 Result[]
-AllowPartialScanResultCache.addAndGet(Result[] results, boolean isHeartbeatMessage)
+BatchScanResultCache.addAndGet(Result[] results, boolean isHeartbeatMessage)


 Result
-HTable.append(Append append)
-
-
-Result
 Table.append(Append append)
 Appends values to one or more columns within a single
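Most of the reshuffled rows are internal meta-table accessors, but the tail documents the public Table.append(Append), whose description is cut off above. A minimal sketch (table, row, and values are placeholders):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("demo"))) {
      // Atomically appends ",more" to whatever cf:q currently holds for row r1.
      Append append = new Append(Bytes.toBytes("r1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes(",more"));
      Result r = table.append(append);
      System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
    }
  }
}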
    

    [21/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
index 882bef5..2f94372 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
@@ -611,72 +611,72 @@


 boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
     byte[] family,
     byte[] qualifier,
     byte[] value,
     Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+Deprecated.


 boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
     byte[] family,
     byte[] qualifier,
     byte[] value,
     Put put)
-Deprecated.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+


 boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
     byte[] family,
     byte[] qualifier,
     CompareFilter.CompareOp compareOp,
     byte[] value,
     Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+Deprecated.


 boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
     byte[] family,
     byte[] qualifier,
     CompareFilter.CompareOp compareOp,
     byte[] value,
     Put put)
-Deprecated.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+


 boolean
-Table.checkAndPut(byte[] row,
+HTable.checkAndPut(byte[] row,
     byte[] family,
     byte[] qualifier,
     CompareOperator op,
     byte[] value,
     Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+Deprecated.


 boolean
-HTable.checkAndPut(byte[] row,
+Table.checkAndPut(byte[] row,
     byte[] family,
     byte[] qualifier,
     CompareOperator op,
     byte[] value,
     Put put)
-Deprecated.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+

@@ -709,27 +709,27 @@


 CompletableFuture<Void>
-AsyncTable.put(Put put)
-Puts some data to the table.
-
+RawAsyncTableImpl.put(Put put)


 void
+HTable.put(Put put)
+
+
+void
 Table.put(Put put)
 Puts some data in the table.

-
-CompletableFuture<Void>
-AsyncTableImpl.put(Put put)
-

 CompletableFuture<Void>
-RawAsyncTableImpl.put(Put put)
+AsyncTableImpl.put(Put put)


-void
-HTable.put(Put put)
+CompletableFuture<Void>
+AsyncTable.put(Put put)
+Puts some data to the table.
+


 boolean
@@ -748,19 +748,19 @@


 CompletableFuture<Boolean>
-AsyncTable.CheckAndMutateBuilder.thenPut(Put put)
+RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)


 boolean
-Table.CheckAndMutateBuilder.thenPut(Put put)
+HTable.CheckAndMutateBuilderImpl.thenPut(Put put)


-CompletableFuture<Boolean>
-RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)
+boolean
 CompletableFuture<Boolean>

    [21/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    index e828a9b..a99f492 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    @@ -495,841 +495,853 @@
     487  /** The serialized table state 
    qualifier */
     488  public static final byte[] 
    TABLE_STATE_QUALIFIER = Bytes.toBytes("state");
     489
    -490
    -491  /**
    -492   * The meta table version column 
    qualifier.
    -493   * We keep current version of the meta 
    table in this column in code-ROOT-/code
    -494   * table: i.e. in the 'info:v' 
    column.
    -495   */
    -496  public static final byte [] 
    META_VERSION_QUALIFIER = Bytes.toBytes("v");
    -497
    -498  /**
    -499   * The current version of the meta 
    table.
    -500   * - pre-hbase 0.92.  There is no 
    META_VERSION column in the root table
    -501   * in this case. The meta has 
    HTableDescriptor serialized into the HRegionInfo;
    -502   * - version 0 is 0.92 and 0.94. Meta 
    data has serialized HRegionInfo's using
    -503   * Writable serialization, and 
    HRegionInfo's does not contain HTableDescriptors.
    -504   * - version 1 for 0.96+ keeps 
    HRegionInfo data structures, but changes the
    -505   * byte[] serialization from Writables 
    to Protobuf.
    -506   * See HRegionInfo.VERSION
    -507   */
    -508  public static final short META_VERSION 
    = 1;
    -509
    -510  // Other constants
    -511
    -512  /**
    -513   * An empty instance.
    -514   */
    -515  public static final byte [] 
    EMPTY_BYTE_ARRAY = new byte [0];
    -516
    -517  public static final ByteBuffer 
    EMPTY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_BYTE_ARRAY);
    -518
    -519  /**
    -520   * Used by scanners, etc when they want 
    to start at the beginning of a region
    -521   */
    -522  public static final byte [] 
    EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
    -523
    -524  /**
    -525   * Last row in a table.
    -526   */
    -527  public static final byte [] 
    EMPTY_END_ROW = EMPTY_START_ROW;
    -528
    -529  /**
    -530* Used by scanners and others when 
    they're trying to detect the end of a
    -531* table
    -532*/
    -533  public static final byte [] LAST_ROW = 
    EMPTY_BYTE_ARRAY;
    +490  /** The replication barrier family as a 
    string*/
    +491  public static final String 
    REPLICATION_BARRIER_FAMILY_STR = "rep_barrier";
    +492
    +493  /** The replication barrier family */
    +494  public static final byte[] 
    REPLICATION_BARRIER_FAMILY =
    +495  
    Bytes.toBytes(REPLICATION_BARRIER_FAMILY_STR);
    +496
    +497  /**
    +498   * The meta table version column 
    qualifier.
    +499   * We keep current version of the meta 
    table in this column in code-ROOT-/code
    +500   * table: i.e. in the 'info:v' 
    column.
    +501   */
    +502  public static final byte [] 
    META_VERSION_QUALIFIER = Bytes.toBytes("v");
    +503
    +504  /**
    +505   * The current version of the meta 
    table.
    +506   * - pre-hbase 0.92.  There is no 
    META_VERSION column in the root table
    +507   * in this case. The meta has 
    HTableDescriptor serialized into the HRegionInfo;
    +508   * - version 0 is 0.92 and 0.94. Meta 
    data has serialized HRegionInfo's using
    +509   * Writable serialization, and 
    HRegionInfo's does not contain HTableDescriptors.
    +510   * - version 1 for 0.96+ keeps 
    HRegionInfo data structures, but changes the
    +511   * byte[] serialization from Writables 
    to Protobuf.
    +512   * See HRegionInfo.VERSION
    +513   */
    +514  public static final short META_VERSION 
    = 1;
    +515
    +516  // Other constants
    +517
    +518  /**
    +519   * An empty instance.
    +520   */
    +521  public static final byte [] 
    EMPTY_BYTE_ARRAY = new byte [0];
    +522
    +523  public static final ByteBuffer 
    EMPTY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_BYTE_ARRAY);
    +524
    +525  /**
    +526   * Used by scanners, etc when they want 
    to start at the beginning of a region
    +527   */
    +528  public static final byte [] 
    EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
    +529
    +530  /**
    +531   * Last row in a table.
    +532   */
    +533  public static final byte [] 
    EMPTY_END_ROW = EMPTY_START_ROW;
     534
     535  /**
    -536   * Max length a row can have because of 
    the limitation in TFile.
    -537   */
    -538  public static final int MAX_ROW_LENGTH 
    = Short.MAX_VALUE;
    -539
    -540  /**
    -541   * Timestamp to use when we want to 
    refer to the latest cell.
    -542   *
    -543   * On client side, this is the 
    timestamp set by default when no timestamp is specified,
    -544   * to refer to the latest.
    -545   * On server side, this acts as a 
    notation.
    -546   * (1) For a cell of Put, which has 
    this notation,
    -547   * its timestamp will be replaced 
    with server's current time.
    -548   * (2) For a cell of Delete, which has 
    this notation,
    -549   * A. If the cell is of {@link 
    KeyValue.Type#Delete}, HBase issues a Get operation firstly.
    -550   *a. When the count of cell it 
    gets is 
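
The HConstants above are easiest to read in use. A minimal sketch of the empty-row markers, assuming only hbase-client on the classpath; EMPTY_START_ROW and EMPTY_END_ROW are the same zero-length array, which scanners interpret as "start of table" and "end of table":

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Scan;

    public class EmptyRowMarkers {
      public static void main(String[] args) {
        // A zero-length start/stop row scans the whole table.
        Scan fullTable = new Scan()
            .withStartRow(HConstants.EMPTY_START_ROW)
            .withStopRow(HConstants.EMPTY_END_ROW);
        System.out.println(fullTable);
        System.out.println("start-row length: " + HConstants.EMPTY_START_ROW.length);
        System.out.println("meta table version: " + HConstants.META_VERSION);
      }
    }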

    [21/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html 
    b/apidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html
    index 918db59..a11a224 100644
    --- a/apidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html
    +++ b/apidocs/org/apache/hadoop/hbase/client/AsyncAdminBuilder.html
    @@ -149,21 +149,21 @@ public interface 
     AsyncAdminBuilder
     setOperationTimeout(longtimeout,
    -   http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
    +   https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
     Set timeout for a whole admin operation.
     
     
     
     AsyncAdminBuilder
     setRetryPause(longtimeout,
    - http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
    + https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
     Set the base pause time for retrying.
     
     
     
     AsyncAdminBuilder
     setRpcTimeout(longtimeout,
    - http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
    + https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
     Set timeout for each rpc request.
     
     
    @@ -195,7 +195,7 @@ public interface 
     setOperationTimeout
     AsyncAdminBuildersetOperationTimeout(longtimeout,
    -  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
    +  https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
 Set timeout for a whole admin operation. Operation timeout and max attempt times (or max
  retry times) are both limits on retrying; we stop retrying when we reach either limit.
    @@ -215,7 +215,7 @@ public interface 
     setRpcTimeout
     AsyncAdminBuildersetRpcTimeout(longtimeout,
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
    +https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
     Set timeout for each rpc request.
     
     Parameters:
    @@ -233,7 +233,7 @@ public interface 
     setRetryPause
     AsyncAdminBuildersetRetryPause(longtimeout,
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
    +https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
     Set the base pause time for retrying. We use an exponential 
    policy to generate sleep time when
      retrying.
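
Taken together, the three setters documented above configure the retry behavior of one builder. A minimal sketch, assuming a reachable cluster; the table name and timeout values are placeholders:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncAdminBuilderExample {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection().get()) {
          AsyncAdmin admin = conn.getAdminBuilder()
              .setOperationTimeout(60, TimeUnit.SECONDS)  // cap on the whole operation
              .setRpcTimeout(10, TimeUnit.SECONDS)        // cap on each rpc attempt
              .setRetryPause(100, TimeUnit.MILLISECONDS)  // base of the exponential backoff
              .build();
          admin.tableExists(TableName.valueOf("t1"))
              .thenAccept(exists -> System.out.println("t1 exists: " + exists))
              .join();
        }
      }
    }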
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutator.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutator.html 
    b/apidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutator.html
    index 9b06d41..bbaaa3c 100644
    --- a/apidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutator.html
    +++ b/apidocs/org/apache/hadoop/hbase/client/AsyncBufferedMutator.html
    @@ -101,13 +101,13 @@ var activeTableTab = "activeTableTab";
     
     
     All Superinterfaces:
    -http://docs.oracle.com/javase/8/docs/api/java/lang/AutoCloseable.html?is-external=true;
     title="class or interface in java.lang">AutoCloseable, http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
     title="class or interface in java.io">Closeable
    +https://docs.oracle.com/javase/8/docs/api/java/lang/AutoCloseable.html?is-external=true;
     title="class or interface in java.lang">AutoCloseable, https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
     title="class or interface in java.io">Closeable
     
     
     
     @InterfaceAudience.Public
     public interface AsyncBufferedMutator
    -extends 
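
Since AsyncBufferedMutator extends Closeable, it fits try-with-resources, and closing it flushes any buffered mutations. A minimal sketch under those assumptions; the table name and cell coordinates are placeholders:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncBufferedMutator;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AsyncBufferedMutatorExample {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection().get();
             AsyncBufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t1"))) {
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          // The returned future completes once the buffered write has been flushed.
          mutator.mutate(put).join();
        }
      }
    }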

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/procedure2/RootProcedureState.State.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/RootProcedureState.State.html 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/RootProcedureState.State.html
    index a2faf45..762be87 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/RootProcedureState.State.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/RootProcedureState.State.html
    @@ -248,7 +248,7 @@ the order they are declared.
     
     
     values
    -public staticRootProcedureState.State[]values()
    +public staticRootProcedureState.State[]values()
     Returns an array containing the constants of this enum 
    type, in
     the order they are declared.  This method may be used to iterate
     over the constants as follows:
    @@ -268,7 +268,7 @@ for (RootProcedureState.State c : 
    RootProcedureState.State.values())
     
     
     valueOf
    -public staticRootProcedureState.StatevalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname)
    +public staticRootProcedureState.StatevalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname)
     Returns the enum constant of this type with the specified 
    name.
     The string must match exactly an identifier used to declare an
     enum constant in this type.  (Extraneous whitespace characters are 
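
The values()/valueOf contract documented above is the standard one generated for every Java enum. RootProcedureState.State itself is internal to the procedure2 package, so this sketch uses a stand-in enum with illustrative constants:

    public class EnumContractExample {
      // Stand-in for an internal enum such as RootProcedureState.State.
      enum State { INITIAL, RUNNING, ROLLINGBACK, FAILED }

      public static void main(String[] args) {
        // values() returns the constants in declaration order.
        for (State c : State.values()) {
          System.out.println(c);
        }
        // valueOf requires an exact match of the constant's identifier;
        // extraneous whitespace is not tolerated.
        System.out.println("parsed: " + State.valueOf("RUNNING"));
      }
    }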
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/procedure2/StoppableThread.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/StoppableThread.html 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/StoppableThread.html
    new file mode 100644
    index 000..e3b4978
    --- /dev/null
    +++ b/devapidocs/org/apache/hadoop/hbase/procedure2/StoppableThread.html
    @@ -0,0 +1,366 @@
+StoppableThread (Apache HBase 3.0.0-SNAPSHOT API)
    +org.apache.hadoop.hbase.procedure2
    +Class StoppableThread
    +
    +
    +
    +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">java.lang.Object
    +
    +
    +http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
     title="class or interface in java.lang">java.lang.Thread
    +
    +
    +org.apache.hadoop.hbase.procedure2.StoppableThread
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +All Implemented Interfaces:
    +http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
     title="class or interface in java.lang">Runnable
    +
    +
    +Direct Known Subclasses:
    +ProcedureExecutor.WorkerThread, TimeoutExecutorThread
    +
    +
    +
    +@InterfaceAudience.Private
    +abstract class StoppableThread
    +extends http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
     title="class or interface in java.lang">Thread
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Nested Class Summary
    +
    +
    +
    +
    +Nested classes/interfaces inherited from 
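
The page above only establishes the shape of the class: a package-private abstract Thread with worker subclasses. A hedged sketch of the cooperative-stop pattern such a class typically captures; the member names here are illustrative, not the actual HBase signatures:

    abstract class StoppableWorker extends Thread {
      private volatile boolean running = true;

      StoppableWorker(String name) {
        super(name);
      }

      /** Ask the thread to stop; run() implementations should poll isRunning(). */
      void requestStop() {
        running = false;
        interrupt(); // wake the thread if it is blocked on a queue or a sleep
      }

      boolean isRunning() {
        return running;
      }
    }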

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
    index 4f5b33a..4361237 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
    @@ -278,567 +278,568 @@
     270  } else {
     271LOG.error(msg, e);
     272setFailure(e);
    -273  }
    -274}
    -275// if split fails,  need to call 
    ((HRegion)parent).clearSplit() when it is a force split
    -276return Flow.HAS_MORE_STATE;
    -277  }
    -278
    -279  /**
    -280   * To rollback {@link 
    SplitTableRegionProcedure}, an AssignProcedure is asynchronously
    -281   * submitted for parent region to be 
    split (rollback doesn't wait on the completion of the
    -282   * AssignProcedure) . This can be 
    improved by changing rollback() to support sub-procedures.
    -283   * See HBASE-19851 for details.
    -284   */
    -285  @Override
    -286  protected void rollbackState(final 
    MasterProcedureEnv env, final SplitTableRegionState state)
    -287  throws IOException, 
    InterruptedException {
    -288if (isTraceEnabled()) {
    -289  LOG.trace(this + " rollback state=" 
    + state);
    -290}
    -291
    -292try {
    -293  switch (state) {
    -294  case 
    SPLIT_TABLE_REGION_POST_OPERATION:
    -295  case 
    SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
    -296  case 
    SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
    -297  case 
    SPLIT_TABLE_REGION_UPDATE_META:
    -298// PONR
    -299throw new 
    UnsupportedOperationException(this + " unhandled state=" + state);
    -300  case 
    SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
    -301break;
    -302  case 
    SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
    -303// Doing nothing, as re-open 
    parent region would clean up daughter region directories.
    -304break;
    -305  case 
    SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
    -306openParentRegion(env);
    -307break;
    -308  case 
    SPLIT_TABLE_REGION_PRE_OPERATION:
    -309postRollBackSplitRegion(env);
    -310break;
    -311  case SPLIT_TABLE_REGION_PREPARE:
    -312break; // nothing to do
    -313  default:
    -314throw new 
    UnsupportedOperationException(this + " unhandled state=" + state);
    -315  }
    -316} catch (IOException e) {
    -317  // This will be retried. Unless 
    there is a bug in the code,
    -318  // this should be just a "temporary 
    error" (e.g. network down)
    -319  LOG.warn("pid=" + getProcId() + " 
    failed rollback attempt step " + state +
    -320  " for splitting the region "
    -321+ 
    getParentRegion().getEncodedName() + " in table " + getTableName(), e);
    -322  throw e;
    -323}
    -324  }
    -325
    -326  /*
    -327   * Check whether we are in the state 
    that can be rollback
    -328   */
    -329  @Override
    -330  protected boolean 
    isRollbackSupported(final SplitTableRegionState state) {
    -331switch (state) {
    -332  case 
    SPLIT_TABLE_REGION_POST_OPERATION:
    -333  case 
    SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
    -334  case 
    SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
    -335  case 
    SPLIT_TABLE_REGION_UPDATE_META:
    -336// It is not safe to rollback if 
    we reach to these states.
    -337return false;
    -338  default:
    -339break;
    -340}
    -341return true;
    -342  }
    -343
    -344  @Override
    -345  protected SplitTableRegionState 
    getState(final int stateId) {
    -346return 
    SplitTableRegionState.forNumber(stateId);
    -347  }
    -348
    -349  @Override
    -350  protected int getStateId(final 
    SplitTableRegionState state) {
    -351return state.getNumber();
    -352  }
    -353
    -354  @Override
    -355  protected SplitTableRegionState 
    getInitialState() {
    -356return 
    SplitTableRegionState.SPLIT_TABLE_REGION_PREPARE;
    -357  }
    -358
    -359  @Override
    -360  protected void 
    serializeStateData(ProcedureStateSerializer serializer)
    -361  throws IOException {
    -362
    super.serializeStateData(serializer);
    -363
    -364final 
    MasterProcedureProtos.SplitTableRegionStateData.Builder splitTableRegionMsg =
    -365
    MasterProcedureProtos.SplitTableRegionStateData.newBuilder()
    -366
    .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
    -367
    .setParentRegionInfo(ProtobufUtil.toRegionInfo(getRegion()))
    -368
    .addChildRegionInfo(ProtobufUtil.toRegionInfo(daughter_1_RI))
    -369
    .addChildRegionInfo(ProtobufUtil.toRegionInfo(daughter_2_RI));
    -370
    serializer.serialize(splitTableRegionMsg.build());
    -371  }
    -372
    -373  @Override
    -374  protected void 
    deserializeStateData(ProcedureStateSerializer serializer)
    -375  throws IOException {
    -376
    super.deserializeStateData(serializer);
    -377
    -378final 
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
    index bd13b53..802b925 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
    @@ -900,7600 +900,7598 @@
     892if 
    (this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
     893  status.setStatus("Writing region 
    info on filesystem");
     894  fs.checkRegionInfoOnFilesystem();
    -895} else {
    -896  if (LOG.isDebugEnabled()) {
    -897LOG.debug("Skipping creation of 
    .regioninfo file for " + this.getRegionInfo());
    -898  }
    -899}
    -900
    -901// Initialize all the HStores
    -902status.setStatus("Initializing all 
    the Stores");
    -903long maxSeqId = 
    initializeStores(reporter, status);
    -904this.mvcc.advanceTo(maxSeqId);
    -905if 
    (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-906  Collection<HStore> stores = this.stores.values();
    -907  try {
    -908// update the stores that we are 
    replaying
    -909
    stores.forEach(HStore::startReplayingFromWAL);
    -910// Recover any edits if 
    available.
    -911maxSeqId = Math.max(maxSeqId,
    -912  
    replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
    status));
    -913// Make sure mvcc is up to max.
    -914this.mvcc.advanceTo(maxSeqId);
    -915  } finally {
    -916// update the stores that we are 
    done replaying
    -917
    stores.forEach(HStore::stopReplayingFromWAL);
    -918  }
    -919}
    -920this.lastReplayedOpenRegionSeqId = 
    maxSeqId;
    +895}
    +896
    +897// Initialize all the HStores
    +898status.setStatus("Initializing all 
    the Stores");
    +899long maxSeqId = 
    initializeStores(reporter, status);
    +900this.mvcc.advanceTo(maxSeqId);
    +901if 
    (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+902  Collection<HStore> stores = this.stores.values();
    +903  try {
    +904// update the stores that we are 
    replaying
    +905
    stores.forEach(HStore::startReplayingFromWAL);
    +906// Recover any edits if 
    available.
    +907maxSeqId = Math.max(maxSeqId,
    +908  
    replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
    status));
    +909// Make sure mvcc is up to max.
    +910this.mvcc.advanceTo(maxSeqId);
    +911  } finally {
    +912// update the stores that we are 
    done replaying
    +913
    stores.forEach(HStore::stopReplayingFromWAL);
    +914  }
    +915}
    +916this.lastReplayedOpenRegionSeqId = 
    maxSeqId;
    +917
    +918
    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
    +919this.writestate.flushRequested = 
    false;
    +920this.writestate.compacting.set(0);
     921
    -922
    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
    -923this.writestate.flushRequested = 
    false;
    -924this.writestate.compacting.set(0);
    -925
    -926if (this.writestate.writesEnabled) 
    {
    -927  // Remove temporary data left over 
    from old regions
    -928  status.setStatus("Cleaning up 
    temporary data from old regions");
    -929  fs.cleanupTempDir();
    -930}
    -931
    -932if (this.writestate.writesEnabled) 
    {
    -933  status.setStatus("Cleaning up 
    detritus from prior splits");
    -934  // Get rid of any splits or merges 
    that were lost in-progress.  Clean out
    -935  // these directories here on open.  
    We may be opening a region that was
    -936  // being split but we crashed in 
    the middle of it all.
    -937  fs.cleanupAnySplitDetritus();
    -938  fs.cleanupMergesDir();
    -939}
    -940
    -941// Initialize split policy
    -942this.splitPolicy = 
    RegionSplitPolicy.create(this, conf);
    -943
    -944// Initialize flush policy
    -945this.flushPolicy = 
    FlushPolicyFactory.create(this, conf);
    -946
    -947long lastFlushTime = 
    EnvironmentEdgeManager.currentTime();
    -948for (HStore store: stores.values()) 
    {
    -949  
    this.lastStoreFlushTimeMap.put(store, lastFlushTime);
    -950}
    -951
    -952// Use maximum of log sequenceid or 
    that which was found in stores
    -953// (particularly if no recovered 
    edits, seqid will be -1).
    -954long nextSeqid = maxSeqId;
    -955if (this.writestate.writesEnabled) 
    {
    -956  nextSeqid = 
    WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
    -957  this.fs.getRegionDir(), 
    nextSeqid, 1);
    -958} else {
    -959  nextSeqid++;
    -960}
    -961
    -962LOG.info("Onlined " + 
    this.getRegionInfo().getShortNameToLog() +
    -963  "; next sequenceid=" + 
    nextSeqid);
    +922if 

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
    index 61695fd..bf8d672 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
    @@ -113,17 +113,17 @@
     
     
     
+private Batch.Callback<CResult>
+AsyncRequestFutureImpl.callback
+
+
 private Batch.Callback<T>
 AsyncProcessTask.callback
 
-
+
 private Batch.Callback<T>
 AsyncProcessTask.Builder.callback
 
-
-private Batch.Callback<CResult>
-AsyncRequestFutureImpl.callback
-
     
     
     
    @@ -148,50 +148,42 @@
     
     
     Rvoid
    -HTable.batchCallback(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">List? extends Rowactions,
    - http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object[]results,
    - Batch.CallbackRcallback)
    -
    -
    -Rvoid
     Table.batchCallback(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">List? extends Rowactions,
      http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object[]results,
      Batch.CallbackRcallback)
     Same as Table.batch(List,
     Object[]), but with a callback.
     
     
    +
    +Rvoid
    +HTable.batchCallback(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">List? extends Rowactions,
    + http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object[]results,
    + Batch.CallbackRcallback)
    +
     
     R extends 
    com.google.protobuf.Messagevoid
    -HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
    +Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
    com.google.protobuf.Messagerequest,
    byte[]startKey,
    byte[]endKey,
    RresponsePrototype,
    -   Batch.CallbackRcallback)
    +   Batch.CallbackRcallback)
+Creates an instance of the given Service subclass for each table
+ region spanning the range from the startKey row to endKey row (inclusive);
+ all invocations to the same region server are batched into one call.
    +
     
     
     R extends 
    com.google.protobuf.Messagevoid
    -Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
    +HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
    com.google.protobuf.Messagerequest,
    byte[]startKey,
    byte[]endKey,
    RresponsePrototype,
    -   Batch.CallbackRcallback)
    -Creates an instance of the given Service 
    subclass for each table
    - region spanning the range from the startKey row to 
    endKey row (inclusive), all
    - the invocations to the same region server will be batched into one call.
    -
    +   Batch.CallbackRcallback)
     
     
     T extends 
    com.google.protobuf.Service,Rvoid
    -HTable.coprocessorService(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
     title="class or interface in java.lang">ClassTservice,
    -  byte[]startKey,
    -  byte[]endKey,
    -  Batch.CallT,Rcallable,
    -  Batch.CallbackRcallback)
    -
    -
    -T extends 
    com.google.protobuf.Service,Rvoid
     Table.coprocessorService(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
     title="class or interface in java.lang">ClassTservice,
       byte[]startKey,
       byte[]endKey,
    @@ -203,6 +195,14 @@
      with each Service instance.
     
     
    +
    +T extends 
    com.google.protobuf.Service,Rvoid
    +HTable.coprocessorService(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
     title="class or interface in java.lang">ClassTservice,
    +  byte[]startKey,
    +  byte[]endKey,
    +  Batch.CallT,Rcallable,
    +  Batch.CallbackRcallback)
    +
     
     static Rvoid
     HTable.doBatchWithCallback(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">List? extends Rowactions,
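
The batchCallback rows above pair a list of actions with a per-result callback. A minimal sketch, assuming a table "t1" with two rows; Batch.Callback has a single update(region, row, result) method, so a lambda works:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.client.coprocessor.Batch;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchCallbackExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("t1"))) {
          List<Row> actions = new ArrayList<>();
          actions.add(new Get(Bytes.toBytes("row1")));
          actions.add(new Get(Bytes.toBytes("row2")));
          Object[] results = new Object[actions.size()];
          // Like batch(List, Object[]), but the callback fires for each result.
          Batch.Callback<Result> callback = (region, row, result) ->
              System.out.println(Bytes.toString(row) + " -> " + result);
          table.batchCallback(actions, results, callback);
        }
      }
    }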
    
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
    index 81b1f23..78d979d 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
    @@ -292,7 +292,7 @@ service.
     
     
     private static HRegionLocation
    -AsyncMetaTableAccessor.getRegionLocation(Resultr,
    +MetaTableAccessor.getRegionLocation(Resultr,
      RegionInforegionInfo,
      intreplicaId)
     Returns the HRegionLocation parsed from the given meta row 
    Result
    @@ -301,7 +301,7 @@ service.
     
     
     private static HRegionLocation
    -MetaTableAccessor.getRegionLocation(Resultr,
    +AsyncMetaTableAccessor.getRegionLocation(Resultr,
      RegionInforegionInfo,
      intreplicaId)
     Returns the HRegionLocation parsed from the given meta row 
    Result
    @@ -309,14 +309,14 @@ service.
     
     
     
    -private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
     title="class or interface in java.util">OptionalRegionLocations
    -AsyncMetaTableAccessor.getRegionLocations(Resultr)
    +static RegionLocations
    +MetaTableAccessor.getRegionLocations(Resultr)
     Returns an HRegionLocationList extracted from the 
    result.
     
     
     
    -static RegionLocations
    -MetaTableAccessor.getRegionLocations(Resultr)
    +private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
     title="class or interface in java.util">OptionalRegionLocations
    +AsyncMetaTableAccessor.getRegionLocations(Resultr)
     Returns an HRegionLocationList extracted from the 
    result.
     
     
    @@ -326,42 +326,42 @@ service.
     
     
     private static long
    -AsyncMetaTableAccessor.getSeqNumDuringOpen(Resultr,
    +MetaTableAccessor.getSeqNumDuringOpen(Resultr,
    intreplicaId)
     The latest seqnum that the server writing to meta observed 
    when opening the region.
     
     
     
     private static long
    -MetaTableAccessor.getSeqNumDuringOpen(Resultr,
    +AsyncMetaTableAccessor.getSeqNumDuringOpen(Resultr,
    intreplicaId)
     The latest seqnum that the server writing to meta observed 
    when opening the region.
     
     
     
    -private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
     title="class or interface in java.util">OptionalServerName
    -AsyncMetaTableAccessor.getServerName(Resultr,
    +static ServerName
    +MetaTableAccessor.getServerName(Resultr,
      intreplicaId)
     Returns a ServerName from catalog table Result.
     
     
     
    -static ServerName
    -MetaTableAccessor.getServerName(Resultr,
    +private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
     title="class or interface in java.util">OptionalServerName
    +AsyncMetaTableAccessor.getServerName(Resultr,
      intreplicaId)
     Returns a ServerName from catalog table Result.
     
     
     
    -private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
     title="class or interface in java.util">OptionalTableState
    -AsyncMetaTableAccessor.getTableState(Resultr)
    -
    -
     static TableState
     MetaTableAccessor.getTableState(Resultr)
     Decode table state from META Result.
     
     
    +
    +private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
     title="class or interface in java.util">OptionalTableState
    +AsyncMetaTableAccessor.getTableState(Resultr)
    +
     
     void
     AsyncMetaTableAccessor.MetaTableScanResultConsumer.onNext(Result[]results,
    @@ -457,13 +457,13 @@ service.
     ClientScanner.cache
     
     
    -private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListResult
    -CompleteScanResultCache.partialResults
    -
    -
     private http://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true;
     title="class or interface in java.util">DequeResult
     BatchScanResultCache.partialResults
     
    +
    +private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListResult
    +CompleteScanResultCache.partialResults
    +
     
     private http://docs.oracle.com/javase/8/docs/api/java/util/Queue.html?is-external=true;
     title="class or interface in java.util">QueueResult
     AsyncTableResultScanner.queue
    @@ -486,7 +486,7 @@ service.
     
     
     Result[]
    -AllowPartialScanResultCache.addAndGet(Result[]results,
    +BatchScanResultCache.addAndGet(Result[]results,
      booleanisHeartbeatMessage)
     
     
    @@ -496,24 +496,20 @@ service.
     
     
     Result[]
    -BatchScanResultCache.addAndGet(Result[]results,
    +AllowPartialScanResultCache.addAndGet(Result[]results,
      booleanisHeartbeatMessage)
     
     
     Result
    -Table.append(Appendappend)
    -Appends values to one or more columns within a single 
    row.
    -
    +HTable.append(Appendappend)
     
     
     Result
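
Table.append, listed above, concatenates to a cell's current value in one atomic round trip. A minimal sketch; table, family, and qualifier names are placeholders:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Append;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AppendExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("t1"))) {
          Append append = new Append(Bytes.toBytes("row1"));
          append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes("|entry"));
          // Atomically appends to the existing value and returns the new cell.
          Result result = table.append(append);
          System.out.println(result);
        }
      }
    }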
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
    index 4584cda..fb9bdb3 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
    @@ -137,9 +137,7 @@
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListSnapshotDescription
    -AsyncAdmin.listSnapshots()
    -List completed snapshots.
    -
    +AsyncHBaseAdmin.listSnapshots()
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListSnapshotDescription
    @@ -148,22 +146,22 @@
     
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListSnapshotDescription
    -RawAsyncHBaseAdmin.listSnapshots()
    -
    -
     http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListSnapshotDescription
     HBaseAdmin.listSnapshots()
     
    +
    +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListSnapshotDescription
    +AsyncAdmin.listSnapshots()
    +List completed snapshots.
    +
    +
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListSnapshotDescription
    -AsyncHBaseAdmin.listSnapshots()
    +RawAsyncHBaseAdmin.listSnapshots()
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListSnapshotDescription
    -AsyncAdmin.listSnapshots(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
     title="class or interface in java.util.regex">Patternpattern)
    -List all the completed snapshots matching the given 
    pattern.
    -
    +AsyncHBaseAdmin.listSnapshots(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
     title="class or interface in 
    java.util.regex">Patternpattern)
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListSnapshotDescription
    @@ -172,16 +170,18 @@
     
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListSnapshotDescription
    -RawAsyncHBaseAdmin.listSnapshots(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
     title="class or interface in 
    java.util.regex">Patternpattern)
    -
    -
     http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListSnapshotDescription
     HBaseAdmin.listSnapshots(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
     title="class or interface in 
    java.util.regex">Patternpattern)
     
    +
    +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListSnapshotDescription
    +AsyncAdmin.listSnapshots(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true;
     title="class or interface in java.util.regex">Patternpattern)
    +List all the completed snapshots matching the given 
    pattern.
    +
    +
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in 
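
The synchronous counterpart of the async methods above is Admin.listSnapshots. A minimal sketch that lists completed snapshots matching a pattern; the pattern itself is a placeholder:

    import java.util.List;
    import java.util.regex.Pattern;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshotsExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Only completed snapshots are returned; in-flight ones are skipped.
          List<SnapshotDescription> snapshots =
              admin.listSnapshots(Pattern.compile("backup.*"));
          for (SnapshotDescription snapshot : snapshots) {
            System.out.println(snapshot.getName() + " of " + snapshot.getTableName());
          }
        }
      }
    }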

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
    index 232a8b4..aeaf9fe 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
    @@ -495,7 +495,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     private static HRegionLocation
    -AsyncMetaTableAccessor.getRegionLocation(Resultr,
    +MetaTableAccessor.getRegionLocation(Resultr,
      RegionInforegionInfo,
      intreplicaId)
     Returns the HRegionLocation parsed from the given meta row 
    Result
    @@ -504,7 +504,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     private static HRegionLocation
    -MetaTableAccessor.getRegionLocation(Resultr,
    +AsyncMetaTableAccessor.getRegionLocation(Resultr,
      RegionInforegionInfo,
      intreplicaId)
     Returns the HRegionLocation parsed from the given meta row 
    Result
    @@ -944,7 +944,9 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfo
    -AsyncHBaseAdmin.getRegions(ServerNameserverName)
    +AsyncAdmin.getRegions(ServerNameserverName)
    +Get all the online regions on a region server.
    +
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfo
    @@ -953,22 +955,22 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfo
    -HBaseAdmin.getRegions(ServerNamesn)
    +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfo
    +RawAsyncHBaseAdmin.getRegions(ServerNameserverName)
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfo
    -AsyncAdmin.getRegions(ServerNameserverName)
    -Get all the online regions on a region server.
    -
    +http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfo
    +HBaseAdmin.getRegions(ServerNamesn)
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfo
    -RawAsyncHBaseAdmin.getRegions(ServerNameserverName)
    +AsyncHBaseAdmin.getRegions(ServerNameserverName)
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfo
    -AsyncHBaseAdmin.getRegions(TableNametableName)
    +AsyncAdmin.getRegions(TableNametableName)
    +Get the regions of a given table.
    +
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfo
    @@ -977,18 +979,16 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfo
    -HBaseAdmin.getRegions(TableNametableName)
    +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfo
    +RawAsyncHBaseAdmin.getRegions(TableNametableName)
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in 
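
AsyncAdmin.getRegions, shown above in both its ServerName and TableName forms, completes asynchronously with the region list. A minimal sketch of the TableName form; the table name is a placeholder:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class GetRegionsExample {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection().get()) {
          AsyncAdmin admin = conn.getAdmin();
          // Completes with List<RegionInfo> for the table's regions.
          admin.getRegions(TableName.valueOf("t1"))
              .thenAccept(regions -> regions.forEach(
                  r -> System.out.println(r.getRegionNameAsString())))
              .join();
        }
      }
    }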

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
    index 98104cb..56a2ea1 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
    @@ -449,13 +449,13 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     TableDescriptor
    -Table.getDescriptor()
    -Gets the table 
    descriptor for this table.
    -
    +HTable.getDescriptor()
     
     
     TableDescriptor
    -HTable.getDescriptor()
    +Table.getDescriptor()
    +Gets the table 
    descriptor for this table.
    +
     
     
     TableDescriptor
    @@ -509,52 +509,52 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor
    -AsyncHBaseAdmin.getDescriptor(TableNametableName)
    -
    -
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor
     AsyncAdmin.getDescriptor(TableNametableName)
     Method for getting the tableDescriptor
     
     
    -
    +
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor
     RawAsyncHBaseAdmin.getDescriptor(TableNametableName)
     
    +
    +http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFutureTableDescriptor
    +AsyncHBaseAdmin.getDescriptor(TableNametableName)
    +
     
     private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListTableDescriptor
     RawAsyncHBaseAdmin.getTableDescriptors(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequestrequest)
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListTableDescriptor
    -Admin.listTableDescriptors()
    +default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListTableDescriptor
    +AsyncAdmin.listTableDescriptors()
     List all the userspace tables.
     
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListTableDescriptor
    -HBaseAdmin.listTableDescriptors()
    -
    -
    -default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListTableDescriptor
    -AsyncAdmin.listTableDescriptors()
    +Admin.listTableDescriptors()
     List all the userspace tables.
     
     
    -
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListTableDescriptor
    -AsyncHBaseAdmin.listTableDescriptors(booleanincludeSysTables)
    -
     
    +http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListTableDescriptor
    +HBaseAdmin.listTableDescriptors()
    +
    +
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListTableDescriptor
     AsyncAdmin.listTableDescriptors(booleanincludeSysTables)
     List all the tables.
     
     
    -
    +
     http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
     title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListTableDescriptor
     RawAsyncHBaseAdmin.listTableDescriptors(booleanincludeSysTables)
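
listTableDescriptors, in both its Admin and AsyncAdmin forms above, returns userspace tables by default. A minimal sketch of the synchronous form; to include system tables, Admin offers a listTableDescriptors(Pattern, boolean includeSysTables) overload:

    import java.util.List;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class ListTablesExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Userspace tables only; system tables are excluded by default.
          List<TableDescriptor> tables = admin.listTableDescriptors();
          for (TableDescriptor td : tables) {
            System.out.println(td.getTableName());
          }
        }
      }
    }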
     
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
    index b590002..9b2a580 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.html
    @@ -560,806 +560,811 @@
 552    return this;
 553  }
 554
-555  public ColumnFamilyDescriptorBuilder setValue(final Bytes key, final Bytes value) {
-556    desc.setValue(key, value);
+555  public ColumnFamilyDescriptorBuilder setNewVersionBehavior(final boolean value) {
+556    desc.setNewVersionBehavior(value);
 557    return this;
 558  }
 559
-560  public ColumnFamilyDescriptorBuilder setValue(final byte[] key, final byte[] value) {
+560  public ColumnFamilyDescriptorBuilder setValue(final Bytes key, final Bytes value) {
 561    desc.setValue(key, value);
 562    return this;
 563  }
 564
-565  public ColumnFamilyDescriptorBuilder setValue(final String key, final String value) {
+565  public ColumnFamilyDescriptorBuilder setValue(final byte[] key, final byte[] value) {
 566    desc.setValue(key, value);
 567    return this;
 568  }
 569
-570  /**
-571   * A ModifyableFamilyDescriptor contains information about a column family such as the
-572   * number of versions, compression settings, etc.
-573   *
-574   * It is used as input when creating a table or adding a column.
-575   * TODO: make this package-private after removing the HColumnDescriptor
-576   */
-577  @InterfaceAudience.Private
-578  public static class ModifyableColumnFamilyDescriptor
-579      implements ColumnFamilyDescriptor, Comparable<ModifyableColumnFamilyDescriptor> {
-580
-581    // Column family name
-582    private final byte[] name;
-583
-584    // Column metadata
-585    private final Map<Bytes, Bytes> values = new HashMap<>();
-586
-587    /**
-588     * A map which holds the configuration specific to the column family. The
-589     * keys of the map have the same names as config keys and override the
-590     * defaults with cf-specific settings. Example usage may be for compactions,
-591     * etc.
-592     */
-593    private final Map<String, String> configuration = new HashMap<>();
-594
-595    /**
-596     * Construct a column descriptor specifying only the family name. The other
-597     * attributes are defaulted.
-598     *
-599     * @param name Column family name. Must be 'printable' -- digit or
-600     * letter -- and may not contain a <code>:</code>.
-601     * TODO: make this private after the HCD is removed.
-602     */
-603    @InterfaceAudience.Private
-604    public ModifyableColumnFamilyDescriptor(final byte[] name) {
-605      this(isLegalColumnFamilyName(name), getDefaultValuesBytes(), Collections.emptyMap());
-606    }
-607
-608    /**
-609     * Constructor. Makes a deep copy of the supplied descriptor.
-610     * TODO: make this private after the HCD is removed.
-611     * @param desc The descriptor.
-612     */
-613    @InterfaceAudience.Private
-614    public ModifyableColumnFamilyDescriptor(ColumnFamilyDescriptor desc) {
-615      this(desc.getName(), desc.getValues(), desc.getConfiguration());
-616    }
-617
-618    private ModifyableColumnFamilyDescriptor(byte[] name, Map<Bytes, Bytes> values,
-        Map<String, String> config) {
-619      this.name = name;
-620      this.values.putAll(values);
-621      this.configuration.putAll(config);
-622    }
-623
-624    @Override
-625    public byte[] getName() {
-626      return Bytes.copy(name);
+570  public ColumnFamilyDescriptorBuilder setValue(final String key, final String value) {
+571    desc.setValue(key, value);
+572    return this;
+573  }
+574
+575  /**
+576   * A ModifyableFamilyDescriptor contains information about a column family such as the
+577   * number of versions, compression settings, etc.
+578   *
+579   * It is used as input when creating a table or adding a column.
+580   * TODO: make this package-private after removing the HColumnDescriptor
+581   */
+582  @InterfaceAudience.Private
+583  public static class ModifyableColumnFamilyDescriptor
+584      implements ColumnFamilyDescriptor, Comparable<ModifyableColumnFamilyDescriptor> {
+585
+586    // Column family name
+587    private final byte[] name;
+588
+589    // Column metadata
+590    private final Map<Bytes, Bytes> values = new HashMap<>();
+591
+592    /**
+593     * A map which holds the configuration specific to the column family. The
+594     * keys of the map have the same names as
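(The hunk is truncated here in the archived message.) Since the change above shifts the builder's fluent setValue overloads down to make room for setNewVersionBehavior, a short usage sketch of the builder may help; the family name, key, value and version count are invented for illustration:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyBuilderExample {
      public static void main(String[] args) {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))    // family name is illustrative
            .setNewVersionBehavior(true)        // the setter added by this change
            .setValue("demo.key", "demo.value") // String/String overload shown above
            .setMaxVersions(3)
            .build();
        System.out.println(cf.getNameAsString());
      }
    }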

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html 
    b/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
    index ed218f3..0d5613d 100644
    --- a/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
    +++ b/devapidocs/org/apache/hadoop/hbase/wal/RegionGroupingProvider.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
    +var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10};
     var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -277,27 +277,33 @@ implements 
     
 void
+addWALActionsListener(WALActionsListener listener)
+Add a WALActionsListener.
+
+
+void
 close()
 shutdown outstanding WALs and clean up any persisted state.
 
 private WALProvider
 createProvider(String group)
 
 long
 getLogFileSize()
 Get size of the log files this provider is managing
 
 long
 getNumLogFiles()
 Get number of the log files this provider is managing
 
 (package private) RegionGroupingProvider.RegionGroupingStrategy
 getStrategy(org.apache.hadoop.conf.Configuration conf,
     String key,
@@ -305,28 +311,27 @@
 instantiate a strategy from a config property.
 
 WAL
 getWAL(RegionInfo region)
 
 private WAL
 getWAL(String group)
 
 List<WAL>
 getWALs()
 
 void
-init(WALFactory factory,
+init(WALFactory factory,
     org.apache.hadoop.conf.Configuration conf,
-    List<WALActionsListener> listeners,
     String providerId)
 Set up the provider to create wals.
 
 void
 shutdown()
 persist outstanding WALs to storage and stop accepting new appends.
@@ -340,6 +345,13 @@
 Methods inherited from class java.lang.Object:
 clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+
+Methods inherited from interface org.apache.hadoop.hbase.wal.WALProvider:
+getWALFileLengthProvider
 
@@ -528,7 +540,7 @@
 
@@ -536,19 +548,16 @@
 init
 public void init(WALFactory factory,
     org.apache.hadoop.conf.Configuration conf,
-    List<WALActionsListener> listeners,
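(The init hunk is cut off above.) For reference, a hedged sketch of how a deployment selects this region-grouping provider; the configuration keys are from the HBase 2.x era and should be treated as assumptions to verify against your release:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MultiWalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "multiwal" is the WALFactory alias that maps to RegionGroupingProvider.
        conf.set("hbase.wal.provider", "multiwal");
        // Strategy resolved by getStrategy(conf, key, default): "bounded" or "identity".
        conf.set("hbase.wal.regiongrouping.strategy", "bounded");
        // Number of delegate WALs used by the bounded strategy (assumed key name).
        conf.setInt("hbase.wal.regiongrouping.numgroups", 2);
      }
    }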

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRuleChecker.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRuleChecker.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRuleChecker.html
    new file mode 100644
    index 000..0b108c9
    --- /dev/null
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRuleChecker.html
    @@ -0,0 +1,125 @@
+Uses of Class org.apache.hadoop.hbase.HBaseClassTestRuleChecker (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+No usage of org.apache.hadoop.hbase.HBaseClassTestRuleChecker
+
+Copyright 2007-2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html 
    b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
    index 1d10852..a549cfa 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
    @@ -429,15 +429,15 @@
     
     
     private static HBaseTestingUtility
    -TestFSTableDescriptorForceCreation.UTIL
    +AcidGuaranteesTestBase.UTIL
     
     
    -private HBaseTestingUtility
    -TestRegionRebalancing.UTIL
    +private static HBaseTestingUtility
    +TestFSTableDescriptorForceCreation.UTIL
     
     
    -private static HBaseTestingUtility
    -TestAcidGuaranteesWithNoInMemCompaction.UTIL
    +private HBaseTestingUtility
    +TestRegionRebalancing.UTIL
     
     
     private static HBaseTestingUtility
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/class-use/TestAcidGuaranteesWithNoInMemCompaction.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/class-use/TestAcidGuaranteesWithNoInMemCompaction.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/class-use/TestAcidGuaranteesWithNoInMemCompaction.html
    index f67b5e7..f647ea9 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/class-use/TestAcidGuaranteesWithNoInMemCompaction.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/class-use/TestAcidGuaranteesWithNoInMemCompaction.html
    @@ -72,55 +72,7 @@
     
     Uses of 
    Classorg.apache.hadoop.hbase.TestAcidGuaranteesWithNoInMemCompaction
     
    -
    -
    -
    -
    -Packages that use TestAcidGuaranteesWithNoInMemCompaction
    -
    -Package
    -Description
    -
    -
    -
    -org.apache.hadoop.hbase
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -Uses of TestAcidGuaranteesWithNoInMemCompaction in org.apache.hadoop.hbase
    -
    -Subclasses of TestAcidGuaranteesWithNoInMemCompaction in org.apache.hadoop.hbase
    -
    -Modifier and Type
    -Class and Description
    -
    -
    -
    -class
    -TestAcidGuaranteesWithAdaptivePolicy
    -
    -
    -class
    -TestAcidGuaranteesWithBasicPolicy
    -
    -
    -class
    -TestAcidGuaranteesWithEagerPolicy
    -
    -
    -
    -
    -
    -
    -
    -
    +No usage of 
    org.apache.hadoop.hbase.TestAcidGuaranteesWithNoInMemCompaction
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
    --
    diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
    index 8c2422f..3885abd 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAdmin1.html
    @@ -109,7 +109,7 @@ var activeTableTab = 

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.RepairCommand.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.RepairCommand.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.RepairCommand.html
    index eb9e252..667152a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.RepairCommand.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.RepairCommand.html
    @@ -28,22 +28,22 @@
     020
     021import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
     022import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
    -023import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
    -024import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
    -025import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
    -026import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
    -027import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
    -028import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
    -029import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
    -030import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
    -031import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
    -032import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
    -033import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
    -034import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
    -035import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
    -036import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
    -037import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
    -038import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
    +023import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
    +024import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
    +025import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
    +026import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
    +027import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
    +028import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
    +029import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
    +030import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
    +031import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
    +032import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
    +033import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
    +034import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
    +035import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
    +036import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
    +037import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
    +038import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
     039
     040import java.io.IOException;
     041import java.net.URI;
    @@ -70,194 +70,194 @@
     062import 
    org.apache.hadoop.hbase.backup.util.BackupUtils;
     063import 
    org.apache.hadoop.hbase.client.Connection;
     064import 
    org.apache.hadoop.hbase.client.ConnectionFactory;
    -065import 
    org.apache.hbase.thirdparty.com.google.common.collect.Lists;
    -066import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -067import 
    org.apache.yetus.audience.InterfaceAudience;
    -068
    -069/**
    -070 * General backup commands, options and 
    usage messages
    -071 */
    -072
    +065import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    +066import 
    org.apache.yetus.audience.InterfaceAudience;
    +067
    +068import 
    org.apache.hbase.thirdparty.com.google.common.collect.Lists;
    +069
    +070/**
    +071 * General backup commands, options and 
    usage messages
    +072 */
     073@InterfaceAudience.Private
     074public final class BackupCommands {
    -075
    -076  public final static String 
    INCORRECT_USAGE = "Incorrect usage";
    -077
    -078  public final static String 
    TOP_LEVEL_NOT_ALLOWED =
    -079  "Top level (root) folder is not 
    allowed to be a backup destination";
    -080
    -081  public static final String USAGE = 
    "Usage: hbase 

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
    index 914b1c6..03a0b2a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
    @@ -59,646 +59,711 @@
     051@InterfaceStability.Evolving
     052public class ScannerContext {
     053
-054  /**
-055   * Two sets of the same fields. One for the limits, another for the progress towards those limits
-056   */
-057  LimitFields limits;
-058  LimitFields progress;
-059
-060  /**
-061   * The state of the scanner after the invocation of {@link InternalScanner#next(java.util.List)}
-062   * or {@link RegionScanner#next(java.util.List)}.
-063   */
-064  NextState scannerState;
-065  private static final NextState DEFAULT_STATE = NextState.MORE_VALUES;
-066
-067  /**
-068   * Used as an indication to invocations of {@link InternalScanner#next(java.util.List)} and
-069   * {@link RegionScanner#next(java.util.List)} that, if true, the progress tracked within this
-070   * {@link ScannerContext} instance should be considered while evaluating the limits. Useful for
-071   * enforcing a set of limits across multiple calls (i.e. the limit may not be reached in a single
-072   * invocation, but any progress made should be considered in future invocations)
-073   * <p>
-074   * Defaulting this value to false means that, by default, any tracked progress will be wiped clean
-075   * on invocations to {@link InternalScanner#next(java.util.List)} and
-076   * {@link RegionScanner#next(java.util.List)} and the call will be treated as though no progress
-077   * has been made towards the limits so far.
-078   * <p>
-079   * This is an important mechanism. Users of Internal/Region scanners expect that they can define
-080   * some limits and then repeatedly invoke {@link InternalScanner#next(List)} or
-081   * {@link RegionScanner#next(List)} where each invocation respects these limits separately.
-082   * <p>
-083   * For example: <pre> {@code
-084   * ScannerContext context = ScannerContext.newBuilder().setBatchLimit(5).build();
-085   * RegionScanner scanner = ...
-086   * List<Cell> results = new ArrayList<Cell>();
-087   * while(scanner.next(results, context)) {
-088   *   // Do something with a batch of 5 cells
-089   * }
-090   * }</pre> However, in the case of RPCs, the server wants to be able to define a set of
-091   * limits for a particular RPC request and have those limits respected across multiple
-092   * invocations. This means that the progress made towards the limits in earlier calls will be
-093   * saved and considered in future invocations
-094   */
-095  boolean keepProgress;
-096  private static boolean DEFAULT_KEEP_PROGRESS = false;
-097
-098  private Cell lastPeekedCell = null;
+054  LimitFields limits;
+055  /**
+056   * A different set of progress fields. Only includes batch, dataSize and heapSize. Compared to
+057   * LimitFields, ProgressFields doesn't contain a time field: since a deadline is saved in
+058   * LimitFields, {@link System#currentTimeMillis()} is used directly when checking the time limit.
+059   */
+060  ProgressFields progress;
+061
+062  /**
+063   * The state of the scanner after the invocation of {@link InternalScanner#next(java.util.List)}
+064   * or {@link RegionScanner#next(java.util.List)}.
+065   */
+066  NextState scannerState;
+067  private static final NextState DEFAULT_STATE = NextState.MORE_VALUES;
+068
+069  /**
+070   * Used as an indication to invocations of {@link InternalScanner#next(java.util.List)} and
+071   * {@link RegionScanner#next(java.util.List)} that, if true, the progress tracked within this
+072   * {@link ScannerContext} instance should be considered while evaluating the limits. Useful for
+073   * enforcing a set of limits across multiple calls (i.e. the limit may not be reached in a single
+074   * invocation, but any progress made should be considered in future invocations)
+075   * <p>
+076   * Defaulting this value to false means that, by default, any tracked progress will be wiped clean
+077   * on invocations to {@link InternalScanner#next(java.util.List)} and
+078   * {@link RegionScanner#next(java.util.List)} and the call will be treated as though no progress
+079   * has been made towards the limits so far.
+080   * <p>
+081   * This is an important mechanism. Users of Internal/Region scanners expect that they can define
+082   * some limits and then repeatedly invoke {@link InternalScanner#next(List)} or
+083   * {@link RegionScanner#next(List)} where each invocation respects these limits separately.
+084   * <p>
+085   * For example: <pre> {@code

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
    index 9b9941b..2fb2450 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
    @@ -132,7 +132,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-public static class HMasterCommandLine.LocalHMaster
+public static class HMasterCommandLine.LocalHMaster
 extends HMaster
 
@@ -318,7 +318,7 @@
 zkcluster
-private MiniZooKeeperCluster zkcluster
+private MiniZooKeeperCluster zkcluster
 
@@ -335,7 +335,7 @@
 LocalHMaster
-public LocalHMaster(org.apache.hadoop.conf.Configuration conf)
+public LocalHMaster(org.apache.hadoop.conf.Configuration conf)
     throws IOException,
            org.apache.zookeeper.KeeperException,
            InterruptedException
 
@@ -361,7 +361,7 @@
 run
-public void run()
+public void run()
 Description copied from class: HRegionServer
 The HRegionServer sticks in this loop until closed.
 
@@ -378,7 +378,7 @@
 setZKCluster
-void setZKCluster(MiniZooKeeperCluster zkcluster)
+void setZKCluster(MiniZooKeeperCluster zkcluster)
 
 
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.html
index 0b918c0..95fdd9a 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.html
@@ -338,7 +338,7 @@
 getUsage
-protected String getUsage()
+protected String getUsage()
 Description copied from class: ServerCommandLine
 Implementing subclasses should return a usage string to print out.
 
@@ -353,7 +353,7 @@
 run
-public int run(String[] args)
+public int run(String[] args)
     throws Exception
 
 Throws:
@@ -367,7 +367,7 @@
 startMaster
-private int startMaster()
+private int startMaster()
 
@@ -376,7 +376,7 @@
 stopMaster
-private int stopMaster()
+private int stopMaster()
 
@@ -385,7 +385,7 @@
 waitOnMasterThreads
-private void waitOnMasterThreads(LocalHBaseCluster cluster)
+private void waitOnMasterThreads(LocalHBaseCluster cluster)
     throws InterruptedException
 
 Throws:
@@ -399,7 +399,7 @@
 closeAllRegionServerThreads
-private static void closeAllRegionServerThreads(List<JVMClusterUtil.RegionServerThread> regionservers)
+private static void closeAllRegionServerThreads(List<JVMClusterUtil.RegionServerThread> regionservers)
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html 
    b/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
    index ab927d5..a145836 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/LoadBalancer.html
    @@ -536,7 +536,7 @@ extends org.apache.hadoop.conf.Configurable, 
     
     onConfigurationChange
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-archetypes/project-info.html
    --
    diff --git a/hbase-build-configuration/hbase-archetypes/project-info.html 
    b/hbase-build-configuration/hbase-archetypes/project-info.html
    index c7eff6e..661d9ca 100644
    --- a/hbase-build-configuration/hbase-archetypes/project-info.html
    +++ b/hbase-build-configuration/hbase-archetypes/project-info.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Archetypes  Project Information
     
    @@ -167,7 +167,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-18
    +  Last Published: 
    2018-01-19
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-archetypes/project-summary.html
    --
    diff --git a/hbase-build-configuration/hbase-archetypes/project-summary.html 
    b/hbase-build-configuration/hbase-archetypes/project-summary.html
    index d0bbbd1..72869aa 100644
    --- a/hbase-build-configuration/hbase-archetypes/project-summary.html
    +++ b/hbase-build-configuration/hbase-archetypes/project-summary.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Archetypes  Project Summary
     
    @@ -163,7 +163,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-18
    +  Last Published: 
    2018-01-19
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-archetypes/source-repository.html
    --
    diff --git a/hbase-build-configuration/hbase-archetypes/source-repository.html 
    b/hbase-build-configuration/hbase-archetypes/source-repository.html
    index 183f6ec..1671b4c 100644
    --- a/hbase-build-configuration/hbase-archetypes/source-repository.html
    +++ b/hbase-build-configuration/hbase-archetypes/source-repository.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Archetypes  Source Code Management
     
    @@ -134,7 +134,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-18
    +  Last Published: 
    2018-01-19
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-archetypes/team-list.html
    --
    diff --git a/hbase-build-configuration/hbase-archetypes/team-list.html 
    b/hbase-build-configuration/hbase-archetypes/team-list.html
    index 0777511..a6c61d7 100644
    --- a/hbase-build-configuration/hbase-archetypes/team-list.html
    +++ b/hbase-build-configuration/hbase-archetypes/team-list.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Archetypes  Project Team
     
    @@ -553,7 +553,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-18
    +  Last Published: 
    2018-01-19
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-spark/checkstyle.html
    --
    diff --git a/hbase-build-configuration/hbase-spark/checkstyle.html 
    b/hbase-build-configuration/hbase-spark/checkstyle.html
    index a9224e1..3bdef18 100644
    --- a/hbase-build-configuration/hbase-spark/checkstyle.html
    +++ b/hbase-build-configuration/hbase-spark/checkstyle.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Spark  Checkstyle Results
     
    @@ -150,7 +150,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-18
    +  Last Published: 
    2018-01-19
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/hbase-build-configuration/hbase-spark/dependencies.html
    --
    diff --git a/hbase-build-configuration/hbase-spark/dependencies.html 
    b/hbase-build-configuration/hbase-spark/dependencies.html
    index 3e6df55..4417317 100644
    --- a/hbase-build-configuration/hbase-spark/dependencies.html
    +++ b/hbase-build-configuration/hbase-spark/dependencies.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
    new file mode 100644
    index 000..866db4a
    --- /dev/null
    +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
    @@ -0,0 +1,382 @@
+TestCIBadHostname (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase.client
+Class TestCIBadHostname
+
+java.lang.Object
+  org.apache.hadoop.hbase.client.TestCIBadHostname
+
+public class TestCIBadHostname
+extends Object
+Tests that we fail fast when hostname resolution is not working and do not cache
+ unresolved InetSocketAddresses.
+
+Field Summary
+
+Modifier and Type / Field and Description
+private static org.apache.hadoop.hbase.client.ConnectionImplementation  conn
+private static HBaseTestingUtility  testUtil
+
+Constructor Summary
+
+TestCIBadHostname()
+
+Method Summary
+
+Modifier and Type / Method and Description
+static void  setupBeforeClass()
+static void  teardownAfterClass()
+void  testGetAdminBadHostname()
+void  testGetClientBadHostname()
+
+Field Detail
+
+testUtil
+private static HBaseTestingUtility testUtil
+
+conn
+private static org.apache.hadoop.hbase.client.ConnectionImplementation conn
+
+Constructor Detail
+
+TestCIBadHostname
+public TestCIBadHostname()
+
+Method Detail
+
+setupBeforeClass
+public 

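A hedged sketch of what the fail-fast assertion can look like; this is not the actual test body, and the hostname, port, and exception type are assumptions based on the class description and the getAdmin/getClient method names:

    import static org.junit.Assert.fail;

    import java.net.UnknownHostException;
    import org.apache.hadoop.hbase.ServerName;

    // Inside the test class; `conn` is the shared ConnectionImplementation fixture above.
    public void testGetAdminBadHostname() throws Exception {
      ServerName bad = ServerName.valueOf("unknownhost.invalid", 16020, System.currentTimeMillis());
      try {
        conn.getAdmin(bad);
        fail("Expected UnknownHostException for an unresolvable hostname");
      } catch (UnknownHostException expected) {
        // fail-fast path: the unresolved InetSocketAddress must not be cached
      }
    }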
    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALProvider.WriterBase.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALProvider.WriterBase.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALProvider.WriterBase.html
    index 59680d7..e118e2f 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALProvider.WriterBase.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALProvider.WriterBase.html
    @@ -30,87 +30,86 @@
 022import java.io.IOException;
 023import java.util.List;
 024import java.util.concurrent.CompletableFuture;
-025
-026import org.apache.hadoop.conf.Configuration;
-027import org.apache.yetus.audience.InterfaceAudience;
-028// imports for things that haven't moved from regionserver.wal yet.
-029import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-030
-031/**
-032 * The Write Ahead Log (WAL) stores all durable edits to the HRegion.
-033 * This interface provides the entry point for all WAL implementors.
-034 * <p>
-035 * See {@link FSHLogProvider} for an example implementation.
-036 *
-037 * A single WALProvider will be used for retrieving multiple WALs in a particular region server
-038 * and must be threadsafe.
-039 */
-040@InterfaceAudience.Private
-041public interface WALProvider {
-042
-043  /**
-044   * Set up the provider to create wals.
-045   * will only be called once per instance.
-046   * @param factory factory that made us, may not be null
-047   * @param conf may not be null
-048   * @param listeners may be null
-049   * @param providerId differentiate between providers from one factory. may be null
-050   */
-051  void init(final WALFactory factory, final Configuration conf,
-052      final List<WALActionsListener> listeners, final String providerId) throws IOException;
-053
-054  /**
-055   * @param identifier may not be null. contents will not be altered.
-056   * @param namespace could be null, and will use default namespace if null
-057   * @return a WAL for writing entries for the given region.
-058   */
-059  WAL getWAL(final byte[] identifier, byte[] namespace) throws IOException;
-060
-061  /** @return the List of WALs that are used by this server
-062   */
-063  List<WAL> getWALs();
-064
-065  /**
-066   * persist outstanding WALs to storage and stop accepting new appends.
-067   * This method serves as shorthand for sending a sync to every WAL provided by a given
-068   * implementation. Those WALs will also stop accepting new writes.
-069   */
-070  void shutdown() throws IOException;
-071
-072  /**
-073   * shutdown outstanding WALs and clean up any persisted state.
-074   * Call this method only when you will not need to replay any of the edits to the WALs from
-075   * this provider. After this call completes, the underlying resources should have been reclaimed.
-076   */
-077  void close() throws IOException;
-078
-079  interface WriterBase extends Closeable {
-080    long getLength();
-081  }
-082
-083  // Writers are used internally. Users outside of the WAL should be relying on the
-084  // interface provided by WAL.
-085  interface Writer extends WriterBase {
-086    void sync() throws IOException;
-087    void append(WAL.Entry entry) throws IOException;
-088  }
-089
-090  interface AsyncWriter extends WriterBase {
-091    CompletableFuture<Long> sync();
-092    void append(WAL.Entry entry);
-093  }
-094
-095  /**
-096   * Get number of the log files this provider is managing
-097   */
-098  long getNumLogFiles();
-099
-100  /**
-101   * Get size of the log files this provider is managing
-102   */
-103  long getLogFileSize();
-104
-105}
+025import org.apache.hadoop.conf.Configuration;
+026import org.apache.hadoop.hbase.client.RegionInfo;
+027import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+028import org.apache.yetus.audience.InterfaceAudience;
+029
+030/**
+031 * The Write Ahead Log (WAL) stores all durable edits to the HRegion.
+032 * This interface provides the entry point for all WAL implementors.
+033 * <p>
+034 * See {@link FSHLogProvider} for an example implementation.
+035 *
+036 * A single WALProvider will be used for retrieving multiple WALs in a particular region server
+037 * and must be threadsafe.
+038 */
+039@InterfaceAudience.Private
+040public interface WALProvider {
+041
+042  /**
+043   * Set up the provider to create wals.
+044   * will only be called once per instance.
+045   * @param factory factory that made us, may not be null
+046   * @param conf may not be null
+047   * @param listeners may be null
+048   * @param providerId differentiate between providers from one factory. may be null
+049   */
+050  void init(WALFactory factory, Configuration conf, List<WALActionsListener> listeners,
+051      String providerId) throws IOException;
+052
+053  /**
+054   * @param region the region which we
    
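(The new getWAL javadoc is cut off above.) Callers normally reach a provider through WALFactory rather than implementing WALProvider directly; a hedged sketch of the lookup path implied by the new getWAL(RegionInfo) signature, where the WALFactory constructor arguments and the factory id are assumptions for this era of the code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.wal.WAL;
    import org.apache.hadoop.hbase.wal.WALFactory;

    public class WalLookupSketch {
      static WAL walFor(RegionInfo region) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Factory id is invented; verify the constructor against your release.
        WALFactory factory = new WALFactory(conf, "example-factory");
        // Replaces the old getWAL(byte[] identifier, byte[] namespace) lookup.
        return factory.getWAL(region);
      }
    }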

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
    index 759466c..11c36a6 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Admin.html
    @@ -120,41 +120,35 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     
    -org.apache.hadoop.hbase.replication
    -
    -Multi Cluster Replication
    -
    -
    -
     org.apache.hadoop.hbase.rest
     
     HBase REST
     
     
    -
    +
     org.apache.hadoop.hbase.rest.model
     
     
    -
    +
     org.apache.hadoop.hbase.security.access
     
     
    -
    +
     org.apache.hadoop.hbase.thrift
     
 Provides an HBase Thrift (http://incubator.apache.org/thrift/) service.
     
     
    -
    +
     org.apache.hadoop.hbase.tool
     
     
    -
    +
     org.apache.hadoop.hbase.util
     
     
    -
    +
     org.apache.hbase.archetypes.exemplars.client
     
     This package provides fully-functional exemplar Java code 
    demonstrating
    @@ -162,7 +156,7 @@ service.
      archetype with hbase-client dependency.
     
     
    -
    +
     org.apache.hbase.archetypes.exemplars.shaded_client
     
     This package provides fully-functional exemplar Java code 
    demonstrating
    @@ -432,24 +426,6 @@ service.
     
     
     
    -
    -
    -
    -Uses of Admin in org.apache.hadoop.hbase.replication
    -
    -Fields in org.apache.hadoop.hbase.replication
     declared as Admin
    -
    -Modifier and Type
    -Field and Description
    -
    -
    -
    -private Admin
    -ReplicationTableBase.CreateReplicationTableWorker.admin
    -
    -
    -
    -
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
    index 7da4399..3059f8c 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ClusterConnection.html
    @@ -121,10 +121,6 @@
     
     
     
    -org.apache.hadoop.hbase.util.hbck
    -
    -
    -
     org.apache.hadoop.hbase.zookeeper
     
     
    @@ -735,22 +731,6 @@
     
     
     
    -
    -Methods in org.apache.hadoop.hbase.replication.regionserver
     with parameters of type ClusterConnection
    -
    -Modifier and Type
    -Method and Description
    -
    -
    -
-String
-DumpReplicationQueues.dumpQueues(ClusterConnection connection,
-  ZKWatcher zkw,
-  Set<String> peerIds,
-  boolean hdfs)
    -  booleanhdfs)
    -
    -
    -
     
     Constructors in org.apache.hadoop.hbase.replication.regionserver
     with parameters of type ClusterConnection
     
    @@ -847,25 +827,6 @@
     
     
     
    -
    -
    -
    -Uses of ClusterConnection in org.apache.hadoop.hbase.util.hbck
    -
    -Constructors in org.apache.hadoop.hbase.util.hbck
     with parameters of type ClusterConnection
    -
    -Constructor and Description
    -
    -
    -
-ReplicationChecker(org.apache.hadoop.conf.Configuration conf,
-  ZKWatcher zkw,
-  ClusterConnection connection,
-  HBaseFsck.ErrorReporter errorReporter)
    -
    -
    -
    -
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    index 9f8d1b7..d0175ad 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Connection.html
    @@ -184,40 +184,34 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     
    -org.apache.hadoop.hbase.replication
    -
    -Multi Cluster Replication
    -
    -
    -
     org.apache.hadoop.hbase.replication.regionserver
     
     
    -
    +
     org.apache.hadoop.hbase.rsgroup
     
     
    -
    +
     org.apache.hadoop.hbase.security.access
     
     
    -
    +
     org.apache.hadoop.hbase.security.token
     
     
    -
    +
     org.apache.hadoop.hbase.security.visibility
     
     
    -
    +
     org.apache.hadoop.hbase.snapshot
     
     
    -
    +
     org.apache.hadoop.hbase.tool
     
     
    -
    +
     org.apache.hadoop.hbase.util
     
     
    @@ -2223,24 +2217,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     
    -
    -
    -
    -Uses of Connection in org.apache.hadoop.hbase.replication
    -
    -Fields in org.apache.hadoop.hbase.replication
     declared as Connection
    -
    -Modifier and Type
    -Field and Description
    -
    -
    -
    -private Connection
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
    index 5565471..47eff2e 100644
    --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
    +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
    @@ -870,7 +870,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     initTableSnapshotMapperJob
    -public staticvoidinitTableSnapshotMapperJob(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringsnapshotName,
    +public staticvoidinitTableSnapshotMapperJob(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringsnapshotName,
       Scanscan,
       http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
     title="class or interface in java.lang">Class? extends TableMappermapper,
       http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
     title="class or interface in java.lang">Class?outputKeyClass,
    @@ -879,8 +879,8 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       booleanaddDependencyJars,
       
    org.apache.hadoop.fs.PathtmpRestoreDir)
    throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
    -Sets up the job for reading from a table snapshot. It 
    bypasses hbase servers
    - and read directly from snapshot files.
    +Sets up the job for reading from a table snapshot. It 
    bypasses hbase servers and read directly
    + from snapshot files.
     
     Parameters:
     snapshotName - The name of the snapshot (of a table) to read 
    from.
    @@ -888,13 +888,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     mapper - The mapper class to use.
     outputKeyClass - The class of the output key.
     outputValueClass - The class of the output value.
    -job - The current job to adjust.  Make sure the passed job is
    - carrying all necessary HBase configuration.
    -addDependencyJars - upload HBase jars and jars for any of the 
    configured
    -   job classes via the distributed cache (tmpjars).
    +job - The current job to adjust. Make sure the passed job is 
    carrying all necessary HBase
    +  configuration.
    +addDependencyJars - upload HBase jars and jars for any of the 
    configured job classes via
    +  the distributed cache (tmpjars).
     tmpRestoreDir - a temporary directory to copy the snapshot 
    files into. Current user should
    - have write permissions to this directory, and this should not be a 
    subdirectory of rootdir.
    - After the job is finished, restore directory can be deleted.
    +  have write permissions to this directory, and this should not be a 
    subdirectory of
    +  rootdir. After the job is finished, restore directory can be 
    deleted.
     Throws:
     http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException - When setting up 
    the details fails.
     See Also:
@@ -908,7 +908,7 @@ extends java.lang.Object
 
 
 initTableSnapshotMapperJob
-public static void initTableSnapshotMapperJob(String snapshotName,
+public static void initTableSnapshotMapperJob(String snapshotName,
       Scan scan,
       Class<? extends TableMapper> mapper,
       Class<?> outputKeyClass,
@@ -950,7 +950,7 @@ extends java.lang.Object
 
 
 initTableMapperJob
-public static void initTableMapperJob(List<Scan> scans,
+public static void initTableMapperJob(List<Scan> scans,
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
    index 5b3b750..a1f3f7e 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
    @@ -97,3307 +97,3304 @@
     089import 
    org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
     090import 
    org.apache.hbase.thirdparty.io.netty.util.Timeout;
     091import 
    org.apache.hbase.thirdparty.io.netty.util.TimerTask;
    -092import 
    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -093import 
    org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    -094import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
    -095import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
    -096import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
    -097import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
    -098import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
    -099import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
    -100import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
    -101import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
    -102import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
    -103import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
    -104import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
    -105import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
    -106import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
    -107import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
    -108import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
    -109import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
    -110import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
    -111import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
    -112import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
    -113import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
    -114import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
    -115import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
    -116import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
    -117import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
    -118import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
    -119import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
    -120import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
    -121import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
    -122import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
    -123import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
    -124import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
    -125import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
    -126import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
    -127import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
    -128import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
    -129import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
    -130import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
    -131import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
    -132import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
    -133import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
    -134import 
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogManagerCoordination.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogManagerCoordination.html
     
    b/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogManagerCoordination.html
    index d1be68c..cef0831 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogManagerCoordination.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogManagerCoordination.html
    @@ -235,6 +235,6 @@
     
     
     
-Copyright © 2007–2017 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogWorkerCoordination.SplitTaskDetails.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogWorkerCoordination.SplitTaskDetails.html
     
    b/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogWorkerCoordination.SplitTaskDetails.html
    index 42a5cdb..e8618b8 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogWorkerCoordination.SplitTaskDetails.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogWorkerCoordination.SplitTaskDetails.html
    @@ -227,6 +227,6 @@
     
     
     
-Copyright © 2007–2017 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogWorkerCoordination.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogWorkerCoordination.html
     
    b/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogWorkerCoordination.html
    index 70ca6eb..a43b6d6 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogWorkerCoordination.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coordination/class-use/SplitLogWorkerCoordination.html
    @@ -333,6 +333,6 @@
     
     
     
-Copyright © 2007–2017 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coordination/class-use/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coordination/class-use/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
     
    b/devapidocs/org/apache/hadoop/hbase/coordination/class-use/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
    index e96b596..2349850 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coordination/class-use/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coordination/class-use/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
    @@ -120,6 +120,6 @@
     
     
     
-Copyright © 2007–2017 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coordination/class-use/ZKSplitLogManagerCoordination.CreateRescanAsyncCallback.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coordination/class-use/ZKSplitLogManagerCoordination.CreateRescanAsyncCallback.html
     
    b/devapidocs/org/apache/hadoop/hbase/coordination/class-use/ZKSplitLogManagerCoordination.CreateRescanAsyncCallback.html
    index dd1ad08..91dd157 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coordination/class-use/ZKSplitLogManagerCoordination.CreateRescanAsyncCallback.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coordination/class-use/ZKSplitLogManagerCoordination.CreateRescanAsyncCallback.html
    @@ -120,6 +120,6 @@
     
     
     
-Copyright © 2007–2017 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coordination/class-use/ZKSplitLogManagerCoordination.DeleteAsyncCallback.html
    --
    diff --git 

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-reports.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-reports.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-reports.html
    index 2255ada..3308278 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-reports.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-reports.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
Apache HBase - Exemplar for hbase-client archetype – Generated Reports
     
    @@ -128,7 +128,7 @@
https://www.apache.org/ The Apache Software Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2017-12-29
    +  Last Published: 
    2017-12-30
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-summary.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-summary.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-summary.html
    index 86288df..2ecf6f3 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-summary.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-summary.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
Apache HBase - Exemplar for hbase-client archetype – Project Summary
     
    @@ -166,7 +166,7 @@
https://www.apache.org/ The Apache Software Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2017-12-29
    +  Last Published: 
    2017-12-30
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-build-configuration/hbase-archetypes/hbase-client-project/source-repository.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/source-repository.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/source-repository.html
    index 55544d4..c16261e 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/source-repository.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/source-repository.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
Apache HBase - Exemplar for hbase-client archetype – Source Code Management
     
    @@ -134,7 +134,7 @@
https://www.apache.org/ The Apache Software Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2017-12-29
    +  Last Published: 
    2017-12-30
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-build-configuration/hbase-archetypes/hbase-client-project/team-list.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/team-list.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/team-list.html
    index 04b6e34..9ad795c 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/team-list.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/team-list.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
Apache HBase - Exemplar for hbase-client archetype – Project Team
     
    @@ -553,7 +553,7 @@
https://www.apache.org/ The Apache Software Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2017-12-29
    +  Last Published: 
    2017-12-30
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
    index 0e13722..b9c2c9e 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
Apache HBase - Exemplar for hbase-shaded-client archetype – Checkstyle Results
     
    @@ -150,7 +150,7 @@
     

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutput.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutput.html
     
    b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutput.html
    index 7ad1b14..ef1ba06 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutput.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutput.html
    @@ -104,27 +104,27 @@
     
     
 static FanOutOneBlockAsyncDFSOutput
-FanOutOneBlockAsyncDFSOutputHelper.createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs,
+FanOutOneBlockAsyncDFSOutputHelper.createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs,
      org.apache.hadoop.fs.Path f,
      boolean overwrite,
      boolean createParent,
      short replication,
      long blockSize,
-     org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup eventLoopGroup,
-     Class<? extends org.apache.hadoop.hbase.shaded.io.netty.channel.Channel> channelClass)
+     org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup eventLoopGroup,
+     Class<? extends org.apache.hbase.thirdparty.io.netty.channel.Channel> channelClass)
 Create a FanOutOneBlockAsyncDFSOutput.
 
 
 
 private static FanOutOneBlockAsyncDFSOutput
-FanOutOneBlockAsyncDFSOutputHelper.createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs,
+FanOutOneBlockAsyncDFSOutputHelper.createOutput(org.apache.hadoop.hdfs.DistributedFileSystem dfs,
      String src,
      boolean overwrite,
      boolean createParent,
      short replication,
      long blockSize,
-     org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoopGroup eventLoopGroup,
-     Class<? extends org.apache.hadoop.hbase.shaded.io.netty.channel.Channel> channelClass)
+     org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup eventLoopGroup,
+     Class<? extends org.apache.hbase.thirdparty.io.netty.channel.Channel> channelClass)
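
The only change in these hunks is the netty relocation from org.apache.hadoop.hbase.shaded.io.netty to org.apache.hbase.thirdparty.io.netty. A post-relocation call site could look like the sketch below; FanOutOneBlockAsyncDFSOutputHelper is @InterfaceAudience.Private, so this is illustrative only, and the file path, replication, and event-loop sizing are made up.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput;
import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper;
import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoopGroup;
import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel;

public class AsyncFsSketch {
  // 'dfs' is assumed to be an already-opened DistributedFileSystem.
  static FanOutOneBlockAsyncDFSOutput open(DistributedFileSystem dfs) throws Exception {
    EventLoopGroup group = new NioEventLoopGroup(1);  // relocated netty package
    return FanOutOneBlockAsyncDFSOutputHelper.createOutput(
        dfs,
        new Path("/hbase/test-wal"),  // hypothetical target file
        true,                         // overwrite
        false,                        // createParent
        (short) 3,                    // replication
        128L * 1024 * 1024,           // blockSize
        group,
        NioSocketChannel.class);      // relocated channel class
  }
}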
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/io/asyncfs/package-tree.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/package-tree.html
    index acac59b..1d955f0 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/package-tree.html
@@ -82,16 +82,16 @@
 java.lang.Object
 
 org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutputHelper
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerAdapter (implements org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler)
+org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerAdapter (implements org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler)
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAdapter (implements org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandler)
+org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter (implements org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandler)
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelDuplexHandler (implements org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOutboundHandler)
+org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler (implements org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandler)
 
 org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.SaslNegotiateHandler
 
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler<I>
+org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler<I>
 
 org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.AckHandler
 org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.DecryptHandler
@@ -100,10 +100,10 @@
     
     
     
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.EmptyByteBufferExtendedCell.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.EmptyByteBufferExtendedCell.html
     
    b/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.EmptyByteBufferExtendedCell.html
    new file mode 100644
    index 000..da06531
    --- /dev/null
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.EmptyByteBufferExtendedCell.html
    @@ -0,0 +1,181 @@
+Uses of Class org.apache.hadoop.hbase.PrivateCellUtil.EmptyByteBufferExtendedCell (Apache HBase 3.0.0-SNAPSHOT API)
+
+Uses of Class org.apache.hadoop.hbase.PrivateCellUtil.EmptyByteBufferExtendedCell
    +
    +
    +
    +
    +
    +Packages that use PrivateCellUtil.EmptyByteBufferExtendedCell
    +
    +Package
    +Description
    +
    +
    +
    +org.apache.hadoop.hbase
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Uses of PrivateCellUtil.EmptyByteBufferExtendedCell in org.apache.hadoop.hbase
    +
    +Subclasses of PrivateCellUtil.EmptyByteBufferExtendedCell in org.apache.hadoop.hbase
    +
    +Modifier and Type
    +Class and Description
    +
    +
    +
    +private static class
    +PrivateCellUtil.FirstOnRowByteBufferExtendedCell
    +
    +
    +private static class
    +PrivateCellUtil.FirstOnRowColByteBufferExtendedCell
    +
    +
    +private static class
    +PrivateCellUtil.FirstOnRowColTSByteBufferExtendedCell
    +
    +
    +private static class
    +PrivateCellUtil.LastOnRowByteBufferExtendedCell
    +
    +
    +private static class
    +PrivateCellUtil.LastOnRowColByteBufferExtendedCell
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
+Copyright © 2007–2017 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
    +
    +
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.FirstOnRowByteBufferCell.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.FirstOnRowByteBufferCell.html
     
    b/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.FirstOnRowByteBufferCell.html
    deleted file mode 100644
    index 8630ac9..000
    --- 
    a/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.FirstOnRowByteBufferCell.html
    +++ /dev/null
    @@ -1,169 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.PrivateCellUtil.FirstOnRowByteBufferCell (Apache HBase 3.0.0-SNAPSHOT API)
-
    -
    -
    -
    -
    -
    -
    -
    -
    -
-Uses of Class org.apache.hadoop.hbase.PrivateCellUtil.FirstOnRowByteBufferCell
    -
    -
    -
    -
    -
    -Packages that use PrivateCellUtil.FirstOnRowByteBufferCell
    -
    -Package
    -Description
    -
    -
    -
    -org.apache.hadoop.hbase
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -Uses of PrivateCellUtil.FirstOnRowByteBufferCell in org.apache.hadoop.hbase
    -
    -Subclasses of PrivateCellUtil.FirstOnRowByteBufferCell in org.apache.hadoop.hbase
    -
    -Modifier and Type
    -Class and Description
    -
    -
    -
    -private static class
    -PrivateCellUtil.FirstOnRowColByteBufferCell
    -
    -
    -private static class
    -PrivateCellUtil.FirstOnRowColTSByteBufferCell
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/org/apache/hadoop/hbase/security/SaslUtil.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/security/SaslUtil.html 
    b/devapidocs/org/apache/hadoop/hbase/security/SaslUtil.html
    index e3c9757..512237b 100644
    --- a/devapidocs/org/apache/hadoop/hbase/security/SaslUtil.html
    +++ b/devapidocs/org/apache/hadoop/hbase/security/SaslUtil.html
    @@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
     
     
     @InterfaceAudience.Private
    -public class SaslUtil
    +public class SaslUtil
 extends java.lang.Object
     
     
@@ -258,7 +258,7 @@ extends java.lang.Object
     
     
     LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
     
     
     
@@ -267,7 +267,7 @@ extends java.lang.Object
     
     
     SASL_DEFAULT_REALM
-public static final String SASL_DEFAULT_REALM
+public static final String SASL_DEFAULT_REALM
     
     See Also:
     Constant
     Field Values
@@ -280,7 +280,7 @@ extends java.lang.Object
     
     
     SWITCH_TO_SIMPLE_AUTH
-public static final int SWITCH_TO_SIMPLE_AUTH
+public static final int SWITCH_TO_SIMPLE_AUTH
     
     See Also:
     Constant
     Field Values
@@ -301,7 +301,7 @@ extends java.lang.Object
     
     
     SaslUtil
-public SaslUtil()
+public SaslUtil()
     
     
     
@@ -318,7 +318,7 @@ extends java.lang.Object
     
     
     splitKerberosName
-public static String[] splitKerberosName(String fullName)
+public static String[] splitKerberosName(String fullName)
     Splitting fully qualified Kerberos name into parts
     
     
@@ -328,7 +328,7 @@ extends java.lang.Object
     
     
     encodeIdentifier
-static String encodeIdentifier(byte[] identifier)
+static String encodeIdentifier(byte[] identifier)
     
     
     
@@ -337,7 +337,7 @@ extends java.lang.Object
     
     
     decodeIdentifier
-static byte[] decodeIdentifier(String identifier)
+static byte[] decodeIdentifier(String identifier)
     
     
     
@@ -346,7 +346,7 @@ extends java.lang.Object
     
     
     encodePassword
-static char[] encodePassword(byte[] password)
+static char[] encodePassword(byte[] password)
     
     
     
@@ -355,7 +355,7 @@ extends java.lang.Object
     
     
     getQop
-public static SaslUtil.QualityOfProtection getQop(String stringQop)
+public static SaslUtil.QualityOfProtection getQop(String stringQop)
     Returns SaslUtil.QualityOfProtection
      corresponding to the given stringQop value.
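
None of these method bodies change in this patch (only the rendered anchors do), but a short sketch of how the helpers above fit together may help. SaslUtil is @InterfaceAudience.Private, so treat this as illustrative; the Kerberos principal is a made-up example.

import java.util.Map;
import org.apache.hadoop.hbase.security.SaslUtil;

public class SaslSketch {
  public static void main(String[] args) {
    // "hbase/rs1.example.com@EXAMPLE.COM" -> { "hbase", "rs1.example.com", "EXAMPLE.COM" }
    String[] parts = SaslUtil.splitKerberosName("hbase/rs1.example.com@EXAMPLE.COM");

    // Maps an hbase.rpc.protection value to a SASL QOP
    // ("authentication" -> auth, "integrity" -> auth-int, "privacy" -> auth-conf).
    SaslUtil.QualityOfProtection qop = SaslUtil.getQop("privacy");

    // Builds the property map handed to the SASL client/server factories.
    Map<String, String> props = SaslUtil.initSaslProperties("privacy");
    System.out.println(parts.length + " " + qop + " " + props);
  }
}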
     
@@ -370,7 +370,7 @@ extends java.lang.Object
     
     
     initSaslProperties
-public static Map<String, String> initSaslProperties(String rpcProtection)
    +public 

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.html
     
    b/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.html
    deleted file mode 100644
    index 980f8bd..000
    --- 
    a/devapidocs/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.html
    +++ /dev/null
    @@ -1,1070 +0,0 @@
-TableBasedReplicationQueuesImpl (Apache HBase 3.0.0-SNAPSHOT API)
-
    -org.apache.hadoop.hbase.replication
-Class TableBasedReplicationQueuesImpl
    -
    -
    -
-java.lang.Object
-  org.apache.hadoop.hbase.replication.ReplicationTableBase
-    org.apache.hadoop.hbase.replication.TableBasedReplicationQueuesImpl
    -
    -
    -
    -
    -
    -All Implemented Interfaces:
    -ReplicationQueues
    -
    -
    -
    -@InterfaceAudience.Private
    -public class TableBasedReplicationQueuesImpl
    -extends ReplicationTableBase
    -implements ReplicationQueues
-This class provides an implementation of the ReplicationQueues interface using an HBase table
- "Replication Table". It utilizes the ReplicationTableBase to access the Replication Table.
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -Field Summary
    -
    -Fields
    -
    -Modifier and Type
    -Field and Description
    -
    -
    -private static byte[]
    -EMPTY_STRING_BYTES
    -
    -
    -private static byte[]
    -INITIAL_OFFSET_BYTES
    -
    -
    -private static org.slf4j.Logger
    -LOG
    -
    -
    -private ReplicationStateZKBase
    -replicationState
    -
    -
-private String
    -serverName
    -
    -
    -private byte[]
    -serverNameBytes
    -
    -
    -
    -
    -
    -
    -Fields inherited from classorg.apache.hadoop.hbase.replication.ReplicationTableBase
    -abortable,
     CF_QUEUE,
     COL_QUEUE_OWNER,
     COL_QUEUE_OWNER_HISTORY,
     conf,
     QUEUE_HISTORY_DELIMITER,
     REPLICATION_TABLE_NAME,
     ROW_KEY_DELIMITER
    -
    -
    -
    -
    -
    -
    -
    -
    -Constructor Summary
    -
    -Constructors
    -
    -Constructor and Description
    -
    -
-TableBasedReplicationQueuesImpl(org.apache.hadoop.conf.Configuration conf,
-   Abortable abort,
-   ZKWatcher zkw)
-
-
-TableBasedReplicationQueuesImpl(ReplicationQueuesArguments args)
    -
    -
    -
    -
    -
    -
    -
    -
    -
    -Method Summary
    -
    -All MethodsInstance MethodsConcrete Methods
    -
    -Modifier and Type
    -Method and Description
    -
    -
    -void
-addHFileRefs(String peerId,
-    List<Pair<org.apache.hadoop.fs.Path,org.apache.hadoop.fs.Path>> pairs)
-Add new hfile references to the queue.
    -
    -
    -
    -void
-addLog(String queueId,
-  String filename)
-Add a new WAL file to the given queue.
    -
    -
    -
    -void
-addPeerToHFileRefs(String peerId)
-Add a peer to hfile reference queue if peer does not exist.
    -
    -
    -
    -private boolean
-attemptToClaimQueue(Result queue,
-   String 

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
    index 6fecbc9..2accda0 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
    @@ -34,4140 +34,4141 @@
     026import 
    java.nio.charset.StandardCharsets;
     027import java.util.ArrayList;
     028import java.util.Arrays;
    -029import java.util.Collection;
    -030import java.util.EnumSet;
    -031import java.util.HashMap;
    -032import java.util.Iterator;
    -033import java.util.LinkedList;
    -034import java.util.List;
    -035import java.util.Map;
    -036import java.util.Set;
    -037import java.util.concurrent.Callable;
    -038import 
    java.util.concurrent.ExecutionException;
    -039import java.util.concurrent.Future;
    -040import java.util.concurrent.TimeUnit;
    -041import 
    java.util.concurrent.TimeoutException;
    -042import 
    java.util.concurrent.atomic.AtomicInteger;
    -043import 
    java.util.concurrent.atomic.AtomicReference;
    -044import java.util.regex.Pattern;
    -045import java.util.stream.Collectors;
    -046import java.util.stream.Stream;
    -047import 
    org.apache.hadoop.conf.Configuration;
    -048import 
    org.apache.hadoop.hbase.Abortable;
    -049import 
    org.apache.hadoop.hbase.CacheEvictionStats;
    -050import 
    org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
    -051import 
    org.apache.hadoop.hbase.ClusterMetrics.Option;
    -052import 
    org.apache.hadoop.hbase.ClusterStatus;
    -053import 
    org.apache.hadoop.hbase.DoNotRetryIOException;
    -054import 
    org.apache.hadoop.hbase.HBaseConfiguration;
    -055import 
    org.apache.hadoop.hbase.HConstants;
    -056import 
    org.apache.hadoop.hbase.HRegionInfo;
    -057import 
    org.apache.hadoop.hbase.HRegionLocation;
    -058import 
    org.apache.hadoop.hbase.HTableDescriptor;
    -059import 
    org.apache.hadoop.hbase.MasterNotRunningException;
    -060import 
    org.apache.hadoop.hbase.MetaTableAccessor;
    -061import 
    org.apache.hadoop.hbase.NamespaceDescriptor;
    -062import 
    org.apache.hadoop.hbase.NamespaceNotFoundException;
    -063import 
    org.apache.hadoop.hbase.NotServingRegionException;
    -064import 
    org.apache.hadoop.hbase.RegionLoad;
    -065import 
    org.apache.hadoop.hbase.RegionLocations;
    -066import 
    org.apache.hadoop.hbase.ServerName;
    -067import 
    org.apache.hadoop.hbase.TableExistsException;
    -068import 
    org.apache.hadoop.hbase.TableName;
    -069import 
    org.apache.hadoop.hbase.TableNotDisabledException;
    -070import 
    org.apache.hadoop.hbase.TableNotFoundException;
    -071import 
    org.apache.hadoop.hbase.UnknownRegionException;
    -072import 
    org.apache.hadoop.hbase.ZooKeeperConnectionException;
    -073import 
    org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
    -074import 
    org.apache.hadoop.hbase.client.replication.TableCFs;
    -075import 
    org.apache.hadoop.hbase.client.security.SecurityCapability;
    -076import 
    org.apache.hadoop.hbase.exceptions.TimeoutIOException;
    -077import 
    org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
    -078import 
    org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
    -079import 
    org.apache.hadoop.hbase.ipc.HBaseRpcController;
    -080import 
    org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    -081import 
    org.apache.hadoop.hbase.quotas.QuotaFilter;
    -082import 
    org.apache.hadoop.hbase.quotas.QuotaRetriever;
    -083import 
    org.apache.hadoop.hbase.quotas.QuotaSettings;
    -084import 
    org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
    -085import 
    org.apache.hadoop.hbase.replication.ReplicationException;
    -086import 
    org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    -087import 
    org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
    -088import 
    org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
    -089import 
    org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
    -090import 
    org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
    -091import 
    org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
    -092import 
    org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
    -093import 
    org.apache.hadoop.hbase.util.Addressing;
    -094import 
    org.apache.hadoop.hbase.util.Bytes;
    -095import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -096import 
    org.apache.hadoop.hbase.util.ForeignExceptionUtil;
    -097import 
    org.apache.hadoop.hbase.util.Pair;
    -098import 
    org.apache.hadoop.ipc.RemoteException;
    -099import 
    org.apache.hadoop.util.StringUtils;
    -100import 
    org.apache.yetus.audience.InterfaceAudience;
    -101import 
    org.apache.yetus.audience.InterfaceStability;
    -102import org.slf4j.Logger;
    -103import org.slf4j.LoggerFactory;
    -104
    -105import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    -106import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
    -107import 
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/apidocs/src-html/org/apache/hadoop/hbase/client/OperationWithAttributes.html
    --
    diff --git 
    a/apidocs/src-html/org/apache/hadoop/hbase/client/OperationWithAttributes.html 
    b/apidocs/src-html/org/apache/hadoop/hbase/client/OperationWithAttributes.html
    index c9c4073..bc4360c 100644
    --- 
    a/apidocs/src-html/org/apache/hadoop/hbase/client/OperationWithAttributes.html
    +++ 
    b/apidocs/src-html/org/apache/hadoop/hbase/client/OperationWithAttributes.html
    @@ -30,105 +30,122 @@
     022import java.util.Collections;
     023import java.util.HashMap;
     024import java.util.Map;
-025
-026import org.apache.hadoop.hbase.HConstants;
-027import org.apache.yetus.audience.InterfaceAudience;
+025import java.util.TreeMap;
+026import java.util.stream.Collectors;
+027import org.apache.hadoop.hbase.HConstants;
 028import org.apache.hadoop.hbase.util.Bytes;
 029import org.apache.hadoop.hbase.util.ClassSize;
-030
-031@InterfaceAudience.Public
-032public abstract class OperationWithAttributes extends Operation implements Attributes {
-033  // An opaque blob of attributes
-034  private Map<String, byte[]> attributes;
-035
-036  // used for uniquely identifying an operation
-037  public static final String ID_ATRIBUTE = "_operation.attributes.id";
-038  private int priority = HConstants.PRIORITY_UNSET;
-039
-040  @Override
-041  public OperationWithAttributes setAttribute(String name, byte[] value) {
-042    if (attributes == null && value == null) {
-043      return this;
-044    }
-045
-046    if (attributes == null) {
-047      attributes = new HashMap<>();
-048    }
-049
-050    if (value == null) {
-051      attributes.remove(name);
-052      if (attributes.isEmpty()) {
-053        this.attributes = null;
-054      }
-055    } else {
-056      attributes.put(name, value);
-057    }
-058    return this;
-059  }
-060
-061  @Override
-062  public byte[] getAttribute(String name) {
+030import org.apache.yetus.audience.InterfaceAudience;
+031
+032@InterfaceAudience.Public
+033public abstract class OperationWithAttributes extends Operation implements Attributes {
+034  // An opaque blob of attributes
+035  private Map<String, byte[]> attributes;
+036
+037  // used for uniquely identifying an operation
+038  public static final String ID_ATRIBUTE = "_operation.attributes.id";
+039  private int priority = HConstants.PRIORITY_UNSET;
+040
+041  /**
+042   * empty construction.
+043   * We need this empty construction to keep binary compatibility.
+044   */
+045  protected OperationWithAttributes() {
+046  }
+047
+048  protected OperationWithAttributes(OperationWithAttributes clone) {
+049    this.attributes = clone.getAttributesMap() == null ? null :
+050      clone.getAttributesMap().entrySet().stream()
+051        .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue(), (k, v) -> {
+052          throw new RuntimeException("collisions!!!");
+053        }, () -> new TreeMap<>()));
+054    this.priority = clone.getPriority();
+055  }
+056
+057  @Override
+058  public OperationWithAttributes setAttribute(String name, byte[] value) {
+059    if (attributes == null && value == null) {
+060      return this;
+061    }
+062
 063    if (attributes == null) {
-064      return null;
+064      attributes = new HashMap<>();
 065    }
 066
-067    return attributes.get(name);
-068  }
-069
-070  @Override
-071  public Map<String, byte[]> getAttributesMap() {
-072    if (attributes == null) {
-073      return Collections.emptyMap();
+067    if (value == null) {
+068      attributes.remove(name);
+069      if (attributes.isEmpty()) {
+070        this.attributes = null;
+071      }
+072    } else {
+073      attributes.put(name, value);
 074    }
-075    return Collections.unmodifiableMap(attributes);
+075    return this;
 076  }
 077
-078  protected long getAttributeSize() {
-079    long size = 0;
-080    if (attributes != null) {
-081      size += ClassSize.align(this.attributes.size() * ClassSize.MAP_ENTRY);
-082      for(Map.Entry<String, byte[]> entry : this.attributes.entrySet()) {
-083        size += ClassSize.align(ClassSize.STRING + entry.getKey().length());
-084        size += ClassSize.align(ClassSize.ARRAY + entry.getValue().length);
-085      }
-086    }
-087    return size;
-088  }
-089
-090  /**
-091   * This method allows you to set an identifier on an operation. The original
-092   * motivation for this was to allow the identifier to be used in slow query
-093   * logging, but this could obviously be useful in other places. One use of
-094   * this could be to put a class.method identifier in here to see where the
-095   * slow query is coming from.
-096   * @param id
-097   *  id to set for the scan
-098   */
-099  public OperationWithAttributes setId(String id) {
-100    setAttribute(ID_ATRIBUTE, Bytes.toBytes(id));
-101    return this;
-102  }
-103
-104  /**
-105   * This method 
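
The substantive addition in this hunk is the copy constructor, which deep-copies the attribute map into a TreeMap and carries the priority over. Below is a small sketch of the attribute API it preserves, using Put as a concrete OperationWithAttributes; the attribute names and values are illustrative, not from the patch.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class AttributesSketch {
  public static void main(String[] args) {
    Put put = new Put(Bytes.toBytes("row-1"));
    put.setAttribute("audit.user", Bytes.toBytes("alice")); // opaque per-operation metadata
    put.setId("MyLoader.bulkPut");                          // stored under ID_ATRIBUTE

    byte[] user = put.getAttribute("audit.user");           // round-trips the raw bytes

    // The new copy constructor above is what lets clone-style constructors such as
    // new Put(put) carry the attribute map and priority across.
    Put copy = new Put(put);
    assert Bytes.equals(copy.getAttribute("audit.user"), user);
  }
}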

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.html
    --
    diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.html 
    b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.html
    index 51d92c2..86fc15e 100644
    --- a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.html
    +++ b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.html
    @@ -44,2578 +44,2580 @@
     036import java.util.Iterator;
     037import java.util.List;
     038
    -039import com.google.protobuf.ByteString;
    -040import org.apache.commons.logging.Log;
    -041import 
    org.apache.commons.logging.LogFactory;
    -042import org.apache.hadoop.hbase.Cell;
    -043import 
    org.apache.hadoop.hbase.CellComparator;
    -044import 
    org.apache.hadoop.hbase.KeyValue;
    -045import 
    org.apache.hadoop.io.RawComparator;
    -046import 
    org.apache.hadoop.io.WritableComparator;
    -047import 
    org.apache.hadoop.io.WritableUtils;
    -048import 
    org.apache.yetus.audience.InterfaceAudience;
    -049import sun.misc.Unsafe;
    -050
    -051import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    -052import 
    org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
    +039import org.apache.hadoop.hbase.Cell;
    +040import 
    org.apache.hadoop.hbase.CellComparator;
    +041import 
    org.apache.hadoop.hbase.KeyValue;
    +042import 
    org.apache.hadoop.io.RawComparator;
    +043import 
    org.apache.hadoop.io.WritableComparator;
    +044import 
    org.apache.hadoop.io.WritableUtils;
    +045import 
    org.apache.yetus.audience.InterfaceAudience;
    +046import org.slf4j.Logger;
    +047import org.slf4j.LoggerFactory;
    +048
    +049import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    +050import 
    org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
    +051
    +052import com.google.protobuf.ByteString;
     053
    -054/**
    -055 * Utility class that handles byte 
    arrays, conversions to/from other types,
    -056 * comparisons, hash code generation, 
    manufacturing keys for HashMaps or
    -057 * HashSets, and can be used as key in 
    maps or trees.
    -058 */
    -059@SuppressWarnings("restriction")
    -060@InterfaceAudience.Public
    -061@edu.umd.cs.findbugs.annotations.SuppressWarnings(
    -062
    value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
    -063justification="It has been like this 
    forever")
-064public class Bytes implements Comparable<Bytes> {
    -065
    -066  // Using the charset canonical name for 
    String/byte[] conversions is much
    -067  // more efficient due to use of cached 
    encoders/decoders.
    -068  private static final String UTF8_CSN = 
    StandardCharsets.UTF_8.name();
    -069
    -070  //HConstants.EMPTY_BYTE_ARRAY should be 
    updated if this changed
    -071  private static final byte [] 
    EMPTY_BYTE_ARRAY = new byte [0];
    -072
    -073  private static final Log LOG = 
    LogFactory.getLog(Bytes.class);
    +054import sun.misc.Unsafe;
    +055
    +056/**
    +057 * Utility class that handles byte 
    arrays, conversions to/from other types,
    +058 * comparisons, hash code generation, 
    manufacturing keys for HashMaps or
    +059 * HashSets, and can be used as key in 
    maps or trees.
    +060 */
    +061@SuppressWarnings("restriction")
    +062@InterfaceAudience.Public
    +063@edu.umd.cs.findbugs.annotations.SuppressWarnings(
    +064
    value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
    +065justification="It has been like this 
    forever")
+066public class Bytes implements Comparable<Bytes> {
    +067
    +068  // Using the charset canonical name for 
    String/byte[] conversions is much
    +069  // more efficient due to use of cached 
    encoders/decoders.
    +070  private static final String UTF8_CSN = 
    StandardCharsets.UTF_8.name();
    +071
    +072  //HConstants.EMPTY_BYTE_ARRAY should be 
    updated if this changed
    +073  private static final byte [] 
    EMPTY_BYTE_ARRAY = new byte [0];
     074
    -075  /**
    -076   * Size of boolean in bytes
    -077   */
    -078  public static final int SIZEOF_BOOLEAN 
    = Byte.SIZE / Byte.SIZE;
    -079
    -080  /**
    -081   * Size of byte in bytes
    -082   */
    -083  public static final int SIZEOF_BYTE = 
    SIZEOF_BOOLEAN;
    -084
    -085  /**
    -086   * Size of char in bytes
    -087   */
    -088  public static final int SIZEOF_CHAR = 
    Character.SIZE / Byte.SIZE;
    -089
    -090  /**
    -091   * Size of double in bytes
    -092   */
    -093  public static final int SIZEOF_DOUBLE = 
    Double.SIZE / Byte.SIZE;
    -094
    -095  /**
    -096   * Size of float in bytes
    -097   */
    -098  public static final int SIZEOF_FLOAT = 
    Float.SIZE / Byte.SIZE;
    -099
    -100  /**
    -101   * Size of int in bytes
    -102   */
    -103  public static final int SIZEOF_INT = 
    Integer.SIZE / Byte.SIZE;
    -104
    -105  /**
    -106   * Size of long in bytes
    -107   */
    -108  public static final int SIZEOF_LONG = 
    Long.SIZE / Byte.SIZE;
    -109
    -110  /**
    -111   * Size of short in bytes
    -112   */
    -113  public static final int SIZEOF_SHORT = 
    Short.SIZE / Byte.SIZE;
    -114
    -115  /**
    -116   * Mask to apply to a long to reveal 
    the lower int only. Use like this:
-117   * int i = (int)(0xFFFFFFFF00000000L ^ some_long_value);
    -118   */
    -119  public static final 
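
This hunk only swaps commons-logging for slf4j and reorders imports, but since the SIZEOF_* constants and conversion helpers are in view, here is a tiny round-trip sketch; the values are arbitrary.

import org.apache.hadoop.hbase.util.Bytes;

public class BytesSketch {
  public static void main(String[] args) {
    byte[] i = Bytes.toBytes(42);       // Bytes.SIZEOF_INT == 4 bytes
    byte[] s = Bytes.toBytes("row");    // UTF-8 encoded

    int back = Bytes.toInt(i);          // 42 again
    byte[] key = Bytes.add(s, i);       // concatenation, handy for composite keys
    int cmp = Bytes.compareTo(s, key);  // lexicographic, HBase's key ordering

    System.out.println(back + " " + key.length + " " + cmp);
  }
}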

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.html
    --
    diff --git 
    a/apidocs/src-html/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.html 
    b/apidocs/src-html/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.html
    index af883ab..0ab7c24 100644
    --- a/apidocs/src-html/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.html
    +++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.html
    @@ -154,372 +154,374 @@
     146  /**
     147   * @return The filter serialized using 
    pb
     148   */
    -149  public byte[] toByteArray() {
    -150
    FilterProtos.MultiRowRangeFilter.Builder builder = 
    FilterProtos.MultiRowRangeFilter
    -151.newBuilder();
    -152for (RowRange range : rangeList) {
    -153  if (range != null) {
    -154FilterProtos.RowRange.Builder 
    rangebuilder = FilterProtos.RowRange.newBuilder();
    -155if (range.startRow != null)
    -156  
    rangebuilder.setStartRow(UnsafeByteOperations.unsafeWrap(range.startRow));
    -157
    rangebuilder.setStartRowInclusive(range.startRowInclusive);
    -158if (range.stopRow != null)
    -159  
    rangebuilder.setStopRow(UnsafeByteOperations.unsafeWrap(range.stopRow));
    -160
    rangebuilder.setStopRowInclusive(range.stopRowInclusive);
    -161
    builder.addRowRangeList(rangebuilder.build());
    -162  }
    -163}
    -164return 
    builder.build().toByteArray();
    -165  }
    -166
    -167  /**
    -168   * @param pbBytes A pb serialized 
    instance
    -169   * @return An instance of 
    MultiRowRangeFilter
    -170   * @throws 
    org.apache.hadoop.hbase.exceptions.DeserializationException
    -171   */
    -172  public static MultiRowRangeFilter 
    parseFrom(final byte[] pbBytes)
    -173  throws DeserializationException {
    -174FilterProtos.MultiRowRangeFilter 
    proto;
    -175try {
    -176  proto = 
    FilterProtos.MultiRowRangeFilter.parseFrom(pbBytes);
    -177} catch 
    (InvalidProtocolBufferException e) {
    -178  throw new 
    DeserializationException(e);
    -179}
    -180int length = 
    proto.getRowRangeListCount();
    -181ListFilterProtos.RowRange 
    rangeProtos = proto.getRowRangeListList();
    -182ListRowRange rangeList = new 
    ArrayList(length);
    -183for (FilterProtos.RowRange rangeProto 
    : rangeProtos) {
    -184  RowRange range = new 
    RowRange(rangeProto.hasStartRow() ? rangeProto.getStartRow()
    -185  .toByteArray() : null, 
    rangeProto.getStartRowInclusive(), rangeProto.hasStopRow() ?
    -186  
    rangeProto.getStopRow().toByteArray() : null, 
    rangeProto.getStopRowInclusive());
    -187  rangeList.add(range);
    -188}
    -189return new 
    MultiRowRangeFilter(rangeList);
    -190  }
    -191
-192  /**
-193   * @param o the filter to compare
-194   * @return true if and only if the fields of the filter that are serialized are equal to the
-195   * corresponding fields in other. Used for testing.
-196   */
-197  boolean areSerializedFieldsEqual(Filter o) {
-198    if (o == this)
-199      return true;
-200    if (!(o instanceof MultiRowRangeFilter))
-201      return false;
-202
-203    MultiRowRangeFilter other = (MultiRowRangeFilter) o;
-204    if (this.rangeList.size() != other.rangeList.size())
-205      return false;
-206    for (int i = 0; i < rangeList.size(); ++i) {
-207      RowRange thisRange = this.rangeList.get(i);
-208      RowRange otherRange = other.rangeList.get(i);
-209      if (!(Bytes.equals(thisRange.startRow, otherRange.startRow) && Bytes.equals(
-210          thisRange.stopRow, otherRange.stopRow) && (thisRange.startRowInclusive ==
-211          otherRange.startRowInclusive) && (thisRange.stopRowInclusive ==
-212          otherRange.stopRowInclusive))) {
-213        return false;
-214      }
-215    }
-216    return true;
-217  }
    -218
-219  /**
-220   * Calculate the position of the given row key in the ranges list.
-221   *
-222   * @param rowKey the row key to calculate
-223   * @return index the position of the row key
-224   */
-225  private int getNextRangeIndex(byte[] rowKey) {
-226    RowRange temp = new RowRange(rowKey, true, null, true);
-227    int index = Collections.binarySearch(rangeList, temp);
-228    if (index < 0) {
-229      int insertionPosition = -index - 1;
-230      // check if the row key is in the range before the insertion position
-231      if (insertionPosition != 0 && rangeList.get(insertionPosition - 1).contains(rowKey)) {
-232        return insertionPosition - 1;
-233      }
-234      // check if the row key is before the first range
-235      if (insertionPosition == 0 && !rangeList.get(insertionPosition).contains(rowKey)) {
-236        return ROW_BEFORE_FIRST_RANGE;
-237      }
-238      if (!initialized) {
-239        initialized = true;
-240      }
-241      return insertionPosition;
-242    }
-243    // the row key equals one of the start keys, and the range excludes the start key
-244    if(rangeList.get(index).startRowInclusive ==
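
For context, here is a minimal client-side sketch of how this filter is typically built and attached to a Scan (the table and row keys are hypothetical; the RowRange and MultiRowRangeFilter constructors are the same ones exercised by parseFrom above):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.MultiRowRangeFilter;
    import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;
    import org.apache.hadoop.hbase.util.Bytes;

    // Scan two disjoint row ranges in a single pass: [a, c) and [g, i).
    List<RowRange> ranges = Arrays.asList(
        new RowRange(Bytes.toBytes("a"), true, Bytes.toBytes("c"), false),
        new RowRange(Bytes.toBytes("g"), true, Bytes.toBytes("i"), false));
    Scan scan = new Scan();
    scan.setFilter(new MultiRowRangeFilter(ranges));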

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
    index f1a2443..a469e93 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
    @@ -1350,415 +1350,415 @@
     1342return delete;
     1343  }
     1344
-1345  public static Put makeBarrierPut(byte[] encodedRegionName, long seq, byte[] tableName) {
-1346    byte[] seqBytes = Bytes.toBytes(seq);
-1347    return new Put(encodedRegionName)
-1348        .addImmutable(HConstants.REPLICATION_BARRIER_FAMILY, seqBytes, seqBytes)
-1349        .addImmutable(HConstants.REPLICATION_META_FAMILY, tableNameCq, tableName);
-1350  }
-1351
-1352
-1353  public static Put makeDaughterPut(byte[] encodedRegionName, byte[] value) {
-1354    return new Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1355        daughterNameCq, value);
-1356  }
-1357
-1358  public static Put makeParentPut(byte[] encodedRegionName, byte[] value) {
-1359    return new Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1360        parentNameCq, value);
-1361  }
-1362
-1363  /**
-1364   * Adds split daughters to the Put
-1365   */
-1366  public static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB) {
-1367    if (splitA != null) {
-1368      put.addImmutable(
-1369          HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, RegionInfo.toByteArray(splitA));
-1370    }
-1371    if (splitB != null) {
-1372      put.addImmutable(
-1373          HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, RegionInfo.toByteArray(splitB));
-1374    }
-1375    return put;
-1376  }
    -1377
-1378  /**
-1379   * Put the passed <code>puts</code> to the <code>hbase:meta</code> table.
-1380   * Non-atomic for multi puts.
-1381   * @param connection connection we're using
-1382   * @param puts Put to add to hbase:meta
-1383   * @throws IOException
-1384   */
-1385  public static void putToMetaTable(final Connection connection, final Put... puts)
-1386    throws IOException {
-1387    put(getMetaHTable(connection), Arrays.asList(puts));
-1388  }
    -1389
-1390  /**
-1391   * @param t Table to use (will be closed when done).
-1392   * @param puts puts to make
-1393   * @throws IOException
-1394   */
-1395  private static void put(final Table t, final List<Put> puts) throws IOException {
-1396    try {
-1397      if (METALOG.isDebugEnabled()) {
-1398        METALOG.debug(mutationsToString(puts));
-1399      }
-1400      t.put(puts);
-1401    } finally {
-1402      t.close();
-1403    }
-1404  }
    -1405
-1406  /**
-1407   * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
-1408   * @param connection connection we're using
-1409   * @param ps Put to add to hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void putsToMetaTable(final Connection connection, final List<Put> ps)
-1413    throws IOException {
-1414    Table t = getMetaHTable(connection);
-1415    try {
-1416      if (METALOG.isDebugEnabled()) {
-1417        METALOG.debug(mutationsToString(ps));
-1418      }
-1419      t.put(ps);
-1420    } finally {
-1421      t.close();
-1422    }
-1423  }
    -1424
-1425  /**
-1426   * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
-1427   * @param connection connection we're using
-1428   * @param d Delete to add to hbase:meta
-1429   * @throws IOException
-1430   */
-1431  static void deleteFromMetaTable(final Connection connection, final Delete d)
-1432    throws IOException {
-1433    List<Delete> dels = new ArrayList<>(1);
-1434    dels.add(d);
-1435    deleteFromMetaTable(connection, dels);
-1436  }
-1437
-1438  /**
-1439   * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
-1440   * @param connection connection we're using
-1441   * @param deletes Deletes to add to hbase:meta. This list should support #remove.
-1442   * @throws IOException
-1443   */
-1444  public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
-1445    throws IOException {
-1446    Table t = getMetaHTable(connection);
-1447    try {
-1448      if (METALOG.isDebugEnabled()) {
-1449        METALOG.debug(mutationsToString(deletes));
-1450      }
-1451      t.delete(deletes);
-1452    } finally {
-1453      t.close();
-1454    }
-1455  }
    -1456
    -1457  /**
    -1458   * Deletes some replica columns 
    corresponding to replicas for the passed rows
    -1459   * @param metaRows rows in 
    hbase:meta
    -1460   * @param replicaIndexToDeleteFrom the 
    replica ID we would start deleting from
    -1461   * @param 
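
As a rough illustration of the helpers above (MetaTableAccessor is @InterfaceAudience.Private, so this is a sketch of in-project usage rather than a public recipe; the Connection and mutations are assumed to come from elsewhere):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Arrays;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;

    void updateMeta(Connection connection, Put p1, Put p2, Delete d) throws IOException {
      // Non-atomic multi-put to hbase:meta, per the javadoc above.
      MetaTableAccessor.putsToMetaTable(connection, Arrays.asList(p1, p2));
      // The single-Delete overload is package-private; external callers use the
      // public List variant. The javadoc asks for a list that supports #remove,
      // hence the mutable copy instead of Arrays.asList.
      MetaTableAccessor.deleteFromMetaTable(connection, new ArrayList<>(Arrays.asList(d)));
    }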

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
    index 7c59e27..c904c56 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
    @@ -119,4048 +119,4054 @@
     111import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
     112import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
     113import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
    -114import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
    -115import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
    -116import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
    -117import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
    -118import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
    -119import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
    -120import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
    -121import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
    -122import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    -123import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
    -124import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
    -125import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
    -126import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
    -127import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
    -128import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
    -129import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
    -130import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
    -131import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
    -132import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
    -133import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
    -134import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
    -135import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
    -136import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
    -137import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
    -138import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
    -139import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
    -140import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
    -141import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
    -142import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
    -143import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
    -144import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
    -145import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
    -146import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
    -147import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
    -148import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
    -149import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
    -150import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
    -151import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
    -152import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
    -153import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
    -154import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
    -155import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
    -156import 
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html 
    b/devapidocs/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
    new file mode 100644
    index 000..3dbdcf7
    --- /dev/null
    +++ b/devapidocs/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
    @@ -0,0 +1,357 @@
    +Table.CheckAndMutateBuilder (Apache HBase 3.0.0-SNAPSHOT API)
    +org.apache.hadoop.hbase.client
    +Interface 
    Table.CheckAndMutateBuilder
    +
    +
    +
    +
    +
    +
    +All Known Implementing Classes:
    +HTable.CheckAndMutateBuilderImpl, RemoteHTable.CheckAndMutateBuilderImpl
    +
    +
    +Enclosing interface:
    +Table
    +
    +
    +
    +public static interface Table.CheckAndMutateBuilder
+A helper class for sending a checkAndMutate request.
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Method Summary
    +
+All Methods | Instance Methods | Abstract Methods | Default Methods
+
+Modifier and Type
+Method and Description
+
+default Table.CheckAndMutateBuilder
+ifEquals(byte[] value)
+Check for equality.
+
+Table.CheckAndMutateBuilder
+ifMatches(CompareOperator compareOp,
+          byte[] value)
+
+Table.CheckAndMutateBuilder
+ifNotExists()
+Check for lack of column.
+
+Table.CheckAndMutateBuilder
+qualifier(byte[] qualifier)
+
+boolean
+thenDelete(Delete delete)
+
+boolean
+thenMutate(RowMutations mutation)
+
+boolean
+thenPut(Put put)
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Method Detail
    +
    +
    +
    +
    +
    +qualifier
+Table.CheckAndMutateBuilder qualifier(byte[] qualifier)
    +
    +Parameters:
    +qualifier - column qualifier to check.
    +
    +
    +
    +
    +
    +
    +
    +
    +ifNotExists
+Table.CheckAndMutateBuilder ifNotExists()
    +Check for lack of column.
    +
    +
    +
    +
    +
    +
    +
    +ifEquals
+default Table.CheckAndMutateBuilder ifEquals(byte[] value)
    +Check for equality.
    +
    +Parameters:
    +value - the expected value
    +
    +
    +
    +
    +
    +
    +
    +
    +ifMatches
+Table.CheckAndMutateBuilder ifMatches(CompareOperator compareOp,
+                                      byte[] value)
    +
    +Parameters:
    +compareOp - comparison operator to use
    +value - the expected value
    +
    +
    +
    +
    +
    +
    +
    +
    +thenPut
+boolean thenPut(Put put)
+        throws IOException
    +
    +Parameters:
    +put - data to put if check succeeds
    +Returns:
    +true if the new put was executed, false 
    otherwise.
    +Throws:
+IOException
    +
    +
    +
    +
    +
    +
    +
    +
    +thenDelete
+boolean thenDelete(Delete delete)
+        throws IOException
    +
    +Parameters:
    +delete - data to delete if check succeeds
    +Returns:
    +true if the new delete was executed, false 
    otherwise.
    +Throws:
+IOException
    +
    +
    +
    +
    +
    +
    +
    +
    +thenMutate
+boolean thenMutate(RowMutations mutation)
+        throws IOException
    +
    +Parameters:
    +mutation - mutations to perform if check succeeds
    +Returns:
    +true if the new mutation was executed, false otherwise.
    +Throws:
+IOException
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
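
For orientation, a hedged usage sketch of this builder (the table, row, and values here are hypothetical; Table.checkAndMutate(row, family) is the entry point on the enclosing Table interface that returns this builder):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Atomically set d:col to "new" only if it currently equals "old".
    // (Runs in a context that may throw IOException.)
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      byte[] row = Bytes.toBytes("row1");
      byte[] family = Bytes.toBytes("d");
      Put put = new Put(row).addColumn(family, Bytes.toBytes("col"), Bytes.toBytes("new"));
      boolean ok = table.checkAndMutate(row, family)  // returns CheckAndMutateBuilder
          .qualifier(Bytes.toBytes("col"))            // column to check
          .ifEquals(Bytes.toBytes("old"))             // expected current value
          .thenPut(put);                              // applied only if the check passes
    }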
    -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
    +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
     var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
     
     
     All Implemented Interfaces:
    -Cell, SettableSequenceId
    +http://docs.oracle.com/javase/8/docs/api/java/lang/Cloneable.html?is-external=true;
     title="class or interface in java.lang">Cloneable, Cell, ExtendedCell, HeapSize, RawCell
     
     
     Enclosing class:
    @@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -private static class PrivateCellUtil.LastOnRowColCell
    +private static class PrivateCellUtil.LastOnRowColCell
     extends PrivateCellUtil.LastOnRowCell
     
     
    @@ -152,26 +152,44 @@ extends fArray
     
     
    +private static long
    +FIXED_OVERHEAD
    +
    +
     private byte
     flength
     
    -
    +
     private int
     foffset
     
    -
    +
     private byte[]
     qArray
     
    -
    +
     private int
     qlength
     
    -
    +
     private int
     qoffset
     
     
    +
    +
    +
    +
+Fields inherited from interface org.apache.hadoop.hbase.ExtendedCell
    +CELL_NOT_BASED_ON_CHUNK
    +
    +
    +
    +
    +
+Fields inherited from interface org.apache.hadoop.hbase.RawCell
    +MAX_TAGS_LENGTH
    +
     
     
     
    @@ -240,6 +258,10 @@ extends int
     getQualifierOffset()
     
    +
    +long
    +heapSize()
    +
     
     
     
    @@ -253,7 +275,7 @@ extends PrivateCellUtil.EmptyCell
    -getSequenceId,
     getTagsArray,
     getTagsLength,
     getTagsOffset,
     getValueArray,
     getValueLength,
     getValueOffset,
     setSequenceId
    +getSequenceId,
     getTagsArray,
     getTagsLength,
     getTagsOffset,
     getValueArray,
     getValueLength,
     getValueOffset,
     setSequenceId,
     setTimestamp, setTimestamp
     
     
     
    @@ -262,6 +284,20 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
     title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
     title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--;
     title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--;
     title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
     title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--;
     title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang
     /Object.html?is-external=true#notifyAll--" title="class or interface in 
    java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--;
     title="class or interface in java.lang">toString, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--;
     title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-;
     title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-;
     title="class or interface in java.lang">wait
     
    +
    +
    +
    +
+Methods inherited from interface org.apache.hadoop.hbase.ExtendedCell
    +deepClone,
     getChunkId,
     getSerializedSize,
     write,
     write
    +
    +
    +
    +
    +
+Methods inherited from interface org.apache.hadoop.hbase.RawCell
    +checkForTagsLength,
     cloneTags,
     getTag,
     getTags
    +
     
     
     
    @@ -276,13 +312,22 @@ extends 
    +
    +
    +
    +
    +FIXED_OVERHEAD
+private static final long FIXED_OVERHEAD
    +
    +
     
     
     
     
     
     fArray
-private final byte[] fArray
+private final byte[] fArray
     
     
     
    @@ -291,7 +336,7 @@ extends 
     
     foffset
-private final int foffset
+private final int foffset
     
     
     
    @@ -300,7 +345,7 @@ extends 
     
     flength
-private final byte flength
+private final byte flength
     
     
     
    @@ -309,7 +354,7 @@ extends 
     
     qArray
-private final byte[] qArray
+private final byte[] qArray
     
     
     
    @@ -318,7 +363,7 @@ extends 
     
     qoffset
-private final int qoffset
+private final int qoffset
     
     
     
    @@ -327,7 +372,7 @@ extends 
     
     qlength
-private final int qlength
+private final int qlength
     
     
     
    @@ -344,7 +389,7 @@ extends 
     
     

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
    index 62bc799..5c004ce 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.TableInfo.html
    @@ -250,7 +250,7 @@
 242        Cell kv = cell;
 243        // null input == user explicitly wants to flush
 244        if (row == null && kv == null) {
-245          rollWriters();
+245          rollWriters(null);
 246          return;
 247        }
 248
@@ -284,636 +284,642 @@
 276          configureStoragePolicy(conf, fs, tableAndFamily, writerPath);
 277        }
 278
-279        // If any of the HFiles for the column families has reached
-280        // maxsize, we need to roll all the writers
-281        if (wl != null && wl.written + length >= maxsize) {
-282          this.rollRequested = true;
-283        }
-284
-285        // This can only happen once a row is finished though
-286        if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
-287          rollWriters();
-288        }
-289
    -290// create a new WAL writer, if 
    necessary
    -291if (wl == null || wl.writer == 
    null) {
    -292  if 
    (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
    -293HRegionLocation loc = null;
    -294
    -295String tableName = 
    Bytes.toString(tableNameBytes);
    -296if (tableName != null) {
    -297  try (Connection connection 
    = ConnectionFactory.createConnection(conf);
    -298 RegionLocator 
    locator =
    -299   
    connection.getRegionLocator(TableName.valueOf(tableName))) {
    -300loc = 
    locator.getRegionLocation(rowKey);
    -301  } catch (Throwable e) {
    -302LOG.warn("There's 
    something wrong when locating rowkey: " +
    -303  Bytes.toString(rowKey) 
    + " for tablename: " + tableName, e);
    -304loc = null;
    -305  } }
    -306
    -307if (null == loc) {
    -308  if (LOG.isTraceEnabled()) 
    {
    -309LOG.trace("failed to get 
    region location, so use default writer for rowkey: " +
    -310  
    Bytes.toString(rowKey));
    -311  }
    -312  wl = 
    getNewWriter(tableNameBytes, family, conf, null);
    -313} else {
    -314  if (LOG.isDebugEnabled()) 
    {
    -315LOG.debug("first rowkey: 
    [" + Bytes.toString(rowKey) + "]");
    -316  }
    -317  InetSocketAddress 
    initialIsa =
    -318  new 
    InetSocketAddress(loc.getHostname(), loc.getPort());
    -319  if 
    (initialIsa.isUnresolved()) {
    -320if (LOG.isTraceEnabled()) 
    {
    -321  LOG.trace("failed to 
    resolve bind address: " + loc.getHostname() + ":"
    -322  + loc.getPort() + 
    ", so use default writer");
    -323}
    -324wl = 
    getNewWriter(tableNameBytes, family, conf, null);
    -325  } else {
    -326if (LOG.isDebugEnabled()) 
    {
    -327  LOG.debug("use favored 
    nodes writer: " + initialIsa.getHostString());
    -328}
    -329wl = 
    getNewWriter(tableNameBytes, family, conf, new InetSocketAddress[] { 
    initialIsa
    -330});
    -331  }
    -332}
    -333  } else {
    -334wl = 
    getNewWriter(tableNameBytes, family, conf, null);
    -335  }
    -336}
    -337
    -338// we now have the proper WAL 
    writer. full steam ahead
    -339// TODO : Currently in 
    SettableTimeStamp but this will also move to ExtendedCell
    -340
    PrivateCellUtil.updateLatestStamp(cell, this.now);
    -341wl.writer.append(kv);
    -342wl.written += length;
    -343
    -344// Copy the row so we know when a 
    row transition.
    -345this.previousRow = rowKey;
    -346  }
    -347
    -348  private void rollWriters() throws 
    IOException {
    -349for (WriterLength wl : 
    this.writers.values()) {
    -350  if (wl.writer != null) {
    -351LOG.info(
    -352"Writer=" + 
    wl.writer.getPath() + ((wl.written == 0)? "": ", wrote=" + wl.written));
    -353close(wl.writer);
    -354  }
    -355  wl.writer = null;
    -356  wl.written = 0;
    -357}
    -358this.rollRequested = false;
    -359  }
    -360
    -361  /*
    -362   * Create a new StoreFile.Writer.
    -363   * @param family
    -364   * @return A WriterLength, 
    containing a new StoreFile.Writer.
    -365   * @throws IOException
    -366   */
    -367  
    
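For orientation, the usual way this OutputFormat is wired into a bulk-load job (a sketch under the assumption that a mapreduce Job already exists; configureIncrementalLoad sets up the partitioner and reducer that feed the RecordWriter shown above):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
    import org.apache.hadoop.mapreduce.Job;

    // 'job' is assumed to exist; tableName names the bulk-load target table.
    TableName tableName = TableName.valueOf("t1");
    try (Connection conn = ConnectionFactory.createConnection(job.getConfiguration());
         Table table = conn.getTable(tableName);
         RegionLocator locator = conn.getRegionLocator(tableName)) {
      // Configures total-order partitioning over the table's region boundaries.
      HFileOutputFormat2.configureIncrementalLoad(job, table, locator);
    }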

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
    index 3edfbef..9707b2c 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
    @@ -2459,5936 +2459,5935 @@
     2451  }
     2452
     2453  for (HStore s : storesToFlush) {
    -2454MemStoreSize flushableSize = 
    s.getFlushableSize();
    -2455
    totalSizeOfFlushableStores.incMemStoreSize(flushableSize);
    -2456
    storeFlushCtxs.put(s.getColumnFamilyDescriptor().getName(),
    -2457  
    s.createFlushContext(flushOpSeqId, tracker));
    -2458// for writing stores to WAL
    -2459
    committedFiles.put(s.getColumnFamilyDescriptor().getName(), null);
    -2460
    storeFlushableSize.put(s.getColumnFamilyDescriptor().getName(), 
    flushableSize);
    -2461  }
    -2462
-2463      // write the snapshot start to WAL
-2464      if (wal != null && !writestate.readOnly) {
-2465        FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
-2466            getRegionInfo(), flushOpSeqId, committedFiles);
-2467        // No sync. Sync is below where no updates lock and we do FlushAction.COMMIT_FLUSH
-2468        WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false,
-2469            mvcc);
-2470      }
    -2471
    -2472  // Prepare flush (take a 
    snapshot)
    -2473  for (StoreFlushContext flush : 
    storeFlushCtxs.values()) {
    -2474flush.prepare();
    -2475  }
    -2476} catch (IOException ex) {
    -2477  doAbortFlushToWAL(wal, 
    flushOpSeqId, committedFiles);
    -2478  throw ex;
    -2479} finally {
    -2480  
    this.updatesLock.writeLock().unlock();
    -2481}
    -2482String s = "Finished memstore 
    snapshotting " + this + ", syncing WAL and waiting on mvcc, " +
    -2483"flushsize=" + 
    totalSizeOfFlushableStores;
    -2484status.setStatus(s);
    -2485doSyncOfUnflushedWALChanges(wal, 
    getRegionInfo());
    -2486return new 
    PrepareFlushResult(storeFlushCtxs, committedFiles, storeFlushableSize, 
    startTime,
    -2487flushOpSeqId, flushedSeqId, 
    totalSizeOfFlushableStores);
    -2488  }
    -2489
-2490  /**
-2491   * Utility method broken out of internalPrepareFlushCache so that method is smaller.
-2492   */
-2493  private void logFatLineOnFlush(Collection<HStore> storesToFlush, long sequenceId) {
-2494    if (!LOG.isInfoEnabled()) {
-2495      return;
-2496    }
-2497    // Log a fat line detailing what is being flushed.
-2498    StringBuilder perCfExtras = null;
-2499    if (!isAllFamilies(storesToFlush)) {
-2500      perCfExtras = new StringBuilder();
-2501      for (HStore store: storesToFlush) {
-2502        perCfExtras.append("; ").append(store.getColumnFamilyName());
-2503        perCfExtras.append("=")
-2504            .append(StringUtils.byteDesc(store.getFlushableSize().getDataSize()));
-2505      }
-2506    }
-2507    LOG.info("Flushing " + + storesToFlush.size() + "/" + stores.size() +
-2508        " column families, memstore=" + StringUtils.byteDesc(this.memstoreDataSize.get()) +
-2509        ((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") +
-2510        ((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + sequenceId));
-2511  }
-2512
-2513  private void doAbortFlushToWAL(final WAL wal, final long flushOpSeqId,
-2514      final Map<byte[], List<Path>> committedFiles) {
-2515    if (wal == null) return;
-2516    try {
-2517      FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
-2518          getRegionInfo(), flushOpSeqId, committedFiles);
-2519      WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false,
-2520          mvcc);
-2521    } catch (Throwable t) {
-2522      LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" +
-2523          StringUtils.stringifyException(t));
-2524      // ignore this since we will be aborting the RS with DSE.
-2525    }
-2526    // we have called wal.startCacheFlush(), now we have to abort it
-2527    wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
-2528  }
    -2529
    -2530  /**
    -2531   * Sync unflushed WAL changes. See 
    HBASE-8208 for details
    -2532   */
    -2533  private static void 
    doSyncOfUnflushedWALChanges(final WAL wal, final RegionInfo hri)
    -2534  throws IOException {
    -2535if (wal == null) {
    -2536  return;
    -2537}
    -2538try {
    -2539  wal.sync(); // ensure that flush 
    marker is sync'ed
    -2540} catch (IOException ioe) {
    -2541  
    

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
    index 1bddf29..f667b93 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
    @@ -124,380 +124,381 @@
     116  
    HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
     117// Go big. Multiply by 10. If we 
    can't get to meta after this many retries
     118// then something seriously wrong.
    -119int serversideMultiplier = 
    c.getInt("hbase.client.serverside.retries.multiplier", 10);
    -120int retries = hcRetries * 
    serversideMultiplier;
    -121
    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
    -122log.info(sn + " server-side 
    Connection retries=" + retries);
    -123  }
    -124
    -125  /**
    -126   * A ClusterConnection that will 
    short-circuit RPC making direct invocations against the
    -127   * localhost if the invocation target 
    is 'this' server; save on network and protobuf
    -128   * invocations.
    -129   */
    -130  // TODO This has to still do PB 
    marshalling/unmarshalling stuff. Check how/whether we can avoid.
    -131  @VisibleForTesting // Class is visible 
    so can assert we are short-circuiting when expected.
    -132  public static class 
    ShortCircuitingClusterConnection extends ConnectionImplementation {
    -133private final ServerName 
    serverName;
    -134private final 
    AdminService.BlockingInterface localHostAdmin;
    -135private final 
    ClientService.BlockingInterface localHostClient;
    -136
    -137private 
    ShortCircuitingClusterConnection(Configuration conf, ExecutorService pool, User 
    user,
    -138ServerName serverName, 
    AdminService.BlockingInterface admin,
    -139ClientService.BlockingInterface 
    client)
    -140throws IOException {
    -141  super(conf, pool, user);
    -142  this.serverName = serverName;
    -143  this.localHostAdmin = admin;
    -144  this.localHostClient = client;
    -145}
    -146
    -147@Override
    -148public AdminService.BlockingInterface 
    getAdmin(ServerName sn) throws IOException {
    -149  return serverName.equals(sn) ? 
    this.localHostAdmin : super.getAdmin(sn);
    -150}
    -151
    -152@Override
    -153public 
    ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
    -154  return serverName.equals(sn) ? 
    this.localHostClient : super.getClient(sn);
    -155}
    -156
    -157@Override
    -158public MasterKeepAliveConnection 
    getKeepAliveMasterService() throws MasterNotRunningException {
    -159  if (this.localHostClient instanceof 
    MasterService.BlockingInterface) {
    -160return new 
    ShortCircuitMasterConnection((MasterService.BlockingInterface)this.localHostClient);
    -161  }
    -162  return 
    super.getKeepAliveMasterService();
    -163}
    -164  }
    -165
    -166  /**
    -167   * Creates a short-circuit connection 
    that can bypass the RPC layer (serialization,
    -168   * deserialization, networking, etc..) 
    when talking to a local server.
    -169   * @param conf the current 
    configuration
    -170   * @param pool the thread pool to use 
    for batch operations
    -171   * @param user the user the connection 
    is for
    -172   * @param serverName the local server 
    name
    -173   * @param admin the admin interface of 
    the local server
    -174   * @param client the client interface 
    of the local server
-175   * @return a short-circuit connection.
    -176   * @throws IOException if IO failure 
    occurred
    -177   */
    -178  public static ClusterConnection 
    createShortCircuitConnection(final Configuration conf,
    -179  ExecutorService pool, User user, 
    final ServerName serverName,
    -180  final 
    AdminService.BlockingInterface admin, final ClientService.BlockingInterface 
    client)
    -181  throws IOException {
    -182if (user == null) {
    -183  user = 
    UserProvider.instantiate(conf).getCurrent();
    -184}
    -185return new 
    ShortCircuitingClusterConnection(conf, pool, user, serverName, admin, 
    client);
    -186  }
    -187
    -188  /**
    -189   * Setup the connection class, so that 
    it will not depend on master being online. Used for testing
    -190   * @param conf configuration to set
    -191   */
    -192  @VisibleForTesting
    -193  public static void 
    setupMasterlessConnection(Configuration conf) {
    -194
    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, 
    MasterlessConnection.class.getName());
    -195  }
    -196
    -197  /**
    -198   * Some tests shut down the master. But 
    table availability is a master RPC which is performed on
    -199   * region re-lookups.
    -200   */
    -201  static class MasterlessConnection 
    extends ConnectionImplementation {
    -202
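
A sketch of how a region server might obtain such a connection (hedged: conf, pool, serverName, and the server's own RPC stubs are assumed to exist in the caller; the factory signature is the one documented above, and passing null for user falls back to the current user per the code):

    import java.util.concurrent.ExecutorService;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hadoop.hbase.client.ConnectionUtils;
    import org.apache.hadoop.hbase.security.User;

    // adminStub/clientStub are the server's local AdminService/ClientService
    // blocking interfaces (assumed to exist).
    ClusterConnection conn = ConnectionUtils.createShortCircuitConnection(
        conf, pool, User.getCurrent(), serverName, adminStub, clientStub);
    // RPCs addressed to serverName now invoke the local stubs directly and skip
    // the network layer; all other servers go through the normal RPC path.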

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
    index 92c1c97..68f8a94 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
    @@ -307,860 +307,919 @@
     299   * was sent to HBase and may need some 
    time to finish the compact operation.
     300   * @param tableName table to compact
     301   */
-302  CompletableFuture<Void> compact(TableName tableName);
-303
-304  /**
-305   * Compact a column family within a table. When the returned CompletableFuture is done, it only
-306   * means the compact request was sent to HBase and may need some time to finish the compact
-307   * operation.
-308   * @param tableName table to compact
-309   * @param columnFamily column family within a table. If not present, compact all of the table's
-310   *          column families.
-311   */
-312  CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily);
-313
-314  /**
-315   * Compact an individual region. When the returned CompletableFuture is done, it only means the
-316   * compact request was sent to HBase and may need some time to finish the compact operation.
-317   * @param regionName region to compact
-318   */
-319  CompletableFuture<Void> compactRegion(byte[] regionName);
-320
-321  /**
-322   * Compact a column family within a region. When the returned CompletableFuture is done, it only
-323   * means the compact request was sent to HBase and may need some time to finish the compact
-324   * operation.
-325   * @param regionName region to compact
-326   * @param columnFamily column family within a region. If not present, compact all of the region's
-327   *          column families.
-328   */
-329  CompletableFuture<Void> compactRegion(byte[] regionName, byte[] columnFamily);
-330
-331  /**
-332   * Major compact a table. When the returned CompletableFuture is done, it only means the compact
-333   * request was sent to HBase and may need some time to finish the compact operation.
-334   * @param tableName table to major compact
-335   */
-336  CompletableFuture<Void> majorCompact(TableName tableName);
-337
-338  /**
-339   * Major compact a column family within a table. When the returned CompletableFuture is done, it
-340   * only means the compact request was sent to HBase and may need some time to finish the compact
-341   * operation.
-342   * @param tableName table to major compact
-343   * @param columnFamily column family within a table. If not present, major compact all of the
-344   *          table's column families.
-345   */
-346  CompletableFuture<Void> majorCompact(TableName tableName, byte[] columnFamily);
-347
-348  /**
-349   * Major compact a region. When the returned CompletableFuture is done, it only means the compact
-350   * request was sent to HBase and may need some time to finish the compact operation.
-351   * @param regionName region to major compact
-352   */
-353  CompletableFuture<Void> majorCompactRegion(byte[] regionName);
-354
-355  /**
-356   * Major compact a column family within a region. When the returned CompletableFuture is done, it
-357   * only means the compact request was sent to HBase and may need some time to finish the compact
-358   * operation.
-359   * @param regionName region to major compact
-360   * @param columnFamily column family within a region. If not present, major compact all of the
-361   *          region's column families.
-362   */
-363  CompletableFuture<Void> majorCompactRegion(byte[] regionName, byte[] columnFamily);
-364
-365  /**
-366   * Compact all regions on the region server.
-367   * @param serverName the region server name
-368   */
-369  CompletableFuture<Void> compactRegionServer(ServerName serverName);
-370
-371  /**
-372   * Major compact all regions on the region server.
-373   * @param serverName the region server name
-374   */
-375  CompletableFuture<Void> majorCompactRegionServer(ServerName serverName);
-376
-377  /**
-378   * Turn the Merge switch on or off.
-379   * @param on
-380   * @return Previous switch value wrapped by a {@link CompletableFuture}
-381   */
-382  CompletableFuture<Boolean> mergeSwitch(boolean on);
-383
-384  /**
-385   * Query the current state of the Merge switch.
-386   * @return true if the switch is on, false otherwise. The return value will be wrapped by a
-387   *         {@link CompletableFuture}
-388   */
-389  CompletableFuture<Boolean> isMergeEnabled();
-390
-391  /**
-392   * Turn the Split switch on or off.
-393   * @param on
-394   * @return Previous switch value wrapped by a {@link CompletableFuture}
-395   */
-396  CompletableFuture<Boolean> splitSwitch(boolean on);
-397
-398  /**
-399   * 
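
A brief usage sketch (assuming an AsyncConnection named asyncConn has already been obtained elsewhere; note, per the javadocs above, that completion of the returned future only means the request reached HBase, not that the compaction finished):

    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.util.Bytes;

    AsyncAdmin admin = asyncConn.getAdmin();
    // Request a major compaction of column family 'd' on table 't1'.
    CompletableFuture<Void> done =
        admin.majorCompact(TableName.valueOf("t1"), Bytes.toBytes("d"));
    done.thenRun(() -> System.out.println("major compact request sent"));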

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
    index 84e9e52..252bcc2 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
    @@ -56,290 +56,293 @@
     048import 
    org.apache.yetus.audience.InterfaceAudience;
     049import 
    org.apache.zookeeper.KeeperException;
     050
    -051import 
    org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
    -052
    -053/**
    -054 * Store Region State to hbase:meta 
    table.
    -055 */
    -056@InterfaceAudience.Private
    -057public class RegionStateStore {
    -058  private static final Log LOG = 
    LogFactory.getLog(RegionStateStore.class);
    -059
-060  /** The delimiter for meta columns for replicaIds > 0 */
-061  protected static final char META_REPLICA_ID_DELIMITER = '_';
    -062
    -063  private final MasterServices master;
    -064
    -065  private MultiHConnection 
    multiHConnection;
    -066
    -067  public RegionStateStore(final 
    MasterServices master) {
    -068this.master = master;
    -069  }
    -070
    -071  public void start() throws IOException 
    {
    -072  }
    -073
    -074  public void stop() {
    -075if (multiHConnection != null) {
    -076  multiHConnection.close();
    -077  multiHConnection = null;
    -078}
    -079  }
    -080
    -081  public interface RegionStateVisitor {
    -082void visitRegionState(RegionInfo 
    regionInfo, State state,
    -083  ServerName regionLocation, 
    ServerName lastHost, long openSeqNum);
    -084  }
    -085
    -086  public void visitMeta(final 
    RegionStateVisitor visitor) throws IOException {
    -087
    MetaTableAccessor.fullScanRegions(master.getConnection(), new 
    MetaTableAccessor.Visitor() {
    -088  final boolean isDebugEnabled = 
    LOG.isDebugEnabled();
    -089
    -090  @Override
    -091  public boolean visit(final Result 
    r) throws IOException {
-092        if (r != null && !r.isEmpty()) {
    -093  long st = 0;
    -094  if (LOG.isTraceEnabled()) {
    -095st = 
    System.currentTimeMillis();
    -096  }
    -097  visitMetaEntry(visitor, r);
    -098  if (LOG.isTraceEnabled()) {
    -099long et = 
    System.currentTimeMillis();
    -100LOG.trace("[T] LOAD META PERF 
    " + StringUtils.humanTimeDiff(et - st));
    -101  }
    -102} else if (isDebugEnabled) {
    -103  LOG.debug("NULL result from 
    meta - ignoring but this is strange.");
    -104}
    -105return true;
    -106  }
    -107});
    -108  }
    -109
    -110  private void visitMetaEntry(final 
    RegionStateVisitor visitor, final Result result)
    -111  throws IOException {
    -112final RegionLocations rl = 
    MetaTableAccessor.getRegionLocations(result);
    -113if (rl == null) return;
    -114
    -115final HRegionLocation[] locations = 
    rl.getRegionLocations();
    -116if (locations == null) return;
    -117
-118    for (int i = 0; i < locations.length; ++i) {
    -119  final HRegionLocation hrl = 
    locations[i];
    -120  if (hrl == null) continue;
    -121
    -122  final RegionInfo regionInfo = 
    hrl.getRegionInfo();
    -123  if (regionInfo == null) continue;
    -124
    -125  final int replicaId = 
    regionInfo.getReplicaId();
    -126  final State state = 
    getRegionState(result, replicaId);
    -127
    -128  final ServerName lastHost = 
    hrl.getServerName();
    -129  final ServerName regionLocation = 
    getRegionServer(result, replicaId);
    -130  final long openSeqNum = -1;
    -131
    -132  // TODO: move under trace, now is 
    visible for debugging
    -133  LOG.info(String.format("Load 
    hbase:meta entry region=%s regionState=%s lastHost=%s regionLocation=%s",
    -134regionInfo, state, lastHost, 
    regionLocation));
    -135
    -136  
    visitor.visitRegionState(regionInfo, state, regionLocation, lastHost, 
    openSeqNum);
    -137}
    -138  }
    -139
    -140  public void updateRegionLocation(final 
    RegionInfo regionInfo, final State state,
    -141  final ServerName regionLocation, 
    final ServerName lastHost, final long openSeqNum,
    -142  final long pid)
    -143  throws IOException {
    -144if (regionInfo.isMetaRegion()) {
    -145  updateMetaLocation(regionInfo, 
    regionLocation);
    -146} else {
    -147  
    updateUserRegionLocation(regionInfo, state, regionLocation, lastHost, 
    openSeqNum, pid);
    -148}
    -149  }
    -150
    -151  public void updateRegionState(final 
    long openSeqNum, final long pid,
    -152  final RegionState newState, final 
    RegionState oldState) throws IOException {
    -153
    updateRegionLocation(newState.getRegion(), newState.getState(), 
    newState.getServerName(),
    -154oldState != null ? 
    oldState.getServerName() : null, openSeqNum, 
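
For illustration, a minimal visitor that logs every region state loaded from hbase:meta (a sketch against the RegionStateVisitor interface shown above; 'store' is assumed to be an initialized RegionStateStore):

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.master.RegionState.State;
    import org.apache.hadoop.hbase.master.assignment.RegionStateStore;

    store.visitMeta(new RegionStateStore.RegionStateVisitor() {
      @Override
      public void visitRegionState(RegionInfo regionInfo, State state,
          ServerName regionLocation, ServerName lastHost, long openSeqNum) {
        // Invoked once per region (and replica) row found in hbase:meta.
        System.out.println(regionInfo.getEncodedName() + " state=" + state +
            " location=" + regionLocation + " lastHost=" + lastHost);
      }
    });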

    [21/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
    index de4d67d..35229c7 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
    @@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
     
     
     @InterfaceAudience.Private
    -public class RegionCoprocessorHost
    +public class RegionCoprocessorHost
     extends CoprocessorHostRegionCoprocessor,RegionCoprocessorEnvironment
     Implements the coprocessor environment and runtime support 
    for coprocessors
      loaded within a Region.
    @@ -797,7 +797,7 @@ extends 
     
     LOG
    -private static finalorg.apache.commons.logging.Log LOG
    +private static finalorg.apache.commons.logging.Log LOG
     
     
     
    @@ -806,7 +806,7 @@ extends 
     
     SHARED_DATA_MAP
-private static final org.apache.commons.collections4.map.ReferenceMap<String,ConcurrentMap<String,Object>> SHARED_DATA_MAP
+private static final org.apache.commons.collections4.map.ReferenceMap<String,ConcurrentMap<String,Object>> SHARED_DATA_MAP
     
     
     
    @@ -815,7 +815,7 @@ extends 
     
     hasCustomPostScannerFilterRow
    -private finalboolean hasCustomPostScannerFilterRow
    +private finalboolean hasCustomPostScannerFilterRow
     
     
     
    @@ -824,7 +824,7 @@ extends 
     
     rsServices
    -RegionServerServices rsServices
    +RegionServerServices rsServices
     The region server services
     
     
    @@ -834,7 +834,7 @@ extends 
     
     region
    -HRegion region
    +HRegion region
     The region
     
     
    @@ -844,7 +844,7 @@ extends 
     
     regionObserverGetter
    -privateCoprocessorHost.ObserverGetterRegionCoprocessor,RegionObserver 
    regionObserverGetter
    +privateCoprocessorHost.ObserverGetterRegionCoprocessor,RegionObserver 
    regionObserverGetter
     
     
     
    @@ -853,7 +853,7 @@ extends 
     
     endpointObserverGetter
    -privateCoprocessorHost.ObserverGetterRegionCoprocessor,EndpointObserver endpointObserverGetter
    +privateCoprocessorHost.ObserverGetterRegionCoprocessor,EndpointObserver endpointObserverGetter
     
     
     
    @@ -870,7 +870,7 @@ extends 
     
     RegionCoprocessorHost
    -publicRegionCoprocessorHost(HRegionregion,
    +publicRegionCoprocessorHost(HRegionregion,
      RegionServerServicesrsServices,
      
    org.apache.hadoop.conf.Configurationconf)
     Constructor
    @@ -896,7 +896,7 @@ extends 
     
     getTableCoprocessorAttrsFromSchema
-static List<RegionCoprocessorHost.TableCoprocessorAttribute> getTableCoprocessorAttrsFromSchema(org.apache.hadoop.conf.Configuration conf,
+static List<RegionCoprocessorHost.TableCoprocessorAttribute> getTableCoprocessorAttrsFromSchema(org.apache.hadoop.conf.Configuration conf,
                                                                                                 TableDescriptor htd)
     
     
    @@ -906,7 +906,7 @@ extends 
     
     testTableCoprocessorAttrs
-public static void testTableCoprocessorAttrs(org.apache.hadoop.conf.Configuration conf,
+public static void testTableCoprocessorAttrs(org.apache.hadoop.conf.Configuration conf,
                                              TableDescriptor htd)
                                       throws IOException
     Sanity check the table coprocessor attributes of the 
    supplied schema. Will
    @@ -926,7 +926,7 @@ extends 
     
     loadTableCoprocessors
-void loadTableCoprocessors(org.apache.hadoop.conf.Configuration conf)
    

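To make the sanity check above concrete, a hedged sketch of validating a table schema's coprocessor attributes before table creation (the observer class name is hypothetical; testTableCoprocessorAttrs is the public static helper documented above, and setCoprocessor(String) is assumed to be available on TableDescriptorBuilder in this version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;

    Configuration conf = HBaseConfiguration.create();
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
        .setCoprocessor("org.example.MyRegionObserver")  // hypothetical observer class
        .build();
    // Throws IOException if the coprocessor specification on the schema is malformed.
    RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd);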