[20/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
index 127b088..42abebf 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
@@ -44,367 +44,369 @@
 036import 
org.apache.hadoop.hbase.HConstants;
 037import 
org.apache.hadoop.hbase.ServerName;
 038import 
org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
-039import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-040import 
org.apache.hadoop.hbase.util.FSUtils;
-041import 
org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-042import 
org.apache.hadoop.hbase.wal.WALSplitter;
-043import 
org.apache.yetus.audience.InterfaceAudience;
-044import org.slf4j.Logger;
-045import org.slf4j.LoggerFactory;
-046import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-047
-048/**
-049 * This class abstracts a bunch of 
operations the HMaster needs
-050 * when splitting log files e.g. finding 
log files, dirs etc.
-051 */
-052@InterfaceAudience.Private
-053public class MasterWalManager {
-054  private static final Logger LOG = 
LoggerFactory.getLogger(MasterWalManager.class);
-055
-056  final static PathFilter META_FILTER = 
new PathFilter() {
-057@Override
-058public boolean accept(Path p) {
-059  return 
AbstractFSWALProvider.isMetaFile(p);
-060}
-061  };
-062
-063  @VisibleForTesting
-064  public final static PathFilter 
NON_META_FILTER = new PathFilter() {
-065@Override
-066public boolean accept(Path p) {
-067  return 
!AbstractFSWALProvider.isMetaFile(p);
-068}
-069  };
-070
-071  // metrics for master
-072  // TODO: Rename it, since those metrics 
are split-manager related
-073  private final MetricsMasterFileSystem 
metricsMasterFilesystem = new MetricsMasterFileSystem();
-074
-075  // Keep around for convenience.
-076  private final MasterServices 
services;
-077  private final Configuration conf;
-078  private final FileSystem fs;
-079
-080  // The Path to the old logs dir
-081  private final Path oldLogDir;
-082  private final Path rootDir;
-083
-084  // create the split log lock
-085  private final Lock splitLogLock = new 
ReentrantLock();
-086  private final SplitLogManager 
splitLogManager;
-087
-088  // Is the filesystem ok?
-089  private volatile boolean fsOk = true;
-090
-091  public MasterWalManager(MasterServices 
services) throws IOException {
-092this(services.getConfiguration(), 
services.getMasterFileSystem().getWALFileSystem(),
-093  
services.getMasterFileSystem().getWALRootDir(), services);
-094  }
-095
-096  public MasterWalManager(Configuration 
conf, FileSystem fs, Path rootDir, MasterServices services)
-097  throws IOException {
-098this.fs = fs;
-099this.conf = conf;
-100this.rootDir = rootDir;
-101this.services = services;
-102this.splitLogManager = new 
SplitLogManager(services, conf);
-103
-104this.oldLogDir = new Path(rootDir, 
HConstants.HREGION_OLDLOGDIR_NAME);
-105  }
-106
-107  public void stop() {
-108if (splitLogManager != null) {
-109  splitLogManager.stop();
-110}
-111  }
-112
-113  @VisibleForTesting
-114  SplitLogManager getSplitLogManager() 
{
-115return this.splitLogManager;
-116  }
-117
-118  /**
-119   * Get the directory where old logs 
go
-120   * @return the dir
-121   */
-122  Path getOldLogDir() {
-123return this.oldLogDir;
-124  }
-125
-126  public FileSystem getFileSystem() {
-127return this.fs;
-128  }
-129
-130  /**
-131   * Checks to see if the file system is 
still accessible.
-132   * If not, sets closed
-133   * @return false if file system is not 
available
-134   */
-135  private boolean checkFileSystem() {
-136if (this.fsOk) {
-137  try {
-138
FSUtils.checkFileSystemAvailable(this.fs);
-139
FSUtils.checkDfsSafeMode(this.conf);
-140  } catch (IOException e) {
-141services.abort("Shutting down 
HBase cluster: file system not available", e);
-142this.fsOk = false;
-143  }
-144}
-145return this.fsOk;
-146  }
-147
-148  /**
-149   * Get Servernames which are currently splitting; paths have a '-splitting' suffix.
-150   * @return ServerName
-151   * @throws IOException IOException
-152   */
-153  public Set<ServerName> getSplittingServersFromWALDir() throws IOException {
-154    return getServerNamesFromWALDirPath(
-155      p -> p.getName().endsWith(AbstractFSWALProvider.SPLITTING_EXT));
-156  }
-157
-158  /**
-159   * Get Servernames that COULD BE 'alive'; excludes those that have a '-splitting' suffix as these
-160   * are already being split -- they cannot be 'alive'.
-161   * @return ServerName

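Aside, for readers skimming the hunk above: the META_FILTER / NON_META_FILTER pair simply partitions WAL files by name. A minimal standalone sketch of the same split, assuming AbstractFSWALProvider.isMetaFile() keys off a ".meta" name suffix (the suffix constant and the lambda form are illustrative; the hunk itself uses anonymous classes):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

public class WalFilters {
  // Stand-in for AbstractFSWALProvider.isMetaFile; the ".meta" suffix is an
  // assumption made for this sketch.
  private static final String META_WAL_SUFFIX = ".meta";

  static boolean isMetaFile(Path p) {
    return p.getName().endsWith(META_WAL_SUFFIX);
  }

  // The same partition the hunk expresses with anonymous PathFilter classes:
  // PathFilter has a single accept(Path) method, so method refs/lambdas work.
  static final PathFilter META_FILTER = WalFilters::isMetaFile;
  static final PathFilter NON_META_FILTER = p -> !isMetaFile(p);
}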
[20/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/master/MasterServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterServices.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterServices.html
index 03cc2d1..8344ce2 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterServices.html
@@ -18,8 +18,8 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":6,"i58":6};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"]};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":18,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":6,"i57":6,"i58":6,"i59":6};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
@@ -129,7 +129,7 @@ extends 
-All Methods Instance Methods Abstract Methods
+All Methods Instance Methods Abstract Methods Default Methods
 
 Modifier and Type
 Method and Description
@@ -343,58 +343,62 @@ extends getSnapshotManager()
 
 
+default SplitWALManager
+getSplitWALManager()
+
+
 SyncReplicationReplayWALManager
 getSyncReplicationReplayWALManager()
 Returns the SyncReplicationReplayWALManager.
 
 
-
+
 TableDescriptors
 getTableDescriptors()
 
-
+
 TableStateManager
 getTableStateManager()
 
-
+
 boolean
 isActiveMaster()
 
-
+
 boolean
 isClusterUp()
 
-
+
 boolean
 isInitialized()
 
-
+
 boolean
 isInMaintenanceMode()
 
-
+
 boolean
isSplitOrMergeEnabled(MasterSwitchType switchType)
 
-
+
List<ReplicationPeerDescription>
listReplicationPeers(String regex)
Return a list of replication peers.
 
 
-
+
List<TableDescriptor>
listTableDescriptorsByNamespace(String name)
Get list of table descriptors by namespace
 
 
-
+
List<TableName>
listTableNamesByNamespace(String name)
Get list of table names by namespace
 
 
-
+
 long
 mergeRegions(RegionInfo[] regionsToMerge,
 boolean forcible,
@@ -403,7 +407,7 @@ extends Merge regions in a table.
 
 
-
+
 long
 modifyColumn(TableName tableName,
 ColumnFamilyDescriptor descriptor,
@@ -412,7 +416,7 @@ extends Modify the column descriptor of an existing column in an existing table
 
 
-
+
 long
 modifyTable(TableName tableName,
 TableDescriptor descriptor,
@@ -421,19 +425,19 @@ extends Modify the descriptor of an existing table
 
 
-
+
 boolean
 registerService(com.google.protobuf.Service instance)
 Registers a new protocol buffer Service subclass as a master coprocessor endpoint.
 
 
-
+
 long
 removeReplicationPeer(String peerId)
 Removes a peer and stops the replication
 
 
-
+
 long
 splitRegion(RegionInfo regionInfo,
 byte[] splitRow,
@@ -442,14 +446,14 @@ extends Split a region.
 
 
-
+
 long
 transitReplicationPeerSyncReplicationState(String peerId,
   SyncReplicationState clusterState)
 Set current cluster state for a synchronous replication peer.
 
 
-
+
 long
 truncateTable(TableName tableName,
  

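A note on this entry: the substantive change is that MasterServices gains its first default method, getSplitWALManager(), which is why the generated page now needs a "Default Methods" tab (the new 16:["t5","Default Methods"] entry in the tabs map, and the "i39":18 flag marking that one method). A hedged illustration of the pattern on an invented interface:

// Hypothetical interface (names invented) showing why javadoc renders a
// "Default Methods" tab: one method carries a body, the rest stay abstract.
interface MasterLikeServices {
  String getServerName();                 // abstract -> "Abstract Methods" tab

  // default -> "Default Methods" tab; existing implementations compile
  // unchanged and inherit this fallback until they choose to override it.
  default String getSplitWalManagerName() {
    throw new UnsupportedOperationException("split WAL manager not wired in");
  }
}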
[20/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.BatchCallerBuilder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.BatchCallerBuilder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.BatchCallerBuilder.html
index 2e150bc..0b315b8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.BatchCallerBuilder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.BatchCallerBuilder.html
@@ -25,22 +25,22 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import static 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
-021import static 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
-022import static 
org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+020import static 
org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+021import static 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
+022import static 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
 023
-024import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-025
-026import java.util.List;
-027import 
java.util.concurrent.CompletableFuture;
-028import java.util.concurrent.TimeUnit;
-029
-030import 
org.apache.hadoop.hbase.HRegionLocation;
-031import 
org.apache.hadoop.hbase.ServerName;
-032import 
org.apache.hadoop.hbase.TableName;
-033import 
org.apache.yetus.audience.InterfaceAudience;
-034import 
org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-035import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
+024import java.util.List;
+025import 
java.util.concurrent.CompletableFuture;
+026import java.util.concurrent.TimeUnit;
+027import 
org.apache.hadoop.hbase.HRegionLocation;
+028import 
org.apache.hadoop.hbase.ServerName;
+029import 
org.apache.hadoop.hbase.TableName;
+030import 
org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+031import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
+032import 
org.apache.yetus.audience.InterfaceAudience;
+033
+034import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+035
 036import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 037import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
 038
@@ -83,432 +83,441 @@
 075
 076    private RegionLocateType locateType = RegionLocateType.CURRENT;
 077
-078    public SingleRequestCallerBuilder<T> table(TableName tableName) {
-079      this.tableName = tableName;
-080      return this;
-081    }
-082
-083    public SingleRequestCallerBuilder<T> row(byte[] row) {
-084      this.row = row;
-085      return this;
-086    }
-087
-088    public SingleRequestCallerBuilder<T> action(
-089        AsyncSingleRequestRpcRetryingCaller.Callable<T> callable) {
-090      this.callable = callable;
-091      return this;
-092    }
-093
-094    public SingleRequestCallerBuilder<T> operationTimeout(long operationTimeout, TimeUnit unit) {
-095      this.operationTimeoutNs = unit.toNanos(operationTimeout);
-096      return this;
-097    }
-098
-099    public SingleRequestCallerBuilder<T> rpcTimeout(long rpcTimeout, TimeUnit unit) {
-100      this.rpcTimeoutNs = unit.toNanos(rpcTimeout);
-101      return this;
-102    }
-103
-104    public SingleRequestCallerBuilder<T> locateType(RegionLocateType locateType) {
-105      this.locateType = locateType;
-106      return this;
-107    }
-108
-109    public SingleRequestCallerBuilder<T> pause(long pause, TimeUnit unit) {
-110      this.pauseNs = unit.toNanos(pause);
-111      return this;
-112    }
-113
-114    public SingleRequestCallerBuilder<T> maxAttempts(int maxAttempts) {
-115      this.maxAttempts = maxAttempts;
-116      return this;
-117    }
-118
-119    public SingleRequestCallerBuilder<T> startLogErrorsCnt(int startLogErrorsCnt) {
-120      this.startLogErrorsCnt = startLogErrorsCnt;
-121      return this;
-122    }
-123
-124    public AsyncSingleRequestRpcRetryingCaller<T> build() {
-125      return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn,
-126          checkNotNull(tableName, "tableName is null"), checkNotNull(row, "row is null"),
-127          checkNotNull(locateType, "locateType is null"), checkNotNull(callable, "action is null"),
-128          pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
+078    private int replicaId = RegionReplicaUtil.DEFAULT_REPLICA_ID;
+079
+080    public SingleRequestCallerBuilder<T> table(TableName tableName) {
+081      this.tableName = tableName;
+082      return this;
+083    }
+084
+085    public SingleRequestCallerBuilder<T> row(byte[] row) {
+086      this.row = row;
+087

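Aside: the block being moved above is a standard fluent builder: every setter returns this so calls chain, time values are normalized to nanoseconds at the boundary, and build() defers required-field validation to checkNotNull. A self-contained imitation of that shape, with toy types rather than the HBase classes:

import java.util.concurrent.TimeUnit;
import static java.util.Objects.requireNonNull;

final class CallerBuilder<T> {
  private String tableName;
  private byte[] row;
  private long operationTimeoutNs;

  CallerBuilder<T> table(String tableName) { this.tableName = tableName; return this; }
  CallerBuilder<T> row(byte[] row) { this.row = row; return this; }
  CallerBuilder<T> operationTimeout(long timeout, TimeUnit unit) {
    this.operationTimeoutNs = unit.toNanos(timeout);  // normalize once, like the hunk
    return this;
  }

  String build() {
    requireNonNull(tableName, "tableName is null");   // mirrors checkNotNull in build()
    requireNonNull(row, "row is null");
    return tableName + "/" + new String(row) + "/" + operationTimeoutNs;
  }
}

// usage: new CallerBuilder<Void>().table("t").row("r".getBytes())
//            .operationTimeout(30, TimeUnit.SECONDS).build();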
[20/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.ResultScannerWrapper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.ResultScannerWrapper.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.ResultScannerWrapper.html
new file mode 100644
index 000..5b5b199
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.ResultScannerWrapper.html
@@ -0,0 +1,1419 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 *
+003 * Licensed to the Apache Software 
Foundation (ASF) under one
+004 * or more contributor license 
agreements.  See the NOTICE file
+005 * distributed with this work for 
additional information
+006 * regarding copyright ownership.  The 
ASF licenses this file
+007 * to you under the Apache License, 
Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance
+009 * with the License.  You may obtain a 
copy of the License at
+010 *
+011 * 
http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or 
agreed to in writing, software
+014 * distributed under the License is 
distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+016 * See the License for the specific 
language governing permissions and
+017 * limitations under the License.
+018 */
+019
+020package org.apache.hadoop.hbase.thrift;
+021
+022import static 
org.apache.hadoop.hbase.thrift.Constants.COALESCE_INC_KEY;
+023import static 
org.apache.hadoop.hbase.util.Bytes.getBytes;
+024
+025import java.io.IOException;
+026import java.nio.ByteBuffer;
+027import java.util.ArrayList;
+028import java.util.Collections;
+029import java.util.HashMap;
+030import java.util.List;
+031import java.util.Map;
+032import java.util.TreeMap;
+033
+034import 
org.apache.hadoop.conf.Configuration;
+035import org.apache.hadoop.hbase.Cell;
+036import 
org.apache.hadoop.hbase.CellBuilder;
+037import 
org.apache.hadoop.hbase.CellBuilderFactory;
+038import 
org.apache.hadoop.hbase.CellBuilderType;
+039import 
org.apache.hadoop.hbase.CellUtil;
+040import 
org.apache.hadoop.hbase.HColumnDescriptor;
+041import 
org.apache.hadoop.hbase.HConstants;
+042import 
org.apache.hadoop.hbase.HRegionLocation;
+043import 
org.apache.hadoop.hbase.HTableDescriptor;
+044import 
org.apache.hadoop.hbase.KeyValue;
+045import 
org.apache.hadoop.hbase.MetaTableAccessor;
+046import 
org.apache.hadoop.hbase.ServerName;
+047import 
org.apache.hadoop.hbase.TableName;
+048import 
org.apache.hadoop.hbase.TableNotFoundException;
+049import 
org.apache.hadoop.hbase.client.Append;
+050import 
org.apache.hadoop.hbase.client.Delete;
+051import 
org.apache.hadoop.hbase.client.Durability;
+052import 
org.apache.hadoop.hbase.client.Get;
+053import 
org.apache.hadoop.hbase.client.Increment;
+054import 
org.apache.hadoop.hbase.client.OperationWithAttributes;
+055import 
org.apache.hadoop.hbase.client.Put;
+056import 
org.apache.hadoop.hbase.client.RegionInfo;
+057import 
org.apache.hadoop.hbase.client.RegionLocator;
+058import 
org.apache.hadoop.hbase.client.Result;
+059import 
org.apache.hadoop.hbase.client.ResultScanner;
+060import 
org.apache.hadoop.hbase.client.Scan;
+061import 
org.apache.hadoop.hbase.client.Table;
+062import 
org.apache.hadoop.hbase.filter.Filter;
+063import 
org.apache.hadoop.hbase.filter.ParseFilter;
+064import 
org.apache.hadoop.hbase.filter.PrefixFilter;
+065import 
org.apache.hadoop.hbase.filter.WhileMatchFilter;
+066import 
org.apache.hadoop.hbase.security.UserProvider;
+067import 
org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
+068import 
org.apache.hadoop.hbase.thrift.generated.BatchMutation;
+069import 
org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
+070import 
org.apache.hadoop.hbase.thrift.generated.Hbase;
+071import 
org.apache.hadoop.hbase.thrift.generated.IOError;
+072import 
org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
+073import 
org.apache.hadoop.hbase.thrift.generated.Mutation;
+074import 
org.apache.hadoop.hbase.thrift.generated.TAppend;
+075import 
org.apache.hadoop.hbase.thrift.generated.TCell;
+076import 
org.apache.hadoop.hbase.thrift.generated.TIncrement;
+077import 
org.apache.hadoop.hbase.thrift.generated.TRegionInfo;
+078import 
org.apache.hadoop.hbase.thrift.generated.TRowResult;
+079import 
org.apache.hadoop.hbase.thrift.generated.TScan;
+080import 
org.apache.hadoop.hbase.util.Bytes;
+081import org.apache.thrift.TException;
+082import 
org.apache.yetus.audience.InterfaceAudience;
+083import org.slf4j.Logger;
+084import org.slf4j.LoggerFactory;
+085
+086import 
org.apache.hbase.thirdparty.com.google.common.base.Throwables;
+087
+088/**
+089 * The HBaseServiceHandler is a glue 
object that 

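Aside: the new page documents ThriftHBaseServiceHandler.ResultScannerWrapper. Judging from the handler code around it, the wrapper's job is to keep an open ResultScanner together with its per-scanner options so the Thrift layer can hand clients a plain integer scanner id; a sketch under that assumption (the registry class and its method names are invented):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.client.ResultScanner;

final class ScannerRegistry {
  static final class ResultScannerWrapper {
    final ResultScanner scanner;
    final boolean sortColumns;  // per-scanner render option kept with the scanner
    ResultScannerWrapper(ResultScanner scanner, boolean sortColumns) {
      this.scanner = scanner;
      this.sortColumns = sortColumns;
    }
  }

  private final AtomicInteger nextId = new AtomicInteger(0);
  private final Map<Integer, ResultScannerWrapper> scanners = new ConcurrentHashMap<>();

  // Thrift cannot pass scanner objects over the wire, so register each open
  // scanner under an int id and return the id to the client.
  int register(ResultScanner scanner, boolean sortColumns) {
    int id = nextId.incrementAndGet();
    scanners.put(id, new ResultScannerWrapper(scanner, sortColumns));
    return id;
  }

  ResultScannerWrapper lookup(int id) { return scanners.get(id); }

  void remove(int id) { scanners.remove(id); }
}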
[20/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/exceptions/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/exceptions/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/exceptions/package-use.html
index d51f5e6..555dae3 100644
--- a/devapidocs/org/apache/hadoop/hbase/exceptions/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/exceptions/package-use.html
@@ -566,6 +566,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/executor/EventHandler.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/executor/EventHandler.html 
b/devapidocs/org/apache/hadoop/hbase/executor/EventHandler.html
index bead54b..cbdc6ff 100644
--- a/devapidocs/org/apache/hadoop/hbase/executor/EventHandler.html
+++ b/devapidocs/org/apache/hadoop/hbase/executor/EventHandler.html
@@ -619,6 +619,6 @@ implements https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/executor/EventType.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/executor/EventType.html 
b/devapidocs/org/apache/hadoop/hbase/executor/EventType.html
index 9a7165e..c3ff4e0 100644
--- a/devapidocs/org/apache/hadoop/hbase/executor/EventType.html
+++ b/devapidocs/org/apache/hadoop/hbase/executor/EventType.html
@@ -1183,6 +1183,6 @@ not permitted.)
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.Executor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.Executor.html 
b/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.Executor.html
index 0daf97d..8290699 100644
--- a/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.Executor.html
+++ b/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.Executor.html
@@ -431,6 +431,6 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.ExecutorStatus.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.ExecutorStatus.html
 
b/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.ExecutorStatus.html
index cbfa071..c603d8c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.ExecutorStatus.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.ExecutorStatus.html
@@ -357,6 +357,6 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.RunningEventStatus.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.RunningEventStatus.html
 
b/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.RunningEventStatus.html
index e4289ac..64d5fcb 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.RunningEventStatus.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/executor/ExecutorService.RunningEventStatus.html
@@ -290,6 +290,6 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache

[20/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.html 
b/devapidocs/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.html
index 938c09f..bd49ba8 100644
--- a/devapidocs/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.html
+++ b/devapidocs/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.html
@@ -278,7 +278,7 @@ implements RegionObserver
-postAppend, postBatchMutate, postBatchMutateIndispensably, postBulkLoadHFile, postCheckAndDelete, postCheckAndPut, postClose, postCloseRegionOperation, postCommitStoreFile, postCompact, postCompactSelection, postDelete, postExists, postFlush, postFlush, postGetOp, postIncrement, postInstantiateDeleteTracker, postMemStoreCompaction, postMutationBeforeWAL, postOpen, postPut, postReplayWALs, postScannerClose, postScannerFilterRow, postScannerNext, postScannerOpen, postStartRegionOperation, postStoreFileReaderOpen, postWALRestore, preAppend, preAppendAfterRowLock, preBulkLoadHFile, preCheckAndDelete, preCheckAndDeleteAfterRowLock, preCheckAndPut, preCheckAndPutAfterRowLock, preClose, preCommitStoreFile, preCompact, preCompactScannerOpen, preCompactSelection, preDelete, preExists, preFlush, preFlush, preFlushScannerOpen, preGetOp, preIncrement, preIncrementAfterRowLock, preMemStoreCompaction, preMemStoreCompactionCompact, preMemStoreCompactionCompactScannerOpen, prePrepareTimeStampForDeleteVersion, prePut, preReplayWALs, preScannerClose, preScannerNext, preScannerOpen, preStoreFileReaderOpen, preStoreScannerOpen, preWALRestore
+postAppend, postAppendBeforeWAL, postBatchMutate, postBatchMutateIndispensably, postBulkLoadHFile, postCheckAndDelete, postCheckAndPut, postClose, postCloseRegionOperation, postCommitStoreFile, postCompact, postCompactSelection, postDelete, postExists, postFlush, postFlush, postGetOp, postIncrement, postIncrementBeforeWAL, postInstantiateDeleteTracker, postMemStoreCompaction, postMutationBeforeWAL, postOpen, postPut, postReplayWALs, postScannerClose, postScannerFilterRow, postScannerNext, postScannerOpen, postStartRegionOperation, postStoreFileReaderOpen, postWALRestore, preAppend, preAppendAfterRowLock, preBulkLoadHFile, preCheckAndDelete, preCheckAndDeleteAfterRowLock, preCheckAndPut, preCheckAndPutAfterRowLock, preClose, preCommitStoreFile, preCompact, preCompactScannerOpen, preCompactSelection, preDelete, preExists, preFlush, preFlush, preFlushScannerOpen, preGetOp, preIncrement, preIncrementAfterRowLock, preMemStoreCompaction, preMemStoreCompactionCompact, preMemStoreCompactionCompactScannerOpen, prePrepareTimeStampForDeleteVersion, prePut, preReplayWALs, preScannerClose, preScannerNext, preScannerOpen, preStoreFileReaderOpen, preStoreScannerOpen, preWALRestore
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html 
b/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
index da375bf..9db284e 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/class-use/Pair.html
@@ -675,6 +675,33 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 Uses of Pair in org.apache.hadoop.hbase.coprocessor
 
+Methods in org.apache.hadoop.hbase.coprocessor
 that return types with arguments of type Pair
+
+Modifier and Type
+Method and Description
+
+
+
+default List<Pair<Cell,Cell>>
+RegionObserver.postAppendBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
+   Mutation mutation,
+   List<Pair<Cell,Cell>> cellPairs)
+Called after a list of new cells has been created during an append operation, but before
+ they are committed to the WAL or memstore.
+
+
+default List<Pair<Cell,Cell>>
+RegionObserver.postIncrementBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
+  Mutation mutation,
+  List<Pair<Cell,Cell>> cellPairs)
+Called after a list of new cells has been created during an increment operation, but before
+ they are committed to the WAL or memstore.
+
+
+
+
 Method 

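A note on these two new default hooks: they give a coprocessor a look at the (original, to-be-committed) cell pairs an append or increment produced, before anything reaches the WAL or memstore, and let it substitute replacements by returning a new list. A minimal observer built from the signature shown above; the pass-through body is the neutral choice, and a real implementation might rewrite the second cell of each pair (e.g. to attach tags):

import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Pair;

public class PassThroughObserver implements RegionObserver {
  @Override
  public List<Pair<Cell, Cell>> postIncrementBeforeWAL(
      ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation mutation,
      List<Pair<Cell, Cell>> cellPairs) {
    // Each pair is (cell from the client's Increment, cell about to be
    // committed). Returning cellPairs unchanged keeps the default behavior.
    return cellPairs;
  }
}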
[20/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
index 034077c..c20ff47 100644
--- a/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
@@ -110,8 +110,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.rest.model.ScannerModel.FilterModel.ByteArrayComparableModel.ComparatorType
 org.apache.hadoop.hbase.rest.model.ScannerModel.FilterModel.FilterType
+org.apache.hadoop.hbase.rest.model.ScannerModel.FilterModel.ByteArrayComparableModel.ComparatorType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
index aa0564c..56f5ff2 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
@@ -141,10 +141,10 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.security.access.Permission.Scope
-org.apache.hadoop.hbase.security.access.AccessController.OpType
-org.apache.hadoop.hbase.security.access.Permission.Action
 org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy
+org.apache.hadoop.hbase.security.access.Permission.Action
+org.apache.hadoop.hbase.security.access.AccessController.OpType
+org.apache.hadoop.hbase.security.access.Permission.Scope
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
index 2cef8bd..4ade4c1 100644
--- a/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/thrift/package-tree.html
@@ -199,8 +199,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.thrift.ThriftServerRunner.ImplType
 org.apache.hadoop.hbase.thrift.ThriftMetrics.ThriftServerType
+org.apache.hadoop.hbase.thrift.ThriftServerRunner.ImplType
 org.apache.hadoop.hbase.thrift.MetricsThriftServerSourceFactoryImpl.FactoryStorage
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.ImplData.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.ImplData.html
 
b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.ImplData.html
index c0d1f12..20b038d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.ImplData.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.ImplData.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class BlockCacheTmpl.ImplData

[20/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.html
index 809f66f..9b60dd0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.html
@@ -765,146 +765,145 @@
 757            found.set(true);
 758            try {
 759              boolean rootMetaFound =
-760                  masterServices.getMetaTableLocator().verifyMetaRegionLocation(
-761                      conn, masterServices.getZooKeeper(), 1);
-762              if (rootMetaFound) {
-763                MetaTableAccessor.Visitor visitor = new DefaultVisitorBase() {
-764                  @Override
-765                  public boolean visitInternal(Result row) throws IOException {
-766                    RegionInfo info = MetaTableAccessor.getRegionInfo(row);
-767                    if (info != null) {
-768                      Cell serverCell =
-769                          row.getColumnLatestCell(HConstants.CATALOG_FAMILY,
-770                              HConstants.SERVER_QUALIFIER);
-771                      if (RSGROUP_TABLE_NAME.equals(info.getTable()) && serverCell != null) {
-772                        ServerName sn =
-773                            ServerName.parseVersionedServerName(CellUtil.cloneValue(serverCell));
-774                        if (sn == null) {
-775                          found.set(false);
-776                        } else if (tsm.isTableState(RSGROUP_TABLE_NAME, TableState.State.ENABLED)) {
-777                          try {
-778                            ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
-779                            ClientProtos.GetRequest request =
-780                                RequestConverter.buildGetRequest(info.getRegionName(),
-781                                    new Get(ROW_KEY));
-782                            rs.get(null, request);
-783                            assignedRegions.add(info);
-784                          } catch(Exception ex) {
-785                            LOG.debug("Caught exception while verifying group region", ex);
-786                          }
-787                        }
-788                        foundRegions.add(info);
-789                      }
-790                    }
-791                    return true;
-792                  }
-793                };
-794                MetaTableAccessor.fullScanRegions(conn, visitor);
-795                // if no regions in meta then we have to create the table
-796                if (foundRegions.size() < 1 && rootMetaFound && !createSent) {
-797                  createRSGroupTable();
-798                  createSent = true;
-799                }
-800                LOG.info("RSGroup table=" + RSGROUP_TABLE_NAME + " isOnline=" + found.get()
-801                    + ", regionCount=" + foundRegions.size() + ", assignCount="
-802                    + assignedRegions.size() + ", rootMetaFound=" + rootMetaFound);
-803                found.set(found.get() && assignedRegions.size() == foundRegions.size()
-804                    && foundRegions.size() > 0);
-805              } else {
-806                LOG.info("Waiting for catalog tables to come online");
-807                found.set(false);
-808              }
-809              if (found.get()) {
-810                LOG.debug("With group table online, refreshing cached information.");
-811                RSGroupInfoManagerImpl.this.refresh(true);
-812                online = true;
-813                //flush any inconsistencies between ZK and HTable
-814                RSGroupInfoManagerImpl.this.flushConfig();
-815              }
-816            } catch (RuntimeException e) {
-817              throw e;
-818            } catch(Exception e) {
-819              found.set(false);
-820              LOG.warn("Failed to perform check", e);
-821            }
-822            try {
-823              Thread.sleep(100);
-824            } catch (InterruptedException e) {
-825              LOG.info("Sleep interrupted", e);
-826            }
-827          }
-828          return found.get();
-829        }
-830
-831        private void createRSGroupTable() throws IOException {
-832          Long procId = masterServices.createSystemTable(RSGROUP_TABLE_DESC);
-833          // wait for region to be online
-834          int tries = 600;
-835          while (!(masterServices.getMasterProcedureExecutor().isFinished(procId))
-836              && masterServices.getMasterProcedureExecutor().isRunning()
-837              && tries > 0) {
-838            try {
-839              Thread.sleep(100);
-840            } catch (InterruptedException e) {
-841              throw new IOException("Wait interrupted ", e);
-842            }
-843            tries--;
-844          }
-845          if (tries <= 0) {
-846            throw new IOException("Failed to create group table in a given time.");
-847          } else {
-848            Procedure<?> result =

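Worth spelling out: the createRSGroupTable() wait loop above polls the procedure executor up to 600 times with a 100 ms sleep, i.e. roughly a 60-second budget before giving up with an IOException. The same budget written as an explicit deadline, with invented helper names:

import java.util.function.BooleanSupplier;

final class Waits {
  // 600 retries * 100 ms sleep in the hunk is about a 60_000 ms deadline here.
  static boolean waitFor(BooleanSupplier done, long budgetMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + budgetMs;
    while (!done.getAsBoolean()) {
      if (System.currentTimeMillis() >= deadline) {
        return false;  // caller turns this into "Failed to create group table..."
      }
      Thread.sleep(100);
    }
    return true;
  }
}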
[20/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedureSyncWait.Predicate.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedureSyncWait.Predicate.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedureSyncWait.Predicate.html
index 897da76..20face7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedureSyncWait.Predicate.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedureSyncWait.Predicate.html
@@ -112,6 +112,13 @@
 
 
 static <T> T
+ProcedureSyncWait.waitFor(MasterProcedureEnv env,
+   long waitTime,
+   String purpose,
+   ProcedureSyncWait.Predicate<T> predicate)
+
+
+static <T> T
 ProcedureSyncWait.waitFor(MasterProcedureEnv env,
 String purpose,
 ProcedureSyncWait.Predicate<T> predicate)
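Aside: the new overload adds an explicit waitTime bound to the existing purpose-plus-predicate form. As I read the ProcedureSyncWait contract, the Predicate<T> is re-evaluated until it yields a usable (non-null, non-false) value or the wait times out; a hedged call-site sketch, where the condition helper is invented:

// Hypothetical call site: wait up to 30 s, re-checking via the predicate.
Boolean ready = ProcedureSyncWait.waitFor(env, 30_000L,
    "rsgroup table to come online",
    () -> isGroupTableOnline(env) ? Boolean.TRUE : null);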

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index 2991c16..1dd5d5a 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -216,8 +216,8 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
 org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
-org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
 org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType
+org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
 org.apache.hadoop.hbase.master.procedure.MetaProcedureInterface.MetaOperationType
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.RefreshCacheTask.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.RefreshCacheTask.html
 
b/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.RefreshCacheTask.html
index 8923097..499e7bb 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.RefreshCacheTask.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.RefreshCacheTask.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class SnapshotFileCache.RefreshCacheTask
+public class SnapshotFileCache.RefreshCacheTask
 extends java.util.TimerTask
 Simple helper task that just periodically attempts to refresh the cache
 
@@ -199,7 +199,7 @@ extends java.util.TimerTask
 
 
 RefreshCacheTask
-public RefreshCacheTask()
+public RefreshCacheTask()
 
 
 
@@ -216,7 +216,7 @@ extends java.util.TimerTask
 
 
 run
-public void run()
+public void run()
 
 Specified by:
 run in interface java.lang.Runnable

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.SnapshotDirectoryInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.SnapshotDirectoryInfo.html
 
b/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.SnapshotDirectoryInfo.html
index af18761..171d0c9 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.SnapshotDirectoryInfo.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.SnapshotDirectoryInfo.html
@@ 

[20/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AuthManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AuthManager.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AuthManager.html
new file mode 100644
index 000..4d5cbc9
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/AuthManager.html
@@ -0,0 +1,680 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018
+019package 
org.apache.hadoop.hbase.security.access;
+020
+021import java.io.Closeable;
+022import java.io.IOException;
+023import java.util.HashMap;
+024import java.util.HashSet;
+025import java.util.List;
+026import java.util.Map;
+027import java.util.Set;
+028import 
java.util.concurrent.ConcurrentHashMap;
+029import 
java.util.concurrent.atomic.AtomicLong;
+030
+031import 
org.apache.hadoop.conf.Configuration;
+032import 
org.apache.hadoop.hbase.AuthUtil;
+033import org.apache.hadoop.hbase.Cell;
+034import 
org.apache.hadoop.hbase.TableName;
+035import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
+036import 
org.apache.hadoop.hbase.log.HBaseMarkers;
+037import 
org.apache.hadoop.hbase.security.Superusers;
+038import 
org.apache.hadoop.hbase.security.User;
+039import 
org.apache.hadoop.hbase.security.UserProvider;
+040import 
org.apache.hadoop.hbase.util.Bytes;
+041import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+042import 
org.apache.yetus.audience.InterfaceAudience;
+043import 
org.apache.zookeeper.KeeperException;
+044import org.slf4j.Logger;
+045import org.slf4j.LoggerFactory;
+046
+047import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+048import 
org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
+049import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+050
+051/**
+052 * Performs authorization checks for a given user's assigned permissions.
+053 * <p>
+054 *   There are the following scopes: <b>Global</b>, <b>Namespace</b>, <b>Table</b>, <b>Family</b>,
+055 *   <b>Qualifier</b>, <b>Cell</b>.
+056 *   Generally speaking, a higher scope can override a lower one,
+057 *   except that a Cell permission can be granted even when a user has no permission on the
+058 *   specified table, in which case the user can get/scan only the granted cell parts.
+059 * </p>
+060 * e.g. if user A has global permission R(ead), he can
+061 * read table T without checking table scope permission, so authorization checks always start
+062 * from the Global scope.
+063 * <p>
+064 *   For each scope, not only the user but also the groups he belongs to will be checked.
+065 * </p>
+066 */
+068public final class AuthManager implements 
Closeable {
+069
+070  /**
+071   * Cache of permissions, it is thread 
safe.
+072   * @param T T extends 
Permission
+073   */
+074  private static class 
PermissionCacheT extends Permission {
+075private final Object mutex = new 
Object();
+076private MapString, 
SetT cache = new HashMap();
+077
+078void put(String name, T perm) {
+079  synchronized (mutex) {
+080SetT perms = 
cache.getOrDefault(name, new HashSet());
+081perms.add(perm);
+082cache.put(name, perms);
+083  }
+084}
+085
+086SetT get(String name) {
+087  synchronized (mutex) {
+088return cache.get(name);
+089  }
+090}
+091
+092void clear() {
+093  synchronized (mutex) {
+094for (Map.EntryString, 
SetT entry : cache.entrySet()) {
+095  entry.getValue().clear();
+096}
+097cache.clear();
+098  }
+099}
+100  }
+101  
PermissionCacheNamespacePermission NS_NO_PERMISSION = new 
PermissionCache();
+102  PermissionCacheTablePermission 
TBL_NO_PERMISSION = new PermissionCache();
+103
+104  /**
+105   * Cache for global permission.
+106   * Since every user/group can only have 
one global permission, no need to user PermissionCache.
+107   */
+108  private volatile MapString, 

[20/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.
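A note on the javadoc above: it pins down the check order, Global first, then the narrower scopes, with the user's groups consulted at every level, and Cell-scope grants able to open individual cells even without table-level rights. The cascade, sketched with invented method names standing in for AuthManager's per-scope lookups:

import org.apache.hadoop.hbase.TableName;

abstract class ScopedAuthorizer {
  abstract boolean hasGlobal(String user, char action);
  abstract boolean hasNamespace(String user, String ns, char action);
  abstract boolean hasTable(String user, TableName table, char action);

  boolean authorize(String user, TableName table, char action) {
    if (hasGlobal(user, action)) {
      return true;                              // Global short-circuits everything
    }
    if (hasNamespace(user, table.getNamespaceAsString(), action)) {
      return true;                              // then Namespace
    }
    return hasTable(user, table, action);       // then Table/Family/Qualifier
  }
}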

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
index b2a9771..bf81ebb 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
@@ -46,3768 +46,3806 @@
 038import java.util.Objects;
 039import java.util.Set;
 040import java.util.SortedMap;
-041import java.util.TreeMap;
-042import java.util.TreeSet;
-043import 
java.util.concurrent.ConcurrentHashMap;
-044import 
java.util.concurrent.ConcurrentMap;
-045import 
java.util.concurrent.ConcurrentSkipListMap;
-046import 
java.util.concurrent.atomic.AtomicBoolean;
-047import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-048import java.util.function.Function;
-049import 
javax.management.MalformedObjectNameException;
-050import javax.servlet.http.HttpServlet;
-051import 
org.apache.commons.lang3.RandomUtils;
-052import 
org.apache.commons.lang3.StringUtils;
-053import 
org.apache.commons.lang3.SystemUtils;
-054import 
org.apache.hadoop.conf.Configuration;
-055import org.apache.hadoop.fs.FileSystem;
-056import org.apache.hadoop.fs.Path;
-057import 
org.apache.hadoop.hbase.Abortable;
-058import 
org.apache.hadoop.hbase.CacheEvictionStats;
-059import 
org.apache.hadoop.hbase.ChoreService;
-060import 
org.apache.hadoop.hbase.ClockOutOfSyncException;
-061import 
org.apache.hadoop.hbase.CoordinatedStateManager;
-062import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-063import 
org.apache.hadoop.hbase.HBaseConfiguration;
-064import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-065import 
org.apache.hadoop.hbase.HConstants;
-066import 
org.apache.hadoop.hbase.HealthCheckChore;
-067import 
org.apache.hadoop.hbase.MetaTableAccessor;
-068import 
org.apache.hadoop.hbase.NotServingRegionException;
-069import 
org.apache.hadoop.hbase.PleaseHoldException;
-070import 
org.apache.hadoop.hbase.ScheduledChore;
-071import 
org.apache.hadoop.hbase.ServerName;
-072import 
org.apache.hadoop.hbase.Stoppable;
-073import 
org.apache.hadoop.hbase.TableDescriptors;
-074import 
org.apache.hadoop.hbase.TableName;
-075import 
org.apache.hadoop.hbase.YouAreDeadException;
-076import 
org.apache.hadoop.hbase.ZNodeClearer;
-077import 
org.apache.hadoop.hbase.client.ClusterConnection;
-078import 
org.apache.hadoop.hbase.client.Connection;
-079import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-080import 
org.apache.hadoop.hbase.client.RegionInfo;
-081import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-082import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-083import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-084import 
org.apache.hadoop.hbase.client.locking.EntityLock;
-085import 
org.apache.hadoop.hbase.client.locking.LockServiceClient;
-086import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-087import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
-088import 
org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination;
-089import 
org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
-090import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-091import 
org.apache.hadoop.hbase.exceptions.RegionMovedException;
-092import 
org.apache.hadoop.hbase.exceptions.RegionOpeningException;
-093import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-094import 
org.apache.hadoop.hbase.executor.ExecutorService;
-095import 
org.apache.hadoop.hbase.executor.ExecutorType;
-096import 
org.apache.hadoop.hbase.fs.HFileSystem;
-097import 
org.apache.hadoop.hbase.http.InfoServer;
-098import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-099import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-100import 
org.apache.hadoop.hbase.io.hfile.HFile;
-101import 
org.apache.hadoop.hbase.io.util.MemorySizeUtil;
-102import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-103import 
org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper;
-104import 
org.apache.hadoop.hbase.ipc.RpcClient;
-105import 
org.apache.hadoop.hbase.ipc.RpcClientFactory;
-106import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-107import 
org.apache.hadoop.hbase.ipc.RpcServer;
-108import 
org.apache.hadoop.hbase.ipc.RpcServerInterface;
-109import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-110import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
-111import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-112import 
org.apache.hadoop.hbase.master.HMaster;
-113import 
org.apache.hadoop.hbase.master.LoadBalancer;
-114import 
org.apache.hadoop.hbase.master.RegionState.State;

[20/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
index c7d99b2..9d1542c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
@@ -382,1357 +382,1365 @@
 374for (int i = 0; i  
this.curFunctionCosts.length; i++) {
 375  curFunctionCosts[i] = 
tempFunctionCosts[i];
 376}
-377LOG.info("start 
StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-378+ functionCost());
+377double initCost = currentCost;
+378double newCost = currentCost;
 379
-380double initCost = currentCost;
-381double newCost = currentCost;
-382
-383long computedMaxSteps;
-384if (runMaxSteps) {
-385  computedMaxSteps = 
Math.max(this.maxSteps,
-386  ((long)cluster.numRegions * 
(long)this.stepsPerRegion * (long)cluster.numServers));
-387} else {
-388  computedMaxSteps = 
Math.min(this.maxSteps,
-389  ((long)cluster.numRegions * 
(long)this.stepsPerRegion * (long)cluster.numServers));
-390}
-391// Perform a stochastic walk to see 
if we can get a good fit.
-392long step;
-393
-394for (step = 0; step  
computedMaxSteps; step++) {
-395  Cluster.Action action = 
nextAction(cluster);
-396
-397  if (action.type == Type.NULL) {
-398continue;
-399  }
-400
-401  cluster.doAction(action);
-402  updateCostsWithAction(cluster, 
action);
-403
-404  newCost = computeCost(cluster, 
currentCost);
-405
-406  // Should this be kept?
-407  if (newCost  currentCost) {
-408currentCost = newCost;
-409
-410// save for JMX
-411curOverallCost = currentCost;
-412for (int i = 0; i  
this.curFunctionCosts.length; i++) {
-413  curFunctionCosts[i] = 
tempFunctionCosts[i];
-414}
-415  } else {
-416// Put things back the way they 
were before.
-417// TODO: undo by remembering old 
values
-418Action undoAction = 
action.undoAction();
-419cluster.doAction(undoAction);
-420updateCostsWithAction(cluster, 
undoAction);
-421  }
-422
-423  if 
(EnvironmentEdgeManager.currentTime() - startTime 
-424  maxRunningTime) {
-425break;
-426  }
-427}
-428long endTime = 
EnvironmentEdgeManager.currentTime();
-429
-430
metricsBalancer.balanceCluster(endTime - startTime);
-431
-432// update costs metrics
-433updateStochasticCosts(tableName, 
curOverallCost, curFunctionCosts);
-434if (initCost  currentCost) {
-435  plans = 
createRegionPlans(cluster);
-436  LOG.info("Finished computing new 
load balance plan. Computation took {}" +
-437" to try {} different iterations. 
 Found a solution that moves " +
-438"{} regions; Going from a 
computed cost of {}" +
-439" to a new cost of {}", 
java.time.Duration.ofMillis(endTime - startTime),
-440step, plans.size(), initCost, 
currentCost);
-441  return plans;
-442}
-443LOG.info("Could not find a better 
load balance plan.  Tried {} different configurations in " +
-444  "{}, and did not find anything with 
a computed cost less than {}", step,
-445  java.time.Duration.ofMillis(endTime 
- startTime), initCost);
-446return null;
-447  }
-448
-449  /**
-450   * update costs to JMX
-451   */
-452  private void 
updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) 
{
-453if (tableName == null) return;
-454
-455// check if the metricsBalancer is 
MetricsStochasticBalancer before casting
-456if (metricsBalancer instanceof 
MetricsStochasticBalancer) {
-457  MetricsStochasticBalancer balancer 
= (MetricsStochasticBalancer) metricsBalancer;
-458  // overall cost
-459  
balancer.updateStochasticCost(tableName.getNameAsString(),
-460"Overall", "Overall cost", 
overall);
-461
-462  // each cost function
-463  for (int i = 0; i < 
costFunctions.length; i++) {
-464CostFunction costFunction = 
costFunctions[i];
-465String costFunctionName = 
costFunction.getClass().getSimpleName();
-466Double costPercent = (overall == 
0) ? 0 : (subCosts[i] / overall);
-467// TODO: cost function may need a 
specific description
-468
balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-469  "The percent of " + 
costFunctionName, costPercent);
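The loop in the hunk above is a plain greedy random walk: propose an action, recompute the cost, keep the move when the cost drops, undo it otherwise, and stop when either the step budget (computedMaxSteps) or the wall-clock budget (maxRunningTime) runs out. A self-contained Java sketch of that accept-if-better pattern; the names and the random candidate choice are illustrative, not the balancer's actual action generator:

    import java.util.Random;
    import java.util.function.DoubleSupplier;

    public class GreedyWalk {
      interface Action { void apply(); void undo(); }

      private final Random random = new Random();

      /** Returns the best cost reached within the step and time budgets. */
      double walk(Action[] candidates, DoubleSupplier cost, long maxSteps, long maxRunningTimeMs) {
        long startTime = System.currentTimeMillis();
        double currentCost = cost.getAsDouble();
        for (long step = 0; step < maxSteps; step++) {
          Action action = candidates[random.nextInt(candidates.length)];
          action.apply();
          double newCost = cost.getAsDouble();
          if (newCost < currentCost) {
            currentCost = newCost;   // keep the move, as the balancer keeps a cheaper cluster state
          } else {
            action.undo();           // put things back the way they were before
          }
          if (System.currentTimeMillis() - startTime > maxRunningTimeMs) {
            break;                   // same wall-clock cut-off as maxRunningTime above
          }
        }
        return currentCost;
      }
    }

Note that computedMaxSteps is clamped with Math.max when runMaxSteps is set and with Math.min otherwise, so the regions * stepsPerRegion * servers product acts as a floor or a ceiling on the walk length.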

[20/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html 
b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
index 57f4692..73e1692 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
@@ -4301,10 +4301,14 @@
 
 
 static HBaseClassTestRule
-TestWALProcedureStore.CLASS_RULE
+TestWALProcedureTree.CLASS_RULE
 
 
 static HBaseClassTestRule
+TestWALProcedureStore.CLASS_RULE
+
+
+static HBaseClassTestRule
 TestForceUpdateProcedure.CLASS_RULE
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index ec56445..4837d80 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -158,8 +158,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.io.hfile.TestCacheOnWrite.CacheOnWriteType
 org.apache.hadoop.hbase.io.hfile.TagUsage
+org.apache.hadoop.hbase.io.hfile.TestCacheOnWrite.CacheOnWriteType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
index 576f2d6..58e6363 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -580,12 +580,12 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
 org.apache.hadoop.hbase.HBaseClusterManager.CommandProvider.Operation
-org.apache.hadoop.hbase.PerformanceEvaluation.Counter
-org.apache.hadoop.hbase.IntegrationTestRegionReplicaPerf.Stat
 org.apache.hadoop.hbase.ScanPerformanceEvaluation.ScanCounter
+org.apache.hadoop.hbase.IntegrationTestRegionReplicaPerf.Stat
+org.apache.hadoop.hbase.ResourceChecker.Phase
 org.apache.hadoop.hbase.RESTApiClusterManager.RoleCommand
 org.apache.hadoop.hbase.IntegrationTestDDLMasterFailover.ACTION
-org.apache.hadoop.hbase.ResourceChecker.Phase
+org.apache.hadoop.hbase.PerformanceEvaluation.Counter
 org.apache.hadoop.hbase.ClusterManager.ServiceType
 org.apache.hadoop.hbase.RESTApiClusterManager.Service
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 42f874a..e84bc13 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -219,10 +219,10 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.procedure2.TestStateMachineProcedure.TestSMProcedureState
 org.apache.hadoop.hbase.procedure2.TestProcedureBypass.StuckStateMachineState
-org.apache.hadoop.hbase.procedure2.TestProcedureRecovery.TestStateMachineProcedure.State
 org.apache.hadoop.hbase.procedure2.TestYieldProcedures.TestStateMachineProcedure.State

[20/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index 1db4dc7..49e372e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -1215,25 +1215,30 @@
 RSProcedureDispatcher.RegionOpenOperation.buildRegionOpenInfoRequest(MasterProcedureEnv env)
 
+private boolean
+ReopenTableRegionsProcedure.canSchedule(MasterProcedureEnv env,
+   HRegionLocation loc)
+
 protected static void
 AbstractStateMachineTableProcedure.checkOnline(MasterProcedureEnv env,
    RegionInfo ri)
 Check region is online.
 
 protected void
 AbstractStateMachineRegionProcedure.checkTableModifiable(MasterProcedureEnv env)
 Check whether a table is modifiable - exists and either offline or online with config set
 
 protected void
 AbstractStateMachineTableProcedure.checkTableModifiable(MasterProcedureEnv env)
 Check whether a table is modifiable - exists and either offline or online with config set
 
 private static void
 DeleteTableProcedure.cleanAnyRemainingRows(MasterProcedureEnv env,
    TableName tableName)
@@ -1241,26 +1246,26 @@
  info:regioninfo column was empty because of some write error.
 
 protected void
 TruncateTableProcedure.completionCleanup(MasterProcedureEnv env)
 
 protected void
 ModifyTableProcedure.completionCleanup(MasterProcedureEnv env)
 
 protected void
 InitMetaProcedure.completionCleanup(MasterProcedureEnv env)
 
 protected static void
 CreateNamespaceProcedure.createDirectory(MasterProcedureEnv env,
    NamespaceDescriptor nsDescriptor)
 Create the namespace directory
 
 private List<RegionInfo>
 CloneSnapshotProcedure.createFilesystemLayout(MasterProcedureEnv env,
    TableDescriptor tableDescriptor,
@@ -1268,20 +1273,20 @@
 Create regions in file system.
 
 protected static List<RegionInfo>
 CreateTableProcedure.createFsLayout(MasterProcedureEnv env,
    TableDescriptor tableDescriptor,
    List<RegionInfo> newRegions)
 
 protected static List<RegionInfo>
 CreateTableProcedure.createFsLayout(MasterProcedureEnv env,
    TableDescriptor tableDescriptor,
    List<RegionInfo> newRegions,
    CreateTableProcedure.CreateHdfsRegions hdfsRegionHandler)
 
 private List<RegionInfo>
 CloneSnapshotProcedure.createFsLayout(MasterProcedureEnv env,
    TableDescriptor tableDescriptor,
@@ -1290,19 +1295,19 @@
 Create region layout in file system.
 
 List<RegionInfo>
 CreateTableProcedure.CreateHdfsRegions.createHdfsRegions(MasterProcedureEnv env,
    org.apache.hadoop.fs.Path tableRootDir,
    TableName tableName,
    List<RegionInfo> newRegions)
 
 protected static void
 DeleteTableProcedure.deleteAssignmentState(MasterProcedureEnv env,
    TableName tableName)
 
 static void
 MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(MasterProcedureEnv env,
    TableName tableName,
@@ -1312,14 +1317,14 @@
 Remove the column family from the file system
 
 protected static void
 DeleteNamespaceProcedure.deleteDirectory(MasterProcedureEnv env,
    String namespaceName)
 Delete the namespace directories from the file system
 
 private void
 ModifyTableProcedure.deleteFromFs(MasterProcedureEnv env,
    TableDescriptor oldTableDescriptor,
@@ -1327,27 +1332,27 @@
 Removes from hdfs the families that 

[20/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientGetCompactionState.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientGetCompactionState.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientGetCompactionState.html
new file mode 100644
index 000..078c52a
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClientGetCompactionState.html
@@ -0,0 +1,369 @@
+TestRestoreSnapshotFromClientGetCompactionState (Apache HBase 
3.0.0-SNAPSHOT Test API)
+org.apache.hadoop.hbase.client
+Class TestRestoreSnapshotFromClientGetCompactionState
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+
+
+org.apache.hadoop.hbase.client.RestoreSnapshotFromClientGetCompactionStateTestBase
+
+
+org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientGetCompactionState
+
+
+
+
+
+
+
+
+
+
+
+
+public class TestRestoreSnapshotFromClientGetCompactionState
+extends RestoreSnapshotFromClientGetCompactionStateTestBase
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static HBaseClassTestRule
+CLASS_RULE
+
+
+int
+numReplicas
+
+
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+admin, emptySnapshot, FAMILY, name, snapshot0Rows, snapshot1Rows, snapshotName0, snapshotName1, snapshotName2, tableName, TEST_FAMILY2, TEST_UTIL
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+TestRestoreSnapshotFromClientGetCompactionState()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+protected int
+getNumReplicas()
+
+
+static List<Object[]>
+params()
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientGetCompactionStateTestBase
+testGetCompactionStateAfterRestoringSnapshot
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+countRows, createTable, getValidMethodName, setup, setupCluster, setupConf, splitRegion, tearDown, tearDownAfterClass, verifyRowCount
+
+
+
+
+
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, hashCode, notify, 

[20/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html
index ff29160..e4dc134 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.ProcedureStoreListener.html
@@ -42,190 +42,208 @@
 034public interface ProcedureStore {
 035  /**
 036   * Store listener interface.
-037   * The main process should register a 
listener and respond to the store events.
-038   */
-039  public interface ProcedureStoreListener 
{
-040/**
-041 * triggered when the store sync is 
completed.
-042 */
-043void postSync();
-044
-045/**
-046 * triggered when the store is not 
able to write out data.
-047 * the main process should abort.
-048 */
-049void abortProcess();
-050  }
-051
-052  /**
-053   * An Iterator over a collection of 
Procedure
-054   */
-055  public interface ProcedureIterator {
-056/**
-057 * Reset the Iterator by seeking to 
the beginning of the list.
-058 */
-059void reset();
-060
-061/**
-062 * Returns true if the iterator has 
more elements.
-063 * (In other words, returns true if 
next() would return a Procedure
-064 * rather than throwing an 
exception.)
-065 * @return true if the iterator has 
more procedures
-066 */
-067boolean hasNext();
-068
-069/**
-070 * @return true if the iterator next 
element is a completed procedure.
-071 */
-072boolean isNextFinished();
-073
+037   * <p/>
+038   * The main process should register a 
listener and respond to the store events.
+039   */
+040  public interface ProcedureStoreListener 
{
+041
+042/**
+043 * triggered when the store sync is 
completed.
+044 */
+045default void postSync() {
+046}
+047
+048/**
+049 * triggered when the store is not 
able to write out data. the main process should abort.
+050 */
+051default void abortProcess() {
+052}
+053
+054/**
+055 * Suggest that the upper layer should update the state of some procedures. Ignoring this
+056 * call will not affect correctness, only performance.
+057 * <p/>
+058 * For a WAL based ProcedureStore 
implementation, if all the procedures stored in a WAL file
+059 * have been deleted, or updated 
later in another WAL file, then we can delete the WAL file. If
+060 * there are old procedures in a WAL 
file which are never deleted or updated, then we can not
+061 * delete the WAL file, and holding lots of WAL files slows down the master
+062 * restarts. So here we introduce this method to tell the upper layer to update the
+063 * states of these procedures so that 
we can delete the old WAL file.
+064 * @param procIds the ids of the procedures
+065 */
+066default void forceUpdate(long[] 
procIds) {
+067}
+068  }
+069
+070  /**
+071   * An Iterator over a collection of 
Procedure
+072   */
+073  public interface ProcedureIterator {
 074/**
-075 * Skip the next procedure
+075 * Reset the Iterator by seeking to 
the beginning of the list.
 076 */
-077void skipNext();
+077void reset();
 078
 079/**
-080 * Returns the next procedure in the 
iteration.
-081 * @throws IOException if there was 
an error fetching/deserializing the procedure
-082 * @return the next procedure in the 
iteration.
-083 */
-084@SuppressWarnings("rawtypes")
-085Procedure next() throws 
IOException;
-086  }
-087
-088  /**
-089   * Interface passed to the 
ProcedureStore.load() method to handle the store-load events.
-090   */
-091  public interface ProcedureLoader {
+080 * Returns true if the iterator has 
more elements.
+081 * (In other words, returns true if 
next() would return a Procedure
+082 * rather than throwing an 
exception.)
+083 * @return true if the iterator has 
more procedures
+084 */
+085boolean hasNext();
+086
+087/**
+088 * @return true if the iterator next 
element is a completed procedure.
+089 */
+090boolean isNextFinished();
+091
 092/**
-093 * Called by ProcedureStore.load() to 
notify about the maximum proc-id in the store.
-094 * @param maxProcId the highest 
proc-id in the store
-095 */
-096void setMaxProcId(long maxProcId);
-097
-098/**
-099 * Called by the 
ProcedureStore.load() every time a set of procedures are ready to be 
executed.
-100 * The ProcedureIterator passed to 
the method, has the procedure sorted in replay-order.
-101 * 
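The + side of the hunk above turns ProcedureStoreListener's callbacks into no-op defaults, so an implementation overrides only what it needs. A minimal sketch under that assumption; the class and logger names are illustrative:

    import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureStoreListener;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ForceUpdateLogger implements ProcedureStoreListener {
      private static final Logger LOG = LoggerFactory.getLogger(ForceUpdateLogger.class);

      @Override
      public void forceUpdate(long[] procIds) {
        // The store is asking the upper layer to rewrite the state of these
        // procedures so the old WAL files that still hold them can be deleted.
        LOG.info("Store requested a state rewrite for {} procedure(s)", procIds.length);
      }
      // postSync() and abortProcess() fall back to the empty defaults shown above.
    }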

[20/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
index 43c66a8..061ce80 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
@@ -23,2136 +23,2142 @@
 015 * See the License for the specific 
language governing permissions and
 016 * limitations under the License.
 017 */
-018
-019package 
org.apache.hadoop.hbase.procedure2;
-020
-021import java.io.IOException;
-022import java.util.ArrayDeque;
-023import java.util.ArrayList;
-024import java.util.Arrays;
-025import java.util.Collection;
-026import java.util.Deque;
-027import java.util.HashSet;
-028import java.util.Iterator;
-029import java.util.List;
-030import java.util.Map;
-031import java.util.Objects;
-032import java.util.Set;
-033import 
java.util.concurrent.ConcurrentHashMap;
-034import 
java.util.concurrent.CopyOnWriteArrayList;
-035import java.util.concurrent.TimeUnit;
-036import 
java.util.concurrent.atomic.AtomicBoolean;
-037import 
java.util.concurrent.atomic.AtomicInteger;
-038import 
java.util.concurrent.atomic.AtomicLong;
-039import java.util.stream.Collectors;
-040import java.util.stream.Stream;
-041
-042import 
org.apache.hadoop.conf.Configuration;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-045import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-046import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-047import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-048import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-049import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-050import 
org.apache.hadoop.hbase.security.User;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052import 
org.apache.hadoop.hbase.util.IdLock;
-053import 
org.apache.hadoop.hbase.util.NonceKey;
-054import 
org.apache.hadoop.hbase.util.Threads;
-055import 
org.apache.yetus.audience.InterfaceAudience;
-056import org.slf4j.Logger;
-057import org.slf4j.LoggerFactory;
-058
-059import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-060import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+018package 
org.apache.hadoop.hbase.procedure2;
+019
+020import java.io.IOException;
+021import java.util.ArrayDeque;
+022import java.util.ArrayList;
+023import java.util.Arrays;
+024import java.util.Collection;
+025import java.util.Deque;
+026import java.util.HashSet;
+027import java.util.Iterator;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Objects;
+031import java.util.Set;
+032import 
java.util.concurrent.ConcurrentHashMap;
+033import 
java.util.concurrent.CopyOnWriteArrayList;
+034import java.util.concurrent.TimeUnit;
+035import 
java.util.concurrent.atomic.AtomicBoolean;
+036import 
java.util.concurrent.atomic.AtomicInteger;
+037import 
java.util.concurrent.atomic.AtomicLong;
+038import java.util.stream.Collectors;
+039import java.util.stream.Stream;
+040import 
org.apache.hadoop.conf.Configuration;
+041import 
org.apache.hadoop.hbase.HConstants;
+042import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+043import 
org.apache.hadoop.hbase.log.HBaseMarkers;
+044import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+045import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+046import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+047import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
+048import 
org.apache.hadoop.hbase.security.User;
+049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+050import 
org.apache.hadoop.hbase.util.IdLock;
+051import 
org.apache.hadoop.hbase.util.NonceKey;
+052import 
org.apache.hadoop.hbase.util.Threads;
+053import 
org.apache.yetus.audience.InterfaceAudience;
+054import org.slf4j.Logger;
+055import org.slf4j.LoggerFactory;
+056
+057import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+058import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+059
+060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 061
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-063
-064/**
-065 * Thread Pool that executes the 
submitted procedures.
-066 * The executor has a ProcedureStore 
associated.
-067 * Each operation is logged and on 
restart the pending procedures are resumed.
-068 *
-069 * Unless the 
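The class javadoc above ("each operation is logged and on restart the pending procedures are resumed") describes a log-then-execute contract. A deliberately toy illustration of that contract, not the HBase API:

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class LoggedExecutor {
      // Stand-in for the ProcedureStore; a real store persists across restarts.
      private final Deque<Runnable> store = new ArrayDeque<>();

      public void submit(Runnable procedure) {
        store.addLast(procedure);  // log the operation first...
        runPending();              // ...then execute it
      }

      /** After a restart, whatever is still in the store is resumed here. */
      public void runPending() {
        Runnable next;
        while ((next = store.peekFirst()) != null) {
          next.run();              // execute...
          store.pollFirst();       // ...and only then drop it, mirroring delete-on-completion
        }
      }
    }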

[20/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 976894f..721035e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -3020,926 +3020,927 @@
 3012}
 3013  }
 3014
-3015  void checkServiceStarted() throws 
ServerNotRunningYetException {
-3016if (!serviceStarted) {
-3017  throw new 
ServerNotRunningYetException("Server is not running yet");
-3018}
-3019  }
-3020
-3021  public static class 
MasterStoppedException extends DoNotRetryIOException {
-3022MasterStoppedException() {
-3023  super();
-3024}
-3025  }
-3026
-3027  void checkInitialized() throws 
PleaseHoldException, ServerNotRunningYetException,
-3028  MasterNotRunningException, 
MasterStoppedException {
-3029checkServiceStarted();
-3030if (!isInitialized()) {
-3031  throw new 
PleaseHoldException("Master is initializing");
-3032}
-3033if (isStopped()) {
-3034  throw new 
MasterStoppedException();
-3035}
-3036  }
-3037
-3038  /**
-3039   * Report whether this master is 
currently the active master or not.
-3040   * If not active master, we are parked 
on ZK waiting to become active.
-3041   *
-3042   * This method is used for testing.
-3043   *
-3044   * @return true if active master, 
false if not.
-3045   */
-3046  @Override
-3047  public boolean isActiveMaster() {
-3048return activeMaster;
-3049  }
-3050
-3051  /**
-3052   * Report whether this master has 
completed with its initialization and is
-3053   * ready.  If ready, the master is 
also the active master.  A standby master
-3054   * is never ready.
-3055   *
-3056   * This method is used for testing.
-3057   *
-3058   * @return true if master is ready to 
go, false if not.
-3059   */
-3060  @Override
-3061  public boolean isInitialized() {
-3062return initialized.isReady();
-3063  }
-3064
-3065  /**
-3066   * Report whether this master is in 
maintenance mode.
-3067   *
-3068   * @return true if master is in 
maintenanceMode
-3069   */
-3070  @Override
-3071  public boolean isInMaintenanceMode() 
throws IOException {
-3072if (!isInitialized()) {
-3073  throw new 
PleaseHoldException("Master is initializing");
-3074}
-3075return 
maintenanceModeTracker.isInMaintenanceMode();
-3076  }
-3077
-3078  @VisibleForTesting
-3079  public void setInitialized(boolean 
isInitialized) {
-3080
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-3081  }
-3082
-3083  @Override
-3084  public ProcedureEvent<?> 
getInitializedEvent() {
-3085return initialized;
-3086  }
-3087
-3088  /**
-3089   * Compute the average load across all 
region servers.
-3090   * Currently, this uses a very naive 
computation - just uses the number of
-3091   * regions being served, ignoring 
stats about number of requests.
-3092   * @return the average load
-3093   */
-3094  public double getAverageLoad() {
-3095if (this.assignmentManager == null) 
{
-3096  return 0;
-3097}
-3098
-3099RegionStates regionStates = 
this.assignmentManager.getRegionStates();
-3100if (regionStates == null) {
-3101  return 0;
-3102}
-3103return 
regionStates.getAverageLoad();
-3104  }
-3105
-3106  /*
-3107   * @return the count of region split 
plans executed
-3108   */
-3109  public long getSplitPlanCount() {
-3110return splitPlanCount;
-3111  }
-3112
-3113  /*
-3114   * @return the count of region merge 
plans executed
-3115   */
-3116  public long getMergePlanCount() {
-3117return mergePlanCount;
-3118  }
-3119
-3120  @Override
-3121  public boolean registerService(Service 
instance) {
-3122/*
-3123 * No stacking of instances is 
allowed for a single service name
-3124 */
-3125Descriptors.ServiceDescriptor 
serviceDesc = instance.getDescriptorForType();
-3126String serviceName = 
CoprocessorRpcUtils.getServiceName(serviceDesc);
-3127if 
(coprocessorServiceHandlers.containsKey(serviceName)) {
-3128  LOG.error("Coprocessor service 
"+serviceName+
-3129  " already registered, 
rejecting request from "+instance
-3130  );
-3131  return false;
-3132}
-3133
-3134
coprocessorServiceHandlers.put(serviceName, instance);
-3135if (LOG.isDebugEnabled()) {
-3136  LOG.debug("Registered master 
coprocessor service: service="+serviceName);
-3137}
-3138return true;
-3139  }
-3140
-3141  /**
-3142   * Utility for constructing an 
instance of the passed HMaster class.
-3143   * @param masterClass
-3144   * @return HMaster instance.
-3145   */
-3146  
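checkServiceStarted() and checkInitialized() above are guards meant to run at the top of every externally triggered master operation, so callers get a retriable exception instead of acting on a half-initialized master. A hypothetical wrapper showing the intended call pattern; runIfReady is not an HMaster method:

    import java.util.concurrent.Callable;

    private <T> T runIfReady(Callable<T> op) throws Exception {
      // Throws ServerNotRunningYetException, PleaseHoldException or
      // MasterStoppedException depending on where startup stands.
      checkInitialized();
      return op.call();
    }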

[20/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 0cf012a..976894f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -63,3884 +63,3883 @@
 055import javax.servlet.http.HttpServlet;
 056import 
javax.servlet.http.HttpServletRequest;
 057import 
javax.servlet.http.HttpServletResponse;
-058
-059import 
org.apache.commons.lang3.StringUtils;
-060import 
org.apache.hadoop.conf.Configuration;
-061import org.apache.hadoop.fs.Path;
-062import 
org.apache.hadoop.hbase.ChoreService;
-063import 
org.apache.hadoop.hbase.ClusterId;
-064import 
org.apache.hadoop.hbase.ClusterMetrics;
-065import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-066import 
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-067import 
org.apache.hadoop.hbase.CompoundConfiguration;
-068import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-069import 
org.apache.hadoop.hbase.HBaseIOException;
-070import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-071import 
org.apache.hadoop.hbase.HConstants;
-072import 
org.apache.hadoop.hbase.InvalidFamilyOperationException;
-073import 
org.apache.hadoop.hbase.MasterNotRunningException;
-074import 
org.apache.hadoop.hbase.MetaTableAccessor;
-075import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-076import 
org.apache.hadoop.hbase.PleaseHoldException;
-077import 
org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-078import 
org.apache.hadoop.hbase.ServerName;
-079import 
org.apache.hadoop.hbase.TableDescriptors;
-080import 
org.apache.hadoop.hbase.TableName;
-081import 
org.apache.hadoop.hbase.TableNotDisabledException;
-082import 
org.apache.hadoop.hbase.TableNotFoundException;
-083import 
org.apache.hadoop.hbase.UnknownRegionException;
-084import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-085import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-086import 
org.apache.hadoop.hbase.client.MasterSwitchType;
-087import 
org.apache.hadoop.hbase.client.RegionInfo;
-088import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-089import 
org.apache.hadoop.hbase.client.Result;
-090import 
org.apache.hadoop.hbase.client.TableDescriptor;
-091import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-092import 
org.apache.hadoop.hbase.client.TableState;
-093import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-094import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-095import 
org.apache.hadoop.hbase.exceptions.MergeRegionException;
-096import 
org.apache.hadoop.hbase.executor.ExecutorType;
-097import 
org.apache.hadoop.hbase.favored.FavoredNodesManager;
-098import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-099import 
org.apache.hadoop.hbase.http.InfoServer;
-100import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-101import 
org.apache.hadoop.hbase.ipc.RpcServer;
-102import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-103import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-104import 
org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-105import 
org.apache.hadoop.hbase.master.assignment.AssignProcedure;
-106import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-107import 
org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-108import 
org.apache.hadoop.hbase.master.assignment.MoveRegionProcedure;
-109import 
org.apache.hadoop.hbase.master.assignment.RegionStateNode;
-110import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-111import 
org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
-112import 
org.apache.hadoop.hbase.master.assignment.UnassignProcedure;
-113import 
org.apache.hadoop.hbase.master.balancer.BalancerChore;
-114import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-115import 
org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-116import 
org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-117import 
org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-118import 
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-119import 
org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-120import 
org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-121import 
org.apache.hadoop.hbase.master.locking.LockManager;
-122import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-123import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-124import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-125import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-126import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;

[20/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
index a5789e0..93a57cb 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
@@ -238,4120 +238,4119 @@
 230 * @see Admin
 231 */
 232@InterfaceAudience.Private
-233@InterfaceStability.Evolving
-234public class HBaseAdmin implements Admin 
{
-235  private static final Logger LOG = 
LoggerFactory.getLogger(HBaseAdmin.class);
-236
-237  private ClusterConnection connection;
-238
-239  private final Configuration conf;
-240  private final long pause;
-241  private final int numRetries;
-242  private final int syncWaitTimeout;
-243  private boolean aborted;
-244  private int operationTimeout;
-245  private int rpcTimeout;
-246
-247  private RpcRetryingCallerFactory 
rpcCallerFactory;
-248  private RpcControllerFactory 
rpcControllerFactory;
-249
-250  private NonceGenerator ng;
-251
-252  @Override
-253  public int getOperationTimeout() {
-254return operationTimeout;
-255  }
-256
-257  HBaseAdmin(ClusterConnection 
connection) throws IOException {
-258this.conf = 
connection.getConfiguration();
-259this.connection = connection;
-260
-261// TODO: receive 
ConnectionConfiguration here rather than re-parsing these configs every time.
-262this.pause = 
this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-263
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-264this.numRetries = 
this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-265
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-266this.operationTimeout = 
this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-267
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-268this.rpcTimeout = 
this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-269
HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-270this.syncWaitTimeout = 
this.conf.getInt(
-271  
"hbase.client.sync.wait.timeout.msec", 10 * 6); // 10min
-272
-273this.rpcCallerFactory = 
connection.getRpcRetryingCallerFactory();
-274this.rpcControllerFactory = 
connection.getRpcControllerFactory();
-275
-276this.ng = 
this.connection.getNonceGenerator();
-277  }
-278
-279  @Override
-280  public void abort(String why, Throwable 
e) {
-281// Currently does nothing but throw 
the passed message and exception
-282this.aborted = true;
-283throw new RuntimeException(why, e);
-284  }
-285
-286  @Override
-287  public boolean isAborted() {
-288return this.aborted;
-289  }
-290
-291  @Override
-292  public boolean abortProcedure(final 
long procId, final boolean mayInterruptIfRunning)
-293  throws IOException {
-294return 
get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
-295  TimeUnit.MILLISECONDS);
-296  }
-297
-298  @Override
-299  public Future<Boolean> 
abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
-300  throws IOException {
-301Boolean abortProcResponse =
-302executeCallable(new 
MasterCallable<AbortProcedureResponse>(getConnection(),
-303getRpcControllerFactory()) 
{
-304  @Override
-305  protected AbortProcedureResponse 
rpcCall() throws Exception {
-306AbortProcedureRequest 
abortProcRequest =
-307
AbortProcedureRequest.newBuilder().setProcId(procId).build();
-308return 
master.abortProcedure(getRpcController(), abortProcRequest);
-309  }
-310}).getIsProcedureAborted();
-311return new AbortProcedureFuture(this, 
procId, abortProcResponse);
-312  }
-313
-314  @Override
-315  public List<TableDescriptor> 
listTableDescriptors() throws IOException {
-316return 
listTableDescriptors((Pattern)null, false);
-317  }
-318
-319  @Override
-320  public List<TableDescriptor> 
listTableDescriptors(Pattern pattern) throws IOException {
-321return listTableDescriptors(pattern, 
false);
-322  }
-323
-324  @Override
-325  public List<TableDescriptor> 
listTableDescriptors(Pattern pattern, boolean includeSysTables)
-326  throws IOException {
-327return executeCallable(new 
MasterCallable<List<TableDescriptor>>(getConnection(),
-328getRpcControllerFactory()) {
-329  @Override
-330  protected 
List<TableDescriptor> rpcCall() throws Exception {
-331GetTableDescriptorsRequest req 
=
-332
RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
-333return 
ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-334req));
-335  }
-336});
-337  }
-338

[20/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
index fe4e081..eecf20f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
@@ -44,1858 +44,1838 @@
 036import 
org.apache.hadoop.hbase.HBaseIOException;
 037import 
org.apache.hadoop.hbase.HConstants;
 038import 
org.apache.hadoop.hbase.PleaseHoldException;
-039import 
org.apache.hadoop.hbase.RegionException;
-040import 
org.apache.hadoop.hbase.ServerName;
-041import 
org.apache.hadoop.hbase.TableName;
-042import 
org.apache.hadoop.hbase.UnknownRegionException;
-043import 
org.apache.hadoop.hbase.YouAreDeadException;
-044import 
org.apache.hadoop.hbase.client.DoNotRetryRegionException;
-045import 
org.apache.hadoop.hbase.client.RegionInfo;
-046import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-047import 
org.apache.hadoop.hbase.client.Result;
-048import 
org.apache.hadoop.hbase.client.TableState;
-049import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-050import 
org.apache.hadoop.hbase.favored.FavoredNodesManager;
-051import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-052import 
org.apache.hadoop.hbase.master.LoadBalancer;
-053import 
org.apache.hadoop.hbase.master.MasterServices;
-054import 
org.apache.hadoop.hbase.master.MetricsAssignmentManager;
-055import 
org.apache.hadoop.hbase.master.NoSuchProcedureException;
-056import 
org.apache.hadoop.hbase.master.RegionPlan;
-057import 
org.apache.hadoop.hbase.master.RegionState;
-058import 
org.apache.hadoop.hbase.master.RegionState.State;
-059import 
org.apache.hadoop.hbase.master.ServerListener;
-060import 
org.apache.hadoop.hbase.master.TableStateManager;
-061import 
org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
-062import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-063import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-064import 
org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-065import 
org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-066import 
org.apache.hadoop.hbase.procedure2.Procedure;
-067import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-068import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-069import 
org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
-070import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-071import 
org.apache.hadoop.hbase.regionserver.SequenceId;
-072import 
org.apache.hadoop.hbase.util.Bytes;
-073import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-074import 
org.apache.hadoop.hbase.util.HasThread;
-075import 
org.apache.hadoop.hbase.util.Pair;
-076import 
org.apache.hadoop.hbase.util.Threads;
-077import 
org.apache.hadoop.hbase.util.VersionInfo;
-078import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-079import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-080import 
org.apache.yetus.audience.InterfaceAudience;
-081import 
org.apache.zookeeper.KeeperException;
-082import org.slf4j.Logger;
-083import org.slf4j.LoggerFactory;
+039import 
org.apache.hadoop.hbase.ServerName;
+040import 
org.apache.hadoop.hbase.TableName;
+041import 
org.apache.hadoop.hbase.UnknownRegionException;
+042import 
org.apache.hadoop.hbase.YouAreDeadException;
+043import 
org.apache.hadoop.hbase.client.DoNotRetryRegionException;
+044import 
org.apache.hadoop.hbase.client.RegionInfo;
+045import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
+046import 
org.apache.hadoop.hbase.client.Result;
+047import 
org.apache.hadoop.hbase.client.TableState;
+048import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
+049import 
org.apache.hadoop.hbase.favored.FavoredNodesManager;
+050import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
+051import 
org.apache.hadoop.hbase.master.LoadBalancer;
+052import 
org.apache.hadoop.hbase.master.MasterServices;
+053import 
org.apache.hadoop.hbase.master.MetricsAssignmentManager;
+054import 
org.apache.hadoop.hbase.master.RegionPlan;
+055import 
org.apache.hadoop.hbase.master.RegionState;
+056import 
org.apache.hadoop.hbase.master.RegionState.State;
+057import 
org.apache.hadoop.hbase.master.ServerListener;
+058import 
org.apache.hadoop.hbase.master.TableStateManager;
+059import 
org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
+060import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+061import 

[20/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.html
index 74ae9d2..bba6ed2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -220,44 +220,94 @@ implements 
 long
+getAvgRegionSize(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
+Get the average region size for this table
+
+
+
+long
+getAvgStoreFileAge(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringtable)
+
+
+long
+getCpRequestCount(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringtable)
+
+
+long
 getCpRequestsCount(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
 Get the number of CoprocessorService requests that have 
been issued against this table
 
 
-
+
+long
+getFilteredReadRequestCount(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
+Get the total number of filtered read requests that have 
been issued against this table
+
+
+
 long
-getMemStoresSize(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
+getMaxStoreFileAge(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringtable)
+
+
+long
+getMemStoreSize(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
 Get the memory store size against this table
 
 
-
+
+long
+getMinStoreFileAge(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringtable)
+
+
+long
+getNumReferenceFiles(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringtable)
+
+
+long
+getNumRegions(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
+Get the number of regions hosted on for this table
+
+
+
+long
+getNumStoreFiles(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
+Get the number of store files hosted for this table
+
+
+
+long
+getNumStores(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
+Get the number of stores hosted on for this table
+
+
+
 long
-getReadRequestsCount(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
+getReadRequestCount(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
 Get the number of read requests that have been issued 
against this table
 
 
-
+
 long
-getStoreFilesSize(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
+getStoreFileSize(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
 Get the store file size against this table
 
 
-
+
 long
 getTableSize(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
 Get the table region size against this table
 
 
-
+
 long
 getTotalRequestsCount(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringtable)
-Get the total number of requests that have been issued 
against this table
+Get the total number of requests that have been issued for 
this 
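The rename pattern in this hunk is consistent: plural getters become singular, and a batch of per-table gauges is added. A hedged before/after fragment; wrapper and the table name are placeholders:

    // wrapper is a MetricsTableWrapperAggregate; "t1" is a placeholder table name.
    long reads     = wrapper.getReadRequestCount("t1");   // was getReadRequestsCount
    long memstore  = wrapper.getMemStoreSize("t1");       // was getMemStoresSize
    long storeSize = wrapper.getStoreFileSize("t1");      // was getStoreFilesSize
    long avgRegion = wrapper.getAvgRegionSize("t1");      // new per-table gauge in this change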

[20/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/downloads.html
--
diff --git a/downloads.html b/downloads.html
index a8552e1..83b6734 100644
--- a/downloads.html
+++ b/downloads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Apache HBase Downloads
 
@@ -423,7 +423,7 @@ under the License. -->
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-27
+  Last Published: 
2018-08-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 793f8ef..aed354c 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Export Control
@@ -331,7 +331,7 @@ for more details.
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-27
+  Last Published: 
2018-08-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/index.html
--
diff --git a/index.html b/index.html
index 1bf2722..ef12102 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Apache HBase™ Home
 
@@ -411,7 +411,7 @@ Apache HBase is an open-source, distributed, versioned, 
non-relational database
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-27
+  Last Published: 
2018-08-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/integration.html
--
diff --git a/integration.html b/integration.html
index 08000a6..6904d39 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  CI Management
 
@@ -291,7 +291,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-27
+  Last Published: 
2018-08-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/issue-tracking.html
--
diff --git a/issue-tracking.html b/issue-tracking.html
index 62b64ee..75b584b 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Issue Management
 
@@ -288,7 +288,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-27
+  Last Published: 
2018-08-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/license.html
--
diff --git a/license.html b/license.html
index 7a4f11f..e1199d3 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Licenses
 
@@ -491,7 +491,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-27
+  Last Published: 
2018-08-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/mail-lists.html
--
diff --git a/mail-lists.html b/mail-lists.html
index a825b70..845ab02 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Mailing Lists
 
@@ -341,7 +341,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-27
+  Last Published: 
2018-08-28
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/metrics.html
--
diff --git a/metrics.html b/metrics.html
index 6a00fab..d9a7ce0 100644
--- a/metrics.html
+++ b/metrics.html

[20/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/filter/ColumnValueFilter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/ColumnValueFilter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/ColumnValueFilter.html
index dab230a..2ccd245 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/ColumnValueFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/ColumnValueFilter.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":10,"i13":10,"i14":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":9,"i14":10,"i15":10,"i16":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class ColumnValueFilter
+public class ColumnValueFilter
 extends FilterBase
 Different from SingleColumnValueFilter, which returns an entire row
  when the specified condition is matched, ColumnValueFilter returns the
 matched cell only.
@@ -254,59 +254,67 @@ extends 
 
 
+boolean
+equals(Object obj)
+
+
 Filter.ReturnCode
 filterCell(Cellc)
 A way to filter based on the column family, column 
qualifier and/or the column value.
 
 
-
+
 boolean
 filterRowKey(Cellcell)
 Filters a row based on the row key.
 
 
-
+
 ByteArrayComparable
 getComparator()
 
-
+
 CompareOperator
 getCompareOperator()
 
-
+
 byte[]
 getFamily()
 
-
+
 byte[]
 getQualifier()
 
-
+
+int
+hashCode()
+
+
 boolean
 isFamilyEssential(byte[]name)
 By default, we require all scan's column families to be 
present.
 
 
-
+
 static ColumnValueFilter
 parseFrom(byte[]pbBytes)
 Parse protobuf bytes to a ColumnValueFilter
 
 
-
+
 void
 reset()
 Filters that are purely stateless and do nothing in their 
reset() methods can inherit
  this null/empty implementation.
 
 
-
+
 byte[]
 toByteArray()
 Return length 0 byte array for Filters that don't require 
special serialization
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 toString()
 Return filter's info for debugging and logging 
purpose.
@@ -332,7 +340,7 @@ extends 
 
 Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
+clone, finalize, getClass, notify, notifyAll, 

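The hunk above adds equals(Object) and hashCode() to ColumnValueFilter's method summary, so logically identical filters now compare equal. For context, a minimal usage sketch (the family, qualifier, and value bytes are illustrative):

    // Unlike SingleColumnValueFilter, only the matched cell is returned.
    Scan scan = new Scan();
    scan.setFilter(new ColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("q"),
        CompareOperator.EQUAL, Bytes.toBytes("value")));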
[20/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
index db8431b..a8cb7c4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
@@ -885,7766 +885,7797 @@
 877   * @return What the next sequence 
(edit) id should be.
 878   * @throws IOException e
 879   */
-880  private long initialize(final 
CancelableProgressable reporter) throws IOException {
-881
-882//Refuse to open the region if there 
is no column family in the table
-883if 
(htableDescriptor.getColumnFamilyCount() == 0) {
-884  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
-885  " should have at least one 
column family.");
-886}
-887
-888MonitoredTask status = 
TaskMonitor.get().createStatus("Initializing region " + this);
-889long nextSeqId = -1;
-890try {
-891  nextSeqId = 
initializeRegionInternals(reporter, status);
-892  return nextSeqId;
-893} finally {
-894  // nextSeqid will be -1 if the 
initialization fails.
-895  // At least it will be 0 
otherwise.
-896  if (nextSeqId == -1) {
-897status.abort("Exception during 
region " + getRegionInfo().getRegionNameAsString() +
-898  " initialization.");
-899  }
-900}
-901  }
-902
-903  private long 
initializeRegionInternals(final CancelableProgressable reporter,
-904  final MonitoredTask status) throws 
IOException {
-905if (coprocessorHost != null) {
-906  status.setStatus("Running 
coprocessor pre-open hook");
-907  coprocessorHost.preOpen();
-908}
-909
-910// Write HRI to a file in case we 
need to recover hbase:meta
-911// Only the primary replica should 
write .regioninfo
-912if 
(this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-913  status.setStatus("Writing region 
info on filesystem");
-914  fs.checkRegionInfoOnFilesystem();
-915}
-916
-917// Initialize all the HStores
-918status.setStatus("Initializing all 
the Stores");
-919long maxSeqId = 
initializeStores(reporter, status);
-920this.mvcc.advanceTo(maxSeqId);
-921if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-922  Collection<HStore> stores = this.stores.values();
-923  try {
-924// update the stores that we are 
replaying
-925LOG.debug("replaying wal for " + 
this.getRegionInfo().getEncodedName());
-926
stores.forEach(HStore::startReplayingFromWAL);
-927// Recover any edits if 
available.
-928maxSeqId = Math.max(maxSeqId,
-929  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-930// Make sure mvcc is up to max.
-931this.mvcc.advanceTo(maxSeqId);
-932  } finally {
-933LOG.debug("stopping wal replay 
for " + this.getRegionInfo().getEncodedName());
-934// update the stores that we are 
done replaying
-935
stores.forEach(HStore::stopReplayingFromWAL);
-936  }
-937}
-938this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-939
-940
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-941this.writestate.flushRequested = 
false;
-942this.writestate.compacting.set(0);
-943
-944if (this.writestate.writesEnabled) 
{
-945  LOG.debug("Cleaning up temporary 
data for " + this.getRegionInfo().getEncodedName());
-946  // Remove temporary data left over 
from old regions
-947  status.setStatus("Cleaning up 
temporary data from old regions");
-948  fs.cleanupTempDir();
-949}
-950
-951if (this.writestate.writesEnabled) 
{
-952  status.setStatus("Cleaning up 
detritus from prior splits");
-953  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-954  // these directories here on open.  
We may be opening a region that was
-955  // being split but we crashed in 
the middle of it all.
-956  LOG.debug("Cleaning up detritus for 
" + this.getRegionInfo().getEncodedName());
-957  fs.cleanupAnySplitDetritus();
-958  fs.cleanupMergesDir();
-959}
+880  @VisibleForTesting
+881  long initialize(final 
CancelableProgressable reporter) throws IOException {
+882
+883//Refuse to open the region if there 
is no column family in the table
+884if 
(htableDescriptor.getColumnFamilyCount() == 0) {
+885  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
+886  " should have at least one 
column family.");

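The hunk above re-tags HRegion.initialize() from private to @VisibleForTesting package-private without changing its logic. Stripped of diff markup, the failure-handling idiom it keeps is: create a MonitoredTask, and abort it whenever no sequence id was produced:

    MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this);
    long nextSeqId = -1;
    try {
      nextSeqId = initializeRegionInternals(reporter, status);
      return nextSeqId;
    } finally {
      // nextSeqId stays -1 only if initialization threw before completing.
      if (nextSeqId == -1) {
        status.abort("Exception during region " + getRegionInfo().getRegionNameAsString()
            + " initialization.");
      }
    }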
[20/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
deleted file mode 100644
index a8b7714..000
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
+++ /dev/null
@@ -1,539 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-Uses of Class 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode (Apache 
HBase 3.0.0-SNAPSHOT API)
-
-
-
-
-
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev
-Next
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-
-
-
-Uses of 
Classorg.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode
-
-
-
-
-
-Packages that use RegionStates.RegionStateNode
-
-Package
-Description
-
-
-
-org.apache.hadoop.hbase.master.assignment
-
-
-
-
-
-
-
-
-
-
-Uses of RegionStates.RegionStateNode in 
org.apache.hadoop.hbase.master.assignment
-
-Fields in org.apache.hadoop.hbase.master.assignment
 declared as RegionStates.RegionStateNode
-
-Modifier and Type
-Field and Description
-
-
-
-private RegionStates.RegionStateNode
-RegionStates.RegionFailedOpen.regionNode
-
-
-
-
-Fields in org.apache.hadoop.hbase.master.assignment
 with type parameters of type RegionStates.RegionStateNode
-
-Modifier and Type
-Field and Description
-
-
-
-private https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
 title="class or interface in java.util">ArrayListRegionStates.RegionStateNode
-AssignmentManager.pendingAssignQueue
-
-
-private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMapRegionInfo,RegionStates.RegionStateNode
-RegionStates.regionInTransition
-
-
-private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMapRegionInfo,RegionStates.RegionStateNode
-RegionStates.regionOffline
-Regions marked as offline on a read of hbase:meta.
-
-
-
-private https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetRegionStates.RegionStateNode
-RegionStates.ServerStateNode.regions
-
-
-private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMapbyte[],RegionStates.RegionStateNode
-RegionStates.regionsMap
-RegionName -- i.e.
-
-
-
-
-
-Methods in org.apache.hadoop.hbase.master.assignment
 that return RegionStates.RegionStateNode
-
-Modifier and Type
-Method and Description
-
-
-
-protected RegionStates.RegionStateNode
-RegionStates.createRegionStateNode(RegionInforegionInfo)
-
-
-protected RegionStates.RegionStateNode
-RegionStates.getOrCreateRegionStateNode(RegionInforegionInfo)
-
-
-RegionStates.RegionStateNode
-RegionTransitionProcedure.getRegionState(MasterProcedureEnvenv)
-
-
-RegionStates.RegionStateNode
-RegionStates.RegionFailedOpen.getRegionStateNode()
-
-
-protected RegionStates.RegionStateNode
-RegionStates.getRegionStateNode(RegionInforegionInfo)
-
-
-(package private) RegionStates.RegionStateNode
-RegionStates.getRegionStateNodeFromName(byte[]regionName)
-
-
-
-
-Methods in org.apache.hadoop.hbase.master.assignment
 that return types with arguments of type RegionStates.RegionStateNode
-
-Modifier and Type
-Method and Description
-
-
-
-https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetRegionStates.RegionStateNode
-RegionStates.ServerStateNode.getRegions()
-
-
-https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionStates.RegionStateNode
-RegionStates.getRegionsInTransition()
-
-
-https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 

[20/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.html
index 6086d40..3cfacfc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.html
@@ -903,103 +903,108 @@
 895  return;
 896}
 897
-898LOG.debug("{} held the lock before 
restarting, call acquireLock to restore it.", this);
-899LockState state = acquireLock(env);
-900assert state == 
LockState.LOCK_ACQUIRED;
-901  }
+898if (isFinished()) {
+899  LOG.debug("{} is already finished, 
skip acquiring lock.", this);
+900  return;
+901}
 902
-903  /**
-904   * Internal method called by the 
ProcedureExecutor that starts the user-level code acquireLock().
-905   */
-906  final LockState 
doAcquireLock(TEnvironment env, ProcedureStore store) {
-907if (waitInitialized(env)) {
-908  return LockState.LOCK_EVENT_WAIT;
-909}
-910if (lockedWhenLoading) {
-911  // reset it so we will not consider 
it anymore
-912  lockedWhenLoading = false;
-913  locked = true;
-914  // Here we return without persist 
the locked state, as lockedWhenLoading is true means
-915  // that the locked field of the 
procedure stored in procedure store is true, so we do not need
-916  // to store it again.
-917  return LockState.LOCK_ACQUIRED;
-918}
-919LockState state = acquireLock(env);
-920if (state == LockState.LOCK_ACQUIRED) 
{
-921  locked = true;
-922  // persist that we have held the 
lock. This must be done before we actually execute the
-923  // procedure, otherwise when 
restarting, we may consider the procedure does not have a lock,
-924  // but it may have already done 
some changes as we have already executed it, and if another
-925  // procedure gets the lock, then 
the semantic will be broken if the holdLock is true, as we do
-926  // not expect that another 
procedure can be executed in the middle.
-927  store.update(this);
-928}
-929return state;
-930  }
-931
-932  /**
-933   * Internal method called by the 
ProcedureExecutor that starts the user-level code releaseLock().
-934   */
-935  final void doReleaseLock(TEnvironment 
env, ProcedureStore store) {
-936locked = false;
-937// persist that we have released the 
lock. This must be done before we actually release the
-938// lock. Another procedure may take 
this lock immediately after we release the lock, and if we
-939// crash before persist the 
information that we have already released the lock, then when
-940// restarting there will be two 
procedures which both have the lock and cause problems.
-941if (getState() != 
ProcedureState.ROLLEDBACK) {
-942  // If the state is ROLLEDBACK, it 
means that we have already deleted the procedure from
-943  // procedure store, so do not need 
to log the release operation any more.
-944  store.update(this);
-945}
-946releaseLock(env);
-947  }
-948
-949  @Override
-950  public int compareTo(final 
ProcedureTEnvironment other) {
-951return Long.compare(getProcId(), 
other.getProcId());
+903LOG.debug("{} held the lock before 
restarting, call acquireLock to restore it.", this);
+904LockState state = acquireLock(env);
+905assert state == 
LockState.LOCK_ACQUIRED;
+906  }
+907
+908  /**
+909   * Internal method called by the 
ProcedureExecutor that starts the user-level code acquireLock().
+910   */
+911  final LockState 
doAcquireLock(TEnvironment env, ProcedureStore store) {
+912if (waitInitialized(env)) {
+913  return LockState.LOCK_EVENT_WAIT;
+914}
+915if (lockedWhenLoading) {
+916  // reset it so we will not consider 
it anymore
+917  lockedWhenLoading = false;
+918  locked = true;
+919  // Here we return without persist 
the locked state, as lockedWhenLoading is true means
+920  // that the locked field of the 
procedure stored in procedure store is true, so we do not need
+921  // to store it again.
+922  return LockState.LOCK_ACQUIRED;
+923}
+924LockState state = acquireLock(env);
+925if (state == LockState.LOCK_ACQUIRED) 
{
+926  locked = true;
+927  // persist that we have held the 
lock. This must be done before we actually execute the
+928  // procedure, otherwise when 
restarting, we may consider the procedure does not have a lock,
+929  // but it may have already done 
some changes as we have already executed it, and if another
+930  // procedure gets the lock, then 
the semantic will be broken if the holdLock is true, as we do
+931  // not expect that another 
procedure can be executed in the middle.
+932 

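The hunk above (together with the matching doAcquireLock change) enforces a persist-before-act ordering on lock state: the ProcedureStore is updated before the procedure executes under a newly acquired lock, and before a held lock is actually dropped. A minimal sketch of the rule, with illustrative names rather than the real API surface:

    // Persist "released" first; if we crash after the update but before
    // releaseLock, recovery sees the lock as free, never as doubly held.
    void releaseLockSafely(TEnvironment env, ProcedureStore store) {
      locked = false;
      store.update(this);
      releaseLock(env);
    }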
[20/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
index bd3c59e..21e240a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
@@ -33,62 +33,62 @@
 025import java.io.FileNotFoundException;
 026import java.io.FileOutputStream;
 027import java.io.IOException;
-028import java.io.ObjectInputStream;
-029import java.io.ObjectOutputStream;
-030import java.io.Serializable;
-031import java.nio.ByteBuffer;
-032import java.util.ArrayList;
-033import java.util.Comparator;
-034import java.util.HashSet;
-035import java.util.Iterator;
-036import java.util.List;
-037import java.util.Map;
-038import java.util.NavigableSet;
-039import java.util.PriorityQueue;
-040import java.util.Set;
-041import 
java.util.concurrent.ArrayBlockingQueue;
-042import 
java.util.concurrent.BlockingQueue;
-043import 
java.util.concurrent.ConcurrentHashMap;
-044import 
java.util.concurrent.ConcurrentMap;
-045import 
java.util.concurrent.ConcurrentSkipListSet;
-046import java.util.concurrent.Executors;
-047import 
java.util.concurrent.ScheduledExecutorService;
-048import java.util.concurrent.TimeUnit;
-049import 
java.util.concurrent.atomic.AtomicInteger;
-050import 
java.util.concurrent.atomic.AtomicLong;
-051import 
java.util.concurrent.atomic.LongAdder;
-052import java.util.concurrent.locks.Lock;
-053import 
java.util.concurrent.locks.ReentrantLock;
-054import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-055import 
org.apache.hadoop.conf.Configuration;
-056import 
org.apache.hadoop.hbase.HBaseConfiguration;
-057import 
org.apache.hadoop.hbase.io.HeapSize;
-058import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-063import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-064import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-066import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-068import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-069import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-070import 
org.apache.hadoop.hbase.nio.ByteBuff;
-071import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-072import 
org.apache.hadoop.hbase.util.HasThread;
-073import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-075import 
org.apache.hadoop.hbase.util.UnsafeAvailChecker;
-076import 
org.apache.hadoop.util.StringUtils;
-077import 
org.apache.yetus.audience.InterfaceAudience;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080
-081import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-082import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-083import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+028import java.io.Serializable;
+029import java.nio.ByteBuffer;
+030import java.util.ArrayList;
+031import java.util.Comparator;
+032import java.util.HashSet;
+033import java.util.Iterator;
+034import java.util.List;
+035import java.util.Map;
+036import java.util.NavigableSet;
+037import java.util.PriorityQueue;
+038import java.util.Set;
+039import 
java.util.concurrent.ArrayBlockingQueue;
+040import 
java.util.concurrent.BlockingQueue;
+041import 
java.util.concurrent.ConcurrentHashMap;
+042import 
java.util.concurrent.ConcurrentMap;
+043import 
java.util.concurrent.ConcurrentSkipListSet;
+044import java.util.concurrent.Executors;
+045import 
java.util.concurrent.ScheduledExecutorService;
+046import java.util.concurrent.TimeUnit;
+047import 
java.util.concurrent.atomic.AtomicInteger;
+048import 
java.util.concurrent.atomic.AtomicLong;
+049import 
java.util.concurrent.atomic.LongAdder;
+050import java.util.concurrent.locks.Lock;
+051import 
java.util.concurrent.locks.ReentrantLock;
+052import 
java.util.concurrent.locks.ReentrantReadWriteLock;
+053import 
org.apache.hadoop.conf.Configuration;
+054import 
org.apache.hadoop.hbase.HBaseConfiguration;
+055import 
org.apache.hadoop.hbase.io.HeapSize;
+056import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
+057import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+058import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;

[20/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
index 1b3ae53..6c4852a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
@@ -186,7 +186,7 @@ extends CLEANER_INTERVAL_CONF_KEY
 
 
-private Map<Long, ProcedureExecutor.CompletedProcedureRetainer>
+private Map<Long, ProcedureExecutor.CompletedProcedureRetainer<TEnvironment>>
 completed
 
 
@@ -253,7 +253,7 @@ extends 
 CompletedProcedureCleaner(org.apache.hadoop.conf.Configuration conf,
  ProcedureStore store,
- Map<Long, ProcedureExecutor.CompletedProcedureRetainer> completedMap,
+ Map<Long, ProcedureExecutor.CompletedProcedureRetainer<TEnvironment>> completedMap,
  Map<NonceKey, Long> nonceKeysToProcIdsMap)
 
 
@@ -288,7 +288,7 @@ extends Procedure
-acquireLock,
 addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doExecute,
 doReleaseLock,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState, getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout, haveSameParent,
 holdLock,
 incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 isYieldAfterExecutionStep,
 releaseLock,
 removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure, setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId, setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 toString,
 toS
 tringClass, toStringClassDetails,
 toStringDetails,
 toStringSimpleSB,
 toStringState,
 tryRunnable,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp, wasExecuted
+acquireLock,
 addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doExecute, doReleaseLock,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes, getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent, hasTimeout,
 haveSameParent,
 holdLock,
 incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 isYieldAfterExecutionStep,
 lockedWhenLoading,
 releaseLock,
 removeStackIndex,
 restoreLock,
 setAbortFailure,
 setChildrenLatch, 
setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner,
 setParent
 ProcId, setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout,
 setTimeoutFailu
 re, shouldWaitClientAck,
 toString,
 toStringClass,
 toStringClassDetails,
 toStringDetails,
 toStringSimpleSB,
 toStringState,
 tryRunnable,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 waitInitialized,
 wasExecuted
 
 
 
@@ -430,7 +430,7 @@ extends 
 
 

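The signature changes above thread the executor's TEnvironment type parameter through to the completed-procedure map, so retainers are typed to the same environment as the procedures they hold. A simplified sketch of the shape (illustrative, not the full class):

    class CompletedProcedureCleaner<TEnv> {
      private final Map<Long, CompletedProcedureRetainer<TEnv>> completed;

      CompletedProcedureCleaner(Map<Long, CompletedProcedureRetainer<TEnv>> completed) {
        this.completed = completed;
      }
    }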
[20/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.MoveCostFunction.html
@@ -540,1205 +540,1204 @@
 532  sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533    Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
 534    if (rLoads == null) {
-535      // There was nothing there
-536      rLoads = new ArrayDeque<>();
-537    } else if (rLoads.size() >= numRegionLoadsToRemember) {
-538      rLoads.remove();
-539    }
-540    rLoads.add(new BalancerRegionLoad(rm));
-541    loads.put(Bytes.toString(regionName), rLoads);
-542  });
-543});
-544
-545for(CostFromRegionLoadFunction cost : 
regionLoadFunctions) {
-546  cost.setLoads(loads);
-547}
-548  }
-549
-550  protected void initCosts(Cluster 
cluster) {
-551for (CostFunction c:costFunctions) 
{
-552  c.init(cluster);
-553}
-554  }
-555
-556  protected void 
updateCostsWithAction(Cluster cluster, Action action) {
-557for (CostFunction c : costFunctions) 
{
-558  c.postAction(action);
-559}
-560  }
-561
-562  /**
-563   * Get the names of the cost 
functions
-564   */
-565  public String[] getCostFunctionNames() 
{
-566if (costFunctions == null) return 
null;
-567String[] ret = new 
String[costFunctions.length];
-568for (int i = 0; i  
costFunctions.length; i++) {
-569  CostFunction c = 
costFunctions[i];
-570  ret[i] = 
c.getClass().getSimpleName();
-571}
-572
-573return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It 
will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be 
combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the 
cluster
-581   * @param previousCost the previous 
cost. This is used as an early out.
-582   * @return a double of a cost 
associated with the proposed cluster state.  This cost is an
-583   * aggregate of all individual 
cost functions.
-584   */
-585  protected double computeCost(Cluster 
cluster, double previousCost) {
-586double total = 0;
-587
-588for (int i = 0; i  
costFunctions.length; i++) {
-589  CostFunction c = 
costFunctions[i];
-590  this.tempFunctionCosts[i] = 0.0;
-591
-592  if (c.getMultiplier() <= 0) {
-593continue;
-594  }
-595
-596  Float multiplier = 
c.getMultiplier();
-597  Double cost = c.cost();
-598
-599  this.tempFunctionCosts[i] = 
multiplier*cost;
-600  total += 
this.tempFunctionCosts[i];
-601
-602  if (total > previousCost) {
-603break;
-604  }
-605}
-606
-607return total;
-608  }
-609
-610  /** Generates a candidate action to be 
applied to the cluster for cost function search */
-611  abstract static class 
CandidateGenerator {
-612abstract Cluster.Action 
generate(Cluster cluster);
-613
-614/**
-615 * From a list of regions pick a 
random one. Null can be returned which
-616 * {@link 
StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region 
move
-617 * rather than swap.
-618 *
-619 * @param clusterThe state of 
the cluster
-620 * @param server index of the 
server
-621 * @param chanceOfNoSwap Chance that 
this will decide to try a move rather
-622 *   than a 
swap.
-623 * @return a random {@link 
RegionInfo} or null if an asymmetrical move is
-624 * suggested.
-625 */
-626protected int 
pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627  // Check to see if this is just a 
move.
-628  if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
-629// signal a move only.
-630return -1;
-631  }
-632  int rand = 
RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633  return 
cluster.regionsPerServer[server][rand];
-634
-635}
-636protected int 
pickRandomServer(Cluster cluster) {
-637  if (cluster.numServers < 1) {
-638return -1;
-639  }
-640
-641  return 
RANDOM.nextInt(cluster.numServers);
-642}
-643
-644protected int pickRandomRack(Cluster 
cluster) {
-645  if (cluster.numRacks < 1) {
-646return -1;
-647  }
-648
-649  return 

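Reassembled from the hunk above, the main cost loop is a weighted sum over all cost functions with an early exit once the candidate already costs more than the incumbent state:

    double total = 0;
    for (int i = 0; i < costFunctions.length; i++) {
      CostFunction c = costFunctions[i];
      this.tempFunctionCosts[i] = 0.0;
      if (c.getMultiplier() <= 0) {
        continue; // disabled cost function
      }
      this.tempFunctionCosts[i] = c.getMultiplier() * c.cost();
      total += this.tempFunctionCosts[i];
      if (total > previousCost) {
        break; // already worse than the previous state; stop evaluating
      }
    }
    return total;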
[20/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/RetryImmediatelyException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/RetryImmediatelyException.html 
b/apidocs/org/apache/hadoop/hbase/RetryImmediatelyException.html
index 08c03f4..36052a7 100644
--- a/apidocs/org/apache/hadoop/hbase/RetryImmediatelyException.html
+++ b/apidocs/org/apache/hadoop/hbase/RetryImmediatelyException.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -20,38 +20,38 @@
 //-->
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+PrevClass
+NextClass
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 
 
org.apache.hadoop.hbase
-

类 RetryImmediatelyException

+

Class RetryImmediatelyException


[20/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/RegionMetrics.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/RegionMetrics.html 
b/apidocs/org/apache/hadoop/hbase/RegionMetrics.html
index c745b96..e4dc75c 100644
--- a/apidocs/org/apache/hadoop/hbase/RegionMetrics.html
+++ b/apidocs/org/apache/hadoop/hbase/RegionMetrics.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":18,"i10":6,"i11":6,"i12":18,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
+var tabs = 
{65535:["t0","所有方法"],2:["t2","实例方法"],4:["t3","抽象方法"],16:["t5","默认方法"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-PrevClass
-NextClass
+上一个类
+下一个类
 
 
-Frames
-NoFrames
+框架
+无框架
 
 
-AllClasses
+所有类
 
 
 
 
org.apache.hadoop.hbase
-

Interface RegionMetrics

+

接口 RegionMetrics

  • -
    All Known Implementing Classes:
    -
    RegionLoad
    +
    所有已知实现类:
    +
    RegionLoad


    @@ -119,15 +119,15 @@ public interface -

    Method Summary

    - - +

    方法概要

    +
    All Methods Instance Methods Abstract Methods Default Methods 
    + - - + + -
    所有方法 实例方法 抽象方法 默认方法 
    Modifier and Type Method and Description 限定符和类型 方法和说明

    [20/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/replication/regionserver/class-use/ReplicationStatus.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/class-use/ReplicationStatus.html
     
    b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/class-use/ReplicationStatus.html
    new file mode 100644
    index 000..18dd8e4
    --- /dev/null
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/class-use/ReplicationStatus.html
    @@ -0,0 +1,234 @@
    +http://www.w3.org/TR/html4/loose.dtd;>
    +
    +
    +
    +
    +
    +Uses of Class 
    org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus (Apache 
    HBase 3.0.0-SNAPSHOT API)
    +
    +
    +
    +
    +
    +
    +
    +JavaScript is disabled on your browser.
    +
    +
    +
    +
    +
    +Skip navigation links
    +
    +
    +
    +
    +Overview
    +Package
    +Class
    +Use
    +Tree
    +Deprecated
    +Index
    +Help
    +
    +
    +
    +
    +Prev
    +Next
    +
    +
    +Frames
    +NoFrames
    +
    +
    +AllClasses
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Uses of 
    Classorg.apache.hadoop.hbase.replication.regionserver.ReplicationStatus
    +
    +
    +
    +
    +
    +Packages that use ReplicationStatus
    +
    +Package
    +Description
    +
    +
    +
    +org.apache.hadoop.hbase.regionserver
    +
    +
    +
    +org.apache.hadoop.hbase.replication.regionserver
    +
    +
    +
    +org.apache.hadoop.hbase.tmpl.regionserver
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Uses of ReplicationStatus in org.apache.hadoop.hbase.regionserver
    +
    +Methods in org.apache.hadoop.hbase.regionserver
     that return types with arguments of type ReplicationStatus
    +
    +Modifier and Type
    +Method and Description
    +
    +
    +
    +https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String,ReplicationStatus
    +HRegionServer.getWalGroupsReplicationStatus()
    +
    +
    +
    +
    +
    +
    +
    +Uses of ReplicationStatus in org.apache.hadoop.hbase.replication.regionserver
    +
    +Methods in org.apache.hadoop.hbase.replication.regionserver
     that return ReplicationStatus
    +
    +Modifier and Type
    +Method and Description
    +
    +
    +
    +ReplicationStatus
    +ReplicationStatus.ReplicationStatusBuilder.build()
    +
    +
    +
    +
    +Methods in org.apache.hadoop.hbase.replication.regionserver
     that return types with arguments of type ReplicationStatus
    +
    +Modifier and Type
    +Method and Description
    +
    +
    +
    +default https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String,ReplicationStatus
    +ReplicationSourceInterface.getWalGroupStatus()
    +get the stat of replication for each wal group.
    +
    +
    +
    +https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String,ReplicationStatus
    +ReplicationSource.getWalGroupStatus()
    +
    +
    +
    +
    +
    +
    +
    +Uses of ReplicationStatus in org.apache.hadoop.hbase.tmpl.regionserver
    +
    +Method parameters in org.apache.hadoop.hbase.tmpl.regionserver
     with type arguments of type ReplicationStatus
    +
    +Modifier and Type
    +Method and Description
    +
    +
    +
    +private void
    +ReplicationStatusTmplImpl.__jamon_innerUnit__currentLog(https://docs.oracle.com/javase/8/docs/api/java/io/Writer.html?is-external=true;
     title="class or interface in java.io">WriterjamonWriter,
    + https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String,ReplicationStatusmetrics)
    +
    +
    +private void
    +ReplicationStatusTmplImpl.__jamon_innerUnit__replicationDelay(https://docs.oracle.com/javase/8/docs/api/java/io/Writer.html?is-external=true;
     title="class or interface in java.io">WriterjamonWriter,
    +   https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String,ReplicationStatusmetrics)
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Skip navigation links
    +
    +
    +
    +
    +Overview
    +Package
    +Class
    +Use
    +Tree
    +Deprecated
    +Index
    +Help
    +
    +
    +
    +
    +Prev
    

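The new class-use page above shows HRegionServer exposing getWalGroupsReplicationStatus() as a Map<String, ReplicationStatus> keyed by WAL group name. A hedged sketch of consuming it (internal API; the logger usage is illustrative):

    Map<String, ReplicationStatus> statuses = regionServer.getWalGroupsReplicationStatus();
    statuses.forEach((walGroup, status) ->
        LOG.info("wal group {}: {}", walGroup, status));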
    [20/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    index 8ac7885..5454963 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
    @@ -538,835 +538,842 @@
     530  // Other constants
     531
     532  /**
    -533   * An empty instance.
    +533   * An empty byte array instance.
     534   */
     535  public static final byte [] 
    EMPTY_BYTE_ARRAY = new byte [0];
     536
    -537  public static final ByteBuffer 
    EMPTY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_BYTE_ARRAY);
    -538
    -539  /**
    -540   * Used by scanners, etc when they want 
    to start at the beginning of a region
    -541   */
    -542  public static final byte [] 
    EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
    +537  /**
    +538   * An empty string instance.
    +539   */
    +540  public static final String EMPTY_STRING 
    = "";
    +541
    +542  public static final ByteBuffer 
    EMPTY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_BYTE_ARRAY);
     543
     544  /**
    -545   * Last row in a table.
    +545   * Used by scanners, etc when they want 
    to start at the beginning of a region
     546   */
    -547  public static final byte [] 
    EMPTY_END_ROW = EMPTY_START_ROW;
    +547  public static final byte [] 
    EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
     548
     549  /**
    -550* Used by scanners and others when 
    they're trying to detect the end of a
    -551* table
    -552*/
    -553  public static final byte [] LAST_ROW = 
    EMPTY_BYTE_ARRAY;
    -554
    -555  /**
    -556   * Max length a row can have because of 
    the limitation in TFile.
    -557   */
    -558  public static final int MAX_ROW_LENGTH 
    = Short.MAX_VALUE;
    +550   * Last row in a table.
    +551   */
    +552  public static final byte [] 
    EMPTY_END_ROW = EMPTY_START_ROW;
    +553
    +554  /**
    +555* Used by scanners and others when 
    they're trying to detect the end of a
    +556* table
    +557*/
    +558  public static final byte [] LAST_ROW = 
    EMPTY_BYTE_ARRAY;
     559
     560  /**
    -561   * Timestamp to use when we want to 
    refer to the latest cell.
    -562   *
    -563   * On client side, this is the 
    timestamp set by default when no timestamp is specified,
    -564   * to refer to the latest.
    -565   * On server side, this acts as a 
    notation.
    -566   * (1) For a cell of Put, which has 
    this notation,
    -567   * its timestamp will be replaced 
    with server's current time.
    -568   * (2) For a cell of Delete, which has 
    this notation,
    -569   * A. If the cell is of {@link 
    KeyValue.Type#Delete}, HBase issues a Get operation firstly.
    -570   *a. When the count of cell it 
    gets is less than the count of cell to delete,
    -571   *   the timestamp of Delete 
    cell will be replaced with server's current time.
    -572   *b. When the count of cell it 
    gets is equal to the count of cell to delete,
    -573   *   the timestamp of Delete 
    cell will be replaced with the latest timestamp of cell it
    -574   *   gets.
    -575   *   (c. It is invalid and an 
    exception will be thrown,
    -576   *   if the count of cell it 
    gets is greater than the count of cell to delete,
    -577   *   as the max version of Get 
    is set to the count of cell to delete.)
    -578   * B. If the cell is of other 
    Delete types, like {@link KeyValue.Type#DeleteFamilyVersion},
    -579   *{@link 
    KeyValue.Type#DeleteColumn}, or {@link KeyValue.Type#DeleteFamily},
    -580   *the timestamp of Delete cell 
    will be replaced with server's current time.
    -581   *
    -582   * So that is why it is named as 
    "latest" but assigned as the max value of Long.
    -583   */
    -584  public static final long 
    LATEST_TIMESTAMP = Long.MAX_VALUE;
    -585
    -586  /**
    -587   * Timestamp to use when we want to 
    refer to the oldest cell.
    -588   * Special! Used in fake Cells only. 
    Should never be the timestamp on an actual Cell returned to
    -589   * a client.
    -590   * @deprecated Should not be public 
    since hbase-1.3.0. For internal use only. Move internal to
    -591   *   Scanners flagged as special 
    timestamp value never to be returned as timestamp on a Cell.
    -592   */
    -593  @Deprecated
    -594  public static final long 
    OLDEST_TIMESTAMP = Long.MIN_VALUE;
    -595
    -596  /**
    -597   * LATEST_TIMESTAMP in bytes form
    -598   */
    -599  public static final byte [] 
    LATEST_TIMESTAMP_BYTES = {
    -600// big-endian
    -601(byte) (LATEST_TIMESTAMP  
    56),
    -602(byte) (LATEST_TIMESTAMP  
    48),
    -603(byte) (LATEST_TIMESTAMP  
    40),
    -604(byte) (LATEST_TIMESTAMP  
    32),
    -605(byte) (LATEST_TIMESTAMP  
    24),
    -606(byte) (LATEST_TIMESTAMP  
    16),
    -607(byte) (LATEST_TIMESTAMP  
    8),
    -608(byte) LATEST_TIMESTAMP,
    -609  };
    -610
    -611  /**
    -612   * Define for 'return-all-versions'.
    -613   */
    -614  public static final int ALL_VERSIONS = 
    

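The reshuffled constants above keep their semantics; in particular LATEST_TIMESTAMP_BYTES is simply Long.MAX_VALUE laid out big-endian, one byte per shift. The same bytes fall out of the usual utility, since Bytes.toBytes(long) also writes big-endian:

    assert Arrays.equals(Bytes.toBytes(HConstants.LATEST_TIMESTAMP),
        HConstants.LATEST_TIMESTAMP_BYTES);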
    [20/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
    index b8d9dfb..f8cddb1 100644
    --- a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
    +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterObserver.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = 
    {"i0":18,"i1":18,"i2":18,"i3":18,"i4":18,"i5":18,"i6":18,"i7":18,"i8":18,"i9":18,"i10":18,"i11":18,"i12":18,"i13":18,"i14":50,"i15":18,"i16":18,"i17":18,"i18":18,"i19":18,"i20":18,"i21":18,"i22":18,"i23":18,"i24":18,"i25":18,"i26":18,"i27":18,"i28":18,"i29":18,"i30":18,"i31":18,"i32":18,"i33":18,"i34":18,"i35":18,"i36":18,"i37":18,"i38":18,"i39":18,"i40":18,"i41":18,"i42":18,"i43":18,"i44":18,"i45":18,"i46":50,"i47":18,"i48":50,"i49":18,"i50":18,"i51":18,"i52":18,"i53":18,"i54":18,"i55":18,"i56":18,"i57":18,"i58":18,"i59":18,"i60":18,"i61":18,"i62":18,"i63":18,"i64":18,"i65":18,"i66":18,"i67":18,"i68":18,"i69":18,"i70":18,"i71":18,"i72":18,"i73":18,"i74":18,"i75":18,"i76":18,"i77":18,"i78":18,"i79":18,"i80":18,"i81":18,"i82":18,"i83":18,"i84":18,"i85":18,"i86":18,"i87":18,"i88":18,"i89":18,"i90":18,"i91":18,"i92":18,"i93":18,"i94":18,"i95":18,"i96":18,"i97":18,"i98":18,"i99":18,"i100":18,"i101":18,"i102":18,"i103":18,"i104":18,"i105":18,"i106":18,"i107":18,"i108":18,"i
     
    109":18,"i110":18,"i111":18,"i112":18,"i113":18,"i114":18,"i115":18,"i116":18,"i117":18,"i118":50,"i119":18,"i120":50,"i121":18,"i122":50,"i123":18,"i124":18,"i125":18,"i126":18,"i127":18,"i128":18,"i129":18,"i130":18,"i131":18,"i132":18,"i133":18,"i134":18,"i135":18,"i136":18,"i137":18,"i138":18,"i139":18,"i140":18,"i141":18,"i142":18,"i143":18,"i144":18,"i145":18,"i146":18,"i147":18,"i148":18,"i149":18,"i150":18,"i151":18,"i152":18};
    +var methods = 
    {"i0":18,"i1":18,"i2":18,"i3":18,"i4":18,"i5":18,"i6":18,"i7":18,"i8":18,"i9":18,"i10":18,"i11":18,"i12":18,"i13":18,"i14":50,"i15":18,"i16":18,"i17":18,"i18":18,"i19":18,"i20":18,"i21":18,"i22":18,"i23":18,"i24":18,"i25":18,"i26":18,"i27":18,"i28":18,"i29":18,"i30":18,"i31":18,"i32":18,"i33":18,"i34":18,"i35":18,"i36":18,"i37":18,"i38":18,"i39":18,"i40":18,"i41":18,"i42":18,"i43":18,"i44":18,"i45":18,"i46":50,"i47":18,"i48":50,"i49":18,"i50":18,"i51":18,"i52":18,"i53":18,"i54":18,"i55":18,"i56":18,"i57":18,"i58":18,"i59":18,"i60":18,"i61":18,"i62":18,"i63":18,"i64":18,"i65":18,"i66":18,"i67":18,"i68":18,"i69":18,"i70":18,"i71":18,"i72":18,"i73":18,"i74":18,"i75":18,"i76":18,"i77":18,"i78":18,"i79":18,"i80":18,"i81":18,"i82":18,"i83":18,"i84":18,"i85":18,"i86":18,"i87":18,"i88":18,"i89":18,"i90":18,"i91":18,"i92":18,"i93":18,"i94":18,"i95":18,"i96":18,"i97":18,"i98":18,"i99":18,"i100":18,"i101":18,"i102":18,"i103":18,"i104":18,"i105":18,"i106":18,"i107":18,"i108":18,"i
     
    109":18,"i110":18,"i111":18,"i112":18,"i113":18,"i114":18,"i115":18,"i116":18,"i117":18,"i118":18,"i119":50,"i120":18,"i121":50,"i122":18,"i123":50,"i124":18,"i125":18,"i126":18,"i127":18,"i128":18,"i129":18,"i130":18,"i131":18,"i132":18,"i133":18,"i134":18,"i135":18,"i136":18,"i137":18,"i138":18,"i139":18,"i140":18,"i141":18,"i142":18,"i143":18,"i144":18,"i145":18,"i146":18,"i147":18,"i148":18,"i149":18,"i150":18,"i151":18,"i152":18,"i153":18,"i154":18};
     var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -107,7 +107,7 @@ var activeTableTab = "activeTableTab";
     
     @InterfaceAudience.LimitedPrivate(value="Coprocesssor")
      @InterfaceStability.Evolving
    -public interface MasterObserver
    +public interface MasterObserver
     Defines coprocessor hooks for interacting with operations 
    on the
      HMaster 
    process.
      
    @@ -698,12 +698,21 @@ public interface 
     default void
    +postTransitReplicationPeerSyncReplicationState(ObserverContextMasterCoprocessorEnvironmentctx,
    +  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringpeerId,
    +  SyncReplicationStatefrom,
    +  SyncReplicationStateto)
    +Called after transit current cluster state for the 
    specified synchronous replication peer
    +
    +
    +
    +default void
     postTruncateTable(ObserverContextMasterCoprocessorEnvironmentctx,
      TableNametableName)
     Called after the truncateTable operation has been 
    requested.
     
     
    -
    +
     default void
     postUnassign(ObserverContextMasterCoprocessorEnvironmentctx,
     RegionInforegionInfo,
    @@ -711,7 +720,7 @@ public interface Called after the region unassignment has been 
    requested.
     
     
    -
    +
     default void
     

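The hunk above adds a default, no-op postTransitReplicationPeerSyncReplicationState hook to MasterObserver, so existing coprocessors keep compiling and observers override only what they need. A minimal sketch of an observer picking up the new hook (class name illustrative):

    public class PeerStateAuditor implements MasterCoprocessor, MasterObserver {
      @Override
      public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
      }

      @Override
      public void postTransitReplicationPeerSyncReplicationState(
          ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
          SyncReplicationState from, SyncReplicationState to) {
        // invoked after the peer's sync replication state moved from -> to
      }
    }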
    [20/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
     
    b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
    index 280dacf..02afc83 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
    @@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -static class HRegion.MutationBatchOperation
    +static class HRegion.MutationBatchOperation
     extends HRegion.BatchOperationMutation
     Batch of mutation operations. Base class is shared with HRegion.ReplayBatchOperation
     as most
      of the logic is same.
    @@ -342,7 +342,7 @@ extends 
     
     nonceGroup
    -privatelong nonceGroup
    +privatelong nonceGroup
     
     
     
    @@ -351,7 +351,7 @@ extends 
     
     nonce
    -privatelong nonce
    +privatelong nonce
     
     
     
    @@ -368,7 +368,7 @@ extends 
     
     MutationBatchOperation
    -publicMutationBatchOperation(HRegionregion,
    +publicMutationBatchOperation(HRegionregion,
       Mutation[]operations,
       booleanatomic,
       longnonceGroup,
    @@ -389,7 +389,7 @@ extends 
     
     getMutation
    -publicMutationgetMutation(intindex)
    +publicMutationgetMutation(intindex)
     
     Specified by:
     getMutationin
     classHRegion.BatchOperationMutation
    @@ -402,7 +402,7 @@ extends 
     
     getNonceGroup
    -publiclonggetNonceGroup(intindex)
    +publiclonggetNonceGroup(intindex)
     
     Specified by:
     getNonceGroupin
     classHRegion.BatchOperationMutation
    @@ -415,7 +415,7 @@ extends 
     
     getNonce
    -publiclonggetNonce(intindex)
    +publiclonggetNonce(intindex)
     
     Specified by:
     getNoncein
     classHRegion.BatchOperationMutation
    @@ -428,7 +428,7 @@ extends 
     
     getMutationsForCoprocs
    -publicMutation[]getMutationsForCoprocs()
    +publicMutation[]getMutationsForCoprocs()
     Description copied from 
    class:HRegion.BatchOperation
     This method is potentially expensive and useful mostly for 
    non-replay CP path.
     
    @@ -443,7 +443,7 @@ extends 
     
     isInReplay
    -publicbooleanisInReplay()
    +publicbooleanisInReplay()
     
     Specified by:
     isInReplayin
     classHRegion.BatchOperationMutation
    @@ -456,7 +456,7 @@ extends 
     
     getOrigLogSeqNum
    -publiclonggetOrigLogSeqNum()
    +publiclonggetOrigLogSeqNum()
     
     Specified by:
     getOrigLogSeqNumin
     classHRegion.BatchOperationMutation
    @@ -469,7 +469,7 @@ extends 
     
     startRegionOperation
    -publicvoidstartRegionOperation()
    +publicvoidstartRegionOperation()
       throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     
     Specified by:
    @@ -485,7 +485,7 @@ extends 
     
     closeRegionOperation
    -publicvoidcloseRegionOperation()
    +publicvoidcloseRegionOperation()
       throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     
     Specified by:
    @@ -501,7 +501,7 @@ extends 
     
     checkAndPreparePut
    -publicvoidcheckAndPreparePut(Putp)
    +publicvoidcheckAndPreparePut(Putp)
     throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     Description copied from 
    class:HRegion.BatchOperation
     Implement any Put request specific check and prepare logic 
    here. Please refer to
    @@ -520,7 +520,7 @@ extends 
     
     checkAndPrepare
    -publicvoidcheckAndPrepare()
    +publicvoidcheckAndPrepare()
      throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     Description copied from 
    class:HRegion.BatchOperation
     Validates each mutation and prepares a batch for write. If 
    necessary (non-replay case), runs
    @@ -542,7 +542,7 @@ extends 
     
     prepareMiniBatchOperations
    -publicvoidprepareMiniBatchOperations(MiniBatchOperationInProgressMutationminiBatchOp,
    +publicvoidprepareMiniBatchOperations(MiniBatchOperationInProgressMutationminiBatchOp,
    longtimestamp,
    https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegion.RowLockacquiredRowLocks)
     throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
    @@ -563,7 +563,7 @@ extends 
     
     buildWALEdits
    -publichttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListPairNonceKey,WALEditbuildWALEdits(MiniBatchOperationInProgressMutation
     ;miniBatchOp)
    

    [20/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
    index b6e7636..592c2cc 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
    @@ -356,3901 +356,3924 @@
 348  public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
 349    ModifyTableResponse response = executeCallable(
 350      new MasterCallable<ModifyTableResponse>(getConnection(), getRpcControllerFactory()) {
    -351@Override
    -352protected ModifyTableResponse 
    rpcCall() throws Exception {
    -353  
    setPriority(td.getTableName());
    -354  ModifyTableRequest request = 
    RequestConverter.buildModifyTableRequest(
    -355td.getTableName(), td, 
    ng.getNonceGroup(), ng.newNonce());
    -356  return 
    master.modifyTable(getRpcController(), request);
    -357}
    -358  });
    -359return new ModifyTableFuture(this, 
    td.getTableName(), response);
    -360  }
    -361
    -362  @Override
-363  public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException {
    -364return executeCallable(new 
    MasterCallableListTableDescriptor(getConnection(),
    -365getRpcControllerFactory()) {
    -366  @Override
    -367  protected 
    ListTableDescriptor rpcCall() throws Exception {
    -368return 
    master.listTableDescriptorsByNamespace(getRpcController(),
    -369
    ListTableDescriptorsByNamespaceRequest.newBuilder()
    -370  
    .setNamespaceName(Bytes.toString(name)).build())
    -371.getTableSchemaList()
    -372.stream()
    -373
    .map(ProtobufUtil::toTableDescriptor)
    -374
    .collect(Collectors.toList());
    -375  }
    -376});
    -377  }
    -378
    -379  @Override
-380  public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException {
    -381return executeCallable(new 
    MasterCallableListTableDescriptor(getConnection(),
    -382getRpcControllerFactory()) {
    -383  @Override
    -384  protected 
    ListTableDescriptor rpcCall() throws Exception {
    -385GetTableDescriptorsRequest req 
    =
    -386
    RequestConverter.buildGetTableDescriptorsRequest(tableNames);
    -387  return 
    ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
    -388  req));
    -389  }
    -390});
    -391  }
    -392
    -393  @Override
    -394  public ListRegionInfo 
    getRegions(final ServerName sn) throws IOException {
    -395AdminService.BlockingInterface admin 
    = this.connection.getAdmin(sn);
    -396// TODO: There is no timeout on this 
    controller. Set one!
    -397HBaseRpcController controller = 
    rpcControllerFactory.newController();
    -398return 
    ProtobufUtil.getOnlineRegions(controller, admin);
    -399  }
    -400
    -401  @Override
    -402  public ListRegionInfo 
    getRegions(TableName tableName) throws IOException {
    -403if 
    (TableName.isMetaTableName(tableName)) {
    -404  return 
    Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
    -405} else {
    -406  return 
    MetaTableAccessor.getTableRegions(connection, tableName, true);
    -407}
    -408  }
    -409
    -410  private static class 
    AbortProcedureFuture extends ProcedureFutureBoolean {
    -411private boolean isAbortInProgress;
    -412
    -413public AbortProcedureFuture(
    -414final HBaseAdmin admin,
    -415final Long procId,
    -416final Boolean abortProcResponse) 
    {
    -417  super(admin, procId);
    -418  this.isAbortInProgress = 
    abortProcResponse;
    -419}
    -420
    -421@Override
    -422public Boolean get(long timeout, 
    TimeUnit unit)
    -423throws InterruptedException, 
    ExecutionException, TimeoutException {
    -424  if (!this.isAbortInProgress) {
    -425return false;
    -426  }
    -427  super.get(timeout, unit);
    -428  return true;
    -429}
    -430  }
    -431
    -432  /** @return Connection used by this 
    object. */
    -433  @Override
    -434  public Connection getConnection() {
    -435return connection;
    -436  }
    -437
    -438  @Override
    -439  public boolean tableExists(final 
    TableName tableName) throws IOException {
    -440return executeCallable(new 
    RpcRetryingCallableBoolean() {
    -441  @Override
    -442  protected Boolean rpcCall(int 
    callTimeout) throws Exception {
    -443return 
    MetaTableAccessor.tableExists(connection, tableName);
    -444  }
    -445});
    -446  }
    -447
    -448  @Override
    -449  public HTableDescriptor[] listTables() 
    throws IOException {
    -450return listTables((Pattern)null, 
    false);
    -451  }
    -452
    -453  @Override
    -454  public HTableDescriptor[] 
    listTables(Pattern pattern) throws IOException {
    -455return 
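
    The public Admin surface over these internals can be exercised with a short hedged sketch like
    the following (table name 't1' is a placeholder; assumes Admin#getDescriptor(TableName) from the
    HBase 2.x client API):

        import org.apache.hadoop.hbase.HBaseConfiguration;
        import org.apache.hadoop.hbase.TableName;
        import org.apache.hadoop.hbase.client.Admin;
        import org.apache.hadoop.hbase.client.Connection;
        import org.apache.hadoop.hbase.client.ConnectionFactory;
        import org.apache.hadoop.hbase.client.TableDescriptor;

        public class AdminSketch {
          public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
              TableName tn = TableName.valueOf("t1");
              if (admin.tableExists(tn)) {          // backed by a meta lookup, as in the code above
                TableDescriptor td = admin.getDescriptor(tn);
                admin.modifyTableAsync(td).get();   // returns a Future<Void>; get() waits for the procedure
              }
            }
          }
        }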

    [20/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
    index fea2b5a..c7a6cc4 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
    @@ -1354,816 +1354,824 @@
 1346   */
 1347  public static void putsToMetaTable(final Connection connection, final List<Put> ps)
 1348      throws IOException {
-1349    try (Table t = getMetaHTable(connection)) {
-1350      debugLogMutations(ps);
-1351      t.put(ps);
-1352    }
-1353  }
-1354
-1355  /**
-1356   * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
-1357   * @param connection connection we're using
-1358   * @param d Delete to add to hbase:meta
-1359   */
-1360  private static void deleteFromMetaTable(final Connection connection, final Delete d)
-1361      throws IOException {
-1362    List<Delete> dels = new ArrayList<>(1);
-1363    dels.add(d);
-1364    deleteFromMetaTable(connection, dels);
-1365  }
-1366
-1367  /**
-1368   * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
-1369   * @param connection connection we're using
-1370   * @param deletes Deletes to add to hbase:meta  This list should support #remove.
-1371   */
-1372  private static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
-1373      throws IOException {
-1374    try (Table t = getMetaHTable(connection)) {
-1375      debugLogMutations(deletes);
-1376      t.delete(deletes);
-1377    }
-1378  }
-1379
-1380  /**
-1381   * Deletes some replica columns corresponding to replicas for the passed rows
-1382   * @param metaRows rows in hbase:meta
-1383   * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
-1384   * @param numReplicasToRemove how many replicas to remove
-1385   * @param connection connection we're using to access meta table
-1386   */
-1387  public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
-1388      int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
-1389      throws IOException {
-1390    int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
-1391    for (byte[] row : metaRows) {
-1392      long now = EnvironmentEdgeManager.currentTime();
-1393      Delete deleteReplicaLocations = new Delete(row);
-1394      for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
-1395        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1396          getServerColumn(i), now);
-1397        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1398          getSeqNumColumn(i), now);
-1399        deleteReplicaLocations.addColumns(getCatalogFamily(),
-1400          getStartCodeColumn(i), now);
-1401      }
-1402      deleteFromMetaTable(connection, deleteReplicaLocations);
-1403    }
-1404  }
-1405
-1406  /**
-1407   * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
-1408   * @param connection connection we're using
-1409   * @param mutations Puts and Deletes to execute on hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void mutateMetaTable(final Connection connection,
-1413      final List<Mutation> mutations)
-1414    throws IOException {
-1415    Table t = getMetaHTable(connection);
-1416    try {
-1417      debugLogMutations(mutations);
-1418      t.batch(mutations, null);
-1419    } catch (InterruptedException e) {
-1420      InterruptedIOException ie = new InterruptedIOException(e.getMessage());
-1421      ie.initCause(e);
-1422      throw ie;
-1423    } finally {
-1424      t.close();
-1425    }
-1426  }
-1427
-1428  private static void addRegionStateToPut(Put put, RegionState.State state) throws IOException {
-1429    put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-1430        .setRow(put.getRow())
-1431        .setFamily(HConstants.CATALOG_FAMILY)
-1432        .setQualifier(getRegionStateColumn())
-1433        .setTimestamp(put.getTimestamp())
-1434        .setType(Cell.Type.Put)
-1435        .setValue(Bytes.toBytes(state.name()))
-1436        .build());
-1437  }
-1438
-1439  /**
-1440   * Adds daughter region infos to hbase:meta row for the specified region. Note that this does not
-1441   * add its daughter's as different rows, but adds information about the daughters in the same row
-1442   * as the parent. Use
-1443   * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)}
-1444   * if you want to do that.
-1445   * @param connection connection we're using
-1446   * @param regionInfo RegionInfo of parent
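
    A minimal sketch of driving the public mutateMetaTable entry point shown above (internal API,
    for illustration only; the rows and the qualifier/value are made up):

        import java.util.Arrays;
        import org.apache.hadoop.hbase.HConstants;
        import org.apache.hadoop.hbase.MetaTableAccessor;
        import org.apache.hadoop.hbase.client.Connection;
        import org.apache.hadoop.hbase.client.Delete;
        import org.apache.hadoop.hbase.client.Mutation;
        import org.apache.hadoop.hbase.client.Put;
        import org.apache.hadoop.hbase.util.Bytes;

        public class MetaMutationSketch {
          // Submits one Put and one Delete against hbase:meta in a single batch,
          // mirroring the Table.batch() path inside mutateMetaTable above.
          static void touchMeta(Connection connection, byte[] rowToSet, byte[] rowToClear)
              throws Exception {
            Put put = new Put(rowToSet).addColumn(HConstants.CATALOG_FAMILY,
                Bytes.toBytes("sn"), Bytes.toBytes("example"));
            Delete delete = new Delete(rowToClear);
            MetaTableAccessor.mutateMetaTable(connection, Arrays.<Mutation>asList(put, delete));
          }
        }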

    [20/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    index 42d0637..eb16038 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    @@ -80,21 +80,21 @@
 072import org.apache.hadoop.hbase.PleaseHoldException;
 073import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 074import org.apache.hadoop.hbase.ScheduledChore;
-075import org.apache.hadoop.hbase.ServerMetricsBuilder;
-076import org.apache.hadoop.hbase.ServerName;
-077import org.apache.hadoop.hbase.TableDescriptors;
-078import org.apache.hadoop.hbase.TableName;
-079import org.apache.hadoop.hbase.TableNotDisabledException;
-080import org.apache.hadoop.hbase.TableNotFoundException;
-081import org.apache.hadoop.hbase.UnknownRegionException;
-082import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-083import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-084import org.apache.hadoop.hbase.client.MasterSwitchType;
-085import org.apache.hadoop.hbase.client.RegionInfo;
-086import org.apache.hadoop.hbase.client.Result;
-087import org.apache.hadoop.hbase.client.TableDescriptor;
-088import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-089import org.apache.hadoop.hbase.client.TableState;
+075import org.apache.hadoop.hbase.ServerName;
+076import org.apache.hadoop.hbase.TableDescriptors;
+077import org.apache.hadoop.hbase.TableName;
+078import org.apache.hadoop.hbase.TableNotDisabledException;
+079import org.apache.hadoop.hbase.TableNotFoundException;
+080import org.apache.hadoop.hbase.UnknownRegionException;
+081import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+082import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+083import org.apache.hadoop.hbase.client.MasterSwitchType;
+084import org.apache.hadoop.hbase.client.RegionInfo;
+085import org.apache.hadoop.hbase.client.Result;
+086import org.apache.hadoop.hbase.client.TableDescriptor;
+087import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+088import org.apache.hadoop.hbase.client.TableState;
+089import org.apache.hadoop.hbase.client.VersionInfoUtil;
 090import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 091import org.apache.hadoop.hbase.exceptions.DeserializationException;
 092import org.apache.hadoop.hbase.exceptions.MergeRegionException;
    @@ -220,3477 +220,3481 @@
     212
 213import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 214import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-215import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-216import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-217import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-218import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-219import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-220
-221/**
-222 * HMaster is the "master server" for HBase. An HBase cluster has one active
-223 * master.  If many masters are started, all compete.  Whichever wins goes on to
-224 * run the cluster.  All others park themselves in their constructor until
-225 * master or cluster shutdown or until the active master loses its lease in
-226 * zookeeper.  Thereafter, all running master jostle to take over master role.
-227 *
-228 * <p>The Master can be asked shutdown the cluster. See {@link #shutdown()}.  In
-229 * this case it will tell all regionservers to go down and then wait on them
-230 * all reporting in that they are down.  This master will then shut itself down.
-231 *
-232 * <p>You can also shutdown just this master.  Call {@link #stopMaster()}.
-233 *
-234 * @see org.apache.zookeeper.Watcher
-235 */
-236@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-237@SuppressWarnings("deprecation")
-238public class HMaster extends HRegionServer implements MasterServices {
-239  private static Logger LOG = LoggerFactory.getLogger(HMaster.class.getName());
-240
-241  /**
-242   * Protection against zombie master. Started once Master accepts active responsibility and
-243   * starts taking over responsibilities. Allows a finite time window before giving up ownership.
-244   */
-245  private static class InitializationMonitor extends HasThread {
-246    /** The amount of time in milliseconds to sleep before checking initialization status. */
-247    public static final String TIMEOUT_KEY = "hbase.master.initializationmonitor.timeout";
-248    public static final long TIMEOUT_DEFAULT =
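
    The zombie-protection window above is driven by configuration; a hedged sketch of overriding it
    programmatically (the 15-minute value is chosen purely for illustration):

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.hbase.HBaseConfiguration;

        public class MonitorTimeoutSketch {
          public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Same key as InitializationMonitor.TIMEOUT_KEY above, in milliseconds.
            conf.setLong("hbase.master.initializationmonitor.timeout", 15 * 60 * 1000L);
            System.out.println(conf.getLong("hbase.master.initializationmonitor.timeout", -1));
          }
        }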

    [20/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
    deleted file mode 100644
    index 58257aa..000
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/FailedRemoteDispatchException.html
+++ /dev/null
@@ -1,105 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-
-
-Source code
-
-
-
-
-001/**
-002 * Licensed to the Apache Software Foundation (ASF) under one
-003 * or more contributor license agreements.  See the NOTICE file
-004 * distributed with this work for additional information
-005 * regarding copyright ownership.  The ASF licenses this file
-006 * to you under the Apache License, Version 2.0 (the
-007 * "License"); you may not use this file except in compliance
-008 * with the License.  You may obtain a copy of the License at
-009 *
-010 *     http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or agreed to in writing, software
-013 * distributed under the License is distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-015 * See the License for the specific language governing permissions and
-016 * limitations under the License.
-017 */
-018package org.apache.hadoop.hbase.master.assignment;
-019
-020import org.apache.hadoop.hbase.HBaseIOException;
-021import org.apache.yetus.audience.InterfaceAudience;
-022
-023/**
-024 * Used internally signaling failed queue of a remote procedure
-025 * operation.
-026 */
-027@SuppressWarnings("serial")
-028@InterfaceAudience.Private
-029public class FailedRemoteDispatchException extends HBaseIOException {
-030  public FailedRemoteDispatchException(String msg) {
-031    super(msg);
-032  }
-033}
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
    index 6b7e383..09fe96e 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
    @@ -87,99 +87,102 @@
 079        try {
 080          preflightChecks(env, true);
 081          checkOnline(env, this.plan.getRegionInfo());
-082        } catch (HBaseIOException e) {
-083          LOG.warn(this.toString() + " FAILED because " + e.toString());
-084          return Flow.NO_MORE_STATE;
-085        }
-086        break;
-087      case MOVE_REGION_UNASSIGN:
-088        addChildProcedure(new UnassignProcedure(plan.getRegionInfo(), plan.getSource(),
-089            plan.getDestination(), true));
-090        setNextState(MoveRegionState.MOVE_REGION_ASSIGN);
-091        break;
-092      case MOVE_REGION_ASSIGN:
-093        AssignProcedure assignProcedure = plan.getDestination() == null ?
-094            new AssignProcedure(plan.getRegionInfo()):
-095            new AssignProcedure(plan.getRegionInfo(), plan.getDestination());
-096        addChildProcedure(assignProcedure);
-097        return Flow.NO_MORE_STATE;
-098      default:
-099        throw new UnsupportedOperationException("unhandled state=" + state);
-100    }
-101    return Flow.HAS_MORE_STATE;
-102  }
-103
-104  @Override
-105  protected void rollbackState(final MasterProcedureEnv env, final MoveRegionState state)
-106      throws IOException {
-107    // no-op
-108  }
-109
-110  @Override
-111  public boolean abort(final MasterProcedureEnv env) {
-112    return false;
-113  }
-114
-115  @Override
-116  public void toStringClassDetails(final StringBuilder sb) {
-117    sb.append(getClass().getSimpleName());
-118    sb.append(" ");
-119    sb.append(plan);
-120  }
-121
-122  @Override
-123  protected MoveRegionState getInitialState() {
-124    return MoveRegionState.MOVE_REGION_UNASSIGN;
-125  }
-126
-127  @Override
-128  protected int getStateId(final MoveRegionState state) {
-129    return state.getNumber();
-130  }
-131
-132  @Override
-133  protected MoveRegionState getState(final int stateId) {
-134    return MoveRegionState.valueOf(stateId);
-135  }
-136
-137  @Override
-138  public TableName getTableName() {
-139    return plan.getRegionInfo().getTable();
-140  }
-141
-142  @Override
-143  public TableOperationType
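
    The unassign-then-assign flow above reads as a two-step state machine; here is a standalone
    sketch of the same control shape (names are illustrative, not the procedure framework API):

        public class MoveFlowSketch {
          enum State { UNASSIGN, ASSIGN, DONE }

          public static void main(String[] args) {
            State state = State.UNASSIGN;
            while (state != State.DONE) {
              switch (state) {
                case UNASSIGN:
                  System.out.println("queue unassign from source");
                  state = State.ASSIGN;   // mirrors setNextState(MOVE_REGION_ASSIGN)
                  break;
                case ASSIGN:
                  System.out.println("queue assign on destination");
                  state = State.DONE;     // mirrors returning Flow.NO_MORE_STATE
                  break;
                default:
                  throw new UnsupportedOperationException("unhandled state=" + state);
              }
            }
          }
        }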
    

    [20/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
    index 3da432b..d30fa8f 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
    @@ -928,7690 +928,7698 @@
 920      Collection<HStore> stores = this.stores.values();
 921      try {
 922        // update the stores that we are replaying
-923        stores.forEach(HStore::startReplayingFromWAL);
-924        // Recover any edits if available.
-925        maxSeqId = Math.max(maxSeqId,
-926          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-927        // Make sure mvcc is up to max.
-928        this.mvcc.advanceTo(maxSeqId);
-929      } finally {
-930        // update the stores that we are done replaying
-931        stores.forEach(HStore::stopReplayingFromWAL);
-932      }
-933    }
-934    this.lastReplayedOpenRegionSeqId = maxSeqId;
-935
-936    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-937    this.writestate.flushRequested = false;
-938    this.writestate.compacting.set(0);
-939
-940    if (this.writestate.writesEnabled) {
-941      // Remove temporary data left over from old regions
-942      status.setStatus("Cleaning up temporary data from old regions");
-943      fs.cleanupTempDir();
-944    }
-945
-946    if (this.writestate.writesEnabled) {
-947      status.setStatus("Cleaning up detritus from prior splits");
-948      // Get rid of any splits or merges that were lost in-progress.  Clean out
-949      // these directories here on open.  We may be opening a region that was
-950      // being split but we crashed in the middle of it all.
-951      fs.cleanupAnySplitDetritus();
-952      fs.cleanupMergesDir();
-953    }
-954
-955    // Initialize split policy
-956    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-957
-958    // Initialize flush policy
-959    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-960
-961    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-962    for (HStore store: stores.values()) {
-963      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-964    }
-965
-966    // Use maximum of log sequenceid or that which was found in stores
-967    // (particularly if no recovered edits, seqid will be -1).
-968    long maxSeqIdFromFile =
-969      WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
-970    long nextSeqId = Math.max(maxSeqId, maxSeqIdFromFile) + 1;
-971    if (writestate.writesEnabled) {
-972      WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), nextSeqId - 1);
-973    }
-974
-975    LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
-976
-977    // A region can be reopened if failed a split; reset flags
-978    this.closing.set(false);
-979    this.closed.set(false);
-980
-981    if (coprocessorHost != null) {
-982      status.setStatus("Running coprocessor post-open hooks");
-983      coprocessorHost.postOpen();
-984    }
+923        LOG.debug("replaying wal for " + this.getRegionInfo().getEncodedName());
+924        stores.forEach(HStore::startReplayingFromWAL);
+925        // Recover any edits if available.
+926        maxSeqId = Math.max(maxSeqId,
+927          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+928        // Make sure mvcc is up to max.
+929        this.mvcc.advanceTo(maxSeqId);
+930      } finally {
+931        LOG.debug("stopping wal replay for " + this.getRegionInfo().getEncodedName());
+932        // update the stores that we are done replaying
+933        stores.forEach(HStore::stopReplayingFromWAL);
+934      }
+935    }
+936    this.lastReplayedOpenRegionSeqId = maxSeqId;
+937
+938    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+939    this.writestate.flushRequested = false;
+940    this.writestate.compacting.set(0);
+941
+942    if (this.writestate.writesEnabled) {
+943      LOG.debug("Cleaning up temporary data for " + this.getRegionInfo().getEncodedName());
+944      // Remove temporary data left over from old regions
+945      status.setStatus("Cleaning up temporary data from old regions");
+946      fs.cleanupTempDir();
+947    }
+948
+949    if (this.writestate.writesEnabled) {
+950      status.setStatus("Cleaning up detritus from prior splits");
+951      // Get rid of any splits or merges that were lost in-progress.  Clean out
+952      // these directories here on
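
    The shape worth noting above is the try/finally bracket around WAL replay, so stores always
    leave replay mode even if edit recovery throws. A reduced sketch of that pattern (the Store
    type here is a stand-in, not the HBase HStore):

        import java.util.Arrays;
        import java.util.Collection;

        public class ReplayBracketSketch {
          static class Store {
            void startReplayingFromWAL() { System.out.println("start replay"); }
            void stopReplayingFromWAL()  { System.out.println("stop replay"); }
          }

          static void replay(Collection<Store> stores) {
            try {
              stores.forEach(Store::startReplayingFromWAL);
              // ... recover edits here; this step may throw ...
            } finally {
              // always runs, mirroring stores.forEach(HStore::stopReplayingFromWAL) above
              stores.forEach(Store::stopReplayingFromWAL);
            }
          }

          public static void main(String[] args) {
            replay(Arrays.asList(new Store(), new Store()));
          }
        }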

    [20/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.html
    index a588fcf..098dc5e 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.html
    @@ -51,197 +51,208 @@
 043 * target cluster is an HBase cluster.
 044 */
 045@InterfaceAudience.Private
-046@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MT_CORRECTNESS",
-047  justification="Thinks zkw needs to be synchronized access but should be fine as is.")
-048public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
-049  implements Abortable {
+046public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
+047  implements Abortable {
+048
+049  private static final Logger LOG = LoggerFactory.getLogger(HBaseReplicationEndpoint.class);
 050
-051  private static final Logger LOG = LoggerFactory.getLogger(HBaseReplicationEndpoint.class);
-052
-053  private ZKWatcher zkw = null; // FindBugs: MT_CORRECTNESS
-054
-055  private List<ServerName> regionServers = new ArrayList<>(0);
-056  private long lastRegionServerUpdate;
-057
-058  protected void disconnect() {
-059    if (zkw != null) {
-060      zkw.close();
-061    }
-062  }
-063
-064  /**
-065   * A private method used to re-establish a zookeeper session with a peer cluster.
-066   * @param ke
-067   */
-068  protected void reconnect(KeeperException ke) {
-069    if (ke instanceof ConnectionLossException || ke instanceof SessionExpiredException
-070        || ke instanceof AuthFailedException) {
-071      String clusterKey = ctx.getPeerConfig().getClusterKey();
-072      LOG.warn("Lost the ZooKeeper connection for peer " + clusterKey, ke);
-073      try {
-074        reloadZkWatcher();
-075      } catch (IOException io) {
-076        LOG.warn("Creation of ZookeeperWatcher failed for peer " + clusterKey, io);
-077      }
-078    }
-079  }
-080
-081  @Override
-082  public void start() {
-083    startAsync();
-084  }
-085
-086  @Override
-087  public void stop() {
-088    stopAsync();
-089  }
-090
-091  @Override
-092  protected void doStart() {
-093    try {
-094      reloadZkWatcher();
-095      notifyStarted();
-096    } catch (IOException e) {
-097      notifyFailed(e);
-098    }
-099  }
-100
-101  @Override
-102  protected void doStop() {
-103    disconnect();
-104    notifyStopped();
-105  }
-106
-107  @Override
-108  // Synchronize peer cluster connection attempts to avoid races and rate
-109  // limit connections when multiple replication sources try to connect to
-110  // the peer cluster. If the peer cluster is down we can get out of control
-111  // over time.
-112  public synchronized UUID getPeerUUID() {
-113    UUID peerUUID = null;
-114    try {
-115      peerUUID = ZKClusterId.getUUIDForCluster(zkw);
-116    } catch (KeeperException ke) {
-117      reconnect(ke);
-118    }
-119    return peerUUID;
-120  }
-121
-122  /**
-123   * Get the ZK connection to this peer
-124   * @return zk connection
-125   */
-126  protected ZKWatcher getZkw() {
-127    return zkw;
-128  }
-129
-130  /**
-131   * Closes the current ZKW (if not null) and creates a new one
-132   * @throws IOException If anything goes wrong connecting
-133   */
-134  void reloadZkWatcher() throws IOException {
-135    if (zkw != null) zkw.close();
-136    zkw = new ZKWatcher(ctx.getConfiguration(),
-137        "connection to cluster: " + ctx.getPeerId(), this);
-138    getZkw().registerListener(new PeerRegionServerListener(this));
-139  }
-140
-141  @Override
-142  public void abort(String why, Throwable e) {
-143    LOG.error("The HBaseReplicationEndpoint corresponding to peer " + ctx.getPeerId()
-144        + " was aborted for the following reason(s):" + why, e);
-145  }
-146
-147  @Override
-148  public boolean isAborted() {
-149    // Currently this is never "Aborted", we just log when the abort method is called.
-150    return false;
-151  }
-152
-153  /**
-154   * Get the list of all the region servers from the specified peer
-155   * @param zkw zk connection to use
-156   * @return list of region server addresses or an empty list if the slave is unavailable
-157   */
-158  protected static List<ServerName> fetchSlavesAddresses(ZKWatcher zkw)
-159      throws KeeperException {
-160    List<String> children = ZKUtil.listChildrenAndWatchForNewChildren(zkw, zkw.znodePaths.rsZNode);
-161    if (children == null) {
-162      return Collections.emptyList();
-163    }
-164    List<ServerName> addresses = new ArrayList<>(children.size());
-165    for (String child : children) {
-166
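
    Beyond the truncation, fetchSlavesAddresses finishes by turning each child znode name into a
    ServerName; a hedged sketch of that conversion using the public parser (the znode name format
    "host,port,startcode" is the standard rs znode convention):

        import java.util.ArrayList;
        import java.util.Arrays;
        import java.util.List;
        import org.apache.hadoop.hbase.ServerName;

        public class SlaveAddressSketch {
          // Child znodes under the peer's rs znode are named "host,port,startcode".
          static List<ServerName> toServerNames(List<String> children) {
            List<ServerName> addresses = new ArrayList<>(children.size());
            for (String child : children) {
              addresses.add(ServerName.parseServerName(child));
            }
            return addresses;
          }

          public static void main(String[] args) {
            System.out.println(toServerNames(Arrays.asList("rs1.example.com,16020,1538443634000")));
          }
        }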
    

    [20/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html 
    b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
    index df30a00..3d2b4ec 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
    @@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-static class PerformanceEvaluation.ScanTest
+static class PerformanceEvaluation.ScanTest
 extends PerformanceEvaluation.TableTest
 
 
@@ -264,7 +264,7 @@ extends 
 
 testScanner
-private org.apache.hadoop.hbase.client.ResultScanner testScanner
+private org.apache.hadoop.hbase.client.ResultScanner testScanner
 
 
 
@@ -281,7 +281,7 @@ extends 
 
 ScanTest
-ScanTest(org.apache.hadoop.hbase.client.Connection con,
+ScanTest(org.apache.hadoop.hbase.client.Connection con,
          PerformanceEvaluation.TestOptions options,
          PerformanceEvaluation.Status status)
 
@@ -300,7 +300,7 @@ extends 
 
 testTakedown
-void testTakedown()
+void testTakedown()
     throws IOException
 
 Overrides:
@@ -316,7 +316,7 @@ extends 
 
 testRow
-void testRow(int i)
+void testRow(int i)
     throws IOException
     
     Specified by:
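
    The ScanTest above times the basic client scan pattern; a minimal standalone version for
    reference (table 't1' is a placeholder; assumes a running cluster):

        import org.apache.hadoop.hbase.TableName;
        import org.apache.hadoop.hbase.client.Connection;
        import org.apache.hadoop.hbase.client.ConnectionFactory;
        import org.apache.hadoop.hbase.client.Result;
        import org.apache.hadoop.hbase.client.ResultScanner;
        import org.apache.hadoop.hbase.client.Scan;
        import org.apache.hadoop.hbase.client.Table;

        public class ScanSketch {
          public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Table table = conn.getTable(TableName.valueOf("t1"));
                 ResultScanner scanner = table.getScanner(new Scan())) {
              for (Result result : scanner) {
                System.out.println(result);   // one row per Result
              }
            }
          }
        }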
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
    index fe182c6..d72b414 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
    @@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-static class PerformanceEvaluation.SequentialReadTest
+static class PerformanceEvaluation.SequentialReadTest
 extends PerformanceEvaluation.TableTest
 
 
@@ -249,7 +249,7 @@ extends 
 
 SequentialReadTest
-SequentialReadTest(org.apache.hadoop.hbase.client.Connection con,
+SequentialReadTest(org.apache.hadoop.hbase.client.Connection con,
                    PerformanceEvaluation.TestOptions options,
                    PerformanceEvaluation.Status status)
 
@@ -268,7 +268,7 @@ extends 
 
 testRow
-void testRow(int i)
+void testRow(int i)
     throws IOException
     
     Specified by:
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
    index d5d599f..056a44e 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
    @@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-static class PerformanceEvaluation.SequentialWriteTest
+static class PerformanceEvaluation.SequentialWriteTest
 extends PerformanceEvaluation.BufferedMutatorTest
 
 
@@ -249,7 +249,7 @@ extends 
 
 SequentialWriteTest
-SequentialWriteTest(org.apache.hadoop.hbase.client.Connection con,
+SequentialWriteTest(org.apache.hadoop.hbase.client.Connection con,
                     PerformanceEvaluation.TestOptions options,
                     PerformanceEvaluation.Status status)
 
@@ -268,7 +268,7 @@ extends 
 
 testRow
-void testRow(int i)
+void testRow(int i)
     throws IOException
     
     Specified by:
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html 
    b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
    index 02fdcb3..a64ee82 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
    +++ 

    [20/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    index 4a879bb..7d27402 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    @@ -300,7 +300,7 @@
 292  private Map<String, com.google.protobuf.Service> coprocessorServiceHandlers = Maps.newHashMap();
 293
 294  // Track data size in all memstores
-295  private final MemStoreSizing memStoreSize = new MemStoreSizing();
+295  private final MemStoreSizing memStoreSizing = new ThreadSafeMemStoreSizing();
 296  private final RegionServicesForStores regionServicesForStores = new RegionServicesForStores(this);
 297
 298  // Debug possible data loss due to WAL off
@@ -1218,7389 +1218,7399 @@
 1210   * Increase the size of mem store in this region and the size of global mem
 1211   * store
 1212   */
-1213  public void incMemStoreSize(MemStoreSize memStoreSize) {
-1214    if (this.rsAccounting != null) {
-1215      rsAccounting.incGlobalMemStoreSize(memStoreSize);
-1216    }
-1217    long dataSize;
-1218    synchronized (this.memStoreSize) {
-1219      this.memStoreSize.incMemStoreSize(memStoreSize);
-1220      dataSize = this.memStoreSize.getDataSize();
-1221    }
-1222    checkNegativeMemStoreDataSize(dataSize, memStoreSize.getDataSize());
-1223  }
-1224
-1225  public void decrMemStoreSize(MemStoreSize memStoreSize) {
-1226    if (this.rsAccounting != null) {
-1227      rsAccounting.decGlobalMemStoreSize(memStoreSize);
-1228    }
-1229    long size;
-1230    synchronized (this.memStoreSize) {
-1231      this.memStoreSize.decMemStoreSize(memStoreSize);
-1232      size = this.memStoreSize.getDataSize();
+1213  void incMemStoreSize(MemStoreSize mss) {
+1214    incMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1215  }
+1216
+1217  void incMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1218    if (this.rsAccounting != null) {
+1219      rsAccounting.incGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1220    }
+1221    long dataSize =
+1222        this.memStoreSizing.incMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1223    checkNegativeMemStoreDataSize(dataSize, dataSizeDelta);
+1224  }
+1225
+1226  void decrMemStoreSize(MemStoreSize mss) {
+1227    decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1228  }
+1229
+1230  void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1231    if (this.rsAccounting != null) {
+1232      rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
 1233    }
-1234    checkNegativeMemStoreDataSize(size, -memStoreSize.getDataSize());
-1235  }
-1236
-1237  private void checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) {
-1238    // This is extremely bad if we make memStoreSize negative. Log as much info on the offending
-1239    // caller as possible. (memStoreSize might be a negative value already -- freeing memory)
-1240    if (memStoreDataSize < 0) {
-1241      LOG.error("Asked to modify this region's (" + this.toString()
-1242          + ") memStoreSize to a negative value which is incorrect. Current memStoreSize="
-1243          + (memStoreDataSize - delta) + ", delta=" + delta, new Exception());
-1244    }
-1245  }
-1246
-1247  @Override
-1248  public RegionInfo getRegionInfo() {
-1249    return this.fs.getRegionInfo();
-1250  }
-1251
-1252  /**
-1253   * @return Instance of {@link RegionServerServices} used by this HRegion.
-1254   * Can be null.
-1255   */
-1256  RegionServerServices getRegionServerServices() {
-1257    return this.rsServices;
-1258  }
-1259
-1260  @Override
-1261  public long getReadRequestsCount() {
-1262    return readRequestsCount.sum();
-1263  }
-1264
-1265  @Override
-1266  public long getFilteredReadRequestsCount() {
-1267    return filteredReadRequestsCount.sum();
-1268  }
-1269
-1270  @Override
-1271  public long getWriteRequestsCount() {
-1272    return writeRequestsCount.sum();
-1273  }
-1274
-1275  @Override
-1276  public long getMemStoreDataSize() {
-1277    return memStoreSize.getDataSize();
-1278  }
-1279
-1280  @Override
-1281  public long getMemStoreHeapSize() {
-1282    return memStoreSize.getHeapSize();
-1283  }
-1284
-1285  @Override
-1286  public long getMemStoreOffHeapSize() {
-1287    return memStoreSize.getOffHeapSize();
-1288  }
-1289
-1290  /** @return store services for this region, to access services required
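
    The change above swaps a synchronized block for a thread-safe sizing object; the essential
    trick is an atomic add that returns the new total, so the negative-size check needs no lock.
    A reduced sketch of that idea (field and method names are illustrative):

        import java.util.concurrent.atomic.AtomicLong;

        public class SizingSketch {
          private final AtomicLong dataSize = new AtomicLong();

          long incMemStoreSize(long dataSizeDelta) {
            // addAndGet is atomic: the returned total is consistent without synchronization
            long newSize = dataSize.addAndGet(dataSizeDelta);
            if (newSize < 0) {
              // HRegion logs an error here instead; throwing keeps the sketch small
              throw new IllegalStateException("memstore size went negative; delta=" + dataSizeDelta);
            }
            return newSize;
          }

          public static void main(String[] args) {
            SizingSketch s = new SizingSketch();
            s.incMemStoreSize(1024);
            s.incMemStoreSize(-512);
            System.out.println(s.incMemStoreSize(0)); // prints 512
          }
        }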

    [20/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    index 2510283..418c60c 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    @@ -77,77 +77,77 @@
     069import 
    org.apache.hadoop.hbase.client.RowMutations;
     070import 
    org.apache.hadoop.hbase.client.Scan;
     071import 
    org.apache.hadoop.hbase.client.Table;
    -072import 
    org.apache.hadoop.hbase.filter.BinaryComparator;
    -073import 
    org.apache.hadoop.hbase.filter.Filter;
    -074import 
    org.apache.hadoop.hbase.filter.FilterAllFilter;
    -075import 
    org.apache.hadoop.hbase.filter.FilterList;
    -076import 
    org.apache.hadoop.hbase.filter.PageFilter;
    -077import 
    org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    -078import 
    org.apache.hadoop.hbase.filter.WhileMatchFilter;
    -079import 
    org.apache.hadoop.hbase.io.compress.Compression;
    -080import 
    org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    -081import 
    org.apache.hadoop.hbase.io.hfile.RandomDistribution;
    -082import 
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    -083import 
    org.apache.hadoop.hbase.regionserver.BloomType;
    -084import 
    org.apache.hadoop.hbase.regionserver.CompactingMemStore;
    -085import 
    org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
    -086import 
    org.apache.hadoop.hbase.trace.SpanReceiverHost;
    -087import 
    org.apache.hadoop.hbase.trace.TraceUtil;
    -088import 
    org.apache.hadoop.hbase.util.ByteArrayHashKey;
    -089import 
    org.apache.hadoop.hbase.util.Bytes;
    -090import 
    org.apache.hadoop.hbase.util.Hash;
    -091import 
    org.apache.hadoop.hbase.util.MurmurHash;
    -092import 
    org.apache.hadoop.hbase.util.Pair;
    -093import 
    org.apache.hadoop.hbase.util.YammerHistogramUtils;
    -094import 
    org.apache.hadoop.io.LongWritable;
    -095import org.apache.hadoop.io.Text;
    -096import org.apache.hadoop.mapreduce.Job;
    -097import 
    org.apache.hadoop.mapreduce.Mapper;
    -098import 
    org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
    -099import 
    org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
    -100import 
    org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
    -101import org.apache.hadoop.util.Tool;
    -102import 
    org.apache.hadoop.util.ToolRunner;
    -103import 
    org.apache.htrace.core.ProbabilitySampler;
    -104import org.apache.htrace.core.Sampler;
    -105import 
    org.apache.htrace.core.TraceScope;
    -106import 
    org.apache.yetus.audience.InterfaceAudience;
    -107import org.slf4j.Logger;
    -108import org.slf4j.LoggerFactory;
    -109import 
    org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
    -110import 
    org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
    -111
-112/**
-113 * Script used evaluating HBase performance and scalability.  Runs a HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the
-121 * <a href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132  static final String RANDOM_READ = "randomRead";
-133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper MAPPER = new ObjectMapper();
-135  static {
-136    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = "TestTable";
-140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import org.apache.hadoop.hbase.filter.Filter;
+075import
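
    Being a Hadoop Tool, the evaluation is normally launched via ToolRunner; a hedged sketch
    (assumes the test-jar class and its Configuration-taking constructor; the argument list is
    illustrative, see --help for the real options):

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.hbase.HBaseConfiguration;
        import org.apache.hadoop.hbase.PerformanceEvaluation;
        import org.apache.hadoop.util.ToolRunner;

        public class PeLauncherSketch {
          public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Roughly equivalent to: hbase pe --nomapred randomRead 1
            int exit = ToolRunner.run(conf, new PerformanceEvaluation(conf),
                new String[] { "--nomapred", "randomRead", "1" });
            System.exit(exit);
          }
        }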
    

    [20/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
    index e1bc325..63e7421 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
    @@ -66,5125 +66,5224 @@
     058import 
    java.util.concurrent.TimeoutException;
     059import 
    java.util.concurrent.atomic.AtomicBoolean;
     060import 
    java.util.concurrent.atomic.AtomicInteger;
    -061import org.apache.commons.io.IOUtils;
    -062import 
    org.apache.commons.lang3.RandomStringUtils;
    -063import 
    org.apache.commons.lang3.StringUtils;
    -064import 
    org.apache.hadoop.conf.Configuration;
    -065import 
    org.apache.hadoop.conf.Configured;
    -066import 
    org.apache.hadoop.fs.FSDataOutputStream;
    -067import org.apache.hadoop.fs.FileStatus;
    -068import org.apache.hadoop.fs.FileSystem;
    -069import org.apache.hadoop.fs.Path;
    -070import 
    org.apache.hadoop.fs.permission.FsAction;
    -071import 
    org.apache.hadoop.fs.permission.FsPermission;
    -072import 
    org.apache.hadoop.hbase.Abortable;
    -073import org.apache.hadoop.hbase.Cell;
    -074import 
    org.apache.hadoop.hbase.CellUtil;
    -075import 
    org.apache.hadoop.hbase.ClusterMetrics;
    -076import 
    org.apache.hadoop.hbase.ClusterMetrics.Option;
    -077import 
    org.apache.hadoop.hbase.HBaseConfiguration;
    -078import 
    org.apache.hadoop.hbase.HBaseInterfaceAudience;
    -079import 
    org.apache.hadoop.hbase.HConstants;
    -080import 
    org.apache.hadoop.hbase.HRegionInfo;
    -081import 
    org.apache.hadoop.hbase.HRegionLocation;
    -082import 
    org.apache.hadoop.hbase.KeyValue;
    -083import 
    org.apache.hadoop.hbase.MasterNotRunningException;
    -084import 
    org.apache.hadoop.hbase.MetaTableAccessor;
    -085import 
    org.apache.hadoop.hbase.RegionLocations;
    -086import 
    org.apache.hadoop.hbase.ServerName;
    -087import 
    org.apache.hadoop.hbase.TableName;
    -088import 
    org.apache.hadoop.hbase.ZooKeeperConnectionException;
    -089import 
    org.apache.hadoop.hbase.client.Admin;
    -090import 
    org.apache.hadoop.hbase.client.ClusterConnection;
    -091import 
    org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    -092import 
    org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    -093import 
    org.apache.hadoop.hbase.client.Connection;
    -094import 
    org.apache.hadoop.hbase.client.ConnectionFactory;
    -095import 
    org.apache.hadoop.hbase.client.Delete;
    -096import 
    org.apache.hadoop.hbase.client.Get;
    -097import 
    org.apache.hadoop.hbase.client.Put;
    -098import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -099import 
    org.apache.hadoop.hbase.client.RegionInfoBuilder;
    -100import 
    org.apache.hadoop.hbase.client.RegionReplicaUtil;
    -101import 
    org.apache.hadoop.hbase.client.Result;
    -102import 
    org.apache.hadoop.hbase.client.RowMutations;
    -103import 
    org.apache.hadoop.hbase.client.Table;
    -104import 
    org.apache.hadoop.hbase.client.TableDescriptor;
    -105import 
    org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    -106import 
    org.apache.hadoop.hbase.client.TableState;
    -107import 
    org.apache.hadoop.hbase.io.FileLink;
    -108import 
    org.apache.hadoop.hbase.io.HFileLink;
    -109import 
    org.apache.hadoop.hbase.io.hfile.CacheConfig;
    -110import 
    org.apache.hadoop.hbase.io.hfile.HFile;
    -111import 
    org.apache.hadoop.hbase.log.HBaseMarkers;
    -112import 
    org.apache.hadoop.hbase.master.MasterFileSystem;
    -113import 
    org.apache.hadoop.hbase.master.RegionState;
    -114import 
    org.apache.hadoop.hbase.regionserver.HRegion;
    -115import 
    org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
    -116import 
    org.apache.hadoop.hbase.regionserver.StoreFileInfo;
    -117import 
    org.apache.hadoop.hbase.replication.ReplicationException;
    -118import 
    org.apache.hadoop.hbase.security.AccessDeniedException;
    -119import 
    org.apache.hadoop.hbase.security.UserProvider;
    -120import 
    org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
    -121import 
    org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
    -122import 
    org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
    -123import 
    org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
    -124import 
    org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
    -125import 
    org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
    -126import org.apache.hadoop.hbase.wal.WAL;
    -127import 
    org.apache.hadoop.hbase.wal.WALFactory;
    -128import 
    org.apache.hadoop.hbase.wal.WALSplitter;
    -129import 
    org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
    -130import 
    org.apache.hadoop.hbase.zookeeper.ZKUtil;
    -131import 
    org.apache.hadoop.hbase.zookeeper.ZKWatcher;
    -132import 
    org.apache.hadoop.hbase.zookeeper.ZNodePaths;
    -133import 
    org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
    -134import 
    org.apache.hadoop.ipc.RemoteException;
    -135import 
    org.apache.hadoop.security.UserGroupInformation;
    -136import 
    

    [20/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
    index 1c73421..783dc34 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
    @@ -31,115 +31,115 @@
     023import java.net.InetSocketAddress;
     024import java.net.UnknownHostException;
     025import java.security.PrivilegedAction;
    -026import java.util.List;
    -027import java.util.Map;
    -028import 
    java.util.concurrent.ExecutorService;
    -029import 
    java.util.concurrent.LinkedBlockingQueue;
    -030import 
    java.util.concurrent.SynchronousQueue;
    -031import 
    java.util.concurrent.ThreadPoolExecutor;
    -032import java.util.concurrent.TimeUnit;
    -033
    -034import 
    javax.security.auth.callback.Callback;
    -035import 
    javax.security.auth.callback.UnsupportedCallbackException;
-036import javax.security.sasl.AuthorizeCallback;
-037import javax.security.sasl.SaslServer;
-038
-039import org.apache.hadoop.conf.Configuration;
-040import org.apache.hadoop.conf.Configured;
-041import org.apache.hadoop.hbase.HBaseConfiguration;
-042import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-043import org.apache.hadoop.hbase.filter.ParseFilter;
-044import org.apache.hadoop.hbase.http.InfoServer;
-045import org.apache.hadoop.hbase.security.SaslUtil;
-046import org.apache.hadoop.hbase.security.SecurityUtil;
-047import org.apache.hadoop.hbase.security.UserProvider;
-048import org.apache.hadoop.hbase.thrift.CallQueue;
-049import org.apache.hadoop.hbase.thrift.THBaseThreadPoolExecutor;
-050import org.apache.hadoop.hbase.thrift.ThriftMetrics;
-051import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
-052import org.apache.hadoop.hbase.util.DNS;
-053import org.apache.hadoop.hbase.util.JvmPauseMonitor;
-054import org.apache.hadoop.hbase.util.Strings;
-055import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
-056import org.apache.hadoop.security.UserGroupInformation;
-057import org.apache.hadoop.util.Tool;
-058import org.apache.hadoop.util.ToolRunner;
-059import org.apache.thrift.TException;
-060import org.apache.thrift.TProcessor;
-061import org.apache.thrift.protocol.TBinaryProtocol;
-062import org.apache.thrift.protocol.TCompactProtocol;
-063import org.apache.thrift.protocol.TProtocol;
-064import org.apache.thrift.protocol.TProtocolFactory;
-065import org.apache.thrift.server.THsHaServer;
-066import org.apache.thrift.server.TNonblockingServer;
-067import org.apache.thrift.server.TServer;
-068import org.apache.thrift.server.TThreadPoolServer;
-069import org.apache.thrift.server.TThreadedSelectorServer;
-070import org.apache.thrift.transport.TFramedTransport;
-071import org.apache.thrift.transport.TNonblockingServerSocket;
-072import org.apache.thrift.transport.TNonblockingServerTransport;
-073import org.apache.thrift.transport.TSaslServerTransport;
-074import org.apache.thrift.transport.TServerSocket;
-075import org.apache.thrift.transport.TServerTransport;
-076import org.apache.thrift.transport.TTransportException;
-077import org.apache.thrift.transport.TTransportFactory;
-078import org.apache.yetus.audience.InterfaceAudience;
-079import org.slf4j.Logger;
-080import org.slf4j.LoggerFactory;
-081import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-082import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
-083import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser;
+026import java.util.Map;
+027import java.util.concurrent.ExecutorService;
+028import java.util.concurrent.LinkedBlockingQueue;
+029import java.util.concurrent.SynchronousQueue;
+030import java.util.concurrent.ThreadPoolExecutor;
+031import java.util.concurrent.TimeUnit;
+032
+033import javax.security.auth.callback.Callback;
+034import javax.security.auth.callback.UnsupportedCallbackException;
+035import javax.security.sasl.AuthorizeCallback;
+036import javax.security.sasl.SaslServer;
+037
+038import org.apache.hadoop.conf.Configuration;
+039import org.apache.hadoop.conf.Configured;
+040import org.apache.hadoop.hbase.HBaseConfiguration;
+041import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+042import org.apache.hadoop.hbase.filter.ParseFilter;
+043import org.apache.hadoop.hbase.http.InfoServer;
+044import org.apache.hadoop.hbase.security.SaslUtil;
+045import org.apache.hadoop.hbase.security.SecurityUtil;
+046import org.apache.hadoop.hbase.security.UserProvider;
+047import org.apache.hadoop.hbase.thrift.CallQueue;
+048import org.apache.hadoop.hbase.thrift.THBaseThreadPoolExecutor;
+049import org.apache.hadoop.hbase.thrift.ThriftMetrics;
+050import 
    

    [20/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManager.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManager.html
index d0d6f47..45e4c6c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManager.html
@@ -40,499 +40,505 @@
 032import org.apache.hadoop.hbase.HConstants;
 033import org.apache.hadoop.hbase.HTableDescriptor;
 034import org.apache.hadoop.hbase.TableName;
-035import org.apache.hadoop.hbase.backup.BackupInfo;
-036import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-037import org.apache.hadoop.hbase.backup.BackupObserver;
-038import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
-039import org.apache.hadoop.hbase.backup.BackupType;
-040import org.apache.hadoop.hbase.backup.HBackupFileSystem;
-041import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
-042import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
-043import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
-044import org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager;
-045import org.apache.hadoop.hbase.client.Admin;
-046import org.apache.hadoop.hbase.client.Connection;
-047import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-048import org.apache.hadoop.hbase.procedure.ProcedureManagerHost;
-049import org.apache.hadoop.hbase.util.Pair;
-050import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-051import org.apache.yetus.audience.InterfaceAudience;
-052import org.slf4j.Logger;
-053import org.slf4j.LoggerFactory;
-054
-055/**
-056 * Handles backup requests, creates backup info records in backup system table to keep track of
-057 * backup sessions, dispatches backup request.
-058 */
-059@InterfaceAudience.Private
-060public class BackupManager implements Closeable {
-061  // in seconds
-062  public final static String BACKUP_EXCLUSIVE_OPERATION_TIMEOUT_SECONDS_KEY =
-063      "hbase.backup.exclusive.op.timeout.seconds";
-064  // In seconds
-065  private final static int DEFAULT_BACKUP_EXCLUSIVE_OPERATION_TIMEOUT = 3600;
-066  private static final Logger LOG = LoggerFactory.getLogger(BackupManager.class);
-067
-068  protected Configuration conf = null;
-069  protected BackupInfo backupInfo = null;
-070  protected BackupSystemTable systemTable;
-071  protected final Connection conn;
-072
-073  /**
-074   * Backup manager constructor.
-075   * @param conn connection
-076   * @param conf configuration
-077   * @throws IOException exception
-078   */
-079  public BackupManager(Connection conn, Configuration conf) throws IOException {
-080    if (!conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY,
-081        BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)) {
-082      throw new BackupException("HBase backup is not enabled. Check your "
-083          + BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting.");
-084    }
-085    this.conf = conf;
-086    this.conn = conn;
-087    this.systemTable = new BackupSystemTable(conn);
-088  }
-089
-090  /**
-091   * Returns backup info
-092   */
-093  protected BackupInfo getBackupInfo() {
-094    return backupInfo;
-095  }
-096
-097  /**
-098   * This method modifies the master's configuration in order to inject backup-related features
-099   * (TESTs only)
-100   * @param conf configuration
-101   */
-102  @VisibleForTesting
-103  public static void decorateMasterConfiguration(Configuration conf) {
-104    if (!isBackupEnabled(conf)) {
-105      return;
-106    }
-107    // Add WAL archive cleaner plug-in
-108    String plugins = conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
-109    String cleanerClass = BackupLogCleaner.class.getCanonicalName();
-110    if (!plugins.contains(cleanerClass)) {
-111      conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass);
-112    }
-113
-114    String classes = conf.get(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY);
-115    String masterProcedureClass = LogRollMasterProcedureManager.class.getName();
-116    if (classes == null) {
-117      conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, masterProcedureClass);
-118    } else if (!classes.contains(masterProcedureClass)) {
-119      conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY,
-120          classes + "," + masterProcedureClass);
-121    }
-122
-123    if (LOG.isDebugEnabled()) {
-124      LOG.debug("Added log cleaner: " + cleanerClass + "\n" + "Added master procedure manager: "
-125          + masterProcedureClass);
-126    }
-127  }
-128
-129  /**
-130   * This method modifies the Region Server configuration in order to 
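For readers following the BackupManager source above: decorateMasterConfiguration boils down to appending the backup log cleaner to the master's cleaner plug-in list. A minimal sketch under stock Configuration semantics (the class-name strings mirror the diff; the empty-default handling is an assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class DecorateConfSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    String key = "hbase.master.logcleaner.plugins"; // HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS
    String cleaner = "org.apache.hadoop.hbase.backup.master.BackupLogCleaner";
    String plugins = conf.get(key, "");
    if (!plugins.contains(cleaner)) {
      // Append rather than overwrite so any existing cleaners keep running.
      conf.set(key, plugins.isEmpty() ? cleaner : plugins + "," + cleaner);
    }
    System.out.println(conf.get(key));
  }
}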

    [20/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/index-all.html
    --
    diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
    index 99ed6af..d15182d 100644
    --- a/devapidocs/index-all.html
    +++ b/devapidocs/index-all.html
    @@ -2826,6 +2826,8 @@
 
 ALL_FLAG_BITS - Static variable in class org.apache.hadoop.hbase.regionserver.ServerNonceManager.OperationContext
 
+ALL_TIME - Static variable in class org.apache.hadoop.hbase.io.TimeRange
+
 ALL_VERSIONS - Static variable in class org.apache.hadoop.hbase.HConstants
 
 Define for 'return-all-versions'.
@@ -2948,6 +2950,8 @@
 
 allTime - Variable in class org.apache.hadoop.hbase.io.TimeRange
 
+allTime() - Static method in class org.apache.hadoop.hbase.io.TimeRange
+
 alreadyRunning - Variable in class org.apache.hadoop.hbase.master.CatalogJanitor
 
 alternateBufferedMutatorClassName - Variable in class org.apache.hadoop.hbase.client.ConnectionImplementation
@@ -4210,6 +4214,8 @@
 
 AsyncTableResultScanner(AsyncTable<AdvancedScanResultConsumer>, Scan, long) - Constructor for class org.apache.hadoop.hbase.client.AsyncTableResultScanner
 
+at(long) - Static method in class org.apache.hadoop.hbase.io.TimeRange
+
 atCapacity() - Method in class org.apache.hadoop.hbase.util.compaction.ClusterCompactionQueues
 
 atHeadOfRingBufferEventHandlerAppend() - Method in class org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL
@@ -10050,9 +10056,14 @@
 Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
 
 
-checkAndMutate(byte[], byte[], byte[], CompareOperator, ByteArrayComparable, Mutation, boolean) - Method in class org.apache.hadoop.hbase.regionserver.HRegion
+checkAndMutate(byte[], byte[], byte[], CompareOperator, ByteArrayComparable, TimeRange, Mutation) - Method in class org.apache.hadoop.hbase.regionserver.HRegion
 
-checkAndMutate(byte[], byte[], byte[], CompareOperator, ByteArrayComparable, Mutation, boolean) - Method in interface org.apache.hadoop.hbase.regionserver.Region
+checkAndMutate(byte[], byte[], byte[], CompareOperator, ByteArrayComparable, Mutation) - Method in interface org.apache.hadoop.hbase.regionserver.Region
+
+Atomically checks if a row/family/qualifier value matches the expected value and if it does,
+ it performs the mutation.
+
+checkAndMutate(byte[], byte[], byte[], CompareOperator, ByteArrayComparable, TimeRange, Mutation) - Method in interface org.apache.hadoop.hbase.regionserver.Region
 
 Atomically checks if a row/family/qualifier value matches the expected value and if it does,
  it performs the mutation.
@@ -10166,14 +10177,19 @@
 Check is a server of same host and port already exists,
  if not, or the existed one got a smaller start code, record it.
 
-checkAndRowMutate(byte[], byte[], byte[], CompareOperator, ByteArrayComparable, RowMutations) - Method in class org.apache.hadoop.hbase.regionserver.HRegion
+checkAndRowMutate(byte[], byte[], byte[], CompareOperator, ByteArrayComparable, TimeRange, RowMutations) - Method in class org.apache.hadoop.hbase.regionserver.HRegion
 
 checkAndRowMutate(byte[], byte[], byte[], CompareOperator, ByteArrayComparable, RowMutations) - Method in interface org.apache.hadoop.hbase.regionserver.Region
 
 Atomically checks if a row/family/qualifier value matches the expected values and if it does,
  it performs the row mutations.
 
-checkAndRowMutate(HRegion, List<ClientProtos.Action>, CellScanner, byte[], byte[], byte[], CompareOperator, ByteArrayComparable, ClientProtos.RegionActionResult.Builder, ActivePolicyEnforcement) - Method in class org.apache.hadoop.hbase.regionserver.RSRpcServices
+checkAndRowMutate(byte[], byte[], byte[], CompareOperator, ByteArrayComparable, TimeRange, RowMutations) - Method in interface org.apache.hadoop.hbase.regionserver.Region
+
+Atomically checks if a row/family/qualifier value matches the expected values and if it does,
+ it performs the row mutations.
+
+checkAndRowMutate(HRegion, List<ClientProtos.Action>, CellScanner, byte[], byte[], byte[], CompareOperator, ByteArrayComparable, TimeRange, ClientProtos.RegionActionResult.Builder, ActivePolicyEnforcement) - Method in class org.apache.hadoop.hbase.regionserver.RSRpcServices
 
 Mutate a list of rows atomically.
 
@@ -2,17 +25571,17 @@
 
 doCall() - Method in class org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller
 
-doCheckAndDelete(byte[], byte[], byte[], String, byte[], Delete) - Method in class org.apache.hadoop.hbase.client.HTable
+doCheckAndDelete(byte[], byte[], byte[], String, byte[], TimeRange, Delete) - Method in class org.apache.hadoop.hbase.client.HTable
 
 doCheckAndDelete(byte[], byte[], byte[], byte[], Delete) - Method in class org.apache.hadoop.hbase.rest.client.RemoteHTable
 
-doCheckAndMutate(byte[], byte[], byte[], String, byte[], RowMutations) - Method in class org.apache.hadoop.hbase.client.HTable
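The TimeRange entries above (ALL_TIME, allTime(), at(long)) replace direct field access with static factories. A minimal sketch, assuming the 2.x TimeRange API; the timestamp value is illustrative:

import org.apache.hadoop.hbase.io.TimeRange;

public class TimeRangeSketch {
  public static void main(String[] args) {
    TimeRange all = TimeRange.allTime();             // covers all timestamps
    TimeRange single = TimeRange.at(1500000000000L); // covers exactly one timestamp
    System.out.println(all.withinTimeRange(42L));               // true
    System.out.println(single.withinTimeRange(1500000000000L)); // true
    System.out.println(single.withinTimeRange(1500000000001L)); // false
  }
}

The same TimeRange then threads through the new checkAndMutate/checkAndRowMutate overloads indexed above, so a conditional mutation can be restricted to cells in a given timestamp window.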
    

    [20/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
index e4177f7..40bbdbf 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
@@ -495,7 +495,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
+AsyncMetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -504,7 +504,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
+MetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -941,9 +941,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 CompletableFuture<List<RegionInfo>>
-AsyncAdmin.getRegions(ServerName serverName)
-Get all the online regions on a region server.
-
+AsyncHBaseAdmin.getRegions(ServerName serverName)
 
 List<RegionInfo>
@@ -952,22 +950,22 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
-CompletableFuture<List<RegionInfo>>
-RawAsyncHBaseAdmin.getRegions(ServerName serverName)
-
-
 List<RegionInfo>
 HBaseAdmin.getRegions(ServerName sn)
 
+
+CompletableFuture<List<RegionInfo>>
+AsyncAdmin.getRegions(ServerName serverName)
+Get all the online regions on a region server.
+
+
 
 CompletableFuture<List<RegionInfo>>
-AsyncHBaseAdmin.getRegions(ServerName serverName)
+RawAsyncHBaseAdmin.getRegions(ServerName serverName)
 
 
 CompletableFuture<List<RegionInfo>>
-AsyncAdmin.getRegions(TableName tableName)
-Get the regions of a given table.
-
+AsyncHBaseAdmin.getRegions(TableName tableName)
 
 
 List<RegionInfo>
@@ -976,16 +974,18 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
-CompletableFuture<List<RegionInfo>>
-RawAsyncHBaseAdmin.getRegions(TableName tableName)
-
-
 List<RegionInfo>
 HBaseAdmin.getRegions(TableName tableName)
 
+
+CompletableFuture<List<RegionInfo>>
    

    [20/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
index 882bef5..2f94372 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Put.html
@@ -611,72 +611,72 @@ service.
 
 
 boolean
-Table.checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+HTable.checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
+Deprecated.
 
 
 
 boolean
-HTable.checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
-Deprecated.
+Table.checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+
 
 
 
 boolean
-Table.checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+HTable.checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Put put)
+Deprecated.
 
 
 
 boolean
-HTable.checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Put put)
-Deprecated.
+Table.checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Put put)
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+
 
 
 
 boolean
-Table.checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put)
-Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
-
+HTable.checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put)
+Deprecated.
 
 
 
 boolean
-HTable.checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put)
-Deprecated.
+Table.checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put)
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+
 
 
 
@@ -709,27 +709,27 @@ service.
 
 
 CompletableFuture<Void>
-AsyncTable.put(Put put)
-Puts some data to the table.
-
+RawAsyncTableImpl.put(Put put)
 
 
 void
+HTable.put(Put put)
+
+
+void
 Table.put(Put put)
 Puts some data in the table.
 
 
-
-CompletableFuture<Void>
-AsyncTableImpl.put(Put put)
-
 
 CompletableFuture<Void>
-RawAsyncTableImpl.put(Put put)
+AsyncTableImpl.put(Put put)
 
 
-void
-HTable.put(Put put)
+CompletableFuture<Void>
+AsyncTable.put(Put put)
+Puts some data to the table.
+
 
 
 boolean
@@ -748,19 +748,19 @@ service.
 
 
 CompletableFuture<Boolean>
-AsyncTable.CheckAndMutateBuilder.thenPut(Put put)
+RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)
 
 
 boolean
-Table.CheckAndMutateBuilder.thenPut(Put put)
+HTable.CheckAndMutateBuilderImpl.thenPut(Put put)
 
 
-CompletableFuture<Boolean>
-RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenPut(Put put)
+boolean
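The deprecation notes above all point at the builder-style replacement; a minimal sketch of the migration, assuming an hbase-site.xml on the classpath (table, family, qualifier and values are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      Put put = new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("new"));
      // Equivalent of the old checkAndPut(row, family, qualifier, value, put):
      boolean applied = table.checkAndMutate(Bytes.toBytes("row1"), Bytes.toBytes("f"))
          .qualifier(Bytes.toBytes("q"))
          .ifEquals(Bytes.toBytes("old"))
          .thenPut(put);
      System.out.println("applied=" + applied);
    }
  }
}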
    

    [20/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index ecf500c..0cd5a4e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -238,8355 +238,8368 @@
 230  public static final String HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize";
 231  public static final int DEFAULT_MAX_CELL_SIZE = 10485760;
 232
-233  public static final String HBASE_REGIONSERVER_MINIBATCH_SIZE =
-234      "hbase.regionserver.minibatch.size";
-235  public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
-236
-237  /**
-238   * This is the global default value for durability. All tables/mutations not
-239   * defining a durability or using USE_DEFAULT will default to this value.
-240   */
-241  private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL;
+233  /**
+234   * This is the global default value for durability. All tables/mutations not
+235   * defining a durability or using USE_DEFAULT will default to this value.
+236   */
+237  private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL;
+238
+239  public static final String HBASE_REGIONSERVER_MINIBATCH_SIZE =
+240      "hbase.regionserver.minibatch.size";
+241  public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
 242
-243  final AtomicBoolean closed = new AtomicBoolean(false);
-244
-245  /* Closing can take some time; use the closing flag if there is stuff we don't
-246   * want to do while in closing state; e.g. like offer this region up to the
-247   * master as a region to close if the carrying regionserver is overloaded.
-248   * Once set, it is never cleared.
-249   */
-250  final AtomicBoolean closing = new AtomicBoolean(false);
-251
-252  /**
-253   * The max sequence id of flushed data on this region. There is no edit in memory that is
-254   * less that this sequence id.
-255   */
-256  private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM;
-257
-258  /**
-259   * Record the sequence id of last flush operation. Can be in advance of
-260   * {@link #maxFlushedSeqId} when flushing a single column family. In this case,
-261   * {@link #maxFlushedSeqId} will be older than the oldest edit in memory.
-262   */
-263  private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM;
-264
-265  /**
-266   * The sequence id of the last replayed open region event from the primary region. This is used
-267   * to skip entries before this due to the possibility of replay edits coming out of order from
-268   * replication.
-269   */
-270  protected volatile long lastReplayedOpenRegionSeqId = -1L;
-271  protected volatile long lastReplayedCompactionSeqId = -1L;
-272
-273  ////////////////////////////////////////////////////////////////////////////
-274  // Members
-275  ////////////////////////////////////////////////////////////////////////////
-276
-277  // map from a locked row to the context for that lock including:
-278  // - CountDownLatch for threads waiting on that row
-279  // - the thread that owns the lock (allow reentrancy)
-280  // - reference count of (reentrant) locks held by the thread
-281  // - the row itself
-282  private final ConcurrentHashMap<HashedBytes, RowLockContext> lockedRows =
-283      new ConcurrentHashMap<>();
-284
-285  protected final Map<byte[], HStore> stores =
-286      new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR);
+243  public static final String WAL_HSYNC_CONF_KEY = "hbase.wal.hsync";
+244  public static final boolean DEFAULT_WAL_HSYNC = false;
+245
+246  final AtomicBoolean closed = new AtomicBoolean(false);
+247
+248  /* Closing can take some time; use the closing flag if there is stuff we don't
+249   * want to do while in closing state; e.g. like offer this region up to the
+250   * master as a region to close if the carrying regionserver is overloaded.
+251   * Once set, it is never cleared.
+252   */
+253  final AtomicBoolean closing = new AtomicBoolean(false);
+254
+255  /**
+256   * The max sequence id of flushed data on this region. There is no edit in memory that is
+257   * less that this sequence id.
+258   */
+259  private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM;
+260
+261  /**
+262   * Record the sequence id of last flush operation. Can be in advance of
+263   * {@link #maxFlushedSeqId} when flushing a single column family. In this case,
+264   * {@link #maxFlushedSeqId} will be older than the oldest edit in memory.
+265   */
+266  private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM;
+267
+268  /**
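The plus side of this hunk introduces the WAL hsync switch (WAL_HSYNC_CONF_KEY = "hbase.wal.hsync", default false). A minimal sketch of the programmatic form; setting the same key in hbase-site.xml is the usual route:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalHsyncSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Trade write latency for stronger durability on WAL appends.
    conf.setBoolean("hbase.wal.hsync", true);
    System.out.println(conf.getBoolean("hbase.wal.hsync", false));
  }
}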
    

    [20/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/client/DoNotRetryRegionException.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/DoNotRetryRegionException.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/DoNotRetryRegionException.html
index ffdfe64..725aedc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/DoNotRetryRegionException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/DoNotRetryRegionException.html
@@ -45,7 +45,10 @@
 037    super(s);
 038  }
 039
-040}
+040  public DoNotRetryRegionException(Throwable cause) {
+041    super(cause);
+042  }
+043}
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/exceptions/MergeRegionException.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/exceptions/MergeRegionException.html b/devapidocs/src-html/org/apache/hadoop/hbase/exceptions/MergeRegionException.html
index f9f6c2a..40cdee10 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/exceptions/MergeRegionException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/exceptions/MergeRegionException.html
@@ -50,7 +50,11 @@
 042  public MergeRegionException(String s) {
 043    super(s);
 044  }
-045}
+045
+046  public MergeRegionException(Throwable cause) {
+047    super(cause);
+048  }
+049}
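Both exception classes gain a cause-only constructor in this change, letting callers wrap a low-level failure without flattening it into a message string first. A minimal usage sketch (the failing step is simulated):

import java.io.IOException;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;

public class MergeExceptionSketch {
  static void doMergeStep() throws IOException {
    throw new IOException("simulated low-level failure");
  }

  public static void main(String[] args) throws Exception {
    try {
      doMergeStep();
    } catch (IOException e) {
      // New in this change: pass the cause directly; the stack trace is preserved.
      throw new MergeRegionException(e);
    }
  }
}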
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
index 74fbf67..33418d0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
@@ -27,287 +27,296 @@
 019package org.apache.hadoop.hbase.io.hfile.bucket;
 020
 021import java.io.File;
-022import java.io.FileNotFoundException;
-023import java.io.IOException;
-024import java.io.RandomAccessFile;
-025import java.nio.ByteBuffer;
-026import java.nio.channels.ClosedChannelException;
-027import java.nio.channels.FileChannel;
-028import java.util.Arrays;
-029import org.apache.hadoop.hbase.io.hfile.Cacheable;
-030import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-031import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-032import org.apache.hadoop.hbase.nio.ByteBuff;
-033import org.apache.hadoop.hbase.nio.SingleByteBuff;
-034import org.apache.hadoop.util.StringUtils;
-035import org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038
-039import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-040import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-041
-042/**
-043 * IO engine that stores data to a file on the local file system.
-044 */
-045@InterfaceAudience.Private
-046public class FileIOEngine implements IOEngine {
-047  private static final Logger LOG = LoggerFactory.getLogger(FileIOEngine.class);
-048  public static final String FILE_DELIMITER = ",";
-049  private final String[] filePaths;
-050  private final FileChannel[] fileChannels;
-051  private final RandomAccessFile[] rafs;
-052
-053  private final long sizePerFile;
-054  private final long capacity;
-055
-056  private FileReadAccessor readAccessor = new FileReadAccessor();
-057  private FileWriteAccessor writeAccessor = new FileWriteAccessor();
-058
-059  public FileIOEngine(long capacity, boolean maintainPersistence, String... filePaths)
-060      throws IOException {
-061    this.sizePerFile = capacity / filePaths.length;
-062    this.capacity = this.sizePerFile * filePaths.length;
-063    this.filePaths = filePaths;
-064    this.fileChannels = new FileChannel[filePaths.length];
-065    if (!maintainPersistence) {
-066      for (String filePath : filePaths) {
-067        File file = new File(filePath);
-068        if (file.exists()) {
-069          if (LOG.isDebugEnabled()) {
-070            LOG.debug("File " + filePath + " already exists. Deleting!!");
-071          }
-072          file.delete();
-073          // If deletion fails still we can manage with the writes
-074        }
-075      }
-076    }
-077    this.rafs = new RandomAccessFile[filePaths.length];
-078    for (int i = 0; i < filePaths.length; i++) {
-079      String filePath = filePaths[i];
-080      try {
-081        rafs[i] = new RandomAccessFile(filePath, "rw");
-082        long totalSpace = new File(filePath).getTotalSpace();

    [20/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 00fdac8..ee05e07 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = "3.0.0-SNAPSHOT";
-011  public static final String revision = "31da4d0bce69b3a47066a5df675756087ce4dc60";
+011  public static final String revision = "22f4def942f8a3367d0ca6598317e9b9a7d0cfcd";
 012  public static final String user = "jenkins";
-013  public static final String date = "Thu Mar 15 14:41:42 UTC 2018";
+013  public static final String date = "Fri Mar 16 14:41:20 UTC 2018";
 014  public static final String url = "git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "19a96f7db58e59c468ba7211c146ebe4";
+015  public static final String srcChecksum = "574e2041b3e629f67dd934e64524deb2";
 016}
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
    index 3445980..8425334 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
    @@ -1348,12 +1348,12 @@
     1340colBuilder.setTimeToLive(ttl);
     1341
     1342ColumnFamilyDescriptor 
    colSessionsDesc = colBuilder.build();
    -1343
    builder.addColumnFamily(colSessionsDesc);
    +1343
    builder.setColumnFamily(colSessionsDesc);
     1344
     1345colBuilder =
     1346
    ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
     1347colBuilder.setTimeToLive(ttl);
    -1348
    builder.addColumnFamily(colBuilder.build());
    +1348
    builder.setColumnFamily(colBuilder.build());
     1349return builder.build();
     1350  }
     1351
    @@ -1388,11 +1388,11 @@
     1380  
    BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT);
     1381colBuilder.setTimeToLive(ttl);
     1382ColumnFamilyDescriptor 
    colSessionsDesc = colBuilder.build();
    -1383
    builder.addColumnFamily(colSessionsDesc);
    +1383
    builder.setColumnFamily(colSessionsDesc);
     1384colBuilder =
     1385
    ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
     1386colBuilder.setTimeToLive(ttl);
    -1387
    builder.addColumnFamily(colBuilder.build());
    +1387
    builder.setColumnFamily(colBuilder.build());
     1388return builder.build();
     1389  }
     1390
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
    index 3445980..8425334 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.html
    @@ -1348,12 +1348,12 @@
     1340colBuilder.setTimeToLive(ttl);
     1341
     1342ColumnFamilyDescriptor 
    colSessionsDesc = colBuilder.build();
    -1343
    builder.addColumnFamily(colSessionsDesc);
    +1343
    builder.setColumnFamily(colSessionsDesc);
     1344
     1345colBuilder =
     1346
    ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
     1347colBuilder.setTimeToLive(ttl);
    -1348
    builder.addColumnFamily(colBuilder.build());
    +1348
    builder.setColumnFamily(colBuilder.build());
     1349return builder.build();
     1350  }
     1351
    @@ -1388,11 +1388,11 @@
     1380  
    BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT);
     1381colBuilder.setTimeToLive(ttl);
     1382ColumnFamilyDescriptor 
    colSessionsDesc = colBuilder.build();
    -1383
    builder.addColumnFamily(colSessionsDesc);
    +1383
    builder.setColumnFamily(colSessionsDesc);
     1384colBuilder =
     1385
    ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY);
     1386colBuilder.setTimeToLive(ttl);
    -1387
    builder.addColumnFamily(colBuilder.build());
    +1387
    builder.setColumnFamily(colBuilder.build());
     1388return builder.build();
     1389  }
     1390
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/RestoreTool.html
    

    [20/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html b/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
index 26333d9..cc6116d 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -125,7 +125,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.LimitedPrivate(value="Configuration")
 @InterfaceStability.Unstable
-public class SnapshotManager
+public class SnapshotManager
 extends MasterProcedureManager
 implements Stoppable
 This class manages the procedure of taking and restoring snapshots. There is only one
@@ -285,11 +285,19 @@ implements 
 
 void
+checkPermissions(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription desc, AccessChecker accessChecker, User user)
+Check for required permissions before executing the procedure.
+
+
+
+void
 checkSnapshotSupport()
 Throws an exception if snapshot operations (take a snapshot, restore, clone) are not supported.
 
 
-
+
 private void
 checkSnapshotSupport(org.apache.hadoop.conf.Configuration conf, MasterFileSystem mfs)
@@ -297,26 +305,26 @@ implements 
 
 
-
+
 private void
 cleanupCompletedRestoreInMap()
 Remove the procedures that are marked as finished
 
-
+
 private void
 cleanupSentinels()
 Removes "abandoned" snapshot/restore requests.
 
-
+
 private void
 cleanupSentinels(Map<TableName, SnapshotSentinel> sentinels)
 Remove the sentinels that are marked as finished and the completion time
  has exceeded the removal timeout.
 
-
+
 (package private) long
 cloneSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot, TableDescriptor tableDescriptor,
@@ -325,7 +333,7 @@ implements Clone the specified snapshot into a new table.
 
-
+
 private long
 cloneSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription reqSnapshot, TableName tableName,
@@ -336,114 +344,114 @@ implements Clone the specified snapshot.
 
-
+
 void
 deleteSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot)
 Delete the specified snapshot
 
-
+
 void
 execProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription desc)
 Execute a distributed procedure on cluster
 
-
+
 List<org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription>
 getCompletedSnapshots()
 Gets the list of all completed snapshots.
 
-
+
 private List<org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription>
 getCompletedSnapshots(org.apache.hadoop.fs.Path snapshotDir, boolean withCpCall)
 Gets the list of all completed snapshots.
 
-
+
 (package private) ProcedureCoordinator
 getCoordinator()
 
-
+
 KeyLocker<String>
 getLocks()
 
-
+
 String
 getProcedureSignature()
 Return the unique signature of the procedure.
 
-
+
 void
 initialize(MasterServices master, MetricsMaster metricsMaster)
 Initialize a globally barriered procedure for master.
 
-
+
 boolean
 

    [20/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
index 3616545..6209920 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
@@ -208,9 +208,9 @@ service.
 
 
 
-ResultScanner
-HTable.getScanner(byte[] family)
-The underlying HTable must not be closed.
+default ResultScanner
+AsyncTable.getScanner(byte[] family)
+Gets a scanner on the current table for the given family.
 
 
 
@@ -220,16 +220,16 @@ service.
 
 
 
-default ResultScanner
-AsyncTable.getScanner(byte[] family)
-Gets a scanner on the current table for the given family.
+ResultScanner
+HTable.getScanner(byte[] family)
+The underlying HTable must not be closed.
 
 
 
-ResultScanner
-HTable.getScanner(byte[] family, byte[] qualifier)
-The underlying HTable must not be closed.
+default ResultScanner
+AsyncTable.getScanner(byte[] family, byte[] qualifier)
+Gets a scanner on the current table for the given family and qualifier.
 
 
 
@@ -240,37 +240,37 @@ service.
 
 
 
-default ResultScanner
-AsyncTable.getScanner(byte[] family, byte[] qualifier)
-Gets a scanner on the current table for the given family and qualifier.
+ResultScanner
+HTable.getScanner(byte[] family, byte[] qualifier)
+The underlying HTable must not be closed.
 
 
 
 ResultScanner
-RawAsyncTableImpl.getScanner(Scan scan)
-
-
-ResultScanner
-HTable.getScanner(Scan scan)
-The underlying HTable must not be closed.
+AsyncTable.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the Scan object.
 
 
-
+
 ResultScanner
 Table.getScanner(Scan scan)
 Returns a scanner on the current table as specified by the Scan object.
 
 
-
+
 ResultScanner
 AsyncTableImpl.getScanner(Scan scan)
 
+
+ResultScanner
+RawAsyncTableImpl.getScanner(Scan scan)
+
 
 ResultScanner
-AsyncTable.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
+HTable.getScanner(Scan scan)
+The underlying HTable must not be closed.
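A minimal sketch of Table.getScanner(Scan) from the table above, assuming an already-open Connection (conn); the table name is illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

class ScanSketch {
  static void scanAll(Connection conn) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("t1"));
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result result : scanner) {
        System.out.println(Bytes.toString(result.getRow()));
      }
    }
  }
}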
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
index 62cfd60..8fa3f76 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
@@ -106,11 +106,11 @@
 
 
 RetriesExhaustedWithDetailsException
-AsyncRequestFutureImpl.getErrors()
+AsyncRequestFuture.getErrors()
 
 
 RetriesExhaustedWithDetailsException
-AsyncRequestFuture.getErrors()
+AsyncRequestFutureImpl.getErrors()
 
 
 (package private) RetriesExhaustedWithDetailsException

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
index eec52bf..5b32e1b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
@@ -234,36 +234,28 @@
 
 
 
-T
-RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable<T> callable, int callTimeout)
-
-
 T
 RpcRetryingCaller.callWithoutRetries(RetryingCallable<T> callable, int callTimeout)
 Call the server once only.
 
 
-
+
 T
-RpcRetryingCallerImpl.callWithRetries(RetryingCallable<T> callable, int callTimeout)
+RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable<T> callable, int callTimeout)
 
-
+
 T
 RpcRetryingCaller.callWithRetries(RetryingCallable<T> callable, int callTimeout)
 Retries if invocation fails.
 
 
-
-RetryingCallerInterceptorContext
-NoOpRetryingInterceptorContext.prepare(RetryingCallable<?> callable)
-
 
-FastFailInterceptorContext
-FastFailInterceptorContext.prepare(RetryingCallable<?> callable)
+T
+RpcRetryingCallerImpl.callWithRetries(RetryingCallable<T> callable, int callTimeout)
 
 
 abstract RetryingCallerInterceptorContext
@@ -275,13 +267,11 @@
 
 
 RetryingCallerInterceptorContext
-NoOpRetryingInterceptorContext.prepare(RetryingCallable<?> callable, int tries)
    

    [20/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
index 7c541f9..8225386 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
@@ -495,7 +495,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
+MetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -504,7 +504,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
+AsyncMetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -647,6 +647,10 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 MetaTableAccessor.getListOfRegionInfos(List<Pair<RegionInfo, ServerName>> pairs)
 
+static byte[]
+MetaTableAccessor.getParentsBytes(List<RegionInfo> parents)
+
+
 static void
 MetaTableAccessor.overwriteRegions(Connection connection, List<RegionInfo> regionInfos,
@@ -937,7 +941,9 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 CompletableFuture<List<RegionInfo>>
-AsyncHBaseAdmin.getRegions(ServerName serverName)
+AsyncAdmin.getRegions(ServerName serverName)
+Get all the online regions on a region server.
+
 
 
 List<RegionInfo>
@@ -946,22 +952,22 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
-List<RegionInfo>
-HBaseAdmin.getRegions(ServerName sn)
+CompletableFuture<List<RegionInfo>>
+RawAsyncHBaseAdmin.getRegions(ServerName serverName)
 
-CompletableFuture<List<RegionInfo>>
-AsyncAdmin.getRegions(ServerName serverName)
-Get all the online regions on a region server.
-
+List<RegionInfo>
+HBaseAdmin.getRegions(ServerName sn)
 
 CompletableFuture<List<RegionInfo>>
-RawAsyncHBaseAdmin.getRegions(ServerName serverName)
+AsyncHBaseAdmin.getRegions(ServerName serverName)
 
 
 CompletableFuture<List<RegionInfo>>
-AsyncHBaseAdmin.getRegions(TableName tableName)
+AsyncAdmin.getRegions(TableName tableName)
+Get the regions of a given table.
+
 
 
 List<RegionInfo>
@@ -970,18 +976,16 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
    

    [20/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
index e828a9b..a99f492 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
@@ -495,841 +495,853 @@
 487  /** The serialized table state qualifier */
 488  public static final byte[] TABLE_STATE_QUALIFIER = Bytes.toBytes("state");
 489
-490
-491  /**
-492   * The meta table version column qualifier.
-493   * We keep current version of the meta table in this column in <code>-ROOT-</code>
-494   * table: i.e. in the 'info:v' column.
-495   */
-496  public static final byte [] META_VERSION_QUALIFIER = Bytes.toBytes("v");
-497
-498  /**
-499   * The current version of the meta table.
-500   * - pre-hbase 0.92.  There is no META_VERSION column in the root table
-501   * in this case. The meta has HTableDescriptor serialized into the HRegionInfo;
-502   * - version 0 is 0.92 and 0.94. Meta data has serialized HRegionInfo's using
-503   * Writable serialization, and HRegionInfo's does not contain HTableDescriptors.
-504   * - version 1 for 0.96+ keeps HRegionInfo data structures, but changes the
-505   * byte[] serialization from Writables to Protobuf.
-506   * See HRegionInfo.VERSION
-507   */
-508  public static final short META_VERSION = 1;
-509
-510  // Other constants
-511
-512  /**
-513   * An empty instance.
-514   */
-515  public static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
-516
-517  public static final ByteBuffer EMPTY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_BYTE_ARRAY);
-518
-519  /**
-520   * Used by scanners, etc when they want to start at the beginning of a region
-521   */
-522  public static final byte [] EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
-523
-524  /**
-525   * Last row in a table.
-526   */
-527  public static final byte [] EMPTY_END_ROW = EMPTY_START_ROW;
-528
-529  /**
-530    * Used by scanners and others when they're trying to detect the end of a
-531    * table
-532    */
-533  public static final byte [] LAST_ROW = EMPTY_BYTE_ARRAY;
+490  /** The replication barrier family as a string*/
+491  public static final String REPLICATION_BARRIER_FAMILY_STR = "rep_barrier";
+492
+493  /** The replication barrier family */
+494  public static final byte[] REPLICATION_BARRIER_FAMILY =
+495      Bytes.toBytes(REPLICATION_BARRIER_FAMILY_STR);
+496
+497  /**
+498   * The meta table version column qualifier.
+499   * We keep current version of the meta table in this column in <code>-ROOT-</code>
+500   * table: i.e. in the 'info:v' column.
+501   */
+502  public static final byte [] META_VERSION_QUALIFIER = Bytes.toBytes("v");
+503
+504  /**
+505   * The current version of the meta table.
+506   * - pre-hbase 0.92.  There is no META_VERSION column in the root table
+507   * in this case. The meta has HTableDescriptor serialized into the HRegionInfo;
+508   * - version 0 is 0.92 and 0.94. Meta data has serialized HRegionInfo's using
+509   * Writable serialization, and HRegionInfo's does not contain HTableDescriptors.
+510   * - version 1 for 0.96+ keeps HRegionInfo data structures, but changes the
+511   * byte[] serialization from Writables to Protobuf.
+512   * See HRegionInfo.VERSION
+513   */
+514  public static final short META_VERSION = 1;
+515
+516  // Other constants
+517
+518  /**
+519   * An empty instance.
+520   */
+521  public static final byte [] EMPTY_BYTE_ARRAY = new byte [0];
+522
+523  public static final ByteBuffer EMPTY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_BYTE_ARRAY);
+524
+525  /**
+526   * Used by scanners, etc when they want to start at the beginning of a region
+527   */
+528  public static final byte [] EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
+529
+530  /**
+531   * Last row in a table.
+532   */
+533  public static final byte [] EMPTY_END_ROW = EMPTY_START_ROW;
 534
 535  /**
-536   * Max length a row can have because of the limitation in TFile.
-537   */
-538  public static final int MAX_ROW_LENGTH = Short.MAX_VALUE;
-539
-540  /**
-541   * Timestamp to use when we want to refer to the latest cell.
-542   *
-543   * On client side, this is the timestamp set by default when no timestamp is specified,
-544   * to refer to the latest.
-545   * On server side, this acts as a notation.
-546   * (1) For a cell of Put, which has 
    -546   * (1) For a cell of Put, which has 
    this notation,
    -547   * its timestamp will be replaced 
    with server's current time.
    -548   * (2) For a cell of Delete, which has 
    this notation,
    -549   * A. If the cell is of {@link 
    KeyValue.Type#Delete}, HBase issues a Get operation firstly.
    -550   *a. When the count of cell it 
    gets is less than the count of cell to delete,
    -551   *   the timestamp of Delete 
    cell will be replaced 
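
The empty start/stop row constants above are what full-table scans are built from; a minimal sketch, assuming an HBase 2.x client on the classpath:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;

// EMPTY_START_ROW / EMPTY_END_ROW are zero-length byte arrays, so this scan
// covers the whole table from the first region to the last.
Scan fullTable = new Scan()
    .withStartRow(HConstants.EMPTY_START_ROW)
    .withStopRow(HConstants.EMPTY_END_ROW);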

    [20/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/AsyncTable.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncTable.html 
    b/apidocs/org/apache/hadoop/hbase/client/AsyncTable.html
    index 8f03403..a4bedb1 100644
    --- a/apidocs/org/apache/hadoop/hbase/client/AsyncTable.html
    +++ b/apidocs/org/apache/hadoop/hbase/client/AsyncTable.html
@@ -109,7 +109,7 @@ public interface AsyncTable
- from the returned http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html CompletableFuture.
+ from the returned https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html CompletableFuture.
     
     Since:
     2.0.0
@@ -154,20 +154,20 @@ public interface AsyncTable
 Method and Description

-CompletableFuture<Result>   [link: http://docs.oracle.com]
+CompletableFuture<Result>   [link: https://docs.oracle.com]
 append(Append append)
 Appends values to one or more columns within a single row.

-<T> List<CompletableFuture<T>>   [links: http://docs.oracle.com]
-batch(List<? extends Row> actions)
+<T> List<CompletableFuture<T>>   [links: https://docs.oracle.com]
+batch(List<? extends Row> actions)
 Method that does a batch call on Deletes, Gets, Puts, Increments, Appends and RowMutations.

-default <T> CompletableFuture<List<T>>   [links: http://docs.oracle.com]
-batchAll(List<? extends Row> actions)
+default <T> CompletableFuture<List<T>>   [links: https://docs.oracle.com]
+batchAll(List<? extends Row> actions)
 A simple version of batch.

@@ -180,71 +180,71 @@ public interface AsyncTable

 <S,R> org.apache.hadoop.hbase.client.AsyncTable.CoprocessorServiceBuilder<S,R>
-coprocessorService(Function<com.google.protobuf.RpcChannel,S> stubMaker,   [link: http://docs.oracle.com]
+coprocessorService(Function<com.google.protobuf.RpcChannel,S> stubMaker,   [link: https://docs.oracle.com]
   ServiceCaller<S,R> callable,
   AsyncTable.CoprocessorCallback<R> callback)
 Execute a coprocessor call on the regions which are covered by a range.

-<S,R> CompletableFuture<R>
-coprocessorService(Function<com.google.protobuf.RpcChannel,S> stubMaker,   [link: http://docs.oracle.com]
+<S,R> CompletableFuture<R>
+coprocessorService(Function<com.google.protobuf.RpcChannel,S> stubMaker,   [link: https://docs.oracle.com]
   ServiceCaller<S,R> callable,
   byte[] row)
 Execute the given 
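
A minimal sketch of the batchAll() call described above, assuming an HBase 2.x async client; the table name "demo", family "f", qualifier "q" and row keys are placeholders, and checked exceptions are elided:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.util.Bytes;

// batchAll() returns a single CompletableFuture that completes once every
// action in the batch (Gets, Puts, Deletes, ...) has finished.
AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get();
AsyncTable<?> table = conn.getTable(TableName.valueOf("demo"));
List<Row> actions = Arrays.asList(
    new Put(Bytes.toBytes("r1"))
        .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")),
    new Get(Bytes.toBytes("r2")));
CompletableFuture<List<Object>> all = table.batchAll(actions);
all.thenAccept(results -> results.forEach(System.out::println));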

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    index e901330..d15170c 100644
    --- a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    +++ b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
@@ -844,7 +844,7 @@

-private Procedure
+private Procedure<?>
 ProcedureExecutor.WorkerThread.activeProcedure

@@ -1070,7 +1070,7 @@

 void
-ProcedureExecutor.TimeoutExecutorThread.add(Procedure procedure)
+TimeoutExecutorThread.add(Procedure<?> procedure)

 void
@@ -1172,7 +1172,7 @@

 private void
-ProcedureExecutor.TimeoutExecutorThread.executeTimedoutProcedure(Procedure proc)
+TimeoutExecutorThread.executeTimedoutProcedure(Procedure proc)

 protected static Long
@@ -1180,7 +1180,7 @@
   Procedure<?> proc)

-private Long
+(package private) Long
 ProcedureExecutor.getRootProcedureId(Procedure proc)

@@ -1267,7 +1267,7 @@

 boolean
-ProcedureExecutor.TimeoutExecutorThread.remove(Procedure procedure)
+TimeoutExecutorThread.remove(Procedure<?> procedure)

 private void
@@ -1371,7 +1371,7 @@
 CompletedProcedureRetainer(Procedure<?> procedure)

-DelayedProcedure(Procedure procedure)
+DelayedProcedure(Procedure<?> procedure)

 LockedResource(LockedResourceType resourceType,
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.DelayedProcedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.DelayedProcedure.html
     
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.DelayedProcedure.html
    deleted file mode 100644
    index 222fa56..000
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.DelayedProcedure.html
    +++ /dev/null
    @@ -1,165 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.procedure2.ProcedureExecutor.DelayedProcedure (Apache HBase 3.0.0-SNAPSHOT API)
-
-Packages that use ProcedureExecutor.DelayedProcedure:
-org.apache.hadoop.hbase.procedure2
-
-Methods in org.apache.hadoop.hbase.procedure2 with parameters of type ProcedureExecutor.DelayedProcedure:
-
-private void
-ProcedureExecutor.TimeoutExecutorThread.execDelayedProcedure(ProcedureExecutor.DelayedProcedure delayed)
-
-Copyright 2007-2018 https://www.apache.org/ The Apache Software Foundation. All rights reserved.
-[remainder of the deleted page was standard javadoc navigation chrome]
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.InlineChore.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.InlineChore.html
     
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.InlineChore.html
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
    index 49ff09e..c27dc14 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/AbstractMemStore.html
    @@ -179,177 +179,179 @@
 171  }
 172
 173  MemStoreSizing getSnapshotSizing() {
-174    return new MemStoreSizing(this.snapshot.keySize(), this.snapshot.heapSize());
-175  }
-176
-177  @Override
-178  public String toString() {
-179    StringBuilder buf = new StringBuilder();
-180    int i = 1;
-181    try {
-182      for (Segment segment : getSegments()) {
-183        buf.append("Segment (" + i + ") " + segment.toString() + "; ");
-184        i++;
-185      }
-186    } catch (IOException e){
-187      return e.toString();
-188    }
-189    return buf.toString();
-190  }
-191
-192  protected Configuration getConfiguration() {
-193    return conf;
-194  }
-195
-196  protected void dump(Logger log) {
-197    active.dump(log);
-198    snapshot.dump(log);
-199  }
-200
-201
-202  /*
-203   * Inserts the specified Cell into MemStore and deletes any existing
-204   * versions of the same row/family/qualifier as the specified Cell.
-205   * <p>
-206   * First, the specified Cell is inserted into the Memstore.
+174    return new MemStoreSizing(this.snapshot.keySize(),
+175        this.snapshot.heapSize(),
+176        this.snapshot.offHeapSize());
+177  }
+178
+179  @Override
+180  public String toString() {
+181    StringBuilder buf = new StringBuilder();
+182    int i = 1;
+183    try {
+184      for (Segment segment : getSegments()) {
+185        buf.append("Segment (" + i + ") " + segment.toString() + "; ");
+186        i++;
+187      }
+188    } catch (IOException e){
+189      return e.toString();
+190    }
+191    return buf.toString();
+192  }
+193
+194  protected Configuration getConfiguration() {
+195    return conf;
+196  }
+197
+198  protected void dump(Logger log) {
+199    active.dump(log);
+200    snapshot.dump(log);
+201  }
+202
+203
+204  /*
+205   * Inserts the specified Cell into MemStore and deletes any existing
+206   * versions of the same row/family/qualifier as the specified Cell.
 207   * <p>
-208   * If there are any existing Cell in this MemStore with the same row,
-209   * family, and qualifier, they are removed.
-210   * <p>
-211   * Callers must hold the read lock.
-212   *
-213   * @param cell the cell to be updated
-214   * @param readpoint readpoint below which we can safely remove duplicate KVs
-215   * @param memstoreSize
-216   */
-217  private void upsert(Cell cell, long readpoint, MemStoreSizing memstoreSizing) {
-218    // Add the Cell to the MemStore
-219    // Use the internalAdd method here since we (a) already have a lock
-220    // and (b) cannot safely use the MSLAB here without potentially
-221    // hitting OOME - see TestMemStore.testUpsertMSLAB for a
-222    // test that triggers the pathological case if we don't avoid MSLAB
-223    // here.
-224    // This cell data is backed by the same byte[] where we read request in RPC(See HBASE-15180). We
-225    // must do below deep copy. Or else we will keep referring to the bigger chunk of memory and
-226    // prevent it from getting GCed.
-227    cell = deepCopyIfNeeded(cell);
-228    this.active.upsert(cell, readpoint, memstoreSizing);
-229    setOldestEditTimeToNow();
-230    checkActiveSize();
-231  }
-232
-233  /*
-234   * @param a
-235   * @param b
-236   * @return Return lowest of a or b or null if both a and b are null
-237   */
-238  protected Cell getLowest(final Cell a, final Cell b) {
-239    if (a == null) {
-240      return b;
-241    }
-242    if (b == null) {
-243      return a;
-244    }
-245    return comparator.compareRows(a, b) <= 0 ? a : b;
-246  }
-247
-248  /*
-249   * @param key Find row that follows this one.  If null, return first.
-250   * @param set Set to look in for a row beyond <code>row</code>.
-251   * @return Next row or null if none found.  If one found, will be a new
-252   * KeyValue -- can be destroyed by subsequent calls to this method.
-253   */
-254  protected Cell getNextRow(final Cell key,
-255      final NavigableSet<Cell> set) {
-256    Cell result = null;
-257    SortedSet<Cell> tail = key == null ? set : set.tailSet(key);
-258    // Iterate until we fall into the next row; i.e. move off current row
-259    for (Cell cell : tail) {
-260      if (comparator.compareRows(cell, key) <= 0) {
-261        continue;
-262      }
-263      // Note: Not suppressing deletes or expired cells.  Needs to be handled
-264      // by higher up functions.
-265      result = cell;
-266      break;
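
The getNextRow() pattern above is generic "advance past the current key in a sorted set" logic; a minimal self-contained sketch using plain Strings in place of HBase Cells:

import java.util.Comparator;
import java.util.NavigableSet;
import java.util.SortedSet;

static String getNextRow(String key, NavigableSet<String> set, Comparator<String> cmp) {
  // Start from the tail set at key (or the whole set if key is null) and walk
  // forward until the comparator says we have moved off the current row.
  SortedSet<String> tail = key == null ? set : set.tailSet(key);
  for (String candidate : tail) {
    if (key != null && cmp.compare(candidate, key) <= 0) {
      continue; // still on the current row
    }
    return candidate; // first entry of the next row
  }
  return null; // no row follows key
}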
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
    index bd13b53..802b925 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
    @@ -900,7600 +900,7598 @@
 892    if (this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
 893      status.setStatus("Writing region info on filesystem");
 894      fs.checkRegionInfoOnFilesystem();
-895    } else {
-896      if (LOG.isDebugEnabled()) {
-897        LOG.debug("Skipping creation of .regioninfo file for " + this.getRegionInfo());
-898      }
-899    }
-900
-901    // Initialize all the HStores
-902    status.setStatus("Initializing all the Stores");
-903    long maxSeqId = initializeStores(reporter, status);
-904    this.mvcc.advanceTo(maxSeqId);
-905    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-906      Collection<HStore> stores = this.stores.values();
-907      try {
-908        // update the stores that we are replaying
-909        stores.forEach(HStore::startReplayingFromWAL);
-910        // Recover any edits if available.
-911        maxSeqId = Math.max(maxSeqId,
-912          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-913        // Make sure mvcc is up to max.
-914        this.mvcc.advanceTo(maxSeqId);
-915      } finally {
-916        // update the stores that we are done replaying
-917        stores.forEach(HStore::stopReplayingFromWAL);
-918      }
-919    }
-920    this.lastReplayedOpenRegionSeqId = maxSeqId;
+895    }
+896
+897    // Initialize all the HStores
+898    status.setStatus("Initializing all the Stores");
+899    long maxSeqId = initializeStores(reporter, status);
+900    this.mvcc.advanceTo(maxSeqId);
+901    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+902      Collection<HStore> stores = this.stores.values();
+903      try {
+904        // update the stores that we are replaying
+905        stores.forEach(HStore::startReplayingFromWAL);
+906        // Recover any edits if available.
+907        maxSeqId = Math.max(maxSeqId,
+908          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+909        // Make sure mvcc is up to max.
+910        this.mvcc.advanceTo(maxSeqId);
+911      } finally {
+912        // update the stores that we are done replaying
+913        stores.forEach(HStore::stopReplayingFromWAL);
+914      }
+915    }
+916    this.lastReplayedOpenRegionSeqId = maxSeqId;
+917
+918    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+919    this.writestate.flushRequested = false;
+920    this.writestate.compacting.set(0);
 921
-922    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-923    this.writestate.flushRequested = false;
-924    this.writestate.compacting.set(0);
-925
-926    if (this.writestate.writesEnabled) {
-927      // Remove temporary data left over from old regions
-928      status.setStatus("Cleaning up temporary data from old regions");
-929      fs.cleanupTempDir();
-930    }
-931
-932    if (this.writestate.writesEnabled) {
-933      status.setStatus("Cleaning up detritus from prior splits");
-934      // Get rid of any splits or merges that were lost in-progress.  Clean out
-935      // these directories here on open.  We may be opening a region that was
-936      // being split but we crashed in the middle of it all.
-937      fs.cleanupAnySplitDetritus();
-938      fs.cleanupMergesDir();
-939    }
-940
-941    // Initialize split policy
-942    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-943
-944    // Initialize flush policy
-945    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-946
-947    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-948    for (HStore store: stores.values()) {
-949      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-950    }
-951
-952    // Use maximum of log sequenceid or that which was found in stores
-953    // (particularly if no recovered edits, seqid will be -1).
-954    long nextSeqid = maxSeqId;
-955    if (this.writestate.writesEnabled) {
-956      nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
-957          this.fs.getRegionDir(), nextSeqid, 1);
-958    } else {
-959      nextSeqid++;
-960    }
-961
-962    LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
-963      "; next sequenceid=" + nextSeqid);
+922    if (this.writestate.writesEnabled) {
+923      // Remove 

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
     
    b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
    index 80108a2..a07a830 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
    @@ -144,17 +144,15 @@
     
     
     
-static HColumnDescriptor
-HColumnDescriptor.parseFrom(byte[] bytes)
-Deprecated.
-
-
 static HTableDescriptor
 HTableDescriptor.parseFrom(byte[] bytes)
 Deprecated.

+
+static ClusterId
+ClusterId.parseFrom(byte[] bytes)
+
 static HRegionInfo
 HRegionInfo.parseFrom(byte[] bytes)
@@ -165,8 +163,10 @@

-static ClusterId
-ClusterId.parseFrom(byte[] bytes)
+static HColumnDescriptor
+HColumnDescriptor.parseFrom(byte[] bytes)
+Deprecated.
+

 static SplitLogTask
@@ -220,17 +220,17 @@
 TableDescriptorBuilder.ModifyableTableDescriptor.parseFrom(byte[] bytes)

+static RegionInfo
+RegionInfo.parseFrom(byte[] bytes)
+
 static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.parseFrom(byte[] pbBytes)

 private static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.parseFrom(byte[] bytes)

-static RegionInfo
-RegionInfo.parseFrom(byte[] bytes)
-
 static RegionInfo
 RegionInfo.parseFrom(byte[] bytes,
@@ -305,111 +305,111 @@
 ByteArrayComparable.parseFrom(byte[] pbBytes)

-static ColumnPrefixFilter
-ColumnPrefixFilter.parseFrom(byte[] pbBytes)
+static SingleColumnValueExcludeFilter
+SingleColumnValueExcludeFilter.parseFrom(byte[] pbBytes)

-static ColumnCountGetFilter
-ColumnCountGetFilter.parseFrom(byte[] pbBytes)
+static ValueFilter
+ValueFilter.parseFrom(byte[] pbBytes)

-static RowFilter
-RowFilter.parseFrom(byte[] pbBytes)
+static SkipFilter
+SkipFilter.parseFrom(byte[] pbBytes)

-static FuzzyRowFilter
-FuzzyRowFilter.parseFrom(byte[] pbBytes)
+static FamilyFilter
+FamilyFilter.parseFrom(byte[] pbBytes)

-static BinaryComparator
-BinaryComparator.parseFrom(byte[] pbBytes)
+static BinaryPrefixComparator
+BinaryPrefixComparator.parseFrom(byte[] pbBytes)

-static RegexStringComparator
-RegexStringComparator.parseFrom(byte[] pbBytes)
+static NullComparator
+NullComparator.parseFrom(byte[] pbBytes)

-static Filter
-Filter.parseFrom(byte[] pbBytes)
-Concrete implementers can signal a failure condition in their code by throwing an IOException.
-
+static BigDecimalComparator
+BigDecimalComparator.parseFrom(byte[] pbBytes)

-static RandomRowFilter
-RandomRowFilter.parseFrom(byte[] pbBytes)
+static ColumnPrefixFilter
+ColumnPrefixFilter.parseFrom(byte[] pbBytes)

-static FirstKeyOnlyFilter
-FirstKeyOnlyFilter.parseFrom(byte[] pbBytes)
+static PageFilter
+PageFilter.parseFrom(byte[] pbBytes)

-static SkipFilter
-SkipFilter.parseFrom(byte[] pbBytes)
+static BitComparator
+BitComparator.parseFrom(byte[] pbBytes)

-static BinaryPrefixComparator
-BinaryPrefixComparator.parseFrom(byte[] pbBytes)
+static RowFilter
+RowFilter.parseFrom(byte[] pbBytes)

-static TimestampsFilter
-TimestampsFilter.parseFrom(byte[] pbBytes)
+static ColumnRangeFilter
+ColumnRangeFilter.parseFrom(byte[] pbBytes)

-static ValueFilter
-ValueFilter.parseFrom(byte[] pbBytes)
+static ColumnCountGetFilter
+ColumnCountGetFilter.parseFrom(byte[] pbBytes)

-static KeyOnlyFilter
-KeyOnlyFilter.parseFrom(byte[] pbBytes)
+static SubstringComparator
+SubstringComparator.parseFrom(byte[] pbBytes)

-static FamilyFilter
-FamilyFilter.parseFrom(byte[] pbBytes)
+static MultipleColumnPrefixFilter
+MultipleColumnPrefixFilter.parseFrom(byte[] pbBytes)

-static QualifierFilter
-QualifierFilter.parseFrom(byte[] pbBytes)
+static ColumnPaginationFilter
+ColumnPaginationFilter.parseFrom(byte[] pbBytes)

-static FilterList
-FilterList.parseFrom(byte[] pbBytes)
+static DependentColumnFilter
+DependentColumnFilter.parseFrom(byte[] pbBytes)

-static BigDecimalComparator
-BigDecimalComparator.parseFrom(byte[] pbBytes)
+static BinaryComparator
+BinaryComparator.parseFrom(byte[] pbBytes)

-static ColumnRangeFilter
-ColumnRangeFilter.parseFrom(byte[] pbBytes)
+static InclusiveStopFilter
+InclusiveStopFilter.parseFrom(byte[] pbBytes)

-static ColumnPaginationFilter
-ColumnPaginationFilter.parseFrom(byte[] pbBytes)
+static KeyOnlyFilter
+KeyOnlyFilter.parseFrom(byte[] pbBytes)

-static SubstringComparator
-SubstringComparator.parseFrom(byte[] pbBytes)
+static MultiRowRangeFilter
+MultiRowRangeFilter.parseFrom(byte[] pbBytes)

-static WhileMatchFilter
-WhileMatchFilter.parseFrom(byte[] pbBytes)
+static Filter
+Filter.parseFrom(byte[] pbBytes)
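
A minimal round-trip sketch for the parseFrom() methods listed above (the prefix bytes are made up); parseFrom() throws DeserializationException on malformed input:

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.util.Bytes;

// Serialize a filter to its protobuf form and parse it back.
Filter original = new ColumnPrefixFilter(Bytes.toBytes("col-"));
byte[] pbBytes = original.toByteArray();                  // may throw IOException
Filter restored = ColumnPrefixFilter.parseFrom(pbBytes);  // may throw DeserializationException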
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
    index e3d9f70..35f0e35 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
    @@ -208,9 +208,9 @@ service.
     
     
     
-default ResultScanner
-AsyncTable.getScanner(byte[] family)
-Gets a scanner on the current table for the given family.
+ResultScanner
+HTable.getScanner(byte[] family)
+The underlying HTable must not be closed.

@@ -220,16 +220,16 @@ service.

-ResultScanner
-HTable.getScanner(byte[] family)
-The underlying HTable must not be closed.
+default ResultScanner
+AsyncTable.getScanner(byte[] family)
+Gets a scanner on the current table for the given family.

-default ResultScanner
-AsyncTable.getScanner(byte[] family,
+ResultScanner
+HTable.getScanner(byte[] family,
   byte[] qualifier)
-Gets a scanner on the current table for the given family and qualifier.
+The underlying HTable must not be closed.

@@ -240,37 +240,37 @@ service.

-ResultScanner
-HTable.getScanner(byte[] family,
+default ResultScanner
+AsyncTable.getScanner(byte[] family,
   byte[] qualifier)
-The underlying HTable must not be closed.
+Gets a scanner on the current table for the given family and qualifier.

 ResultScanner
-AsyncTable.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
-
+RawAsyncTableImpl.getScanner(Scan scan)

 ResultScanner
-Table.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
+HTable.getScanner(Scan scan)
+The underlying HTable must not be closed.

 ResultScanner
-AsyncTableImpl.getScanner(Scan scan)
+Table.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the Scan object.

 ResultScanner
-RawAsyncTableImpl.getScanner(Scan scan)
+AsyncTableImpl.getScanner(Scan scan)

 ResultScanner
-HTable.getScanner(Scan scan)
-The underlying HTable must not be closed.
+AsyncTable.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the Scan object.
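
A minimal sketch of the getScanner() usage described above; the table name "demo" and family "f" are placeholders. The scanner must be closed, and the underlying table must stay open while the scanner is in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

Configuration conf = HBaseConfiguration.create();
try (Connection conn = ConnectionFactory.createConnection(conf);
     Table table = conn.getTable(TableName.valueOf("demo"));
     ResultScanner scanner = table.getScanner(new Scan().addFamily(Bytes.toBytes("f")))) {
  for (Result r : scanner) {
    System.out.println(Bytes.toString(r.getRow()));
  }
}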
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
    index b1d1cef..d730879 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
    @@ -106,11 +106,11 @@
     
     
     RetriesExhaustedWithDetailsException
    -AsyncRequestFuture.getErrors()
    +AsyncRequestFutureImpl.getErrors()
     
     
     RetriesExhaustedWithDetailsException
    -AsyncRequestFutureImpl.getErrors()
    +AsyncRequestFuture.getErrors()
     
     
     (package private) RetriesExhaustedWithDetailsException
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
    index 0a290e1..9642faa 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
    @@ -234,28 +234,36 @@
     
     
     
+<T> T
+RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable<T> callable,
+  int callTimeout)
+
 <T> T
 RpcRetryingCaller.callWithoutRetries(RetryingCallable<T> callable,
   int callTimeout)
 Call the server once only.

 <T> T
-RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable<T> callable,
-  int callTimeout)
+RpcRetryingCallerImpl.callWithRetries(RetryingCallable<T> callable,
+   int callTimeout)

 <T> T
 RpcRetryingCaller.callWithRetries(RetryingCallable<T> callable,
   int callTimeout)
 Retries if invocation fails.

+RetryingCallerInterceptorContext
+NoOpRetryingInterceptorContext.prepare(RetryingCallable<?> callable)
+
-<T> T
-RpcRetryingCallerImpl.callWithRetries(RetryingCallable<T> callable,
-   int callTimeout)
+FastFailInterceptorContext
+FastFailInterceptorContext.prepare(RetryingCallable<?> callable)
     
     
     abstract RetryingCallerInterceptorContext
    @@ -267,11 +275,13 @@
     
     
     

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
    index 56a2ea1..98104cb 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
    @@ -449,14 +449,14 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
 TableDescriptor
-HTable.getDescriptor()
-
-
-TableDescriptor
 Table.getDescriptor()
 Gets the table descriptor for this table.

+
+TableDescriptor
+HTable.getDescriptor()
+
 TableDescriptor
 Admin.getDescriptor(TableName tableName)
@@ -509,51 +509,51 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 CompletableFuture<TableDescriptor>
-AsyncAdmin.getDescriptor(TableName tableName)
-Method for getting the tableDescriptor
-
+AsyncHBaseAdmin.getDescriptor(TableName tableName)

 CompletableFuture<TableDescriptor>
-RawAsyncHBaseAdmin.getDescriptor(TableName tableName)
+AsyncAdmin.getDescriptor(TableName tableName)
+Method for getting the tableDescriptor
+

 CompletableFuture<TableDescriptor>
-AsyncHBaseAdmin.getDescriptor(TableName tableName)
+RawAsyncHBaseAdmin.getDescriptor(TableName tableName)

 private CompletableFuture<List<TableDescriptor>>
 RawAsyncHBaseAdmin.getTableDescriptors(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest request)

-default CompletableFuture<List<TableDescriptor>>
-AsyncAdmin.listTableDescriptors()
-List all the userspace tables.
-
-
 List<TableDescriptor>
 Admin.listTableDescriptors()
 List all the userspace tables.

 List<TableDescriptor>
 HBaseAdmin.listTableDescriptors()

+default CompletableFuture<List<TableDescriptor>>
+AsyncAdmin.listTableDescriptors()
+List all the userspace tables.
+
 CompletableFuture<List<TableDescriptor>>
-AsyncAdmin.listTableDescriptors(boolean includeSysTables)
-List all the tables.
-
+AsyncHBaseAdmin.listTableDescriptors(boolean includeSysTables)

 CompletableFuture<List<TableDescriptor>>
-RawAsyncHBaseAdmin.listTableDescriptors(boolean includeSysTables)
+AsyncAdmin.listTableDescriptors(boolean includeSysTables)
+List all the tables.
+

 CompletableFuture<List<TableDescriptor>>
-AsyncHBaseAdmin.listTableDescriptors(boolean includeSysTables)
+RawAsyncHBaseAdmin.listTableDescriptors(boolean includeSysTables)
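
A minimal sketch of the getDescriptor()/listTableDescriptors() calls above on the synchronous Admin API; assumes an open Connection named conn, and the table name "demo" is a placeholder:

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

try (Admin admin = conn.getAdmin()) {
  // List all userspace tables...
  List<TableDescriptor> tables = admin.listTableDescriptors();
  tables.forEach(td -> System.out.println(td.getTableName()));
  // ...or fetch one descriptor by name.
  TableDescriptor demo = admin.getDescriptor(TableName.valueOf("demo"));
}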
     
     
     

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
    index 2ac1b78..90f52b0 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
    @@ -106,7 +106,7 @@
     
     
     private RegionLocateType
    -AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType
    +AsyncSingleRequestRpcRetryingCaller.locateType
     
     
     RegionLocateType
    @@ -114,7 +114,7 @@
     
     
     private RegionLocateType
    -AsyncSingleRequestRpcRetryingCaller.locateType
    +AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
    index fbe0658..e062eb5 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
    @@ -230,13 +230,13 @@ service.
     
     
     private RegionLocator
    -TableInputFormatBase.regionLocator
    -The RegionLocator of the 
    table.
    -
    +HFileOutputFormat2.TableInfo.regionLocator
     
     
     private RegionLocator
    -HFileOutputFormat2.TableInfo.regionLocator
    +TableInputFormatBase.regionLocator
    +The RegionLocator of the 
    table.
    +
     
     
     
    @@ -248,15 +248,15 @@ service.
     
     
     
    +RegionLocator
    +HFileOutputFormat2.TableInfo.getRegionLocator()
    +
    +
     protected RegionLocator
     TableInputFormatBase.getRegionLocator()
     Allows subclasses to get the RegionLocator.
     
     
    -
    -RegionLocator
    -HFileOutputFormat2.TableInfo.getRegionLocator()
    -
     
     
     
    
    
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html 
    b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
    index 56a2ea1..98104cb 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
    index bf8d672..61695fd 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
    @@ -113,17 +113,17 @@
     
     
     
-private Batch.Callback<CResult>
-AsyncRequestFutureImpl.callback
-
-
 private Batch.Callback<T>
 AsyncProcessTask.callback

 private Batch.Callback<T>
 AsyncProcessTask.Builder.callback

+private Batch.Callback<CResult>
+AsyncRequestFutureImpl.callback
+

@@ -148,42 +148,50 @@

 <R> void
-Table.batchCallback(List<? extends Row> actions,
+HTable.batchCallback(List<? extends Row> actions,
   Object[] results,
-  Batch.Callback<R> callback)
-Same as Table.batch(List, Object[]), but with a callback.
-
+  Batch.Callback<R> callback)

 <R> void
-HTable.batchCallback(List<? extends Row> actions,
+Table.batchCallback(List<? extends Row> actions,
   Object[] results,
-  Batch.Callback<R> callback)
+  Batch.Callback<R> callback)
+Same as Table.batch(List, Object[]), but with a callback.
+

 <R extends com.google.protobuf.Message> void
-Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
+HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
   com.google.protobuf.Message request,
   byte[] startKey,
   byte[] endKey,
   R responsePrototype,
-  Batch.Callback<R> callback)
-Creates an instance of the given Service subclass for each table
- region spanning the range from the startKey row to endKey row (inclusive), all
- the invocations to the same region server will be batched into one call.
-
+  Batch.Callback<R> callback)

 <R extends com.google.protobuf.Message> void
-HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
+Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
   com.google.protobuf.Message request,
   byte[] startKey,
   byte[] endKey,
   R responsePrototype,
-  Batch.Callback<R> callback)
+  Batch.Callback<R> callback)
+Creates an instance of the given Service subclass for each table
+ region spanning the range from the startKey row to endKey row (inclusive), all
+ the invocations to the same region server will be batched into one call.
+

 <T extends com.google.protobuf.Service,R> void
+HTable.coprocessorService(Class<T> service,
+  byte[] startKey,
+  byte[] endKey,
+  Batch.Call<T,R> callable,
+  Batch.Callback<R> callback)
+
+<T extends com.google.protobuf.Service,R> void
 Table.coprocessorService(Class<T> service,
   byte[] startKey,
   byte[] endKey,
   Batch.Call<T,R> callable,
@@ -195,14 +203,6 @@
  with each Service instance.

-
-<T extends com.google.protobuf.Service,R> void
-HTable.coprocessorService(Class<T> service,
-  byte[] startKey,
-  byte[] endKey,
-  Batch.Call<T,R> callable,
-  Batch.Callback<R> callback)
-

 static <R> void
 HTable.doBatchWithCallback(List<? extends Row> actions,
    
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
    index b590002..9b2a580 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
@@ -560,806 +560,811 @@
 552    return this;
 553  }
 554
-555  public ColumnFamilyDescriptorBuilder setValue(final Bytes key, final Bytes value) {
-556    desc.setValue(key, value);
+555  public ColumnFamilyDescriptorBuilder setNewVersionBehavior(final boolean value) {
+556    desc.setNewVersionBehavior(value);
 557    return this;
 558  }
 559
-560  public ColumnFamilyDescriptorBuilder setValue(final byte[] key, final byte[] value) {
+560  public ColumnFamilyDescriptorBuilder setValue(final Bytes key, final Bytes value) {
 561    desc.setValue(key, value);
 562    return this;
 563  }
 564
-565  public ColumnFamilyDescriptorBuilder setValue(final String key, final String value) {
+565  public ColumnFamilyDescriptorBuilder setValue(final byte[] key, final byte[] value) {
 566    desc.setValue(key, value);
 567    return this;
 568  }
 569
-570  /**
-571   * An ModifyableFamilyDescriptor contains information about a column family such as the
-572   * number of versions, compression settings, etc.
-573   *
-574   * It is used as input when creating a table or adding a column.
-575   * TODO: make this package-private after removing the HColumnDescriptor
-576   */
-577  @InterfaceAudience.Private
-578  public static class ModifyableColumnFamilyDescriptor
-579      implements ColumnFamilyDescriptor, Comparable<ModifyableColumnFamilyDescriptor> {
-580
-581    // Column family name
-582    private final byte[] name;
-583
-584    // Column metadata
-585    private final Map<Bytes, Bytes> values = new HashMap<>();
-586
-587    /**
-588     * A map which holds the configuration specific to the column family. The
-589     * keys of the map have the same names as config keys and override the
-590     * defaults with cf-specific settings. Example usage may be for compactions,
-591     * etc.
-592     */
-593    private final Map<String, String> configuration = new HashMap<>();
-594
-595    /**
-596     * Construct a column descriptor specifying only the family name The other
-597     * attributes are defaulted.
-598     *
-599     * @param name Column family name. Must be 'printable' -- digit or
-600     * letter -- and may not contain a <code>:</code>
-601     * TODO: make this private after the HCD is removed.
-602     */
-603    @InterfaceAudience.Private
-604    public ModifyableColumnFamilyDescriptor(final byte[] name) {
-605      this(isLegalColumnFamilyName(name), getDefaultValuesBytes(), Collections.emptyMap());
-606    }
-607
-608    /**
-609     * Constructor. Makes a deep copy of the supplied descriptor.
-610     * TODO: make this private after the HCD is removed.
-611     * @param desc The descriptor.
-612     */
-613    @InterfaceAudience.Private
-614    public ModifyableColumnFamilyDescriptor(ColumnFamilyDescriptor desc) {
-615      this(desc.getName(), desc.getValues(), desc.getConfiguration());
-616    }
-617
-618    private ModifyableColumnFamilyDescriptor(byte[] name, Map<Bytes, Bytes> values, Map<String, String> config) {
-619      this.name = name;
-620      this.values.putAll(values);
-621      this.configuration.putAll(config);
-622    }
-623
-624    @Override
-625    public byte[] getName() {
-626      return Bytes.copy(name);
+570  public ColumnFamilyDescriptorBuilder setValue(final String key, final String value) {
+571    desc.setValue(key, value);
+572    return this;
+573  }
+574
+575  /**
+576   * An ModifyableFamilyDescriptor contains information about a column family such as the
+577   * number of versions, compression settings, etc.
+578   *
+579   * It is used as input when creating a table or adding a column.
+580   * TODO: make this package-private after removing the HColumnDescriptor
+581   */
+582  @InterfaceAudience.Private
+583  public static class ModifyableColumnFamilyDescriptor
+584      implements ColumnFamilyDescriptor, Comparable<ModifyableColumnFamilyDescriptor> {
+585
+586    // Column family name
+587    private final byte[] name;
+588
+589    // Column metadata
+590    private final Map<Bytes, Bytes> values = new HashMap<>();
+591
+592    /**
+593     * A map which holds the configuration specific to the column family. The
+594     * keys of the map have the same names as config keys and override the
+595     * defaults with cf-specific settings. Example usage may be for compactions,
+596     * etc.
+597     */
+598    private final 
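A short sketch of the builder methods this hunk touches; the family name and values are arbitrary:

  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes("cf"))
      .setNewVersionBehavior(true)                       // overload added by this change
      .setValue(Bytes.toBytes("k"), Bytes.toBytes("v"))  // byte[]/byte[] overload
      .setValue("SOME_KEY", "some-value")                // String/String overload
      .build();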

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/org/apache/hadoop/hbase/wal/WALKey.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/wal/WALKey.html 
    b/devapidocs/org/apache/hadoop/hbase/wal/WALKey.html
    index 8c64e35..bbbda4d 100644
    --- a/devapidocs/org/apache/hadoop/hbase/wal/WALKey.html
    +++ b/devapidocs/org/apache/hadoop/hbase/wal/WALKey.html
    @@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
     
     
     @InterfaceAudience.LimitedPrivate(value={"Replication","Coprocesssor"})
    -public interface WALKey
    +public interface WALKey
 extends SequenceId, Comparable<WALKey>
     Key for WAL Entry.
      Read-only. No Setters. For limited audience such as Coprocessors.
    @@ -189,7 +189,7 @@ extends 
     TableName
    -getTablename()
    +getTableName()
     
     
     long
    @@ -236,7 +236,7 @@ extends 
     
     EMPTY_UUIDS
-static final List<UUID> EMPTY_UUIDS
+static final List<UUID> EMPTY_UUIDS
     Unmodifiable empty list of UUIDs.
     
     
    @@ -254,7 +254,7 @@ extends 
     
     estimatedSerializedSizeOf
-default long estimatedSerializedSizeOf()
+default long estimatedSerializedSizeOf()
     
     
     
    @@ -263,20 +263,20 @@ extends 
     
     getEncodedRegionName
-byte[] getEncodedRegionName()
+byte[] getEncodedRegionName()
     
     Returns:
     encoded region name
     
     
     
    -
    +
     
     
     
     
-getTablename
-TableName getTablename()
+getTableName
+TableName getTableName()
     
     Returns:
     table name
    @@ -289,7 +289,7 @@ extends 
     
     getWriteTime
-long getWriteTime()
+long getWriteTime()
     
     Returns:
     the write time
    @@ -302,7 +302,7 @@ extends 
     
     getNonceGroup
-default long getNonceGroup()
+default long getNonceGroup()
     
     Returns:
     The nonce group
    @@ -315,7 +315,7 @@ extends 
     
     getNonce
-default long getNonce()
+default long getNonce()
     
     Returns:
     The nonce
    @@ -328,7 +328,7 @@ extends 
     
     getOriginatingClusterId
-UUID getOriginatingClusterId()
+UUID getOriginatingClusterId()
     
     
     
    @@ -337,7 +337,7 @@ extends 
     
     getOrigLogSeqNum
-long getOrigLogSeqNum()
+long getOrigLogSeqNum()
     Return a positive long if current WALKeyImpl is created 
    from a replay edit; a replay edit is an
      edit that came in when replaying WALs of a crashed server.
     
    @@ -352,7 +352,7 @@ extends 
     
     toStringMap
-default Map<String,Object> toStringMap()
+default Map<String,Object> toStringMap()
     Produces a string map for this key. Useful for programmatic 
    use and
      manipulation of the data stored in an WALKeyImpl, for example, printing
      as JSON.
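A minimal consumer-side sketch of the renamed accessor, e.g. from replication or a coprocessor hook; the logging itself is illustrative:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.wal.WALKey;

  static String describe(WALKey key) {
    TableName table = key.getTableName();  // was getTablename() before this change
    long writeTime = key.getWriteTime();
    return table + "@" + writeTime + " " + key.toStringMap();
  }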
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/org/apache/hadoop/hbase/wal/WALKeyImpl.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/wal/WALKeyImpl.html 
    b/devapidocs/org/apache/hadoop/hbase/wal/WALKeyImpl.html
    index 4e6ae43..2b90be3 100644
    --- a/devapidocs/org/apache/hadoop/hbase/wal/WALKeyImpl.html
    +++ b/devapidocs/org/apache/hadoop/hbase/wal/WALKeyImpl.html
    @@ -416,7 +416,7 @@ implements 
     
     TableName
    -getTablename()
    +getTableName()
     
     
     MultiVersionConcurrencyControl.WriteEntry
    @@ -987,16 +987,16 @@ protectedvoid
    +
     
     
     
     
-getTablename
-public TableName getTablename()
+getTableName
+public TableName getTableName()

Specified by:
-getTablename in interface WALKey
+getTableName in interface WALKey
     Returns:
     table name
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/org/apache/hadoop/hbase/wal/WALProvider.AsyncWriter.html
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.html
    index 61f4127..e3a25b3 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAlwaysSetScannerId.html
    @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public class TestAlwaysSetScannerId
    +public class TestAlwaysSetScannerId
extends Object
     Testcase to make sure that we always set scanner id in 
    ScanResponse. See HBASE-18000.
     
    @@ -135,26 +135,30 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     CF
     
     
    +static HBaseClassTestRule
    +CLASS_RULE
    +
    +
     private static int
     COUNT
     
    -
    +
     private static byte[]
     CQ
     
    -
    +
     private static 
    org.apache.hadoop.hbase.HRegionInfo
     HRI
     
    -
    +
     private static 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface
     STUB
     
    -
    +
     private static 
    org.apache.hadoop.hbase.TableName
     TABLE_NAME
     
    -
    +
     private static HBaseTestingUtility
     UTIL
     
    @@ -224,13 +228,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     Field Detail
    +
    +
    +
    +
    +
    +CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
    +
    +
     
     
     
     
     
     UTIL
-private static final HBaseTestingUtility UTIL
+private static final HBaseTestingUtility UTIL
     
     
     
    @@ -239,7 +252,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     TABLE_NAME
-private static final org.apache.hadoop.hbase.TableName TABLE_NAME
+private static final org.apache.hadoop.hbase.TableName TABLE_NAME
     
     
     
    @@ -248,7 +261,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     CF
-private static final byte[] CF
+private static final byte[] CF
     
     
     
    @@ -257,7 +270,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     CQ
-private static final byte[] CQ
+private static final byte[] CQ
     
     
     
    @@ -266,7 +279,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     COUNT
-private static final int COUNT
+private static final int COUNT
     
     See Also:
     Constant
     Field Values
    @@ -279,7 +292,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     HRI
-private static org.apache.hadoop.hbase.HRegionInfo HRI
+private static org.apache.hadoop.hbase.HRegionInfo HRI
     
     
     
    @@ -288,7 +301,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     STUB
-private static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface STUB
+private static org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface STUB
     
     
     
    @@ -305,7 +318,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     TestAlwaysSetScannerId
-public TestAlwaysSetScannerId()
+public TestAlwaysSetScannerId()
     
     
     
    @@ -322,7 +335,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     setUp
-public static void setUp()
+public static void setUp()
       throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
     title="class or interface in java.lang">Exception
     
     Throws:
    @@ -336,7 +349,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     tearDown
-public static void tearDown()
+public static void tearDown()
      throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
     title="class or interface in java.lang">Exception
     
     Throws:
    @@ -350,7 +363,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     test
-public void test()
+public void test()
       throws 
    org.apache.hbase.thirdparty.com.google.protobuf.ServiceException,
      http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
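The CLASS_RULE field added above follows the pattern used across HBase test classes; a sketch (the rest of the class is elided):

  import org.apache.hadoop.hbase.HBaseClassTestRule;
  import org.junit.ClassRule;

  public class TestAlwaysSetScannerId {
    // JUnit class rule enforcing the per-class timeout HBase derives
    // from the test's size category.
    @ClassRule
    public static final HBaseClassTestRule CLASS_RULE =
        HBaseClassTestRule.forClass(TestAlwaysSetScannerId.class);
    // ... setUp/tearDown/test as summarized above ...
  }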
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestAppendFromClientSide.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestAppendFromClientSide.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestAppendFromClientSide.html
    index 703d5a8..ac29224 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestAppendFromClientSide.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestAppendFromClientSide.html
    @@ -131,22 +131,26 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.html
    index eb9e252..667152a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupCommands.html
    @@ -28,22 +28,22 @@
     020
     021import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH;
     022import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC;
    -023import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
    -024import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
    -025import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
    -026import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
    -027import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
    -028import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
    -029import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
    -030import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
    -031import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
    -032import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
    -033import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
    -034import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
    -035import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
    -036import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
    -037import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
    -038import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
    +023import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG;
    +024import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG_DESC;
    +025import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
    +026import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
    +027import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
    +028import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER_DESC;
    +029import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET;
    +030import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_BACKUP_DESC;
    +031import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_SET_DESC;
    +032import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
    +033import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_DESC;
    +034import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_LIST_DESC;
    +035import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS;
    +036import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC;
    +037import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME;
    +038import static 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC;
     039
     040import java.io.IOException;
     041import java.net.URI;
    @@ -70,194 +70,194 @@
     062import 
    org.apache.hadoop.hbase.backup.util.BackupUtils;
     063import 
    org.apache.hadoop.hbase.client.Connection;
     064import 
    org.apache.hadoop.hbase.client.ConnectionFactory;
    -065import 
    org.apache.hbase.thirdparty.com.google.common.collect.Lists;
    -066import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -067import 
    org.apache.yetus.audience.InterfaceAudience;
    -068
    -069/**
    -070 * General backup commands, options and 
    usage messages
    -071 */
    -072
    +065import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    +066import 
    org.apache.yetus.audience.InterfaceAudience;
    +067
    +068import 
    org.apache.hbase.thirdparty.com.google.common.collect.Lists;
    +069
    +070/**
    +071 * General backup commands, options and 
    usage messages
    +072 */
     073@InterfaceAudience.Private
     074public final class BackupCommands {
    -075
    -076  public final static String 
    INCORRECT_USAGE = "Incorrect usage";
    -077
    -078  public final static String 
    TOP_LEVEL_NOT_ALLOWED =
    -079  "Top level (root) folder is not 
    allowed to be a backup destination";
    -080
    -081  public static final String USAGE = 
    "Usage: hbase backup COMMAND [command-specific arguments]\n"
    -082  + "where COMMAND 

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
    index c376e15..070c994 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StoreScanner.html
    @@ -560,597 +560,596 @@
     552LOOP: do {
     553  // Update and check the time limit 
    based on the configured value of cellsPerTimeoutCheck
     554  if ((kvsScanned % 
    cellsPerHeartbeatCheck == 0)) {
    -555
    scannerContext.updateTimeProgress();
    -556if 
    (scannerContext.checkTimeLimit(LimitScope.BETWEEN_CELLS)) {
    -557  return 
    scannerContext.setScannerState(NextState.TIME_LIMIT_REACHED).hasMoreValues();
    -558}
    -559  }
    -560  // Do object compare - we set 
    prevKV from the same heap.
    -561  if (prevCell != cell) {
    -562++kvsScanned;
    -563  }
    -564  checkScanOrder(prevCell, cell, 
    comparator);
    -565  int cellSize = 
    PrivateCellUtil.estimatedSerializedSizeOf(cell);
    -566  bytesRead += cellSize;
    -567  prevCell = cell;
    -568  
    scannerContext.setLastPeekedCell(cell);
    -569  topChanged = false;
    -570  ScanQueryMatcher.MatchCode qcode = 
    matcher.match(cell);
    -571  switch (qcode) {
    -572case INCLUDE:
    -573case INCLUDE_AND_SEEK_NEXT_ROW:
    -574case INCLUDE_AND_SEEK_NEXT_COL:
    -575
    -576  Filter f = 
    matcher.getFilter();
    -577  if (f != null) {
    -578cell = 
    f.transformCell(cell);
    -579  }
    -580
    -581  this.countPerRow++;
-582        if (storeLimit > -1 && this.countPerRow > (storeLimit + storeOffset)) {
    -583// do what SEEK_NEXT_ROW 
    does.
    -584if 
    (!matcher.moreRowsMayExistAfter(cell)) {
    -585  close(false);// Do all 
    cleanup except heap.close()
    -586  return 
    scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
    -587}
    -588matcher.clearCurrentRow();
    -589seekToNextRow(cell);
    -590break LOOP;
    -591  }
    -592
    -593  // add to results only if we 
    have skipped #storeOffset kvs
    -594  // also update metric 
    accordingly
-595          if (this.countPerRow > storeOffset) {
    -596outResult.add(cell);
    -597
    -598// Update local tracking 
    information
    -599count++;
    -600totalBytesRead += cellSize;
    -601
    -602// Update the progress of the 
    scanner context
    -603
    scannerContext.incrementSizeProgress(cellSize,
    -604  
    PrivateCellUtil.estimatedHeapSizeOf(cell));
    -605
    scannerContext.incrementBatchProgress(1);
    -606
-607            if (matcher.isUserScan() && totalBytesRead > maxRowSize) {
    -608  throw new 
    RowTooBigException(
    -609  "Max row size allowed: 
    " + maxRowSize + ", but the row is bigger than that.");
    -610}
    -611  }
    -612
    -613  if (qcode == 
    ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_ROW) {
    -614if 
    (!matcher.moreRowsMayExistAfter(cell)) {
    -615  close(false);// Do all 
    cleanup except heap.close()
    -616  return 
    scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
    -617}
    -618matcher.clearCurrentRow();
    -619seekOrSkipToNextRow(cell);
    -620  } else if (qcode == 
    ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL) {
    -621
    seekOrSkipToNextColumn(cell);
    -622  } else {
    -623this.heap.next();
    -624  }
    -625
    -626  if 
    (scannerContext.checkBatchLimit(LimitScope.BETWEEN_CELLS)) {
    -627break LOOP;
    -628  }
    -629  if 
    (scannerContext.checkSizeLimit(LimitScope.BETWEEN_CELLS)) {
    -630break LOOP;
    -631  }
    -632  continue;
    -633
    -634case DONE:
    -635  // Optimization for Gets! If 
    DONE, no more to get on this row, early exit!
    -636  if (get) {
    -637// Then no more to this 
    row... exit.
    -638close(false);// Do all 
    cleanup except heap.close()
    -639return 
    scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
    -640  }
    -641  matcher.clearCurrentRow();
    -642  return 
    scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues();
    -643
    -644case DONE_SCAN:
    -645  close(false);// Do all cleanup 
    except heap.close()
    -646  return 
    scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
    -647
    -648case SEEK_NEXT_ROW:
    -649  // This is just a relatively 
    simple end of scan fix, to short-cut end
    -650  // us if there is an endKey in 
    the scan.
    -651 
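The guard at the top of this loop only consults the clock every cellsPerHeartbeatCheck cells. A standalone sketch of that amortized-check pattern, with simplified names (not HBase code):

  final class AmortizedTimeCheck {
    static int scan(java.util.Iterator<byte[]> cells,
        long cellsPerHeartbeatCheck, long deadlineNanos) {
      int kvsScanned = 0;
      while (cells.hasNext()) {
        // Probing the clock per cell would dominate scan cost, so only
        // every N-th cell pays for a System.nanoTime() call.
        if (kvsScanned % cellsPerHeartbeatCheck == 0
            && System.nanoTime() > deadlineNanos) {
          return kvsScanned;  // caller reports TIME_LIMIT_REACHED and resumes later
        }
        cells.next();  // consume one cell
        kvsScanned++;
      }
      return kvsScanned;
    }
  }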

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html 
    b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
    index e3f0e57..61789bb 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
    @@ -965,7 +965,7 @@ extends 
     
     masterServices
-private MasterServices masterServices
+private MasterServices masterServices
     
     
     
    @@ -974,7 +974,7 @@ extends 
     
     masterObserverGetter
-private CoprocessorHost.ObserverGetter<MasterCoprocessor,MasterObserver> masterObserverGetter
+private CoprocessorHost.ObserverGetter<MasterCoprocessor,MasterObserver> masterObserverGetter
     
     
     
    @@ -991,7 +991,7 @@ extends 
     
     MasterCoprocessorHost
-public MasterCoprocessorHost(MasterServices services,
+public MasterCoprocessorHost(MasterServices services,
   org.apache.hadoop.conf.Configuration conf)
     
     
    @@ -1009,7 +1009,7 @@ extends 
     
     createEnvironment
-public MasterCoprocessorHost.MasterEnvironment createEnvironment(MasterCoprocessor instance,
+public MasterCoprocessorHost.MasterEnvironment createEnvironment(MasterCoprocessor instance,
   int priority,
   int seq,
   org.apache.hadoop.conf.Configuration conf)
    @@ -1027,7 +1027,7 @@ extends 
     
     checkAndGetInstance
-public MasterCoprocessor checkAndGetInstance(Class<?> implClass)
+public MasterCoprocessor checkAndGetInstance(Class<?> implClass)
    throws InstantiationException,
           IllegalAccessException
     Description copied from 
    class:CoprocessorHost
    @@ -1051,7 +1051,7 @@ extends 
     
     preCreateNamespace
-public void preCreateNamespace(NamespaceDescriptor ns)
+public void preCreateNamespace(NamespaceDescriptor ns)
    throws IOException
     
     Throws:
    @@ -1065,7 +1065,7 @@ extends 
     
     postCreateNamespace
-public void postCreateNamespace(NamespaceDescriptor ns)
+public void postCreateNamespace(NamespaceDescriptor ns)
    throws IOException
     
     Throws:
    @@ -1079,7 +1079,7 @@ extends 
     
     preDeleteNamespace
-public void preDeleteNamespace(String namespaceName)
+public void preDeleteNamespace(String namespaceName)
    throws IOException
     
     Throws:
    @@ -1093,7 +1093,7 @@ extends 
     
     postDeleteNamespace
-public void postDeleteNamespace(String namespaceName)
+public void postDeleteNamespace(String namespaceName)
    throws IOException
     
     Throws:
    @@ -1107,7 +1107,7 @@ extends 
     
     preModifyNamespace
-public void preModifyNamespace(NamespaceDescriptor ns)
+public void preModifyNamespace(NamespaceDescriptor ns)
    throws IOException
     
     Throws:
    @@ -1121,7 +1121,7 @@ extends 
     
     postModifyNamespace
-public void postModifyNamespace(NamespaceDescriptor ns)
+public void postModifyNamespace(NamespaceDescriptor ns)
    throws IOException

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testapidocs/index-all.html
    --
    diff --git a/testapidocs/index-all.html b/testapidocs/index-all.html
    index b140232..90e8675 100644
    --- a/testapidocs/index-all.html
    +++ b/testapidocs/index-all.html
    @@ -875,6 +875,10 @@
     
     Start a MiniHBaseCluster.
     
+MiniHBaseCluster(Configuration, int, int, Class<? extends HMaster>, Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer>) - Constructor for class org.apache.hadoop.hbase.MiniHBaseCluster
+
+Start a MiniHBaseCluster.
+
 MiniHBaseCluster(Configuration, int, int, List<Integer>, Class<? extends HMaster>, Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer>) - Constructor for class org.apache.hadoop.hbase.MiniHBaseCluster

 modifyTableSync(Admin, TableDescriptor) - Static method in class org.apache.hadoop.hbase.HBaseTestingUtility
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
    --
    diff --git a/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html 
    b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
    index 2012575..12c7700 100644
    --- a/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
    +++ b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
    @@ -182,6 +182,15 @@ extends 
+MiniHBaseCluster(org.apache.hadoop.conf.Configuration conf,
+  int numMasters,
+  int numRegionServers,
+  Class<? extends HMaster> masterClass,
+  Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
    +Start a MiniHBaseCluster.
    +
    +
    +
     MiniHBaseCluster(org.apache.hadoop.conf.Configurationconf,
     intnumMasters,
     intnumRegionServers,
    @@ -683,13 +692,38 @@ extends 
    +
    +
    +
    +
    +MiniHBaseCluster
+public MiniHBaseCluster(org.apache.hadoop.conf.Configuration conf,
+  int numMasters,
+  int numRegionServers,
+  Class<? extends HMaster> masterClass,
+  Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
+    throws IOException, InterruptedException
    +Start a MiniHBaseCluster.
    +
    +Parameters:
    +conf - Configuration to be used for cluster
    +numMasters - initial number of masters to start.
+numRegionServers - initial number of region servers to start.
+Throws:
+IOException
+InterruptedException
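Usage sketch for the new constructor; MyMaster and MyRegionServer are hypothetical test subclasses of HMaster and MiniHBaseCluster.MiniHBaseClusterRegionServer:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.MiniHBaseCluster;

  Configuration conf = HBaseConfiguration.create();
  MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 1, 3,
      MyMaster.class, MyRegionServer.class);  // hypothetical subclasses
  try {
    // drive the cluster under test ...
  } finally {
    cluster.shutdown();
  }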
    +
    +
    +
     
     
     
     
     
     MiniHBaseCluster
-public MiniHBaseCluster(org.apache.hadoop.conf.Configuration conf,
+public MiniHBaseCluster(org.apache.hadoop.conf.Configuration conf,
   int numMasters,
   int numRegionServers,
   List<Integer> rsPorts,
    @@ -723,7 +757,7 @@ extends 
     
     getConfiguration
-public org.apache.hadoop.conf.Configuration getConfiguration()
+public org.apache.hadoop.conf.Configuration getConfiguration()
     
     
     
    @@ -732,7 +766,7 @@ extends 
     
     startRegionServer
-public void startRegionServer(String hostname,
+public void startRegionServer(String hostname,
   int port)
    throws IOException
     Description copied from 
    class:org.apache.hadoop.hbase.HBaseCluster
    @@ -754,7 +788,7 @@ extends 
     
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html
    new file mode 100644
    index 000..6f63255
    --- /dev/null
    +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html
    @@ -0,0 +1,333 @@
    +TestCIGetRpcTimeout (Apache HBase 3.0.0-SNAPSHOT Test API)
    +
+
    +org.apache.hadoop.hbase.client
    +Class 
    TestCIGetRpcTimeout
    +
    +
    +
+java.lang.Object
    +
    +
    +org.apache.hadoop.hbase.client.AbstractTestCITimeout
    +
    +
    +org.apache.hadoop.hbase.client.AbstractTestCIRpcTimeout
    +
    +
    +org.apache.hadoop.hbase.client.TestCIGetRpcTimeout
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +public class TestCIGetRpcTimeout
    +extends AbstractTestCIRpcTimeout
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Nested Class Summary
    +
    +
    +
    +
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.client.AbstractTestCITimeout
    +AbstractTestCITimeout.SleepAndFailFirstTime,
     AbstractTestCITimeout.SleepCoprocessor
    +
    +
    +
    +
    +
    +
    +
    +
    +Field Summary
    +
    +
    +
    +
+Fields inherited from class org.apache.hadoop.hbase.client.AbstractTestCITimeout
    +FAM_NAM,
     name,
     TEST_UTIL
    +
    +
    +
    +
    +
    +
    +
    +
    +Constructor Summary
    +
    +Constructors
    +
    +Constructor and Description
    +
    +
    +TestCIGetRpcTimeout()
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Method Summary
    +
    +All MethodsInstance MethodsConcrete Methods
    +
    +Modifier and Type
    +Method and Description
    +
    +
    +protected void
+execute(org.apache.hadoop.hbase.client.Table table)
    +
    +
    +
    +
    +
    +
+Methods inherited from class org.apache.hadoop.hbase.client.AbstractTestCIRpcTimeout
    +setUp,
     testRpcTimeout
    +
    +
    +
    +
    +
+Methods inherited from class org.apache.hadoop.hbase.client.AbstractTestCITimeout
    +setUpBeforeClass,
     tearDownAfterClass
    +
    +
    +
    +
    +
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Constructor Detail
    +
    +
    +
    +
    +
    +TestCIGetRpcTimeout
+public TestCIGetRpcTimeout()
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Method Detail
    +
    +
    +
    +
    +
    +execute
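The page above documents only an execute(Table) override; the shape of the subclass is roughly this, with the row key passed to Get being an illustrative guess:

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.Table;

  public class TestCIGetRpcTimeout extends AbstractTestCIRpcTimeout {
    @Override
    protected void execute(Table table) throws IOException {
      table.get(new Get(FAM_NAM));  // the inherited testRpcTimeout() times this out
    }
  }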
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
    index d58f717..35d8362 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Exemplar for hbase-client archetype  Project 
    Dependencies
     
    @@ -4168,7 +4168,7 @@ The following provides more details on the included 
    cryptographic software:
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-10
    +  Last Published: 
    2018-01-11
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
    index 18a9f2f..bf1e334 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Exemplar for hbase-client archetype  Reactor 
    Dependency Convergence
     
    @@ -912,7 +912,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-10
    +  Last Published: 
    2018-01-11
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
    index 95c2003..bdccdc9 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Exemplar for hbase-client archetype  
    Dependency Information
     
    @@ -147,7 +147,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-10
    +  Last Published: 
    2018-01-11
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
    index 79797ff..482ab8a 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Exemplar for hbase-client archetype  Project 
    Dependency Management
     
    @@ -810,7 +810,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-10
    +  Last Published: 
    2018-01-11
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html 
    b/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
    index 4d538bd..9bf6e8e 100644
    --- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
    +++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - 

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html 
    b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
    index 3d53183..3377afb 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/package-summary.html
    @@ -916,222 +916,226 @@
     
     
     
    -HBaseAdmin.RestoreSnapshotFuture
    +HBaseAdmin.ReplicationFuture
     
     
     
    -HBaseAdmin.SplitTableRegionFuture
    +HBaseAdmin.RestoreSnapshotFuture
     
     
     
    +HBaseAdmin.SplitTableRegionFuture
    +
    +
    +
HBaseAdmin.TableFuture<V>
     
     
    -
    +
     HBaseAdmin.ThrowableAbortable
     
     Simple Abortable, throwing 
    RuntimeException on abort.
     
     
    -
    +
     HBaseAdmin.TruncateTableFuture
     
     
    -
    +
     HRegionLocator
     
     An implementation of RegionLocator.
     
     
    -
    +
     HTable
     
     An implementation of Table.
     
     
    -
    +
     HTableMultiplexer
     
     HTableMultiplexer provides a thread-safe non blocking PUT 
    API across all the tables.
     
     
    -
    +
     HTableMultiplexer.AtomicAverageCounter
     
     Helper to count the average over an interval until 
    reset.
     
     
    -
    +
     HTableMultiplexer.FlushWorker
     
     
    -
    +
     HTableMultiplexer.HTableMultiplexerStatus
     
     HTableMultiplexerStatus keeps track of the current status 
    of the HTableMultiplexer.
     
     
    -
    +
     HTableMultiplexer.PutStatus
     
     
    -
    +
     ImmutableHColumnDescriptor
     Deprecated
     
    -
    +
     ImmutableHRegionInfo
     Deprecated
     
    -
    +
     ImmutableHTableDescriptor
     Deprecated
     
    -
    +
     Increment
     
     Used to perform Increment operations on a single row.
     
     
    -
    +
MasterCallable<V>
     
     A RetryingCallable for Master RPC operations.
     
     
    -
    +
     MasterCoprocessorRpcChannelImpl
     
     The implementation of a master based coprocessor rpc 
    channel.
     
     
    -
    +
     MetaCache
     
     A cache implementation for region locations from meta.
     
     
    -
    +
     MetricsConnection
     
     This class is for maintaining the various connection 
    statistics and publishing them through
      the metrics interfaces.
     
     
    -
    +
     MetricsConnection.CallStats
     
     A container class for collecting details about the RPC call 
    as it percolates.
     
     
    -
    +
     MetricsConnection.CallTracker
     
     
    -
    +
     MetricsConnection.RegionStats
     
     
    -
    +
     MetricsConnection.RunnerStats
     
     
    -
    +
     MultiAction
     
     Container for Actions (i.e.
     
     
    -
    +
     MultiResponse
     
     A container for Result objects, grouped by regionName.
     
     
    -
    +
     MultiResponse.RegionResult
     
     
    -
    +
     MultiServerCallable
     
     Callable that handles the multi method call 
    going against a single
      regionserver; i.e.
     
     
    -
    +
     Mutation
     
     
    -
    +
     Mutation.CellWrapper
     
     
    -
    +
NoncedRegionServerCallable<T>
     
     Implementations make an rpc call against a RegionService 
    via a protobuf Service.
     
     
    -
    +
     NoOpRetryableCallerInterceptor
     
     Class that acts as a NoOpInterceptor.
     
     
    -
    +
     NoOpRetryingInterceptorContext
     
     
    -
    +
     Operation
     
     Superclass for any type that maps to a potentially 
    application-level query.
     
     
    -
    +
     OperationWithAttributes
     
     
    -
    +
     PackagePrivateFieldAccessor
     
     A helper class used to access the package private field in 
    o.a.h.h.client package.
     
     
    -
    +
     PerClientRandomNonceGenerator
     
     NonceGenerator implementation that uses client ID hash + 
    random int as nonce group, and random
      numbers as nonces.
     
     
    -
    +
     PreemptiveFastFailInterceptor
     
     The concrete RetryingCallerInterceptor 
    class that implements the preemptive fast fail
      feature.
     
     
    -
    +
     Put
     
     Used to perform Put operations for a single row.
     
     
    -
    +
     Query
     
     Base class for HBase read operations; e.g.
     
     
    -
    +
     QuotaStatusCalls
     
     Client class to wrap RPCs to HBase servers for space quota 
    status information.
     
     
    -
    +
     RawAsyncHBaseAdmin
     
     The implementation of AsyncAdmin.
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    index 7c2c41a..a29477d 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
    @@ -204,6 +204,7 @@
     
     org.apache.hadoop.hbase.client.HBaseAdmin.AbortProcedureFuture
     org.apache.hadoop.hbase.client.HBaseAdmin.NamespaceFuture
    +org.apache.hadoop.hbase.client.HBaseAdmin.ReplicationFuture
org.apache.hadoop.hbase.client.HBaseAdmin.TableFuture<V>
     
     org.apache.hadoop.hbase.client.HBaseAdmin.CreateTableFuture
    @@ -297,6 +298,7 @@
     org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer
     
     
    +org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.ReplicationProcedureBiConsumer
     org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.TableProcedureBiConsumer
     
     org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
    @@ -545,24 +547,24 @@
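The new ReplicationFuture and ReplicationProcedureBiConsumer types back the procedure-based replication-peer admin calls; from the client side that surfaces roughly as below (builder-style peer config per the current AsyncAdmin API is assumed):

  import java.util.concurrent.CompletableFuture;
  import org.apache.hadoop.hbase.client.AsyncAdmin;
  import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

  static CompletableFuture<Void> addPeer(AsyncAdmin admin) {
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
        .setClusterKey("zk1,zk2,zk3:2181:/hbase")  // peer cluster's zk quorum
        .build();
    // Completes when the master-side replication procedure finishes.
    return admin.addReplicationPeer("peer_1", peerConfig);
  }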
     
     

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.html
     
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.html
    index 526491c..afa053f 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = {"i0":10,"i1":10,"i2":9,"i3":9,"i4":9,"i5":10,"i6":9,"i7":9};
    +var methods = 
    {"i0":10,"i1":10,"i2":9,"i3":9,"i4":9,"i5":10,"i6":10,"i7":9,"i8":9};
     var tabs = {65535:["t0","All Methods"],1:["t1","Static 
    Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public class VerifyReplication
    +public class VerifyReplication
     extends org.apache.hadoop.conf.Configured
     implements org.apache.hadoop.util.Tool
     This map-only job compares the data from a local table with 
    a remote one.
    @@ -315,15 +315,20 @@ implements org.apache.hadoop.util.Tool
printUsage(String errorMsg)
     
     
    +private void
+restoreSnapshotForPeerCluster(org.apache.hadoop.conf.Configuration conf,
+  String peerQuorumAddress)
    +
    +
     int
run(String[] args)
     
    -
    +
     private static void
setRowPrefixFilter(Scan scan,
  String rowPrefixes)
     
    -
    +
     private static void
setStartAndStopRows(Scan scan,
  byte[] startPrefixRow,
    @@ -371,7 +376,7 @@ implements org.apache.hadoop.util.Tool
     
     
     LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
     
     
     
    @@ -380,7 +385,7 @@ implements org.apache.hadoop.util.Tool
     
     
     NAME
-public static final String NAME
+public static final String NAME
     
     See Also:
     Constant
     Field Values
    @@ -393,7 +398,7 @@ implements org.apache.hadoop.util.Tool
     
     
     PEER_CONFIG_PREFIX
-private static final String PEER_CONFIG_PREFIX
+private static final String PEER_CONFIG_PREFIX
     
     See Also:
     Constant
     Field Values
    @@ -406,7 +411,7 @@ implements org.apache.hadoop.util.Tool
     
     
     startTime
    -long startTime
    +long startTime
     
     
     
    @@ -415,7 +420,7 @@ implements org.apache.hadoop.util.Tool
     
     
     endTime
    -long endTime
    +long endTime
     
     
     
    @@ -424,7 +429,7 @@ implements org.apache.hadoop.util.Tool
     
     
     batch
    -int batch
    +int batch
     
     
     
    @@ -433,7 +438,7 @@ implements org.apache.hadoop.util.Tool
     
     
     versions
    -int versions
    +int versions
     
     
     
    @@ -442,7 +447,7 @@ implements org.apache.hadoop.util.Tool
     
     
     tableName
-String tableName
+String tableName
     
     
     
    @@ -451,7 +456,7 @@ implements org.apache.hadoop.util.Tool
     
     
     families
-String families
+String families
     
     
     
    @@ -460,7 +465,7 @@ implements org.apache.hadoop.util.Tool
     
     
     delimiter
-String delimiter
+String delimiter
     
     
     
    @@ -469,7 +474,7 @@ implements org.apache.hadoop.util.Tool
     
     
     peerId
String peerId
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
    index 5b3b750..a1f3f7e 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
    @@ -97,3307 +97,3304 @@
     089import 
    org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
     090import 
    org.apache.hbase.thirdparty.io.netty.util.Timeout;
     091import 
    org.apache.hbase.thirdparty.io.netty.util.TimerTask;
    -092import 
    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -093import 
    org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    -094import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
    -095import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
    -096import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
    -097import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
    -098import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
    -099import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
    -100import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
    -101import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
    -102import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
    -103import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
    -104import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
    -105import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
    -106import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
    -107import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
    -108import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
    -109import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
    -110import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
    -111import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
    -112import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
    -113import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
    -114import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
    -115import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
    -116import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
    -117import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
    -118import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
    -119import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
    -120import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
    -121import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
    -122import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
    -123import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
    -124import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
    -125import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
    -126import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
    -127import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
    -128import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
    -129import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
    -130import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
    -131import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
    -132import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
    -133import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
    -134import 
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/EndpointObserver.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/EndpointObserver.html
     
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/EndpointObserver.html
    index 0722fde..aa24a3c 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/EndpointObserver.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/EndpointObserver.html
    @@ -222,6 +222,6 @@
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.PrivilegedWriter.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.PrivilegedWriter.html
     
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.PrivilegedWriter.html
    index 4ad98e1..c2ddf1d 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.PrivilegedWriter.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.PrivilegedWriter.html
    @@ -162,6 +162,6 @@
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.RegionOp.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.RegionOp.html 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.RegionOp.html
    index f325b58..54d3372 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.RegionOp.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.RegionOp.html
    @@ -120,6 +120,6 @@
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.Response.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.Response.html 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.Response.html
    index 718b75d..95d1d00 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.Response.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.Response.html
    @@ -170,6 +170,6 @@
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.ScanCoprocessor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.ScanCoprocessor.html
     
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.ScanCoprocessor.html
    index 1ddf577..0e73f34 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.ScanCoprocessor.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.ScanCoprocessor.html
    @@ -120,6 +120,6 @@
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.SecureWriter.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.SecureWriter.html
     
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.SecureWriter.html
    index cb6c0f5..6521b48 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.SecureWriter.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/Export.SecureWriter.html
    @@ -120,6 +120,6 @@
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/dependency-info.html
    --
    diff --git a/hbase-shaded-check-invariants/dependency-info.html 
    b/hbase-shaded-check-invariants/dependency-info.html
    index fb60015..4aa5a59 100644
    --- a/hbase-shaded-check-invariants/dependency-info.html
    +++ b/hbase-shaded-check-invariants/dependency-info.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
Apache HBase Shaded Packaging Invariants – Dependency Information
     
    @@ -148,7 +148,7 @@
The Apache Software Foundation (https://www.apache.org/).
     All rights reserved.  
     
    -  Last Published: 
    2017-12-29
    +  Last Published: 
    2017-12-30
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/dependency-management.html
    --
    diff --git a/hbase-shaded-check-invariants/dependency-management.html 
    b/hbase-shaded-check-invariants/dependency-management.html
    index c060f5c..9394dc9 100644
    --- a/hbase-shaded-check-invariants/dependency-management.html
    +++ b/hbase-shaded-check-invariants/dependency-management.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
Apache HBase Shaded Packaging Invariants – Project Dependency Management
     
    @@ -810,7 +810,7 @@
The Apache Software Foundation (https://www.apache.org/).
     All rights reserved.  
     
    -  Last Published: 
    2017-12-29
    +  Last Published: 
    2017-12-30
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/index.html
    --
    diff --git a/hbase-shaded-check-invariants/index.html 
    b/hbase-shaded-check-invariants/index.html
    index 963c584..9db7cbc 100644
    --- a/hbase-shaded-check-invariants/index.html
    +++ b/hbase-shaded-check-invariants/index.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
Apache HBase Shaded Packaging Invariants – About
     
    @@ -122,7 +122,7 @@
The Apache Software Foundation (https://www.apache.org/).
     All rights reserved.  
     
    -  Last Published: 
    2017-12-29
    +  Last Published: 
    2017-12-30
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/integration.html
    --
    diff --git a/hbase-shaded-check-invariants/integration.html 
    b/hbase-shaded-check-invariants/integration.html
    index 9ef8d81..bb6962a 100644
    --- a/hbase-shaded-check-invariants/integration.html
    +++ b/hbase-shaded-check-invariants/integration.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
Apache HBase Shaded Packaging Invariants – CI Management
     
    @@ -126,7 +126,7 @@
The Apache Software Foundation (https://www.apache.org/).
     All rights reserved.  
     
    -  Last Published: 
    2017-12-29
    +  Last Published: 
    2017-12-30
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/issue-tracking.html
    --
    diff --git a/hbase-shaded-check-invariants/issue-tracking.html 
    b/hbase-shaded-check-invariants/issue-tracking.html
    index c22ea99..b2e3466 100644
    --- a/hbase-shaded-check-invariants/issue-tracking.html
    +++ b/hbase-shaded-check-invariants/issue-tracking.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
Apache HBase Shaded Packaging Invariants – Issue Management
     
    @@ -123,7 +123,7 @@
The Apache Software Foundation (https://www.apache.org/).
     All rights reserved.  
     
    -  Last Published: 
    2017-12-29
    +  Last Published: 
    2017-12-30
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/license.html
    --
    diff --git a/hbase-shaded-check-invariants/license.html 
    b/hbase-shaded-check-invariants/license.html
    index a97285b..61472a5 100644
    --- a/hbase-shaded-check-invariants/license.html
    +++ b/hbase-shaded-check-invariants/license.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
Apache HBase Shaded Packaging Invariants – Project Licenses
     
    @@ -326,7 +326,7 @@
The Apache Software Foundation (https://www.apache.org/).
     All rights reserved.  
     
    -  Last 

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.TagRewriteByteBufferExtendedCell.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.TagRewriteByteBufferExtendedCell.html
     
    b/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.TagRewriteByteBufferExtendedCell.html
    new file mode 100644
    index 000..e5bc7a9
    --- /dev/null
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.TagRewriteByteBufferExtendedCell.html
    @@ -0,0 +1,165 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+Uses of Class org.apache.hadoop.hbase.PrivateCellUtil.TagRewriteByteBufferExtendedCell (Apache HBase 3.0.0-SNAPSHOT API)
+[standard Javadoc header and navigation links]
+Uses of Class org.apache.hadoop.hbase.PrivateCellUtil.TagRewriteByteBufferExtendedCell
+
+Packages that use PrivateCellUtil.TagRewriteByteBufferExtendedCell:
+  Package: org.apache.hadoop.hbase
+
+Uses of PrivateCellUtil.TagRewriteByteBufferExtendedCell in org.apache.hadoop.hbase
+
+Subclasses of PrivateCellUtil.TagRewriteByteBufferExtendedCell in org.apache.hadoop.hbase:
+  (package private) static class  PrivateCellUtil.ValueAndTagRewriteByteBufferExtendedCell
+[standard Javadoc footer navigation]
+Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
    +
    +
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.ValueAndTagRewriteByteBufferCell.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.ValueAndTagRewriteByteBufferCell.html
     
    b/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.ValueAndTagRewriteByteBufferCell.html
    deleted file mode 100644
    index a246083..000
    --- 
    a/devapidocs/org/apache/hadoop/hbase/class-use/PrivateCellUtil.ValueAndTagRewriteByteBufferCell.html
    +++ /dev/null
    @@ -1,125 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-Uses of Class org.apache.hadoop.hbase.PrivateCellUtil.ValueAndTagRewriteByteBufferCell (Apache HBase 3.0.0-SNAPSHOT API)
-[standard Javadoc header and navigation links]
-Uses of Class org.apache.hadoop.hbase.PrivateCellUtil.ValueAndTagRewriteByteBufferCell
-
-No usage of org.apache.hadoop.hbase.PrivateCellUtil.ValueAndTagRewriteByteBufferCell
-[standard Javadoc footer navigation]
    -
    -
    -
    -
    -
    -
    -
    -
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
    -
    -
    
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/ByteBufferKeyValue.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/ByteBufferKeyValue.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/ByteBufferKeyValue.html
    index 8112448..71d0648 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/ByteBufferKeyValue.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/ByteBufferKeyValue.html
    @@ -25,367 +25,333 @@
     017 */
     018package org.apache.hadoop.hbase;
     019
    -020import static 
    org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
    -021
    -022import java.io.IOException;
    -023import java.io.OutputStream;
    -024import java.nio.ByteBuffer;
    -025import java.util.ArrayList;
    -026import java.util.Iterator;
    -027import java.util.List;
    -028import java.util.Optional;
    +020import java.io.IOException;
    +021import java.io.OutputStream;
    +022import java.nio.ByteBuffer;
    +023import 
    org.apache.hadoop.hbase.util.ByteBufferUtils;
    +024import 
    org.apache.hadoop.hbase.util.Bytes;
    +025import 
    org.apache.hadoop.hbase.util.ClassSize;
    +026import 
    org.apache.yetus.audience.InterfaceAudience;
    +027
    +028import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
     029
    -030import 
    org.apache.hadoop.hbase.util.ByteBufferUtils;
    -031import 
    org.apache.hadoop.hbase.util.Bytes;
    -032import 
    org.apache.hadoop.hbase.util.ClassSize;
    -033import 
    org.apache.yetus.audience.InterfaceAudience;
    -034
    -035import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+030/**
+031 * This Cell is an implementation of {@link ByteBufferCell} where the data resides in
+032 * an off-heap or on-heap ByteBuffer.
+033 */
    +034@InterfaceAudience.Private
    +035public class ByteBufferKeyValue extends 
    ByteBufferCell implements ExtendedCell {
     036
-037/**
-038 * This Cell is an implementation of {@link ByteBufferCell} where the data resides in
-039 * an off-heap or on-heap ByteBuffer.
-040 */
    -041@InterfaceAudience.Private
    -042public class ByteBufferKeyValue extends 
    ByteBufferCell implements ExtendedCell {
    -043
    -044  protected final ByteBuffer buf;
    -045  protected final int offset;
    -046  protected final int length;
    -047  private long seqId = 0;
    -048
    -049  public static final int FIXED_OVERHEAD 
    = ClassSize.OBJECT + ClassSize.REFERENCE
    -050  + (2 * Bytes.SIZEOF_INT) + 
    Bytes.SIZEOF_LONG;
    +037  protected final ByteBuffer buf;
    +038  protected final int offset;
    +039  protected final int length;
    +040  private long seqId = 0;
    +041
    +042  public static final int FIXED_OVERHEAD 
    = ClassSize.OBJECT + ClassSize.REFERENCE
    +043  + (2 * Bytes.SIZEOF_INT) + 
    Bytes.SIZEOF_LONG;
    +044
    +045  public ByteBufferKeyValue(ByteBuffer 
    buf, int offset, int length, long seqId) {
    +046this.buf = buf;
    +047this.offset = offset;
    +048this.length = length;
    +049this.seqId = seqId;
    +050  }
     051
    -052  public ByteBufferKeyValue(ByteBuffer 
    buf, int offset, int length, long seqId) {
    +052  public ByteBufferKeyValue(ByteBuffer 
    buf, int offset, int length) {
     053this.buf = buf;
     054this.offset = offset;
     055this.length = length;
    -056this.seqId = seqId;
    -057  }
    -058
    -059  public ByteBufferKeyValue(ByteBuffer 
    buf, int offset, int length) {
    -060this.buf = buf;
    -061this.offset = offset;
    -062this.length = length;
    -063  }
    -064
    -065  @VisibleForTesting
    -066  public ByteBuffer getBuffer() {
    -067return this.buf;
    -068  }
    -069
    -070  @VisibleForTesting
    -071  public int getOffset() {
    -072return this.offset;
    -073  }
    -074
    -075  @Override
    -076  public byte[] getRowArray() {
    -077return CellUtil.cloneRow(this);
    -078  }
    -079
    -080  @Override
    -081  public int getRowOffset() {
    -082return 0;
    -083  }
    -084
    -085  @Override
    -086  public short getRowLength() {
    -087return getRowLen();
    -088  }
    -089
    -090  private short getRowLen() {
    -091return 
    ByteBufferUtils.toShort(this.buf, this.offset + KeyValue.ROW_OFFSET);
    -092  }
    -093
    -094  @Override
    -095  public byte[] getFamilyArray() {
    -096return CellUtil.cloneFamily(this);
    -097  }
    -098
    -099  @Override
    -100  public int getFamilyOffset() {
    -101return 0;
    -102  }
    -103
    -104  @Override
    -105  public byte getFamilyLength() {
    -106return 
    getFamilyLength(getFamilyLengthPosition());
    -107  }
    -108
    -109  private int getFamilyLengthPosition() 
    {
    -110return this.offset + 
    KeyValue.ROW_KEY_OFFSET
    -111+ getRowLen();
    -112  }
    -113
    -114  private byte getFamilyLength(int 
    famLenPos) {
    -115return 
    ByteBufferUtils.toByte(this.buf, famLenPos);
    -116  }
    -117
    -118  @Override
    -119  public byte[] getQualifierArray() {
    -120return 
    CellUtil.cloneQualifier(this);
    -121  }
    -122
    -123  @Override
    -124  public int getQualifierOffset() {
    -125return 0;
    -126  }
    -127
    -128  @Override
    -129  public int getQualifierLength() {
    -130return 
    getQualifierLength(getRowLength(), getFamilyLength());
    -131  }
    -132
    -133  private int getQualifierLength(int 
    rlength, int 
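A minimal usage sketch for the class above, assuming KeyValue#getBuffer() is still available in this version: the serialized bytes of a heap KeyValue are copied into a direct ByteBuffer and wrapped via the four-argument constructor shown in the hunk.

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: back a cell with an off-heap buffer.
KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
    Bytes.toBytes("q"), Bytes.toBytes("v"));
ByteBuffer buf = ByteBuffer.allocateDirect(kv.getLength());
buf.put(kv.getBuffer(), kv.getOffset(), kv.getLength());  // copy serialized KeyValue bytes
ByteBufferKeyValue cell = new ByteBufferKeyValue(buf, 0, kv.getLength(), 0L);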

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
     
    b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
    index 6ab6bf6..8762dbe 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/replication/class-use/ReplicationException.html
@@ -403,26 +403,16 @@

 void
-TableBasedReplicationQueuesImpl.addHFileRefs(String peerId, List<Pair<Path, Path>> pairs)
-
-void
 ReplicationQueuesZKImpl.addHFileRefs(String peerId, List<Pair<Path, Path>> pairs)

 void
 ReplicationQueues.addHFileRefs(String peerId, List<Pair<Path, Path>> pairs)
   Add new hfile references to the queue.

-void
-TableBasedReplicationQueuesImpl.addLog(String queueId, String filename)
-
 void
 ReplicationQueuesZKImpl.addLog(String queueId,
@@ -437,66 +427,57 @@

 void
-TableBasedReplicationQueuesImpl.addPeerToHFileRefs(String peerId)
-
-void
 ReplicationQueuesZKImpl.addPeerToHFileRefs(String peerId)

 void
 ReplicationQueues.addPeerToHFileRefs(String peerId)
   Add a peer to hfile reference queue if peer does not exist.

 private void
 ReplicationPeersZKImpl.changePeerState(String id, ReplicationProtos.ReplicationState.State state)
   Update the state znode of a peer cluster.

 private void
 ReplicationPeersZKImpl.checkQueuesDeleted(String peerId)

 boolean
 ReplicationPeersZKImpl.createAndAddPeer(String peerId)
   Attempt to connect to a new remote slave cluster.

 private ReplicationPeerZKImpl
 ReplicationPeersZKImpl.createPeer(String peerId)
   Helper method to connect to a peer

 void
 ReplicationPeers.disablePeer(String peerId)
   Stop the replication to the specified remote slave cluster.

 void
 ReplicationPeersZKImpl.disablePeer(String id)

 void
 ReplicationPeers.enablePeer(String peerId)
   Restart the replication to the specified remote slave cluster.

 void
 ReplicationPeersZKImpl.enablePeer(String id)

-long
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
    index 6fecbc9..2accda0 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DisableTableFuture.html
    @@ -34,4140 +34,4141 @@
     026import 
    java.nio.charset.StandardCharsets;
     027import java.util.ArrayList;
     028import java.util.Arrays;
    -029import java.util.Collection;
    -030import java.util.EnumSet;
    -031import java.util.HashMap;
    -032import java.util.Iterator;
    -033import java.util.LinkedList;
    -034import java.util.List;
    -035import java.util.Map;
    -036import java.util.Set;
    -037import java.util.concurrent.Callable;
    -038import 
    java.util.concurrent.ExecutionException;
    -039import java.util.concurrent.Future;
    -040import java.util.concurrent.TimeUnit;
    -041import 
    java.util.concurrent.TimeoutException;
    -042import 
    java.util.concurrent.atomic.AtomicInteger;
    -043import 
    java.util.concurrent.atomic.AtomicReference;
    -044import java.util.regex.Pattern;
    -045import java.util.stream.Collectors;
    -046import java.util.stream.Stream;
    -047import 
    org.apache.hadoop.conf.Configuration;
    -048import 
    org.apache.hadoop.hbase.Abortable;
    -049import 
    org.apache.hadoop.hbase.CacheEvictionStats;
    -050import 
    org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
    -051import 
    org.apache.hadoop.hbase.ClusterMetrics.Option;
    -052import 
    org.apache.hadoop.hbase.ClusterStatus;
    -053import 
    org.apache.hadoop.hbase.DoNotRetryIOException;
    -054import 
    org.apache.hadoop.hbase.HBaseConfiguration;
    -055import 
    org.apache.hadoop.hbase.HConstants;
    -056import 
    org.apache.hadoop.hbase.HRegionInfo;
    -057import 
    org.apache.hadoop.hbase.HRegionLocation;
    -058import 
    org.apache.hadoop.hbase.HTableDescriptor;
    -059import 
    org.apache.hadoop.hbase.MasterNotRunningException;
    -060import 
    org.apache.hadoop.hbase.MetaTableAccessor;
    -061import 
    org.apache.hadoop.hbase.NamespaceDescriptor;
    -062import 
    org.apache.hadoop.hbase.NamespaceNotFoundException;
    -063import 
    org.apache.hadoop.hbase.NotServingRegionException;
    -064import 
    org.apache.hadoop.hbase.RegionLoad;
    -065import 
    org.apache.hadoop.hbase.RegionLocations;
    -066import 
    org.apache.hadoop.hbase.ServerName;
    -067import 
    org.apache.hadoop.hbase.TableExistsException;
    -068import 
    org.apache.hadoop.hbase.TableName;
    -069import 
    org.apache.hadoop.hbase.TableNotDisabledException;
    -070import 
    org.apache.hadoop.hbase.TableNotFoundException;
    -071import 
    org.apache.hadoop.hbase.UnknownRegionException;
    -072import 
    org.apache.hadoop.hbase.ZooKeeperConnectionException;
    -073import 
    org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
    -074import 
    org.apache.hadoop.hbase.client.replication.TableCFs;
    -075import 
    org.apache.hadoop.hbase.client.security.SecurityCapability;
    -076import 
    org.apache.hadoop.hbase.exceptions.TimeoutIOException;
    -077import 
    org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
    -078import 
    org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
    -079import 
    org.apache.hadoop.hbase.ipc.HBaseRpcController;
    -080import 
    org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    -081import 
    org.apache.hadoop.hbase.quotas.QuotaFilter;
    -082import 
    org.apache.hadoop.hbase.quotas.QuotaRetriever;
    -083import 
    org.apache.hadoop.hbase.quotas.QuotaSettings;
    -084import 
    org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
    -085import 
    org.apache.hadoop.hbase.replication.ReplicationException;
    -086import 
    org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    -087import 
    org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
    -088import 
    org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
    -089import 
    org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
    -090import 
    org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
    -091import 
    org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
    -092import 
    org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
    -093import 
    org.apache.hadoop.hbase.util.Addressing;
    -094import 
    org.apache.hadoop.hbase.util.Bytes;
    -095import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -096import 
    org.apache.hadoop.hbase.util.ForeignExceptionUtil;
    -097import 
    org.apache.hadoop.hbase.util.Pair;
    -098import 
    org.apache.hadoop.ipc.RemoteException;
    -099import 
    org.apache.hadoop.util.StringUtils;
    -100import 
    org.apache.yetus.audience.InterfaceAudience;
    -101import 
    org.apache.yetus.audience.InterfaceStability;
    -102import org.slf4j.Logger;
    -103import org.slf4j.LoggerFactory;
    -104
    -105import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    -106import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/apidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.html
    --
    diff --git 
    a/apidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.html
     
    b/apidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.html
    index 5446e3c..ba7a281 100644
    --- 
    a/apidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.html
    +++ 
    b/apidocs/src-html/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.html
    @@ -31,469 +31,370 @@
     023import java.util.ArrayList;
     024import java.util.Collection;
     025import java.util.HashMap;
    -026import java.util.HashSet;
    -027import java.util.List;
    -028import java.util.Map;
    -029import java.util.Set;
    -030import java.util.TreeMap;
    -031import java.util.regex.Pattern;
    -032
    -033import 
    org.apache.hadoop.conf.Configuration;
    -034import 
    org.apache.hadoop.hbase.HConstants;
    -035import 
    org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
    -036import 
    org.apache.hadoop.hbase.TableName;
    -037import 
    org.apache.yetus.audience.InterfaceAudience;
    -038import org.slf4j.Logger;
    -039import org.slf4j.LoggerFactory;
    -040import 
    org.apache.hadoop.hbase.client.Admin;
    -041import 
    org.apache.hadoop.hbase.client.Connection;
    -042import 
    org.apache.hadoop.hbase.client.ConnectionFactory;
    -043import 
    org.apache.hadoop.hbase.replication.ReplicationException;
    -044import 
    org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    -045import 
    org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
    -046import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    -047import 
    org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
    -048
-049/**
-050 * <p>
-051 * This class provides the administrative interface to HBase cluster
-052 * replication.
-053 * </p>
-054 * <p>
-055 * Adding a new peer results in creating new outbound connections from every
-056 * region server to a subset of region servers on the slave cluster. Each
-057 * new stream of replication will start replicating from the beginning of the
-058 * current WAL, meaning that edits from the past will be replicated.
-059 * </p>
-060 * <p>
-061 * Removing a peer is a destructive and irreversible operation that stops
-062 * all the replication streams for the given cluster and deletes the metadata
-063 * used to keep track of the replication state.
-064 * </p>
-065 * <p>
-066 * To see which commands are available in the shell, type
-067 * <code>replication</code>.
-068 * </p>
-069 *
-070 * @deprecated use {@link org.apache.hadoop.hbase.client.Admin} instead.
-071 */
-072@InterfaceAudience.Public
-073@Deprecated
-074public class ReplicationAdmin implements Closeable {
    -075  private static final Logger LOG = 
    LoggerFactory.getLogger(ReplicationAdmin.class);
    +026import java.util.List;
    +027import java.util.Map;
    +028import java.util.TreeMap;
    +029import java.util.regex.Pattern;
    +030
    +031import 
    org.apache.hadoop.conf.Configuration;
    +032import 
    org.apache.hadoop.hbase.HConstants;
    +033import 
    org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
    +034import 
    org.apache.hadoop.hbase.TableName;
    +035import 
    org.apache.yetus.audience.InterfaceAudience;
    +036import org.slf4j.Logger;
    +037import org.slf4j.LoggerFactory;
    +038import 
    org.apache.hadoop.hbase.client.Admin;
    +039import 
    org.apache.hadoop.hbase.client.Connection;
    +040import 
    org.apache.hadoop.hbase.client.ConnectionFactory;
    +041import 
    org.apache.hadoop.hbase.replication.ReplicationException;
    +042import 
    org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    +043import 
    org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
    +044import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    +045
+046/**
+047 * <p>
+048 * This class provides the administrative interface to HBase cluster
+049 * replication.
+050 * </p>
+051 * <p>
+052 * Adding a new peer results in creating new outbound connections from every
+053 * region server to a subset of region servers on the slave cluster. Each
+054 * new stream of replication will start replicating from the beginning of the
+055 * current WAL, meaning that edits from the past will be replicated.
+056 * </p>
+057 * <p>
+058 * Removing a peer is a destructive and irreversible operation that stops
+059 * all the replication streams for the given cluster and deletes the metadata
+060 * used to keep track of the replication state.
+061 * </p>
+062 * <p>
+063 * To see which commands are available in the shell, type
+064 * <code>replication</code>.
+065 * </p>
+066 *
+067 * @deprecated use {@link org.apache.hadoop.hbase.client.Admin} instead.
+068 */
+069@InterfaceAudience.Public
+070@Deprecated
+071public class ReplicationAdmin implements Closeable {
    +072  private static final Logger LOG = 
    LoggerFactory.getLogger(ReplicationAdmin.class);
    +073
    +074  public static final String TNAME = 
    "tableName";
    +075  public 
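Since the class above is deprecated in favor of Admin, a hedged sketch of the replacement path, assuming Admin#addReplicationPeer/#removeReplicationPeer and a mutable ReplicationPeerConfig of this era; the peer id and cluster key are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

// Hedged sketch of the Admin-based replacement for the deprecated ReplicationAdmin.
static void addAndRemovePeer() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  try (Connection conn = ConnectionFactory.createConnection(conf);
       Admin admin = conn.getAdmin()) {
    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
    peerConfig.setClusterKey("zk1.example.com:2181:/hbase"); // hypothetical slave cluster
    admin.addReplicationPeer("1", peerConfig); // replicates from the current WALs onward
    // ... later: destructive and irreversible, as the javadoc above warns
    admin.removeReplicationPeer("1");
  }
}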

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/apidocs/src-html/org/apache/hadoop/hbase/util/MD5Hash.html
    --
    diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/MD5Hash.html 
    b/apidocs/src-html/org/apache/hadoop/hbase/util/MD5Hash.html
    index 999b0f2..3970aa2 100644
    --- a/apidocs/src-html/org/apache/hadoop/hbase/util/MD5Hash.html
    +++ b/apidocs/src-html/org/apache/hadoop/hbase/util/MD5Hash.html
    @@ -31,9 +31,9 @@
     023import 
    java.security.NoSuchAlgorithmException;
     024
     025import 
    org.apache.commons.codec.binary.Hex;
    -026import org.apache.commons.logging.Log;
    -027import 
    org.apache.commons.logging.LogFactory;
    -028import 
    org.apache.yetus.audience.InterfaceAudience;
    +026import 
    org.apache.yetus.audience.InterfaceAudience;
    +027import org.slf4j.Logger;
    +028import org.slf4j.LoggerFactory;
     029
     030/**
     031 * Utility class for MD5
    @@ -41,7 +41,7 @@
     033 */
     034@InterfaceAudience.Public
     035public class MD5Hash {
    -036  private static final Log LOG = 
    LogFactory.getLog(MD5Hash.class);
    +036  private static final Logger LOG = 
    LoggerFactory.getLogger(MD5Hash.class);
     037
     038  /**
039   * Given a byte array, returns its MD5 hash as a hex string.
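A one-line usage sketch of the utility above; getMD5AsHex is assumed to accept a byte[] key as its javadoc describes:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.MD5Hash;

// Usage sketch for the MD5Hash utility shown in this hunk.
String hex = MD5Hash.getMD5AsHex(Bytes.toBytes("row-key"));
System.out.println(hex); // 32 lowercase hex characters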
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
    --
    diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html 
    b/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
    index d426656..cc11564 100644
    --- a/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
    +++ b/apidocs/src-html/org/apache/hadoop/hbase/util/RegionMover.html
    @@ -51,27 +51,27 @@
     043import 
    java.util.concurrent.TimeoutException;
     044
     045import 
    org.apache.commons.cli.CommandLine;
    -046import org.apache.commons.logging.Log;
    -047import 
    org.apache.commons.logging.LogFactory;
    -048import 
    org.apache.hadoop.conf.Configuration;
    -049import 
    org.apache.hadoop.hbase.ClusterStatus.Option;
    -050import 
    org.apache.hadoop.hbase.HBaseConfiguration;
    -051import 
    org.apache.hadoop.hbase.HConstants;
    -052import 
    org.apache.hadoop.hbase.ServerName;
    -053import 
    org.apache.hadoop.hbase.TableName;
    -054import 
    org.apache.hadoop.hbase.client.Admin;
    -055import 
    org.apache.hadoop.hbase.client.Connection;
    -056import 
    org.apache.hadoop.hbase.client.ConnectionFactory;
    -057import 
    org.apache.hadoop.hbase.client.Get;
    -058import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -059import 
    org.apache.hadoop.hbase.client.Result;
    -060import 
    org.apache.hadoop.hbase.client.ResultScanner;
    -061import 
    org.apache.hadoop.hbase.client.Scan;
    -062import 
    org.apache.hadoop.hbase.client.Table;
    -063import 
    org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
    -064import 
    org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
    -065import 
    org.apache.hadoop.hbase.zookeeper.ZKWatcher;
    -066import 
    org.apache.yetus.audience.InterfaceAudience;
    +046import 
    org.apache.hadoop.conf.Configuration;
    +047import 
    org.apache.hadoop.hbase.ClusterStatus.Option;
    +048import 
    org.apache.hadoop.hbase.HBaseConfiguration;
    +049import 
    org.apache.hadoop.hbase.HConstants;
    +050import 
    org.apache.hadoop.hbase.ServerName;
    +051import 
    org.apache.hadoop.hbase.TableName;
    +052import 
    org.apache.hadoop.hbase.client.Admin;
    +053import 
    org.apache.hadoop.hbase.client.Connection;
    +054import 
    org.apache.hadoop.hbase.client.ConnectionFactory;
    +055import 
    org.apache.hadoop.hbase.client.Get;
    +056import 
    org.apache.hadoop.hbase.client.RegionInfo;
    +057import 
    org.apache.hadoop.hbase.client.Result;
    +058import 
    org.apache.hadoop.hbase.client.ResultScanner;
    +059import 
    org.apache.hadoop.hbase.client.Scan;
    +060import 
    org.apache.hadoop.hbase.client.Table;
    +061import 
    org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
    +062import 
    org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
    +063import 
    org.apache.hadoop.hbase.zookeeper.ZKWatcher;
    +064import 
    org.apache.yetus.audience.InterfaceAudience;
    +065import org.slf4j.Logger;
    +066import org.slf4j.LoggerFactory;
     067
     068/**
069 * Tool for loading/unloading regions to/from a given regionserver. This tool can be run from Command
    @@ -90,7 +90,7 @@
     082  public static final int 
    DEFAULT_MOVE_RETRIES_MAX = 5;
     083  public static final int 
    DEFAULT_MOVE_WAIT_MAX = 60;
     084  public static final int 
    DEFAULT_SERVERSTART_WAIT_MAX = 180;
    -085  static final Log LOG = 
    LogFactory.getLog(RegionMover.class);
    +085  static final Logger LOG = 
    LoggerFactory.getLogger(RegionMover.class);
     086  private RegionMoverBuilder rmbuilder;
     087  private boolean ack = true;
     088  private int maxthreads = 1;
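A hedged sketch of the builder-style API implied by the rmbuilder, ack and maxthreads fields above; the hostname is a placeholder and the exact builder signatures are assumptions:

import org.apache.hadoop.hbase.util.RegionMover;

// Hedged sketch: drain a regionserver, then restore its regions.
static void drainAndRestore() throws Exception {
  RegionMover mover = new RegionMover.RegionMoverBuilder("rs1.example.com:16020")
      .ack(true)       // confirm each region actually moved
      .maxthreads(8)   // parallel move threads
      .build();
  mover.unload();      // drain regions off the server (e.g. before maintenance)
  mover.load();        // move them back afterwards
}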
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/apidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html
    --
    diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/VersionInfo.html 
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html
    --
    diff --git 
    a/apidocs/src-html/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html
     
    b/apidocs/src-html/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html
    index 8f5ce0e..7d6b874 100644
    --- 
    a/apidocs/src-html/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html
    +++ 
    b/apidocs/src-html/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.html
    @@ -126,94 +126,96 @@
     118  /**
     119   * @return The filter serialized using 
    pb
     120   */
    -121  public byte [] toByteArray() {
    -122
    FilterProtos.MultipleColumnPrefixFilter.Builder builder =
    -123  
    FilterProtos.MultipleColumnPrefixFilter.newBuilder();
    -124for (byte [] element : 
    sortedPrefixes) {
    -125  if (element != null) 
    builder.addSortedPrefixes(UnsafeByteOperations.unsafeWrap(element));
    -126}
    -127return 
    builder.build().toByteArray();
    -128  }
    -129
    -130  /**
    -131   * @param pbBytes A pb serialized 
    {@link MultipleColumnPrefixFilter} instance
    -132   * @return An instance of {@link 
    MultipleColumnPrefixFilter} made from codebytes/code
    -133   * @throws DeserializationException
    -134   * @see #toByteArray
    -135   */
    -136  public static 
    MultipleColumnPrefixFilter parseFrom(final byte [] pbBytes)
    -137  throws DeserializationException {
    -138
    FilterProtos.MultipleColumnPrefixFilter proto;
    -139try {
    -140  proto = 
    FilterProtos.MultipleColumnPrefixFilter.parseFrom(pbBytes);
    -141} catch 
    (InvalidProtocolBufferException e) {
    -142  throw new 
    DeserializationException(e);
    -143}
    -144int numPrefixes = 
    proto.getSortedPrefixesCount();
    -145byte [][] prefixes = new 
    byte[numPrefixes][];
-146    for (int i = 0; i < numPrefixes; ++i) {
    -147  prefixes[i] = 
    proto.getSortedPrefixes(i).toByteArray();
    -148}
    -149
    -150return new 
    MultipleColumnPrefixFilter(prefixes);
    -151  }
    -152
    -153  /**
    -154   * @param o the other filter to compare 
    with
    -155   * @return true if and only if the 
    fields of the filter that are serialized
    -156   * are equal to the corresponding 
    fields in other.  Used for testing.
    -157   */
    -158  boolean areSerializedFieldsEqual(Filter 
    o) {
    -159if (o == this) return true;
    -160if (!(o instanceof 
    MultipleColumnPrefixFilter)) return false;
    -161
    -162MultipleColumnPrefixFilter other = 
    (MultipleColumnPrefixFilter)o;
    -163return 
    this.sortedPrefixes.equals(other.sortedPrefixes);
    -164  }
    -165
    -166  @Override
    -167  public Cell getNextCellHint(Cell cell) 
    {
    -168return 
    PrivateCellUtil.createFirstOnRowCol(cell, hint, 0, hint.length);
    -169  }
    -170
-171  public TreeSet<byte[]> createTreeSet() {
-172    return new TreeSet<>(new Comparator<Object>() {
    -173@Override
    -174  public int compare (Object o1, 
    Object o2) {
    -175  if (o1 == null || o2 == null)
    -176throw new 
    IllegalArgumentException ("prefixes can't be null");
    -177
    -178  byte [] b1 = (byte []) o1;
    -179  byte [] b2 = (byte []) o2;
    -180  return Bytes.compareTo (b1, 0, 
    b1.length, b2, 0, b2.length);
    -181}
    -182  });
    -183  }
    -184
    -185  @Override
    -186  public String toString() {
    -187return toString(MAX_LOG_PREFIXES);
    -188  }
    -189
    -190  protected String toString(int 
    maxPrefixes) {
    -191StringBuilder prefixes = new 
    StringBuilder();
    -192
    -193int count = 0;
    -194for (byte[] ba : this.sortedPrefixes) 
    {
-195      if (count >= maxPrefixes) {
    -196break;
    -197  }
    -198  ++count;
    -199  
    prefixes.append(Bytes.toStringBinary(ba));
-200      if (count < this.sortedPrefixes.size() && count < maxPrefixes) {
    -201prefixes.append(", ");
    -202  }
    -203}
    -204
    -205return String.format("%s (%d/%d): 
    [%s]", this.getClass().getSimpleName(),
    -206count, 
    this.sortedPrefixes.size(), prefixes.toString());
    -207  }
    -208}
    +121  @Override
    +122  public byte [] toByteArray() {
    +123
    FilterProtos.MultipleColumnPrefixFilter.Builder builder =
    +124  
    FilterProtos.MultipleColumnPrefixFilter.newBuilder();
    +125for (byte [] element : 
    sortedPrefixes) {
    +126  if (element != null) 
    builder.addSortedPrefixes(UnsafeByteOperations.unsafeWrap(element));
    +127}
    +128return 
    builder.build().toByteArray();
    +129  }
    +130
    +131  /**
    +132   * @param pbBytes A pb serialized 
    {@link MultipleColumnPrefixFilter} instance
    +133   * @return An instance of {@link 
    MultipleColumnPrefixFilter} made from codebytes/code
    +134   * @throws DeserializationException
    +135   * @see #toByteArray
    +136   */
    +137  public static 
    MultipleColumnPrefixFilter parseFrom(final byte [] pbBytes)
    +138  throws DeserializationException {
    +139
    FilterProtos.MultipleColumnPrefixFilter proto;
    +140try {
    +141  proto = 
    FilterProtos.MultipleColumnPrefixFilter.parseFrom(pbBytes);
    +142} catch 
    (InvalidProtocolBufferException e) {
    +143  
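A short usage sketch for the filter whose serialization code is shown above; the qualifier prefixes are placeholders:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

// Usage sketch: keep only columns whose qualifiers start with "alpha" or "beta".
byte[][] prefixes = { Bytes.toBytes("alpha"), Bytes.toBytes("beta") };
Scan scan = new Scan();
scan.setFilter(new MultipleColumnPrefixFilter(prefixes));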

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
    index f1a2443..a469e93 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.QueryType.html
    @@ -1350,415 +1350,415 @@
     1342return delete;
     1343  }
     1344
    -1345  public static Put 
    makeBarrierPut(byte[] encodedRegionName, long seq, byte[] tableName) {
    -1346byte[] seqBytes = 
    Bytes.toBytes(seq);
    -1347return new Put(encodedRegionName)
    -1348
    .addImmutable(HConstants.REPLICATION_BARRIER_FAMILY, seqBytes, seqBytes)
    -1349
    .addImmutable(HConstants.REPLICATION_META_FAMILY, tableNameCq, tableName);
    -1350  }
    -1351
    -1352
    -1353  public static Put 
    makeDaughterPut(byte[] encodedRegionName, byte[] value) {
    -1354return new 
    Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
    -1355daughterNameCq, value);
    -1356  }
    -1357
    -1358  public static Put makeParentPut(byte[] 
    encodedRegionName, byte[] value) {
    -1359return new 
    Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
    -1360parentNameCq, value);
    -1361  }
    -1362
    -1363  /**
    -1364   * Adds split daughters to the Put
    -1365   */
    -1366  public static Put 
    addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB) {
    -1367if (splitA != null) {
    -1368  put.addImmutable(
    -1369HConstants.CATALOG_FAMILY, 
    HConstants.SPLITA_QUALIFIER, RegionInfo.toByteArray(splitA));
    -1370}
    -1371if (splitB != null) {
    -1372  put.addImmutable(
    -1373HConstants.CATALOG_FAMILY, 
    HConstants.SPLITB_QUALIFIER, RegionInfo.toByteArray(splitB));
    -1374}
    -1375return put;
    -1376  }
    -1377
    -1378  /**
-1379   * Put the passed <code>puts</code> to the <code>hbase:meta</code> table.
    -1380   * Non-atomic for multi puts.
    -1381   * @param connection connection we're 
    using
    -1382   * @param puts Put to add to 
    hbase:meta
    -1383   * @throws IOException
    -1384   */
    -1385  public static void 
    putToMetaTable(final Connection connection, final Put... puts)
    -1386throws IOException {
    -1387put(getMetaHTable(connection), 
    Arrays.asList(puts));
    -1388  }
    -1389
    -1390  /**
    -1391   * @param t Table to use (will be 
    closed when done).
    -1392   * @param puts puts to make
    -1393   * @throws IOException
    -1394   */
-1395  private static void put(final Table t, final List<Put> puts) throws IOException {
    -1396try {
    -1397  if (METALOG.isDebugEnabled()) {
    -1398
    METALOG.debug(mutationsToString(puts));
    -1399  }
    -1400  t.put(puts);
    -1401} finally {
    -1402  t.close();
    -1403}
    -1404  }
    -1405
    -1406  /**
    -1407   * Put the passed 
    codeps/code to the codehbase:meta/code table.
    -1408   * @param connection connection we're 
    using
    -1409   * @param ps Put to add to 
    hbase:meta
    -1410   * @throws IOException
    -1411   */
-1412  public static void putsToMetaTable(final Connection connection, final List<Put> ps)
    -1413throws IOException {
    -1414Table t = 
    getMetaHTable(connection);
    -1415try {
    -1416  if (METALOG.isDebugEnabled()) {
    -1417
    METALOG.debug(mutationsToString(ps));
    -1418  }
    -1419  t.put(ps);
    -1420} finally {
    -1421  t.close();
    -1422}
    -1423  }
    -1424
    -1425  /**
-1426   * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
    -1427   * @param connection connection we're 
    using
    -1428   * @param d Delete to add to 
    hbase:meta
    -1429   * @throws IOException
    -1430   */
    -1431  static void deleteFromMetaTable(final 
    Connection connection, final Delete d)
    -1432throws IOException {
-1433    List<Delete> dels = new ArrayList<>(1);
    -1434dels.add(d);
    -1435deleteFromMetaTable(connection, 
    dels);
    -1436  }
    -1437
    -1438  /**
-1439   * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
    -1440   * @param connection connection we're 
    using
    -1441   * @param deletes Deletes to add to 
    hbase:meta  This list should support #remove.
    -1442   * @throws IOException
    -1443   */
-1444  public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
    -1445throws IOException {
    -1446Table t = 
    getMetaHTable(connection);
    -1447try {
    -1448  if (METALOG.isDebugEnabled()) {
    -1449
    METALOG.debug(mutationsToString(deletes));
    -1450  }
    -1451  t.delete(deletes);
    -1452} finally {
    -1453  t.close();
    -1454}
    -1455  }
    -1456
    -1457  /**
    -1458   * Deletes some replica columns 
    corresponding to replicas for the passed rows
    -1459   * @param metaRows rows in 
    hbase:meta
    -1460   * @param replicaIndexToDeleteFrom the 
    replica ID we would start deleting from
    -1461   * @param numReplicasToRemove how many 
    replicas to 

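The MetaTableAccessor helpers in the fragment above all share one shape: fetch the hbase:meta Table, apply the mutations, and close the table in a finally block. A minimal sketch of that pattern, assuming an arbitrary TableName (class and method names here are illustrative; getMetaHTable() pins the real helpers to hbase:meta):

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class MetaPutSketch {
      // Mirrors putToMetaTable(): non-atomic across rows, and the helper
      // owns the Table, so it must close it when done.
      static void putAll(Connection connection, TableName table, Put... puts)
          throws IOException {
        Table t = connection.getTable(table); // stands in for getMetaHTable(connection)
        try {
          t.put(Arrays.asList(puts));
        } finally {
          t.close();
        }
      }
    }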
    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
    index 7c59e27..c904c56 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
    @@ -119,4048 +119,4054 @@
     111import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
     112import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
     113import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
    -114import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
    -115import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
    -116import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
    -117import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
    -118import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
    -119import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
    -120import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
    -121import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
    -122import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    -123import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
    -124import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
    -125import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
    -126import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
    -127import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
    -128import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
    -129import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
    -130import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
    -131import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
    -132import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
    -133import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
    -134import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
    -135import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
    -136import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
    -137import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
    -138import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
    -139import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
    -140import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
    -141import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
    -142import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
    -143import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
    -144import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
    -145import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
    -146import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
    -147import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
    -148import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
    -149import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
    -150import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
    -151import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
    -152import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
    -153import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
    -154import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
    -155import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
    -156import 
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/client/Table.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/Table.html 
    b/devapidocs/org/apache/hadoop/hbase/client/Table.html
    index 67eb868..4550eb1 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/Table.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/Table.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
-var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":38,"i7":6,"i8":38,"i9":6,"i10":6,"i11":38,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":50,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":38,"i28":6,"i29":38,"i30":6,"i31":38,"i32":6,"i33":6,"i34":6,"i35":6,"i36":38,"i37":38,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":38,"i46":38,"i47":38,"i48":38};
+var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":38,"i6":38,"i7":38,"i8":6,"i9":38,"i10":38,"i11":38,"i12":38,"i13":38,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":50,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":38,"i29":6,"i30":38,"i31":6,"i32":38,"i33":6,"i34":6,"i35":6,"i36":6,"i37":38,"i38":38,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":6,"i45":6,"i46":38,"i47":38,"i48":38,"i49":38};
     var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],4:["t3","Abstract Methods"],16:["t5","Default 
    Methods"],32:["t6","Deprecated Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
     
     
 Prev Class
-Next Class
+Next Class
     
     
     Frames
    @@ -74,7 +74,7 @@ var activeTableTab = "activeTableTab";
     
     
     Summary:
    -Nested|
    +Nested|
     Field|
     Constr|
     Method
@@ -131,6 +131,27 @@ extends java.io.Closeable
 
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type    Interface and Description
+
+static interface    Table.CheckAndMutateBuilder
+A helper class for sending checkAndMutate request.
+
     
     
     
@@ -196,8 +217,9 @@ extends java.io.Closeable
   byte[] qualifier,
   byte[] value,
   Delete delete)
-Atomically checks if a row/family/qualifier value matches the expected
- value.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use checkAndMutate(byte[], byte[])
    +
     
     
     
@@ -209,8 +231,7 @@ extends java.io.Closeable
   byte[] value,
   Delete delete)
 Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use
-  checkAndDelete(byte[], byte[], byte[], byte[], Delete)
+Since 2.0.0. Will be removed in 3.0.0. Use checkAndMutate(byte[], byte[])
     
     
     
@@ -222,11 +243,19 @@ extends java.io.Closeable
   CompareOperator op,
   byte[] value,
   Delete delete)
-Atomically checks if a row/family/qualifier value matches the expected
- value.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use checkAndMutate(byte[], byte[])
    +
     
     
     
    +Table.CheckAndMutateBuilder
+checkAndMutate(byte[] row,
+  byte[] family)
+Atomically checks if a row/family/qualifier value matches the expected value.
    +
    +
    +
     boolean
 checkAndMutate(byte[] row,
   byte[] family,
@@ -235,12 +264,11 @@ extends java.io.Closeable
   byte[] value,
   RowMutations mutation)
 Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use
- checkAndMutate(byte[], byte[], byte[], CompareOperator, byte[], RowMutations)
+Since 2.0.0. Will be removed in 3.0.0. Use checkAndMutate(byte[], byte[])
     
     
     
 boolean
 checkAndMutate(byte[] row,
   byte[] family,
@@ -248,21 +276,24 @@ extends java.io.Closeable
   CompareOperator op,
   byte[] value,
   RowMutations mutation)
-Atomically checks if a row/family/qualifier value matches the expected value.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use checkAndMutate(byte[], byte[])
    +
     
     
 boolean
 checkAndPut(byte[] row,
    byte[] family,
    byte[] qualifier,
    byte[] value,
    Put put)
-Atomically checks if a row/family/qualifier value matches the expected
- value.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use checkAndMutate(byte[], byte[])
    +
     
     
 boolean
 checkAndPut(byte[] row,
    byte[] family,
@@ -271,12 +302,11 @@ extends java.io.Closeable
    byte[] value,
    Put put)
 Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use
-  checkAndPut(byte[], byte[], byte[], CompareOperator, byte[], Put)}
+Since 2.0.0. Will be removed in 3.0.0. Use checkAndMutate(byte[], byte[])
     
     
     
 boolean
 checkAndPut(byte[] row,
       

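The Table diff above collapses the many checkAndPut/checkAndDelete/checkAndMutate overloads into one builder entry point, checkAndMutate(byte[] row, byte[] family), returning the new Table.CheckAndMutateBuilder. A migration sketch (table name, row, family, qualifier, and values are placeholders; the builder methods qualifier/ifEquals/thenPut follow the 2.0 client API):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndMutateMigration {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        byte[] row = Bytes.toBytes("row1");
        byte[] cf  = Bytes.toBytes("cf");
        byte[] q   = Bytes.toBytes("q");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("demo"))) {
          Put put = new Put(row).addColumn(cf, q, Bytes.toBytes("new-value"));
          // Deprecated since 2.0.0:
          //   table.checkAndPut(row, cf, q, Bytes.toBytes("old-value"), put);
          // Builder form this diff introduces:
          boolean applied = table.checkAndMutate(row, cf)
              .qualifier(q)                         // column to test
              .ifEquals(Bytes.toBytes("old-value")) // condition on the current value
              .thenPut(put);                        // applied only if the check passes
          System.out.println("applied=" + applied);
        }
      }
    }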
    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteCell.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteCell.html 
    b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteCell.html
    index d6c34f5..02962be 100644
    --- a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteCell.html
    +++ b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.TagRewriteCell.html
    @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
     
     
     All Implemented Interfaces:
-Cloneable, Cell, ExtendedCell, HeapSize, RawCell, SettableSequenceId, SettableTimestamp
+Cloneable, Cell, ExtendedCell, HeapSize, RawCell
     
     
     Direct Known Subclasses:
    @@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -static class PrivateCellUtil.TagRewriteCell
    +static class PrivateCellUtil.TagRewriteCell
 extends Object
     implements ExtendedCell
     This can be used when a Cell has to change with 
    addition/removal of one or more tags. This is
    @@ -313,8 +313,7 @@ implements 
     
     void
-setTimestamp(byte[] ts,
-    int tsOffset)
+setTimestamp(byte[] ts)
     Sets with the given timestamp.
     
     
    @@ -380,7 +379,7 @@ implements 
     
     cell
-protected Cell cell
+protected Cell cell
     
     
     
    @@ -389,7 +388,7 @@ implements 
     
     tags
-protected byte[] tags
+protected byte[] tags
     
     
     
    @@ -398,7 +397,7 @@ implements 
     
     HEAP_SIZE_OVERHEAD
-private static final long HEAP_SIZE_OVERHEAD
+private static final long HEAP_SIZE_OVERHEAD
     
     
     
    @@ -415,7 +414,7 @@ implements 
     
     TagRewriteCell
-public TagRewriteCell(Cell cell,
+public TagRewriteCell(Cell cell,
   byte[] tags)
     
     Parameters:
    @@ -438,7 +437,7 @@ implements 
     
     getRowArray
-public byte[] getRowArray()
+public byte[] getRowArray()
 Description copied from interface: Cell
     Contiguous raw bytes that may start at any index in the 
    containing array. Max length is
      Short.MAX_VALUE which is 32,767 bytes.
    @@ -456,7 +455,7 @@ implements 
     
     getRowOffset
-public int getRowOffset()
+public int getRowOffset()
     
     Specified by:
     getRowOffsetin
     interfaceCell
    @@ -471,7 +470,7 @@ implements 
     
     getRowLength
-public short getRowLength()
+public short getRowLength()
     
     Specified by:
     getRowLengthin
     interfaceCell
    @@ -486,7 +485,7 @@ implements 
     
     getFamilyArray
-public byte[] getFamilyArray()
+public byte[] getFamilyArray()
 Description copied from interface: Cell
     Contiguous bytes composed of legal HDFS filename characters 
    which may start at any index in the
      containing array. Max length is Byte.MAX_VALUE, which is 127 bytes.
    @@ -504,7 +503,7 @@ implements 
     
     getFamilyOffset
-public int getFamilyOffset()
+public int getFamilyOffset()
     
     Specified by:
     getFamilyOffsetin
     interfaceCell
    @@ -519,7 +518,7 @@ implements 
     
     getFamilyLength
-public byte getFamilyLength()
+public byte getFamilyLength()
     
     Specified by:
     getFamilyLengthin
     interfaceCell
    @@ -534,7 +533,7 @@ implements 
     
     getQualifierArray
-public byte[] getQualifierArray()
+public byte[] getQualifierArray()
 Description copied from interface: Cell
     Contiguous raw bytes that may start at any index in the 
    containing array.
     
    @@ -551,7 +550,7 @@ implements 
     
     getQualifierOffset
-public int getQualifierOffset()
+public int getQualifierOffset()
     
     Specified by:
     getQualifierOffsetin
     interfaceCell
    @@ -566,7 +565,7 @@ implements 
     
     getQualifierLength
-public int getQualifierLength()
+public int getQualifierLength()
     
     Specified by:
     getQualifierLengthin
     interfaceCell
    @@ -581,7 +580,7 @@ implements 
     
     getTimestamp
-public long getTimestamp()
+public long getTimestamp()
     
     Specified by:
     getTimestampin
     interfaceCell
    @@ -597,7 +596,7 @@ implements 
     
     getTypeByte
-public byte getTypeByte()
+public byte getTypeByte()
     
     Specified by:
     getTypeBytein
     interfaceCell
    @@ -612,7 +611,7 @@ implements 
     
     getSequenceId
-public long getSequenceId()
+public long getSequenceId()
 Description copied from interface: Cell
     A region-specific unique monotonically increasing sequence 
    ID given to each Cell. It always
      exists for cells in the memstore but is not retained forever. It will be kept 
    for
    @@ -632,7 +631,7 @@ implements 
     
     getValueArray
-public byte[] getValueArray()
+public byte[] getValueArray()
 Description copied from interface: Cell
     Contiguous raw bytes that may start at any index in the 
    containing array. Max length is
      Integer.MAX_VALUE which is 2,147,483,647 bytes.
    @@ -650,7 +649,7 @@ implements 
     
     getValueOffset
-public int getValueOffset()
+public int getValueOffset()
     
     Specified by:
     getValueOffsetin
     interfaceCell
    @@ -665,7 +664,7 @@ implements 
     
     

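TagRewriteCell, whose accessors are listed above, is a decorator: every read (row, family, qualifier, timestamp, value, sequence id) is forwarded to the wrapped Cell, and only the tag bytes are replaced. The pattern in miniature, using a cut-down stand-in interface rather than the real, much larger Cell:

    // MiniCell is a hypothetical subset of org.apache.hadoop.hbase.Cell.
    interface MiniCell {
      byte[] getRowArray();
      long getTimestamp();
      byte[] getTagsArray();
    }

    final class TagRewriteMiniCell implements MiniCell {
      private final MiniCell cell; // the wrapped, unchanged cell
      private final byte[] tags;   // the only piece that is rewritten

      TagRewriteMiniCell(MiniCell cell, byte[] tags) {
        this.cell = cell;
        this.tags = tags;
      }

      @Override public byte[] getRowArray()  { return cell.getRowArray(); }  // delegate
      @Override public long getTimestamp()   { return cell.getTimestamp(); } // delegate
      @Override public byte[] getTagsArray() { return tags; }                // replaced
    }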
    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
    index 62bc799..5c004ce 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.WriterLength.html
    @@ -250,7 +250,7 @@
     242Cell kv = cell;
     243// null input == user explicitly 
    wants to flush
 244    if (row == null && kv == null) {
    -245  rollWriters();
    +245  rollWriters(null);
     246  return;
     247}
     248
    @@ -284,636 +284,642 @@
     276  configureStoragePolicy(conf, 
    fs, tableAndFamily, writerPath);
     277}
     278
-279    // If any of the HFiles for the column families has reached
-280    // maxsize, we need to roll all the writers
-281    if (wl != null && wl.written + length >= maxsize) {
    -282  this.rollRequested = true;
    -283}
    -284
-285    // This can only happen once a row is finished though
-286    if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
    -287  rollWriters();
    -288}
    -289
    -290// create a new WAL writer, if 
    necessary
    -291if (wl == null || wl.writer == 
    null) {
    -292  if 
    (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
    -293HRegionLocation loc = null;
    -294
    -295String tableName = 
    Bytes.toString(tableNameBytes);
    -296if (tableName != null) {
    -297  try (Connection connection 
    = ConnectionFactory.createConnection(conf);
    -298 RegionLocator 
    locator =
    -299   
    connection.getRegionLocator(TableName.valueOf(tableName))) {
    -300loc = 
    locator.getRegionLocation(rowKey);
    -301  } catch (Throwable e) {
    -302LOG.warn("There's 
    something wrong when locating rowkey: " +
    -303  Bytes.toString(rowKey) 
    + " for tablename: " + tableName, e);
    -304loc = null;
    -305  } }
    -306
    -307if (null == loc) {
    -308  if (LOG.isTraceEnabled()) 
    {
    -309LOG.trace("failed to get 
    region location, so use default writer for rowkey: " +
    -310  
    Bytes.toString(rowKey));
    -311  }
    -312  wl = 
    getNewWriter(tableNameBytes, family, conf, null);
    -313} else {
    -314  if (LOG.isDebugEnabled()) 
    {
    -315LOG.debug("first rowkey: 
    [" + Bytes.toString(rowKey) + "]");
    -316  }
    -317  InetSocketAddress 
    initialIsa =
    -318  new 
    InetSocketAddress(loc.getHostname(), loc.getPort());
    -319  if 
    (initialIsa.isUnresolved()) {
    -320if (LOG.isTraceEnabled()) 
    {
    -321  LOG.trace("failed to 
    resolve bind address: " + loc.getHostname() + ":"
    -322  + loc.getPort() + 
    ", so use default writer");
    -323}
    -324wl = 
    getNewWriter(tableNameBytes, family, conf, null);
    -325  } else {
    -326if (LOG.isDebugEnabled()) 
    {
    -327  LOG.debug("use favored 
    nodes writer: " + initialIsa.getHostString());
    -328}
    -329wl = 
    getNewWriter(tableNameBytes, family, conf, new InetSocketAddress[] { 
    initialIsa
    -330});
    -331  }
    -332}
    -333  } else {
    -334wl = 
    getNewWriter(tableNameBytes, family, conf, null);
    -335  }
    -336}
    -337
    -338// we now have the proper WAL 
    writer. full steam ahead
    -339// TODO : Currently in 
    SettableTimeStamp but this will also move to ExtendedCell
    -340
    PrivateCellUtil.updateLatestStamp(cell, this.now);
    -341wl.writer.append(kv);
    -342wl.written += length;
    -343
    -344// Copy the row so we know when a 
    row transition.
    -345this.previousRow = rowKey;
    -346  }
    -347
    -348  private void rollWriters() throws 
    IOException {
    -349for (WriterLength wl : 
    this.writers.values()) {
    -350  if (wl.writer != null) {
    -351LOG.info(
    -352"Writer=" + 
    wl.writer.getPath() + ((wl.written == 0)? "": ", wrote=" + wl.written));
    -353close(wl.writer);
    -354  }
    -355  wl.writer = null;
    -356  wl.written = 0;
    -357}
    -358this.rollRequested = false;
    -359  }
    -360
    -361  /*
    -362   * Create a new StoreFile.Writer.
    -363   * @param family
    -364   * @return A WriterLength, 
    containing a new StoreFile.Writer.
    -365   * @throws IOException
    -366   */
    -367  
    

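The removed HFileOutputFormat2 lines above encode a two-step rolling rule: flag a roll as soon as any writer crosses maxsize, but only perform it when the row key changes, so a single row never straddles two HFiles. A self-contained restatement of that logic (field and method names are illustrative):

    import org.apache.hadoop.hbase.util.Bytes;

    final class RollOnRowBoundary {
      private final long maxSize;       // roll threshold
      private long written;             // bytes written so far
      private boolean rollRequested;
      private byte[] previousRow = new byte[0];

      RollOnRowBoundary(long maxSize) { this.maxSize = maxSize; }

      void onCell(byte[] rowKey, long length) {
        // Step 1: request a roll once the size threshold is crossed...
        if (written + length >= maxSize) {
          rollRequested = true;
        }
        // Step 2: ...but only roll on a row boundary.
        if (rollRequested && Bytes.compareTo(previousRow, rowKey) != 0) {
          rollWriters();
          rollRequested = false;
        }
        written += length;
        previousRow = rowKey.clone();
      }

      private void rollWriters() { written = 0; /* close and reopen writers here */ }
    }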
    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
    index 3edfbef..9707b2c 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
    @@ -2459,5936 +2459,5935 @@
     2451  }
     2452
     2453  for (HStore s : storesToFlush) {
    -2454MemStoreSize flushableSize = 
    s.getFlushableSize();
    -2455
    totalSizeOfFlushableStores.incMemStoreSize(flushableSize);
    -2456
    storeFlushCtxs.put(s.getColumnFamilyDescriptor().getName(),
    -2457  
    s.createFlushContext(flushOpSeqId, tracker));
    -2458// for writing stores to WAL
    -2459
    committedFiles.put(s.getColumnFamilyDescriptor().getName(), null);
    -2460
    storeFlushableSize.put(s.getColumnFamilyDescriptor().getName(), 
    flushableSize);
    -2461  }
    -2462
    -2463  // write the snapshot start to 
    WAL
-2464      if (wal != null && !writestate.readOnly) {
    -2465FlushDescriptor desc = 
    ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
    -2466getRegionInfo(), 
    flushOpSeqId, committedFiles);
    -2467// No sync. Sync is below where 
    no updates lock and we do FlushAction.COMMIT_FLUSH
    -2468WALUtil.writeFlushMarker(wal, 
    this.getReplicationScope(), getRegionInfo(), desc, false,
    -2469mvcc);
    -2470  }
    -2471
    -2472  // Prepare flush (take a 
    snapshot)
    -2473  for (StoreFlushContext flush : 
    storeFlushCtxs.values()) {
    -2474flush.prepare();
    -2475  }
    -2476} catch (IOException ex) {
    -2477  doAbortFlushToWAL(wal, 
    flushOpSeqId, committedFiles);
    -2478  throw ex;
    -2479} finally {
    -2480  
    this.updatesLock.writeLock().unlock();
    -2481}
    -2482String s = "Finished memstore 
    snapshotting " + this + ", syncing WAL and waiting on mvcc, " +
    -2483"flushsize=" + 
    totalSizeOfFlushableStores;
    -2484status.setStatus(s);
    -2485doSyncOfUnflushedWALChanges(wal, 
    getRegionInfo());
    -2486return new 
    PrepareFlushResult(storeFlushCtxs, committedFiles, storeFlushableSize, 
    startTime,
    -2487flushOpSeqId, flushedSeqId, 
    totalSizeOfFlushableStores);
    -2488  }
    -2489
    -2490  /**
    -2491   * Utility method broken out of 
    internalPrepareFlushCache so that method is smaller.
    -2492   */
-2493  private void logFatLineOnFlush(Collection<HStore> storesToFlush, long sequenceId) {
    -2494if (!LOG.isInfoEnabled()) {
    -2495  return;
    -2496}
    -2497// Log a fat line detailing what is 
    being flushed.
    -2498StringBuilder perCfExtras = null;
    -2499if (!isAllFamilies(storesToFlush)) 
    {
    -2500  perCfExtras = new 
    StringBuilder();
    -2501  for (HStore store: storesToFlush) 
    {
    -2502perCfExtras.append("; 
    ").append(store.getColumnFamilyName());
    -2503perCfExtras.append("=")
    -2504
    .append(StringUtils.byteDesc(store.getFlushableSize().getDataSize()));
    -2505  }
    -2506}
    -2507LOG.info("Flushing " + + 
    storesToFlush.size() + "/" + stores.size() +
    -2508" column families, memstore=" + 
    StringUtils.byteDesc(this.memstoreDataSize.get()) +
-2509        ((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") +
    -2510((wal != null) ? "" : "; WAL is 
    null, using passed sequenceid=" + sequenceId));
    -2511  }
    -2512
    -2513  private void doAbortFlushToWAL(final 
    WAL wal, final long flushOpSeqId,
-2514      final Map<byte[], List<Path>> committedFiles) {
    -2515if (wal == null) return;
    -2516try {
    -2517  FlushDescriptor desc = 
    ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
    -2518  getRegionInfo(), flushOpSeqId, 
    committedFiles);
    -2519  WALUtil.writeFlushMarker(wal, 
    this.getReplicationScope(), getRegionInfo(), desc, false,
    -2520  mvcc);
    -2521} catch (Throwable t) {
    -2522  LOG.warn("Received unexpected 
    exception trying to write ABORT_FLUSH marker to WAL:" +
    -2523  
    StringUtils.stringifyException(t));
    -2524  // ignore this since we will be 
    aborting the RS with DSE.
    -2525}
    -2526// we have called 
    wal.startCacheFlush(), now we have to abort it
    -2527
    wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
    -2528  }
    -2529
    -2530  /**
    -2531   * Sync unflushed WAL changes. See 
    HBASE-8208 for details
    -2532   */
    -2533  private static void 
    doSyncOfUnflushedWALChanges(final WAL wal, final RegionInfo hri)
    -2534  throws IOException {
    -2535if (wal == null) {
    -2536  return;
    -2537}
    -2538try {
    -2539  wal.sync(); // ensure that flush 
    marker is sync'ed
    -2540} catch (IOException ioe) {
    -2541  
    wal.abortCacheFlush(hri.getEncodedNameAsBytes());
    -2542 

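The flush-prepare fragment above pairs two failure idioms: a catch block that writes an ABORT_FLUSH marker and rethrows, and a finally block that always releases the updates lock. The same skeleton reduced to its shape (doPrepare/doAbort are stand-ins for the WAL-marker and snapshot work):

    import java.io.IOException;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    final class PrepareWithAbort {
      private final ReentrantReadWriteLock updatesLock = new ReentrantReadWriteLock();

      void prepare() throws IOException {
        updatesLock.writeLock().lock();
        try {
          doPrepare();  // e.g. write START_FLUSH marker, snapshot each store
        } catch (IOException ex) {
          doAbort();    // e.g. write ABORT_FLUSH marker, undo bookkeeping
          throw ex;     // the caller still sees the original failure
        } finally {
          updatesLock.writeLock().unlock(); // never leave the method holding it
        }
      }

      private void doPrepare() throws IOException { /* placeholder */ }
      private void doAbort() { /* placeholder */ }
    }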
    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
    index 1bddf29..f667b93 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionUtils.html
    @@ -124,380 +124,381 @@
     116  
    HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
     117// Go big. Multiply by 10. If we 
    can't get to meta after this many retries
     118// then something seriously wrong.
    -119int serversideMultiplier = 
    c.getInt("hbase.client.serverside.retries.multiplier", 10);
    -120int retries = hcRetries * 
    serversideMultiplier;
    -121
    c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
    -122log.info(sn + " server-side 
    Connection retries=" + retries);
    -123  }
    -124
    -125  /**
    -126   * A ClusterConnection that will 
    short-circuit RPC making direct invocations against the
    -127   * localhost if the invocation target 
    is 'this' server; save on network and protobuf
    -128   * invocations.
    -129   */
    -130  // TODO This has to still do PB 
    marshalling/unmarshalling stuff. Check how/whether we can avoid.
    -131  @VisibleForTesting // Class is visible 
    so can assert we are short-circuiting when expected.
    -132  public static class 
    ShortCircuitingClusterConnection extends ConnectionImplementation {
    -133private final ServerName 
    serverName;
    -134private final 
    AdminService.BlockingInterface localHostAdmin;
    -135private final 
    ClientService.BlockingInterface localHostClient;
    -136
    -137private 
    ShortCircuitingClusterConnection(Configuration conf, ExecutorService pool, User 
    user,
    -138ServerName serverName, 
    AdminService.BlockingInterface admin,
    -139ClientService.BlockingInterface 
    client)
    -140throws IOException {
    -141  super(conf, pool, user);
    -142  this.serverName = serverName;
    -143  this.localHostAdmin = admin;
    -144  this.localHostClient = client;
    -145}
    -146
    -147@Override
    -148public AdminService.BlockingInterface 
    getAdmin(ServerName sn) throws IOException {
    -149  return serverName.equals(sn) ? 
    this.localHostAdmin : super.getAdmin(sn);
    -150}
    -151
    -152@Override
    -153public 
    ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
    -154  return serverName.equals(sn) ? 
    this.localHostClient : super.getClient(sn);
    -155}
    -156
    -157@Override
    -158public MasterKeepAliveConnection 
    getKeepAliveMasterService() throws MasterNotRunningException {
    -159  if (this.localHostClient instanceof 
    MasterService.BlockingInterface) {
    -160return new 
    ShortCircuitMasterConnection((MasterService.BlockingInterface)this.localHostClient);
    -161  }
    -162  return 
    super.getKeepAliveMasterService();
    -163}
    -164  }
    -165
    -166  /**
    -167   * Creates a short-circuit connection 
    that can bypass the RPC layer (serialization,
    -168   * deserialization, networking, etc..) 
    when talking to a local server.
    -169   * @param conf the current 
    configuration
    -170   * @param pool the thread pool to use 
    for batch operations
    -171   * @param user the user the connection 
    is for
    -172   * @param serverName the local server 
    name
    -173   * @param admin the admin interface of 
    the local server
    -174   * @param client the client interface 
    of the local server
    -175   * @return an short-circuit 
    connection.
    -176   * @throws IOException if IO failure 
    occurred
    -177   */
    -178  public static ClusterConnection 
    createShortCircuitConnection(final Configuration conf,
    -179  ExecutorService pool, User user, 
    final ServerName serverName,
    -180  final 
    AdminService.BlockingInterface admin, final ClientService.BlockingInterface 
    client)
    -181  throws IOException {
    -182if (user == null) {
    -183  user = 
    UserProvider.instantiate(conf).getCurrent();
    -184}
    -185return new 
    ShortCircuitingClusterConnection(conf, pool, user, serverName, admin, 
    client);
    -186  }
    -187
    -188  /**
    -189   * Setup the connection class, so that 
    it will not depend on master being online. Used for testing
    -190   * @param conf configuration to set
    -191   */
    -192  @VisibleForTesting
    -193  public static void 
    setupMasterlessConnection(Configuration conf) {
    -194
    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, 
    MasterlessConnection.class.getName());
    -195  }
    -196
    -197  /**
    -198   * Some tests shut down the master. But 
    table availability is a master RPC which is performed on
    -199   * region re-lookups.
    -200   */
    -201  static class MasterlessConnection 
    extends ConnectionImplementation {
    -202MasterlessConnection(Configuration 
    conf, ExecutorService pool, User user) throws IOException {
    -203  super(conf, pool, user);
    -204}
    -205
    -206@Override
    -207public 

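ShortCircuitingClusterConnection above reduces to a one-line dispatch in getAdmin()/getClient(): if the requested ServerName is this server, hand back the in-process stub; otherwise fall back to the normal RPC lookup. The same shape as a generic helper (names and types simplified):

    import java.util.function.Function;

    final class ShortCircuit<T> {
      private final String localName;                 // this server's name
      private final T localStub;                      // in-process interface
      private final Function<String, T> remoteLookup; // the RPC path

      ShortCircuit(String localName, T localStub, Function<String, T> remoteLookup) {
        this.localName = localName;
        this.localStub = localStub;
        this.remoteLookup = remoteLookup;
      }

      T get(String serverName) {
        // Same equality test as getAdmin()/getClient() above.
        return localName.equals(serverName) ? localStub : remoteLookup.apply(serverName);
      }
    }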
    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
    index 8ba8dc9..f973938 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.RegionRequest.html
    @@ -37,36 +37,36 @@
     029import java.io.IOException;
     030import java.util.ArrayList;
     031import java.util.Collections;
    -032import java.util.IdentityHashMap;
    -033import java.util.List;
    -034import java.util.Map;
    -035import java.util.Optional;
    -036import 
    java.util.concurrent.CompletableFuture;
    -037import 
    java.util.concurrent.ConcurrentHashMap;
    -038import 
    java.util.concurrent.ConcurrentLinkedQueue;
    -039import 
    java.util.concurrent.ConcurrentMap;
    -040import 
    java.util.concurrent.ConcurrentSkipListMap;
    -041import java.util.concurrent.TimeUnit;
    -042import java.util.function.Supplier;
    -043import java.util.stream.Collectors;
    -044import java.util.stream.Stream;
    -045
    -046import org.apache.commons.logging.Log;
    -047import 
    org.apache.commons.logging.LogFactory;
    -048import 
    org.apache.hadoop.hbase.CellScannable;
    -049import 
    org.apache.hadoop.hbase.DoNotRetryIOException;
    -050import 
    org.apache.hadoop.hbase.HRegionLocation;
    -051import 
    org.apache.hadoop.hbase.ServerName;
    -052import 
    org.apache.hadoop.hbase.TableName;
    -053import 
    org.apache.yetus.audience.InterfaceAudience;
    -054import 
    org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
    -055import 
    org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
    -056import 
    org.apache.hadoop.hbase.ipc.HBaseRpcController;
    -057import 
    org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    -058import 
    org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
    -059import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    -060import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
    -061import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
    +032import java.util.HashMap;
    +033import java.util.IdentityHashMap;
    +034import java.util.List;
    +035import java.util.Map;
    +036import java.util.Optional;
    +037import 
    java.util.concurrent.CompletableFuture;
    +038import 
    java.util.concurrent.ConcurrentHashMap;
    +039import 
    java.util.concurrent.ConcurrentLinkedQueue;
    +040import 
    java.util.concurrent.ConcurrentMap;
    +041import 
    java.util.concurrent.ConcurrentSkipListMap;
    +042import java.util.concurrent.TimeUnit;
    +043import java.util.function.Supplier;
    +044import java.util.stream.Collectors;
    +045import java.util.stream.Stream;
    +046
    +047import org.apache.commons.logging.Log;
    +048import 
    org.apache.commons.logging.LogFactory;
    +049import 
    org.apache.hadoop.hbase.CellScannable;
    +050import 
    org.apache.hadoop.hbase.DoNotRetryIOException;
    +051import 
    org.apache.hadoop.hbase.HRegionLocation;
    +052import 
    org.apache.hadoop.hbase.ServerName;
    +053import 
    org.apache.hadoop.hbase.TableName;
    +054import 
    org.apache.yetus.audience.InterfaceAudience;
    +055import 
    org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
    +056import 
    org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
    +057import 
    org.apache.hadoop.hbase.ipc.HBaseRpcController;
    +058import 
    org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    +059import 
    org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
    +060import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    +061import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
     062import 
    org.apache.hadoop.hbase.util.Bytes;
     063import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
     064
    @@ -240,212 +240,208 @@
     232  }
     233
 234  private ClientProtos.MultiRequest buildReq(Map<byte[], RegionRequest> actionsByRegion,
-235      List<CellScannable> cells) throws IOException {
+235      List<CellScannable> cells, Map<Integer, Integer> rowMutationsIndexMap) throws IOException {
     236ClientProtos.MultiRequest.Builder 
    multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
     237ClientProtos.RegionAction.Builder 
    regionActionBuilder = ClientProtos.RegionAction.newBuilder();
     238ClientProtos.Action.Builder 
    actionBuilder = ClientProtos.Action.newBuilder();
     239ClientProtos.MutationProto.Builder 
    mutationBuilder = ClientProtos.MutationProto.newBuilder();
 240    for (Map.Entry<byte[], RegionRequest> entry : actionsByRegion.entrySet()) {
    -241  // TODO: remove the extra for loop 
    as we will iterate it in mutationBuilder.
    -242  if 
    (!multiRequestBuilder.hasNonceGroup()) {
    -243for (Action action : 
    

    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
    index 84e9e52..252bcc2 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
    @@ -56,290 +56,293 @@
     048import 
    org.apache.yetus.audience.InterfaceAudience;
     049import 
    org.apache.zookeeper.KeeperException;
     050
    -051import 
    org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
    -052
    -053/**
    -054 * Store Region State to hbase:meta 
    table.
    -055 */
    -056@InterfaceAudience.Private
    -057public class RegionStateStore {
    -058  private static final Log LOG = 
    LogFactory.getLog(RegionStateStore.class);
    -059
-060  /** The delimiter for meta columns for replicaIds > 0 */
    -061  protected static final char 
    META_REPLICA_ID_DELIMITER = '_';
    -062
    -063  private final MasterServices master;
    -064
    -065  private MultiHConnection 
    multiHConnection;
    -066
    -067  public RegionStateStore(final 
    MasterServices master) {
    -068this.master = master;
    -069  }
    -070
    -071  public void start() throws IOException 
    {
    -072  }
    -073
    -074  public void stop() {
    -075if (multiHConnection != null) {
    -076  multiHConnection.close();
    -077  multiHConnection = null;
    -078}
    -079  }
    -080
    -081  public interface RegionStateVisitor {
    -082void visitRegionState(RegionInfo 
    regionInfo, State state,
    -083  ServerName regionLocation, 
    ServerName lastHost, long openSeqNum);
    -084  }
    -085
    -086  public void visitMeta(final 
    RegionStateVisitor visitor) throws IOException {
    -087
    MetaTableAccessor.fullScanRegions(master.getConnection(), new 
    MetaTableAccessor.Visitor() {
    -088  final boolean isDebugEnabled = 
    LOG.isDebugEnabled();
    -089
    -090  @Override
    -091  public boolean visit(final Result 
    r) throws IOException {
-092        if (r != null && !r.isEmpty()) {
    -093  long st = 0;
    -094  if (LOG.isTraceEnabled()) {
    -095st = 
    System.currentTimeMillis();
    -096  }
    -097  visitMetaEntry(visitor, r);
    -098  if (LOG.isTraceEnabled()) {
    -099long et = 
    System.currentTimeMillis();
    -100LOG.trace("[T] LOAD META PERF 
    " + StringUtils.humanTimeDiff(et - st));
    -101  }
    -102} else if (isDebugEnabled) {
    -103  LOG.debug("NULL result from 
    meta - ignoring but this is strange.");
    -104}
    -105return true;
    -106  }
    -107});
    -108  }
    -109
    -110  private void visitMetaEntry(final 
    RegionStateVisitor visitor, final Result result)
    -111  throws IOException {
    -112final RegionLocations rl = 
    MetaTableAccessor.getRegionLocations(result);
    -113if (rl == null) return;
    -114
    -115final HRegionLocation[] locations = 
    rl.getRegionLocations();
    -116if (locations == null) return;
    -117
-118    for (int i = 0; i < locations.length; ++i) {
    -119  final HRegionLocation hrl = 
    locations[i];
    -120  if (hrl == null) continue;
    -121
    -122  final RegionInfo regionInfo = 
    hrl.getRegionInfo();
    -123  if (regionInfo == null) continue;
    -124
    -125  final int replicaId = 
    regionInfo.getReplicaId();
    -126  final State state = 
    getRegionState(result, replicaId);
    -127
    -128  final ServerName lastHost = 
    hrl.getServerName();
    -129  final ServerName regionLocation = 
    getRegionServer(result, replicaId);
    -130  final long openSeqNum = -1;
    -131
    -132  // TODO: move under trace, now is 
    visible for debugging
    -133  LOG.info(String.format("Load 
    hbase:meta entry region=%s regionState=%s lastHost=%s regionLocation=%s",
    -134regionInfo, state, lastHost, 
    regionLocation));
    -135
    -136  
    visitor.visitRegionState(regionInfo, state, regionLocation, lastHost, 
    openSeqNum);
    -137}
    -138  }
    -139
    -140  public void updateRegionLocation(final 
    RegionInfo regionInfo, final State state,
    -141  final ServerName regionLocation, 
    final ServerName lastHost, final long openSeqNum,
    -142  final long pid)
    -143  throws IOException {
    -144if (regionInfo.isMetaRegion()) {
    -145  updateMetaLocation(regionInfo, 
    regionLocation);
    -146} else {
    -147  
    updateUserRegionLocation(regionInfo, state, regionLocation, lastHost, 
    openSeqNum, pid);
    -148}
    -149  }
    -150
    -151  public void updateRegionState(final 
    long openSeqNum, final long pid,
    -152  final RegionState newState, final 
    RegionState oldState) throws IOException {
    -153
    updateRegionLocation(newState.getRegion(), newState.getState(), 
    newState.getServerName(),
    -154oldState != null ? 
    oldState.getServerName() : null, openSeqNum, pid);
    -155  }
    -156
    -157  protected void updateMetaLocation(final 
    RegionInfo regionInfo, final 

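visitMeta() above runs a full scan of hbase:meta and hands every parsed region row to the RegionStateVisitor callback declared in the same diff. A usage sketch, assuming a RegionStateStore instance named store and that State is RegionState.State (the println stands in for real handling):

    store.visitMeta(new RegionStateStore.RegionStateVisitor() {
      @Override
      public void visitRegionState(RegionInfo regionInfo, RegionState.State state,
          ServerName regionLocation, ServerName lastHost, long openSeqNum) {
        // Called once per region entry found in hbase:meta.
        System.out.println(regionInfo.getEncodedName() + " state=" + state +
            " on " + regionLocation + " (last=" + lastHost + ")");
      }
    });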
    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
    index d812b18..620d01b 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
    @@ -699,19 +699,19 @@
     
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
     
    -org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
    -org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
     org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
    -org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
    -org.apache.hadoop.hbase.regionserver.BloomType
    -org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
     org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
    -org.apache.hadoop.hbase.regionserver.Region.Operation
    -org.apache.hadoop.hbase.regionserver.ScanType
    +org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
     org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
    +org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
    +org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
    +org.apache.hadoop.hbase.regionserver.ScanType
    +org.apache.hadoop.hbase.regionserver.Region.Operation
    +org.apache.hadoop.hbase.regionserver.BloomType
     org.apache.hadoop.hbase.regionserver.FlushType
    -org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
    +org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
     org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
    +org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html
     
    b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html
    index 73013c5..2965410 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.MatchCode.html
    @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-public static enum ScanQueryMatcher.MatchCode
+public static enum ScanQueryMatcher.MatchCode
 extends Enum<ScanQueryMatcher.MatchCode>
 ScanQueryMatcher.match(org.apache.hadoop.hbase.Cell) return codes. These instruct the
 scanner moving through memstores and StoreFiles what to do with the current KeyValue.
    @@ -259,7 +259,7 @@ the order they are declared.
     
     
     INCLUDE
-public static final ScanQueryMatcher.MatchCode INCLUDE
+public static final ScanQueryMatcher.MatchCode INCLUDE
     Include KeyValue in the returned result
     
     
    @@ -269,7 +269,7 @@ the order they are declared.
     
     
     SKIP
-public static final ScanQueryMatcher.MatchCode SKIP
+public static final ScanQueryMatcher.MatchCode SKIP
     Do not include KeyValue in the returned result
     
     
    @@ -279,7 +279,7 @@ the order they are declared.
     
     
     NEXT
-public static final ScanQueryMatcher.MatchCode NEXT
+public static final ScanQueryMatcher.MatchCode NEXT
     Do not include, jump to next StoreFile or memstore (in time 
    order)
     
     
    @@ -289,7 +289,7 @@ the order they are declared.
     
     
     DONE
-public static final ScanQueryMatcher.MatchCode DONE
+public static final ScanQueryMatcher.MatchCode DONE
     Do not include, return current result
     
     
    @@ -299,7 +299,7 @@ the order they are declared.
     
     
     SEEK_NEXT_ROW
-public static final ScanQueryMatcher.MatchCode SEEK_NEXT_ROW
+public static final ScanQueryMatcher.MatchCode SEEK_NEXT_ROW
     Done with the row, seek there.
     
     
    @@ -309,7 +309,7 @@ the order they are declared.
     
     
     SEEK_NEXT_COL
-public static final ScanQueryMatcher.MatchCode SEEK_NEXT_COL
+public static final ScanQueryMatcher.MatchCode SEEK_NEXT_COL
     Done with column, seek to next.
     
     
    @@ -319,7 +319,7 @@ the order they are declared.
     
     
     DONE_SCAN
-public static final ScanQueryMatcher.MatchCode DONE_SCAN
    +public static 

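The MatchCode constants documented above are instructions to the scan loop: include or skip the current KeyValue, seek past a column or row, or stop entirely. A schematic dispatch over a local copy of a subset of the constants (the real scanner logic is far more involved):

    enum MatchCode { INCLUDE, SKIP, NEXT, DONE, SEEK_NEXT_ROW, SEEK_NEXT_COL, DONE_SCAN }

    static boolean keepScanning(MatchCode code) {
      switch (code) {
        case INCLUDE:       return true;  // add the current KeyValue to the results
        case SKIP:          return true;  // drop it and keep moving
        case NEXT:          return true;  // jump to the next StoreFile or memstore
        case SEEK_NEXT_COL: return true;  // done with this column, seek onward
        case SEEK_NEXT_ROW: return true;  // done with this row, seek onward
        case DONE:          return false; // return the current result
        case DONE_SCAN:     return false; // the whole scan is finished
        default:            return false;
      }
    }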
    [20/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9118853f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.BoundaryStripeFlushRequest.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.BoundaryStripeFlushRequest.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.BoundaryStripeFlushRequest.html
    index c1a448b..39ff026 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.BoundaryStripeFlushRequest.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.BoundaryStripeFlushRequest.html
    @@ -72,119 +72,116 @@
     064
     065long smallestReadPoint = 
    store.getSmallestReadPoint();
     066InternalScanner scanner = 
    createScanner(snapshot.getScanners(), smallestReadPoint, tracker);
    -067if (scanner == null) {
    -068  return result; // NULL scanner 
    returned from coprocessor hooks means skip normal processing
    -069}
    -070
    -071// Let policy select flush method.
    -072StripeFlushRequest req = 
    this.policy.selectFlush(store.getComparator(), this.stripes,
    -073  cellsCount);
    -074
    -075boolean success = false;
    -076StripeMultiFileWriter mw = null;
    -077try {
    -078  mw = req.createWriter(); // Writer 
    according to the policy.
    -079  StripeMultiFileWriter.WriterFactory 
    factory = createWriterFactory(cellsCount);
    -080  StoreScanner storeScanner = 
    (scanner instanceof StoreScanner) ? (StoreScanner)scanner : null;
    -081  mw.init(storeScanner, factory);
    -082
    -083  synchronized (flushLock) {
    -084performFlush(scanner, mw, 
    smallestReadPoint, throughputController);
    -085result = 
    mw.commitWriters(cacheFlushSeqNum, false);
    -086success = true;
    -087  }
    -088} finally {
-089      if (!success && (mw != null)) {
    -090for (Path leftoverFile : 
    mw.abortWriters()) {
    -091  try {
    -092
    store.getFileSystem().delete(leftoverFile, false);
    -093  } catch (Exception e) {
    -094LOG.error("Failed to delete a 
    file after failed flush: " + e);
    -095  }
    -096}
    -097  }
    -098  try {
    -099scanner.close();
    -100  } catch (IOException ex) {
    -101LOG.warn("Failed to close flush 
    scanner, ignoring", ex);
    -102  }
    -103}
    -104return result;
    -105  }
    -106
    -107  private 
    StripeMultiFileWriter.WriterFactory createWriterFactory(final long kvCount) {
    -108return new 
    StripeMultiFileWriter.WriterFactory() {
    -109  @Override
    -110  public StoreFileWriter 
    createWriter() throws IOException {
    -111StoreFileWriter writer = 
    store.createWriterInTmp(
    -112kvCount, 
    store.getColumnFamilyDescriptor().getCompressionType(),
    -113/* isCompaction = */ false,
    -114/* includeMVCCReadpoint = */ 
    true,
    -115/* includesTags = */ true,
    -116/* shouldDropBehind = */ 
    false);
    -117return writer;
    -118  }
    -119};
    -120  }
    +067
    +068// Let policy select flush method.
    +069StripeFlushRequest req = 
    this.policy.selectFlush(store.getComparator(), this.stripes,
    +070  cellsCount);
    +071
    +072boolean success = false;
    +073StripeMultiFileWriter mw = null;
    +074try {
    +075  mw = req.createWriter(); // Writer 
    according to the policy.
    +076  StripeMultiFileWriter.WriterFactory 
    factory = createWriterFactory(cellsCount);
    +077  StoreScanner storeScanner = 
    (scanner instanceof StoreScanner) ? (StoreScanner)scanner : null;
    +078  mw.init(storeScanner, factory);
    +079
    +080  synchronized (flushLock) {
    +081performFlush(scanner, mw, 
    smallestReadPoint, throughputController);
    +082result = 
    mw.commitWriters(cacheFlushSeqNum, false);
    +083success = true;
    +084  }
    +085} finally {
+086      if (!success && (mw != null)) {
    +087for (Path leftoverFile : 
    mw.abortWriters()) {
    +088  try {
    +089
    store.getFileSystem().delete(leftoverFile, false);
    +090  } catch (Exception e) {
    +091LOG.error("Failed to delete a 
    file after failed flush: " + e);
    +092  }
    +093}
    +094  }
    +095  try {
    +096scanner.close();
    +097  } catch (IOException ex) {
    +098LOG.warn("Failed to close flush 
    scanner, ignoring", ex);
    +099  }
    +100}
    +101return result;
    +102  }
    +103
    +104  private 
    StripeMultiFileWriter.WriterFactory createWriterFactory(final long kvCount) {
    +105return new 
    StripeMultiFileWriter.WriterFactory() {
    +106  @Override
    +107  public StoreFileWriter 
    createWriter() throws IOException {
    +108StoreFileWriter writer = 
    store.createWriterInTmp(
    +109kvCount, 
    store.getColumnFamilyDescriptor().getCompressionType(),
    +110/* isCompaction = */ false,
    +111/* includeMVCCReadpoint = */ 
    true,
    +112/* includesTags = */ true,
    +113

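The StripeStoreFlusher body above is a success-flag try/finally: commit the multi-writer inside the lock, and if the flag never flipped, abort the writers and delete whatever partial files they left behind. A reduced sketch of that cleanup contract (MultiWriter stands in for StripeMultiFileWriter):

    import java.util.List;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class AbortOnFailedFlush {
      interface MultiWriter {
        List<Path> abortWriters(); // returns paths of files to discard
      }

      private final FileSystem fs;
      AbortOnFailedFlush(FileSystem fs) { this.fs = fs; }

      void flush(MultiWriter mw, Runnable commit) {
        boolean success = false;
        try {
          commit.run();          // performFlush(...) + commitWriters(...) in the original
          success = true;
        } finally {
          if (!success && mw != null) {
            for (Path leftover : mw.abortWriters()) {
              try {
                fs.delete(leftover, false); // best-effort cleanup of partial files
              } catch (Exception e) {
                // mirrors LOG.error(...) in the original; nothing else to do
              }
            }
          }
        }
      }
    }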