[04/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftTable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftTable.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftTable.html
new file mode 100644
index 000..94888eb
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftTable.html
@@ -0,0 +1,564 @@
+Source code
+001/**
+002 *
+003 * Licensed to the Apache Software Foundation (ASF) under one
+004 * or more contributor license agreements.  See the NOTICE file
+005 * distributed with this work for additional information
+006 * regarding copyright ownership.  The ASF licenses this file
+007 * to you under the Apache License, Version 2.0 (the
+008 * "License"); you may not use this file except in compliance
+009 * with the License.  You may obtain a copy of the License at
+010 *
+011 *     http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or agreed to in writing, software
+014 * distributed under the License is distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+016 * See the License for the specific language governing permissions and
+017 * limitations under the License.
+018 */
+019package org.apache.hadoop.hbase.thrift2.client;
+020
+021import static org.apache.hadoop.hbase.thrift.Constants.HBASE_THRIFT_CLIENT_SCANNER_CACHING;
+022import static org.apache.hadoop.hbase.thrift.Constants.HBASE_THRIFT_CLIENT_SCANNER_CACHING_DEFAULT;
+023
+024import java.io.IOException;
+025import java.nio.ByteBuffer;
+026import java.util.ArrayDeque;
+027import java.util.ArrayList;
+028import java.util.Arrays;
+029import java.util.List;
+030import java.util.Queue;
+031import java.util.concurrent.TimeUnit;
+032
+033import org.apache.commons.lang3.NotImplementedException;
+034import org.apache.hadoop.conf.Configuration;
+035import org.apache.hadoop.hbase.CompareOperator;
+036import org.apache.hadoop.hbase.HConstants;
+037import org.apache.hadoop.hbase.TableName;
+038import org.apache.hadoop.hbase.client.Append;
+039import org.apache.hadoop.hbase.client.Delete;
+040import org.apache.hadoop.hbase.client.Get;
+041import org.apache.hadoop.hbase.client.Increment;
+042import org.apache.hadoop.hbase.client.Put;
+043import org.apache.hadoop.hbase.client.Result;
+044import org.apache.hadoop.hbase.client.ResultScanner;
+045import org.apache.hadoop.hbase.client.Row;
+046import org.apache.hadoop.hbase.client.RowMutations;
+047import org.apache.hadoop.hbase.client.Scan;
+048import org.apache.hadoop.hbase.client.Table;
+049import org.apache.hadoop.hbase.client.TableDescriptor;
+050import org.apache.hadoop.hbase.client.coprocessor.Batch;
+051import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+052import org.apache.hadoop.hbase.io.TimeRange;
+053import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+054import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
+055import org.apache.hadoop.hbase.thrift2.generated.TAppend;
+056import org.apache.hadoop.hbase.thrift2.generated.TDelete;
+057import org.apache.hadoop.hbase.thrift2.generated.TGet;
+058import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
+059import org.apache.hadoop.hbase.thrift2.generated.TIncrement;
+060import org.apache.hadoop.hbase.thrift2.generated.TPut;
+061import org.apache.hadoop.hbase.thrift2.generated.TResult;
+062import org.apache.hadoop.hbase.thrift2.generated.TRowMutations;
+063import org.apache.hadoop.hbase.thrift2.generated.TScan;
+064import org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor;
+065import org.apache.hadoop.hbase.util.Bytes;
+066import org.apache.thrift.TException;
+067import org.apache.thrift.transport.TTransport;
+068import org.apache.yetus.audience.InterfaceAudience;
+069
+070import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+071import org.apache.hbase.thirdparty.com.google.common.primitives.Booleans;
+072
+073@InterfaceAudience.Private
+074public class ThriftTable implements Table {
+075
+076  private TableName tableName;
+077  private Configuration conf;
+078  private TTransport tTransport;
+079  private THBaseService.Client client;
+080  private ByteBuffer tableNameInBytes;
+081  private int operationTimeout;
+082
+083  private final int scannerCaching;
+084
+085  public ThriftTable(TableName tableName, THBaseService.Client client, TTransport tTransport,
+086      Configuration conf) {
+087    this.tableName = tableName;
+088    this.tableNameInBytes = ByteBuffer.wrap(tableName.toBytes());
+089    this.conf = conf;
+090    this.tTransport = tTransport;
+091    this.client = client;
+092    this.scannerCaching = conf.getInt(HBASE_THRIFT_CLIENT_SCANNER_CACHING,
+093

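For context: the new ThriftTable is an HBase Table facade over the Thrift2 generated client, pulling its scanner caching from configuration. A minimal, hedged sketch of wiring one up against a running Thrift2 server follows; host, port, and table name are hypothetical placeholders, only the constructor visible in the diff plus standard Thrift and HBase client calls are assumed, and error handling is omitted:

    // Hedged sketch, not the project's own example code. Placeholders:
    // "thrift2.example.com", port 9090, table "t1".
    Configuration conf = HBaseConfiguration.create();
    TTransport transport = new TSocket("thrift2.example.com", 9090);
    transport.open();                                      // may throw TTransportException
    THBaseService.Client client =
        new THBaseService.Client(new TBinaryProtocol(transport));
    Table table = new ThriftTable(TableName.valueOf("t1"), client, transport, conf);
    Result result = table.get(new Get(Bytes.toBytes("row1"))); // ordinary Table API from here on
    transport.close();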
[04/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html b/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
index c69f626..7ac136f 100644
--- a/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
+++ b/devapidocs/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
@@ -347,7 +347,7 @@ implements MasterObserver
-postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postCloneSnapshot, postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTable, postGetTableDescriptors, postGetTableNames, postListDecommissionedRegionServers, postListNamespaceDescriptors, postListReplicationPeers, postListRSGroups, postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction, postModifyNamespace, postModifyNamespace, postModifyTable, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, postMoveTables, postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, postRollBackSplitRegionAction, postSetNamespaceQuota, postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, postSetUserQuota, postSetUserQuota, postSnapshot, postStartMaster, postTableFlush, postTransitReplicationPeerSyncReplicationState, postTruncateTable, postUnassign, postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer, preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, preClearDeadServers, preCreateTable, preCreateTableRegionsInfos, preDecommissionRegionServers, preDeleteNamespace, preDeleteSnapshot, preDeleteTable, preDeleteTableAction, preDisableReplicationPeer, preDisableTable, preDisableTableAction, preEnableReplicationPeer, preEnableTable, preEnableTableAction, preGetClusterMetrics, preGetLocks, preGetNamespaceDescriptor, preGetProcedures, preGetReplicationPeerConfig, preGetRSGroupInfo, preGetRSGroupInfoOfServer, preGetRSGroupInfoOfTable, preGetTableDescriptors, preGetTableNames, preListDecommissionedRegionServers, preListNamespaceDescriptors, preListReplicationPeers, preListRSGroups, preListSnapshot, preLockHeartbeat, preMasterInitialization, preMergeRegions, preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, preModifyTable, preModifyTable, preModifyTableAction, preModifyTableAction, preMove, preMoveServers, preMoveServersAndTables, preMoveTables, preRecommissionRegionServer, preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, preRequestLock, preRestoreSnapshot, preSetNamespaceQuota, preSetSplitOrMergeEnabled, preSetTableQuota, preSetUserQuota, preSetUserQuota, preSetUserQuota, preShutdown, preSnapshot, preSplitRegion, preSplitRegionAction, preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction, preStopMaster, preTableFlush, preTransitReplicationPeerSyncReplicationState, preTruncateTable, preTruncateTableAction, preUnassign, preUpdateReplicationPeerConfig
+postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postCloneSnapshot, postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTable, postGetTableDescriptors, postGetTableNames, postIsRpcThrottleEnabled, postListDecommissionedRegionServers, postListNamespaceDescriptors,
 

[04/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
index 736388b..197b99d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
@@ -26,3624 +26,3599 @@
 018package org.apache.hadoop.hbase.client;
 019
 020import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-021
-022import com.google.protobuf.Message;
-023import com.google.protobuf.RpcChannel;
-024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.Arrays;
-027import java.util.Collections;
-028import java.util.EnumSet;
-029import java.util.HashMap;
-030import java.util.List;
-031import java.util.Map;
-032import java.util.Optional;
-033import java.util.Set;
-034import java.util.concurrent.CompletableFuture;
-035import java.util.concurrent.ConcurrentHashMap;
-036import java.util.concurrent.TimeUnit;
-037import java.util.concurrent.atomic.AtomicReference;
-038import java.util.function.BiConsumer;
-039import java.util.function.Function;
-040import java.util.function.Supplier;
-041import java.util.regex.Pattern;
-042import java.util.stream.Collectors;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import org.apache.hadoop.conf.Configuration;
-046import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-047import org.apache.hadoop.hbase.CacheEvictionStats;
-048import org.apache.hadoop.hbase.CacheEvictionStatsAggregator;
-049import org.apache.hadoop.hbase.ClusterMetrics;
-050import org.apache.hadoop.hbase.ClusterMetrics.Option;
-051import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-052import org.apache.hadoop.hbase.HConstants;
-053import org.apache.hadoop.hbase.HRegionLocation;
-054import org.apache.hadoop.hbase.MetaTableAccessor;
-055import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-056import org.apache.hadoop.hbase.NamespaceDescriptor;
-057import org.apache.hadoop.hbase.RegionLocations;
-058import org.apache.hadoop.hbase.RegionMetrics;
-059import org.apache.hadoop.hbase.RegionMetricsBuilder;
-060import org.apache.hadoop.hbase.ServerName;
-061import org.apache.hadoop.hbase.TableExistsException;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.Scan.ReadType;
-071import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-072import org.apache.hadoop.hbase.client.replication.TableCFs;
-073import org.apache.hadoop.hbase.client.security.SecurityCapability;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.replication.SyncReplicationState;
-083import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-084import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-085import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-086import org.apache.hadoop.hbase.util.Bytes;
-087import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-088import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-089import org.apache.yetus.audience.InterfaceAudience;
-090import org.slf4j.Logger;
-091import org.slf4j.LoggerFactory;
-092
-093import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-094import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-095import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
-096import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-097import

[04/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/testdevapidocs/org/apache/hadoop/hbase/thrift2/TestThrift2ServerCmdLine.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/thrift2/TestThrift2ServerCmdLine.html b/testdevapidocs/org/apache/hadoop/hbase/thrift2/TestThrift2ServerCmdLine.html
new file mode 100644
index 000..54f2a06
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/thrift2/TestThrift2ServerCmdLine.html
@@ -0,0 +1,374 @@
+TestThrift2ServerCmdLine (Apache HBase 3.0.0-SNAPSHOT Test API)
+org.apache.hadoop.hbase.thrift2
+Class TestThrift2ServerCmdLine
+
+java.lang.Object
+  org.apache.hadoop.hbase.thrift.TestThriftServerCmdLine
+    org.apache.hadoop.hbase.thrift2.TestThrift2ServerCmdLine
+
+public class TestThrift2ServerCmdLine
+extends TestThriftServerCmdLine
+
+Field Summary
+
+Modifier and Type           Field and Description
+static HBaseClassTestRule   CLASS_RULE
+private static String       TABLENAME
+
+Fields inherited from class org.apache.hadoop.hbase.thrift.TestThriftServerCmdLine
+implType, port, specifyBindIP, specifyCompact, specifyFramed, tableCreated, TEST_UTIL
+
+Constructor Summary
+
+TestThrift2ServerCmdLine(org.apache.hadoop.hbase.thrift.ImplType implType,
+    boolean specifyFramed, boolean specifyBindIP, boolean specifyCompact)
+
+Method Summary
+
+All Methods / Instance Methods / Concrete Methods
+
+Modifier and Type                                        Method and Description
+protected org.apache.hadoop.hbase.thrift2.ThriftServer   createThriftServer()
+protected void                                           talkToThriftServer()
+
+Methods inherited from class org.apache.hadoop.hbase.thrift.TestThriftServerCmdLine
+getParameters, setUpBeforeClass, tearDownAfterClass, testRunThriftServer
+
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

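The page above boils down to a small subclassing pattern: the thrift2 test reuses the thrift1 command-line harness and overrides only the server factory and the client smoke test. A hedged sketch of that shape, where the method bodies are placeholders and only the overridden method names and inherited TEST_UTIL field come from the page:

    // Illustrative only; bodies are placeholders, not the actual test code.
    public class TestThrift2ServerCmdLine extends TestThriftServerCmdLine {

      @Override
      protected org.apache.hadoop.hbase.thrift2.ThriftServer createThriftServer() {
        // Swap in the thrift2 server implementation (constructor form assumed).
        return new org.apache.hadoop.hbase.thrift2.ThriftServer(TEST_UTIL.getConfiguration());
      }

      @Override
      protected void talkToThriftServer() throws Exception {
        // Open a thrift2 client against the started server and issue a simple
        // request, mirroring what the thrift1 harness does. (Placeholder body.)
      }
    }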
[04/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
index 8ba9cf5..fbf978f 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStateStore.html
@@ -712,6 +712,6 @@ extends java.lang.Object
 
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
index dff40e3..e5f288e 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
@@ -398,6 +398,6 @@ extends java.lang.Object
 
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
index 1b64578..4401a5a 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
@@ -291,6 +291,6 @@ implements java.util.Comparator
 
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
index b1366ce..8b8259c 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
@@ -1319,6 +1319,6 @@ extends java.lang.Object
 
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
index 7998852..8dfeade 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
@@ -970,6 +970,6 @@ implements
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerState.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerState.html b/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerState.html
index 6837096..d9b370b 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/ServerState.html
+++

[04/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 0f5a095..50bf692 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -78,8712 +78,8714 @@
 070import java.util.concurrent.locks.ReadWriteLock;
 071import java.util.concurrent.locks.ReentrantReadWriteLock;
 072import java.util.function.Function;
-073import org.apache.hadoop.conf.Configuration;
-074import org.apache.hadoop.fs.FileStatus;
-075import org.apache.hadoop.fs.FileSystem;
-076import org.apache.hadoop.fs.LocatedFileStatus;
-077import org.apache.hadoop.fs.Path;
-078import org.apache.hadoop.hbase.Cell;
-079import org.apache.hadoop.hbase.CellBuilderType;
-080import org.apache.hadoop.hbase.CellComparator;
-081import org.apache.hadoop.hbase.CellComparatorImpl;
-082import org.apache.hadoop.hbase.CellScanner;
-083import org.apache.hadoop.hbase.CellUtil;
-084import org.apache.hadoop.hbase.CompareOperator;
-085import org.apache.hadoop.hbase.CompoundConfiguration;
-086import org.apache.hadoop.hbase.DoNotRetryIOException;
-087import org.apache.hadoop.hbase.DroppedSnapshotException;
-088import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-089import org.apache.hadoop.hbase.HConstants;
-090import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-091import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-092import org.apache.hadoop.hbase.KeyValue;
-093import org.apache.hadoop.hbase.KeyValueUtil;
-094import org.apache.hadoop.hbase.NamespaceDescriptor;
-095import org.apache.hadoop.hbase.NotServingRegionException;
-096import org.apache.hadoop.hbase.PrivateCellUtil;
-097import org.apache.hadoop.hbase.RegionTooBusyException;
-098import org.apache.hadoop.hbase.Tag;
-099import org.apache.hadoop.hbase.TagUtil;
-100import org.apache.hadoop.hbase.UnknownScannerException;
-101import org.apache.hadoop.hbase.client.Append;
-102import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-103import org.apache.hadoop.hbase.client.CompactionState;
-104import org.apache.hadoop.hbase.client.Delete;
-105import org.apache.hadoop.hbase.client.Durability;
-106import org.apache.hadoop.hbase.client.Get;
-107import org.apache.hadoop.hbase.client.Increment;
-108import org.apache.hadoop.hbase.client.IsolationLevel;
-109import org.apache.hadoop.hbase.client.Mutation;
-110import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-111import org.apache.hadoop.hbase.client.Put;
-112import org.apache.hadoop.hbase.client.RegionInfo;
-113import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-114import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-115import org.apache.hadoop.hbase.client.Result;
-116import org.apache.hadoop.hbase.client.RowMutations;
-117import org.apache.hadoop.hbase.client.Scan;
-118import org.apache.hadoop.hbase.client.TableDescriptor;
-119import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-120import org.apache.hadoop.hbase.conf.ConfigurationManager;
-121import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-122import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-123import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-124import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-125import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-126import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-127import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-128import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-129import org.apache.hadoop.hbase.filter.FilterWrapper;
-130import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-131import org.apache.hadoop.hbase.io.HFileLink;
-132import org.apache.hadoop.hbase.io.HeapSize;
-133import org.apache.hadoop.hbase.io.TimeRange;
-134import org.apache.hadoop.hbase.io.hfile.BlockCache;
-135import org.apache.hadoop.hbase.io.hfile.HFile;
-136import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import org.apache.hadoop.hbase.ipc.RpcCall;
-139import org.apache.hadoop.hbase.ipc.RpcServer;
-140import org.apache.hadoop.hbase.mob.MobFileCache;
-141import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-142import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-143import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-144import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-145import

[04/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
index 79cb21b..d8d391b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
@@ -378,1508 +378,1510 @@
 370
 371  @Override
 372  public void returnBlock(HFileBlock block) {
-373    BlockCache blockCache = this.cacheConf.getBlockCache();
-374    if (blockCache != null && block != null) {
-375      BlockCacheKey cacheKey = new BlockCacheKey(this.getFileContext().getHFileName(),
-376          block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType());
-377      blockCache.returnBlock(cacheKey, block);
-378    }
-379  }
-380  /**
-381   * @return the first key in the file. May be null if file has no entries. Note
-382   * that this is not the first row key, but rather the byte form of the
-383   * first KeyValue.
-384   */
-385  @Override
-386  public Optional<Cell> getFirstKey() {
-387    if (dataBlockIndexReader == null) {
-388      throw new BlockIndexNotLoadedException();
-389    }
-390    return dataBlockIndexReader.isEmpty() ? Optional.empty()
-391        : Optional.of(dataBlockIndexReader.getRootBlockKey(0));
-392  }
-393
-394  /**
-395   * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's
-396   * patch goes in to eliminate {@link KeyValue} here.
-397   *
-398   * @return the first row key, or null if the file is empty.
-399   */
-400  @Override
-401  public Optional<byte[]> getFirstRowKey() {
-402    // We have to copy the row part to form the row key alone
-403    return getFirstKey().map(CellUtil::cloneRow);
-404  }
-405
-406  /**
-407   * TODO left from {@link HFile} version 1: move this to StoreFile after
-408   * Ryan's patch goes in to eliminate {@link KeyValue} here.
-409   *
-410   * @return the last row key, or null if the file is empty.
-411   */
-412  @Override
-413  public Optional<byte[]> getLastRowKey() {
-414    // We have to copy the row part to form the row key alone
-415    return getLastKey().map(CellUtil::cloneRow);
-416  }
-417
-418  /** @return number of KV entries in this HFile */
-419  @Override
-420  public long getEntries() {
-421    return trailer.getEntryCount();
-422  }
-423
-424  /** @return comparator */
-425  @Override
-426  public CellComparator getComparator() {
-427    return comparator;
-428  }
-429
-430  /** @return compression algorithm */
-431  @Override
-432  public Compression.Algorithm getCompressionAlgorithm() {
-433    return compressAlgo;
-434  }
-435
-436  /**
-437   * @return the total heap size of data and meta block indexes in bytes. Does
-438   * not take into account non-root blocks of a multilevel data index.
-439   */
-440  @Override
-441  public long indexSize() {
-442    return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0)
-443        + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize()
-444            : 0);
-445  }
-446
-447  @Override
-448  public String getName() {
-449    return name;
-450  }
-451
-452  @Override
-453  public HFileBlockIndex.BlockIndexReader getDataBlockIndexReader() {
-454    return dataBlockIndexReader;
-455  }
-456
-457  @Override
-458  public FixedFileTrailer getTrailer() {
-459    return trailer;
-460  }
-461
-462  @Override
-463  public boolean isPrimaryReplicaReader() {
-464    return primaryReplicaReader;
-465  }
-466
-467  @Override
-468  public FileInfo loadFileInfo() throws IOException {
-469    return fileInfo;
-470  }
-471
-472  /**
-473   * An exception thrown when an operation requiring a scanner to be seeked
-474   * is invoked on a scanner that is not seeked.
-475   */
-476  @SuppressWarnings("serial")
-477  public static class NotSeekedException extends IllegalStateException {
-478    public NotSeekedException() {
-479      super("Not seeked to a key/value");
-480    }
-481  }
-482
-483  protected static class HFileScannerImpl implements HFileScanner {
-484    private ByteBuff blockBuffer;
-485    protected final boolean cacheBlocks;
-486    protected final boolean pread;
-487    protected final boolean isCompaction;
-488    private int currKeyLen;
-489    private int currValueLen;
-490    private int currMemstoreTSLen;
-491    private long currMemstoreTS;
-492    // Updated but never read?
-493    protected AtomicInteger blockFetches = new AtomicInteger(0);
-494    protected final HFile.Reader reader;
-495    private int currTagsLen;
-496    // buffer backed keyonlyKV
-497    private ByteBufferKeyOnlyKeyValue bufBackedKeyOnlyKv = new ByteBufferKeyOnlyKeyValue();
-498    // A pair for reusing

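Worth noting in the block above: first and last keys come back as Optional, so callers never see a null row key on an empty file. A hedged one-liner of consuming those accessors, where reader stands for any open HFile.Reader and only methods visible in the diff are used:

    // Hedged usage sketch of the accessors shown in the diff.
    Optional<byte[]> firstRow = reader.getFirstRowKey();   // empty file => Optional.empty()
    firstRow.ifPresent(row -> System.out.println(Bytes.toStringBinary(row)));
    long entryCount = reader.getEntries();                 // KV count read from the trailer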
[04/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import org.apache.hadoop.hbase.wal.WALFactory;
 136import org.apache.hadoop.hbase.wal.WALSplitter;
-137import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import org.apache.hadoop.ipc.RemoteException;
-143import org.apache.hadoop.security.UserGroupInformation;
-144import org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import org.apache.hadoop.util.ToolRunner;
-147import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import org.apache.yetus.audience.InterfaceAudience;
-150import org.apache.yetus.audience.InterfaceStability;
-151import org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
-168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
-171 *
-172 * <p>
-173 * Region consistency checks verify that hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS (.regioninfo files) all are in
-175 * accordance.
-176 * <p>
-177 * Table integrity checks verify that all possible row keys resolve to exactly
-178 * one region of a table.  This means there are no individual degenerate
-179 * or backwards regions; no holes between regions; and that there are no
-180 * overlapping regions.
-181 * <p>
-182 * The general repair strategy works in two phases:
-183 * <ol>
-184 * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
-185 * <li> Repair Region Consistency with hbase:meta and assignments
-186 * </ol>
-187 * <p>
-188 * For table integrity repairs, the tables' region directories are scanned
-189 * for .regioninfo files.  Each table's integrity is then verified.  If there
-190 * are any orphan regions (regions with no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
-193 * a new region is created and all data is merged into the new region.
-194 * <p>
-195 * Table integrity repairs deal solely with HDFS and could potentially be done
-196 * offline -- the hbase region servers or master do not need to be running.
-197 * This phase can eventually be used to completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * <p>
-200 * Region consistency requires three conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at the regionserver that was assigned to
-203 * with proper state in the master.
-204 * <p>
-205 * Region consistency repairs require hbase to be online so that hbck can
-206 * contact the HBase master and region servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of the region consistency information
-208 * is transient and less risky to repair.
-209 * <p>
-210 * If hbck is run from the command line, there are a handful of arguments that
-211 *

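The javadoc above fixes the calling order: connect first, then run the checks, since region-consistency work needs the cluster online. A hedged sketch of driving the checker programmatically; connect() is named in the javadoc itself, while the single-argument constructor and onlineHbck() as the check entry point are assumptions based on hbase-1.x-era code, not confirmed by this diff:

    // Hedged sketch; treat the constructor form and onlineHbck() as assumptions.
    Configuration conf = HBaseConfiguration.create();
    HBaseFsck fsck = new HBaseFsck(conf);
    fsck.connect();                   // must succeed before region-consistency checks (per javadoc)
    int exitCode = fsck.onlineHbck(); // assumed entry point: run checks, report inconsistencies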
[04/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
index 16c2238..a626878 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
@@ -228,1032 +228,1032 @@
 220    RESERVED_KEYWORDS.add(IS_META_KEY);
 221  }
 222
-223  @InterfaceAudience.Private
-224  public final static String NAMESPACE_FAMILY_INFO = "info";
-225  @InterfaceAudience.Private
-226  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
+223  /**
+224   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
+225   * any more.
+226   */
 227  @InterfaceAudience.Private
-228  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
-229
-230  /**
-231   * <pre>
-232   * Pattern that matches a coprocessor specification. Form is:
-233   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
-234   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
-235   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
-236   * </pre>
-237   */
-238  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
-239      Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");
-240
-241  private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
-242  private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
-243  private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
-244      "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
-245      CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
-246  private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
-247      Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
-248  /**
-249   * Table descriptor for namespace table
-250   */
-251  // TODO We used to set CacheDataInL1 for NS table. When we have BucketCache in file mode, now the
-252  // NS data goes to File mode BC only. Test how that affect the system. If too much, we have to
-253  // rethink about adding back the setCacheDataInL1 for NS table.
-254  public static final TableDescriptor NAMESPACE_TABLEDESC
-255    = TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
-256      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
-257        // Ten is arbitrary number.  Keep versions to help debugging.
-258        .setMaxVersions(10)
-259        .setInMemory(true)
-260        .setBlocksize(8 * 1024)
-261        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-262        .build())
-263      .build();
-264  private final ModifyableTableDescriptor desc;
+228  @Deprecated
+229  public final static String NAMESPACE_FAMILY_INFO = "info";
+230
+231  /**
+232   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
+233   * any more.
+234   */
+235  @InterfaceAudience.Private
+236  @Deprecated
+237  public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
+238
+239  /**
+240   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
+241   * any more.
+242   */
+243  @InterfaceAudience.Private
+244  @Deprecated
+245  public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
+246
+247  /**
+248   * <pre>
+249   * Pattern that matches a coprocessor specification. Form is:
+250   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
+251   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
+252   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
+253   * </pre>
+254   */
+255  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
+256      Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");
+257
+258  private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
+259  private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
+260  private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
+261      "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
+262      CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
+263  private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
+264      Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
 265
 266  /**

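Since the hunk above mostly reshuffles these pattern constants, a quick standalone illustration of what CP_HTD_ATTR_VALUE_PATTERN accepts may help; the regex and the sample spec string are copied verbatim from the source shown, and the rest is a hypothetical harness:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Hypothetical harness; only the pattern and sample string come from the source above.
    public class CoprocessorSpecDemo {
      private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
          Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

      public static void main(String[] args) {
        Matcher m = CP_HTD_ATTR_VALUE_PATTERN
            .matcher("hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2");
        if (m.matches()) {
          System.out.println("jar      = " + m.group(1)); // hdfs:///foo.jar
          System.out.println("class    = " + m.group(2)); // com.foo.FooRegionObserver
          System.out.println("priority = " + m.group(3)); // 1001
          System.out.println("args     = " + m.group(4)); // |arg1=1,arg2=2 (keeps leading '|')
        }
      }
    }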
[04/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
index 5062e9b..23b4be7 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadAccessTestAction.html
@@ -282,7 +282,7 @@
 274  public static void tearDownAfterClass() throws Exception {
 275    cleanUp();
 276    TEST_UTIL.shutdownMiniCluster();
-277    int total = TableAuthManager.getTotalRefCount();
+277    int total = AuthManager.getTotalRefCount();
 278    assertTrue("Unexpected reference count: " + total, total == 0);
 279  }
 280
@@ -1642,12 +1642,12 @@
 1634      }
 1635
 1636      UserPermission ownerperm =
-1637          new UserPermission(Bytes.toBytes(USER_OWNER.getName()), tableName, null, Action.values());
+1637          new UserPermission(USER_OWNER.getName(), tableName, Action.values());
 1638      assertTrue("Owner should have all permissions on table",
 1639          hasFoundUserPermission(ownerperm, perms));
 1640
 1641      User user = User.createUserForTesting(TEST_UTIL.getConfiguration(), "user", new String[0]);
-1642      byte[] userName = Bytes.toBytes(user.getShortName());
+1642      String userName = user.getShortName();
 1643
 1644      UserPermission up =
 1645          new UserPermission(userName, tableName, family1, qualifier, Permission.Action.READ);
@@ -1733,7 +1733,7 @@
 1725      }
 1726
 1727      UserPermission newOwnerperm =
-1728          new UserPermission(Bytes.toBytes(newOwner.getName()), tableName, null, Action.values());
+1728          new UserPermission(newOwner.getName(), tableName, Action.values());
 1729      assertTrue("New owner should have all permissions on table",
 1730          hasFoundUserPermission(newOwnerperm, perms));
 1731    } finally {
@@ -1757,1888 +1757,1898 @@
 1749
 1750    Collection<String> superUsers = Superusers.getSuperUsers();
 1751    List<UserPermission> adminPerms = new ArrayList<>(superUsers.size() + 1);
-1752    adminPerms.add(new UserPermission(Bytes.toBytes(USER_ADMIN.getShortName()),
-1753        AccessControlLists.ACL_TABLE_NAME, null, null, Bytes.toBytes("ACRW")));
-1754
-1755    for (String user : superUsers) {
-1756      adminPerms.add(new UserPermission(Bytes.toBytes(user), AccessControlLists.ACL_TABLE_NAME,
-1757          null, null, Action.values()));
-1758    }
-1759    assertTrue("Only super users, global users and user admin has permission on table hbase:acl " +
-1760        "per setup", perms.size() == 5 + superUsers.size() &&
-1761        hasFoundUserPermission(adminPerms, perms));
-1762  }
-1763
-1764  /** global operations */
-1765  private void verifyGlobal(AccessTestAction action) throws Exception {
-1766    verifyAllowed(action, SUPERUSER);
-1767
-1768    verifyDenied(action, USER_CREATE, USER_RW, USER_NONE, USER_RO);
-1769  }
-1770
-1771  @Test
-1772  public void testCheckPermissions() throws Exception {
-1773    // --------------------------------------
-1774    // test global permissions
-1775    AccessTestAction globalAdmin = new AccessTestAction() {
-1776      @Override
-1777      public Void run() throws Exception {
-1778        checkGlobalPerms(TEST_UTIL, Permission.Action.ADMIN);
-1779        return null;
-1780      }
-1781    };
-1782    // verify that only superuser can admin
-1783    verifyGlobal(globalAdmin);
-1784
-1785    // --------------------------------------
-1786    // test multiple permissions
-1787    AccessTestAction globalReadWrite = new AccessTestAction() {
-1788      @Override
-1789      public Void run() throws Exception {
-1790        checkGlobalPerms(TEST_UTIL, Permission.Action.READ, Permission.Action.WRITE);
-1791        return null;
-1792      }
-1793    };
+1752    adminPerms.add(new UserPermission(USER_ADMIN.getShortName(), Bytes.toBytes("ACRW")));
+1753    for (String user : superUsers) {
+1754      // Global permission
+1755      adminPerms.add(new UserPermission(user, Action.values()));
+1756    }
+1757    assertTrue("Only super users, global users and user admin has permission on table hbase:acl " +
+1758        "per setup", perms.size() == 5 + superUsers.size() &&
+1759        hasFoundUserPermission(adminPerms, perms));
+1760  }
+1761
+1762  /** global operations */
+1763  private void verifyGlobal(AccessTestAction action) throws Exception {
+1764    verifyAllowed(action, SUPERUSER);
+1765
+1766    verifyDenied(action, USER_CREATE, USER_RW, USER_NONE, USER_RO);
+1767  }
+1768
+1769  @Test

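The thread running through this diff is the UserPermission API migration: user names move from byte[] to String, and the explicit null family/qualifier arguments disappear. A hedged recap using the exact forms visible above:

    // Removed form (old API), as deleted in this diff:
    //   new UserPermission(Bytes.toBytes(USER_OWNER.getName()), tableName, null, Action.values());
    // Added form (new API), as introduced in this diff:
    UserPermission ownerPerm = new UserPermission(USER_OWNER.getName(), tableName, Action.values());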
[04/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestPeerProcedure.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestPeerProcedure.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestPeerProcedure.html
index c9e0e55..f1b9105 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestPeerProcedure.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestPeerProcedure.html
@@ -37,157 +37,157 @@
 029import java.util.Arrays;
 030import java.util.List;
 031import org.apache.hadoop.hbase.HBaseClassTestRule;
-032import org.apache.hadoop.hbase.HRegionInfo;
-033import org.apache.hadoop.hbase.ServerName;
-034import org.apache.hadoop.hbase.TableName;
-035import org.apache.hadoop.hbase.client.RegionInfo;
-036import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-037import org.apache.hadoop.hbase.master.locking.LockProcedure;
-038import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
-039import org.apache.hadoop.hbase.procedure2.LockType;
-040import org.apache.hadoop.hbase.procedure2.LockedResource;
-041import org.apache.hadoop.hbase.procedure2.LockedResourceType;
-042import org.apache.hadoop.hbase.procedure2.Procedure;
-043import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-044import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
-045import org.apache.hadoop.hbase.testclassification.MasterTests;
-046import org.apache.hadoop.hbase.testclassification.SmallTests;
-047import org.apache.hadoop.hbase.util.Bytes;
-048import org.junit.After;
-049import org.junit.Before;
-050import org.junit.ClassRule;
-051import org.junit.Rule;
-052import org.junit.Test;
-053import org.junit.experimental.categories.Category;
-054import org.junit.rules.TestName;
-055import org.slf4j.Logger;
-056import org.slf4j.LoggerFactory;
-057
-058@Category({MasterTests.class, SmallTests.class})
-059public class TestMasterProcedureScheduler {
-060
-061  @ClassRule
-062  public static final HBaseClassTestRule CLASS_RULE =
-063      HBaseClassTestRule.forClass(TestMasterProcedureScheduler.class);
-064
-065  private static final Logger LOG = LoggerFactory.getLogger(TestMasterProcedureScheduler.class);
-066
-067  private MasterProcedureScheduler queue;
-068
-069  @Rule
-070  public TestName name = new TestName();
-071
-072  @Before
-073  public void setUp() throws IOException {
-074    queue = new MasterProcedureScheduler();
-075    queue.start();
-076  }
-077
-078  @After
-079  public void tearDown() throws IOException {
-080    assertEquals("proc-queue expected to be empty", 0, queue.size());
-081    queue.stop();
-082    queue.clear();
-083  }
-084
-085  /**
-086   * Verify simple create/insert/fetch/delete of the table queue.
-087   */
-088  @Test
-089  public void testSimpleTableOpsQueues() throws Exception {
-090    final int NUM_TABLES = 10;
-091    final int NUM_ITEMS = 10;
-092
-093    int count = 0;
-094    for (int i = 1; i <= NUM_TABLES; ++i) {
-095      TableName tableName = TableName.valueOf(String.format("test-%04d", i));
-096      // insert items
-097      for (int j = 1; j <= NUM_ITEMS; ++j) {
-098        queue.addBack(new TestTableProcedure(i * 1000 + j, tableName,
-099            TableProcedureInterface.TableOperationType.EDIT));
-100        assertEquals(++count, queue.size());
-101      }
-102    }
-103    assertEquals(NUM_TABLES * NUM_ITEMS, queue.size());
-104
-105    for (int j = 1; j <= NUM_ITEMS; ++j) {
-106      for (int i = 1; i <= NUM_TABLES; ++i) {
-107        Procedure proc = queue.poll();
-108        assertTrue(proc != null);
-109        TableName tableName = ((TestTableProcedure) proc).getTableName();
-110        queue.waitTableExclusiveLock(proc, tableName);
-111        queue.wakeTableExclusiveLock(proc, tableName);
-112        queue.completionCleanup(proc);
-113        assertEquals(--count, queue.size());
-114        assertEquals(i * 1000 + j, proc.getProcId());
-115      }
-116    }
-117    assertEquals(0, queue.size());
-118
-119    for (int i = 1; i <= NUM_TABLES; ++i) {
-120      final TableName tableName = TableName.valueOf(String.format("test-%04d", i));
-121      final TestTableProcedure dummyProc = new TestTableProcedure(100, tableName,
-122          TableProcedureInterface.TableOperationType.DELETE);
-123      // complete the table deletion
-124      assertTrue(queue.markTableAsDeleted(tableName, dummyProc));
-125    }
-126  }
-127
-128  /**
-129   * Check that the table queue is not deletable until every procedure
-130   * in-progress is completed (this is a special case for write-locks).
-131   */

[04/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
index 9b964f6..98ef11a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.html
@@ -105,7 +105,7 @@
 097 * will first be initialized to the 
oldest file's tracker(which is stored in the trailer), using the
 098 * method {@link 
ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge 
it
 099 * with the tracker of every newer wal 
files, using the
-100 * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, 
boolean)}.
+100 * {@link 
ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}.
 101 * If we find out
 102 * that all the modified procedures for 
the oldest wal file are modified or deleted in newer wal
 103 * files, then we can delete it. This is 
because that, every time we call
@@ -1181,244 +1181,243 @@
 1173}
 1174
 1175// compute the holding tracker.
-1176//  - the first WAL is used for the 
'updates'
-1177//  - the global tracker is passed 
in first to decide which procedures are not
-1178//exist anymore, so we can mark 
them as deleted in holdingCleanupTracker.
-1179//Only global tracker have the 
whole picture here.
-1180//  - the other WALs are scanned to 
remove procs already updated in a newer wal.
-1181//If it is updated in a newer 
wal, we can mark it as delelted in holdingCleanupTracker
-1182//But, we can not delete it if 
it was shown deleted in the newer wal, as said
-1183//above.
-1184// TODO: exit early if 
holdingCleanupTracker.isEmpty()
-1185
holdingCleanupTracker.resetTo(logs.getFirst().getTracker(), true);
-1186//Passing in the global tracker, we 
can delete the procedures not in the global
-1187//tracker, because they are deleted 
in the later logs
-1188
holdingCleanupTracker.setDeletedIfModifiedInBoth(storeTracker, true);
-1189for (int i = 1, size = logs.size() - 
1; i  size; ++i) {
-1190  // Set deleteIfNotExists to false 
since a single log's tracker is passed in.
-1191  // Since a specific procedure may 
not show up in the log at all(not executed or
-1192  // updated during the time), we 
can not delete the procedure just because this log
-1193  // don't have the info of the 
procedure. We can delete the procedure only if
-1194  // in this log's tracker, it was 
cleanly showed that the procedure is modified or deleted
-1195  // in the corresponding 
BitSetNode.
-1196  
holdingCleanupTracker.setDeletedIfModifiedInBoth(logs.get(i).getTracker(), 
false);
-1197}
-1198  }
-1199
-1200  /**
-1201   * Remove all logs with logId <= {@code lastLogId}.
-1202   */
-1203  private void removeAllLogs(long lastLogId, String why) {
-1204    if (logs.size() <= 1) {
-1205      return;
-1206    }
-1207
-1208    LOG.info("Remove all state logs with ID less than {}, since {}", lastLogId, why);
-1209
-1210    boolean removed = false;
-1211    while (logs.size() > 1) {
-1212      ProcedureWALFile log = logs.getFirst();
-1213      if (lastLogId < log.getLogId()) {
-1214        break;
-1215      }
-1216      removeLogFile(log, walArchiveDir);
-1217      removed = true;
-1218    }
-1219
-1220    if (removed) {
-1221      buildHoldingCleanupTracker();
-1222    }
-1223  }
-1224
-1225  private boolean removeLogFile(final ProcedureWALFile log, final Path walArchiveDir) {
-1226    try {
-1227      LOG.trace("Removing log={}", log);
-1228      log.removeFile(walArchiveDir);
-1229      logs.remove(log);
-1230      LOG.debug("Removed log={}, activeLogs={}", log, logs);
-1231      assert logs.size() > 0 : "expected at least one log";
-1232    } catch (IOException e) {
-1233      LOG.error("Unable to remove log: " + log, e);
-1234      return false;
-1235    }
-1236    return true;
-1237  }
-1238
-1239  // ==========================================================================
-1240  //  FileSystem Log Files helpers
-1241  // ==========================================================================
-1242  public Path getWALDir() {
-1243    return this.walDir;
-1244  }
-1245
-1246  @VisibleForTesting
-1247  Path getWalArchiveDir() {
-1248    return this.walArchiveDir;
-1249  }
-1250
-1251  public FileSystem getFileSystem() {
-1252    return this.fs;
-1253  }
-1254
-1255  protected Path getLogFilePath(final long logId) throws IOException {
-1256    return new Path(walDir, String.format(LOG_PREFIX + "%020d.log", logId));
-1257  }
-1258
-1259  private static long 

[04/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HStoreForTesting.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HStoreForTesting.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HStoreForTesting.html
index ed3db7a..156dabb 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HStoreForTesting.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HStoreForTesting.html
@@ -5542,785 +5542,825 @@
 5534  }
 5535
 5536  @Test
-5537  public void testWriteRequestsCounter() throws IOException {
-5538    byte[] fam = Bytes.toBytes("info");
-5539    byte[][] families = { fam };
-5540    this.region = initHRegion(tableName, method, CONF, families);
+5537  public void testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception {
+5538    byte[] cf1 = Bytes.toBytes("CF1");
+5539    byte[][] families = { cf1 };
+5540    byte[] col = Bytes.toBytes("C");
 5541
-5542    Assert.assertEquals(0L, region.getWriteRequestsCount());
-5543
-5544    Put put = new Put(row);
-5545    put.addColumn(fam, fam, fam);
-5546
-5547    Assert.assertEquals(0L, region.getWriteRequestsCount());
-5548    region.put(put);
-5549    Assert.assertEquals(1L, region.getWriteRequestsCount());
-5550    region.put(put);
-5551    Assert.assertEquals(2L, region.getWriteRequestsCount());
-5552    region.put(put);
-5553    Assert.assertEquals(3L, region.getWriteRequestsCount());
-5554
-5555    region.delete(new Delete(row));
-5556    Assert.assertEquals(4L, region.getWriteRequestsCount());
-5557  }
-5558
-5559  @Test
-5560  public void testOpenRegionWrittenToWAL() throws Exception {
-5561    final ServerName serverName = ServerName.valueOf(name.getMethodName(), 100, 42);
-5562    final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
-5563
-5564    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
-5565    htd.addFamily(new HColumnDescriptor(fam1));
-5566    htd.addFamily(new HColumnDescriptor(fam2));
-5567
-5568    HRegionInfo hri = new HRegionInfo(htd.getTableName(),
-5569      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
-5570
-5571    // open the region w/o rss and wal and flush some files
-5572    region =
-5573        HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL
-5574            .getConfiguration(), htd);
-5575    assertNotNull(region);
-5576
-5577    // create a file in fam1 for the region before opening in OpenRegionHandler
-5578    region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
-5579    region.flush(true);
-5580    HBaseTestingUtility.closeRegionAndWAL(region);
+5542    HBaseConfiguration conf = new HBaseConfiguration();
+5543    this.region = initHRegion(tableName, method, conf, families);
+5544
+5545    Put put = new Put(Bytes.toBytes("16"));
+5546    put.addColumn(cf1, col, Bytes.toBytes("val"));
+5547    region.put(put);
+5548    Put put2 = new Put(Bytes.toBytes("15"));
+5549    put2.addColumn(cf1, col, Bytes.toBytes("val"));
+5550    region.put(put2);
+5551
+5552    // Create a reverse scan
+5553    Scan scan = new Scan(Bytes.toBytes("16"));
+5554    scan.setReversed(true);
+5555    RegionScannerImpl scanner = region.getScanner(scan);
+5556
+5557    // Put a lot of cells that have sequenceIDs greater than the readPt of the reverse scan
+5558    for (int i = 10; i < 20; i++) {
+5559      Put p = new Put(Bytes.toBytes("" + i));
+5560      p.addColumn(cf1, col, Bytes.toBytes("" + i));
+5561      region.put(p);
+5562    }
+5563    List<Cell> currRow = new ArrayList<>();
+5564    boolean hasNext;
+5565    do {
+5566      hasNext = scanner.next(currRow);
+5567    } while (hasNext);
+5568
+5569    assertEquals(2, currRow.size());
+5570    assertEquals("16", Bytes.toString(currRow.get(0).getRowArray(),
+5571      currRow.get(0).getRowOffset(), currRow.get(0).getRowLength()));
+5572    assertEquals("15", Bytes.toString(currRow.get(1).getRowArray(),
+5573      currRow.get(1).getRowOffset(), currRow.get(1).getRowLength()));
+5574  }
+5575
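The new test pins down scanner read points: cells written after a scanner is opened carry sequence IDs above the scanner's readPt and must stay invisible to it. For comparison, a minimal client-side sketch of the same reverse-scan shape, assuming a hypothetical org.apache.hadoop.hbase.client.Table handle named table; withStartRow is the non-deprecated equivalent of the Scan(byte[]) constructor used above:

    // Sketch only: rows come back in descending order, fixed at open time.
    Scan scan = new Scan().withStartRow(Bytes.toBytes("16")).setReversed(true);
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        System.out.println(Bytes.toString(r.getRow()));  // "16", then "15"
      }
    }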
+5576  @Test
+5577  public void testWriteRequestsCounter() throws IOException {
+5578    byte[] fam = Bytes.toBytes("info");
+5579    byte[][] families = { fam };
+5580    this.region = initHRegion(tableName, method, CONF, families);
 5581
-5582    ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
+5582    Assert.assertEquals(0L, region.getWriteRequestsCount());
 5583
-5584    // capture append() calls
-5585    WAL wal = mockWAL();
-5586    when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
-5587
-5588    region = HRegion.openHRegion(hri, htd, rss.getWAL(hri),
-5589  

[04/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/class-use/ByteBufferUtils.Comparer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/class-use/ByteBufferUtils.Comparer.html b/devapidocs/org/apache/hadoop/hbase/util/class-use/ByteBufferUtils.Comparer.html
new file mode 100644
index 000..d3d202d
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/util/class-use/ByteBufferUtils.Comparer.html
@@ -0,0 +1,195 @@
+[Generated Javadoc page: DOCTYPE, scripts and the standard navigation chrome
+ (Overview/Package/Class/Use/Tree/Deprecated/Index/Help, Prev/Next, Frames) omitted.]
+Uses of Class org.apache.hadoop.hbase.util.ByteBufferUtils.Comparer (Apache HBase 3.0.0-SNAPSHOT API)
+Packages that use ByteBufferUtils.Comparer: org.apache.hadoop.hbase.util
+Subclasses of ByteBufferUtils.Comparer in org.apache.hadoop.hbase.util:
+  (package private) static class ByteBufferUtils.ComparerHolder.PureJavaComparer
+  (package private) static class ByteBufferUtils.ComparerHolder.UnsafeComparer
+Fields in org.apache.hadoop.hbase.util declared as ByteBufferUtils.Comparer:
+  (package private) static ByteBufferUtils.Comparer ByteBufferUtils.ComparerHolder.BEST_COMPARER
+Methods in org.apache.hadoop.hbase.util that return ByteBufferUtils.Comparer:
+  (package private) static ByteBufferUtils.Comparer ByteBufferUtils.ComparerHolder.getBestComparer()
+Copyright 2007-2018 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/class-use/ByteBufferUtils.ComparerHolder.PureJavaComparer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/class-use/ByteBufferUtils.ComparerHolder.PureJavaComparer.html b/devapidocs/org/apache/hadoop/hbase/util/class-use/ByteBufferUtils.ComparerHolder.PureJavaComparer.html
new file mode 100644
index 000..6e8eb4d
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/util/class-use/ByteBufferUtils.ComparerHolder.PureJavaComparer.html
@@ -0,0 +1,165 @@
+[Generated Javadoc page: navigation chrome omitted, as above.]
+Uses of Class org.apache.hadoop.hbase.util.ByteBufferUtils.ComparerHolder.PureJavaComparer (Apache HBase 3.0.0-SNAPSHOT API)
+Packages that use ByteBufferUtils.ComparerHolder.PureJavaComparer: org.apache.hadoop.hbase.util
+Fields in org.apache.hadoop.hbase.util declared as ByteBufferUtils.ComparerHolder.PureJavaComparer:
+Modifier 
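The pages above index a best-implementation holder: BEST_COMPARER is chosen once, at class-load time, between a pure-Java and an Unsafe-backed comparer. A generic sketch of that idiom; the class bodies and selection logic here are illustrative, not HBase's actual implementation:

    // Holder idiom sketch: pick the fastest available implementation once.
    static final class ComparerHolder {
      static final Comparer BEST_COMPARER = getBestComparer();

      static Comparer getBestComparer() {
        try {
          return new UnsafeComparer();     // prefer the Unsafe path when usable
        } catch (Throwable t) {
          return new PureJavaComparer();   // safe fallback on any platform
        }
      }
    }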

[04/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.html
index ad8e4a5..ec3e399 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.html
@@ -30,385 +30,383 @@
 022import static org.junit.Assert.assertTrue;
 023
 024import java.io.IOException;
-025
-026import org.apache.hadoop.hbase.DoNotRetryIOException;
-027import org.apache.hadoop.hbase.HBaseClassTestRule;
-028import org.apache.hadoop.hbase.HColumnDescriptor;
-029import org.apache.hadoop.hbase.HTableDescriptor;
-030import org.apache.hadoop.hbase.InvalidFamilyOperationException;
-031import org.apache.hadoop.hbase.TableName;
-032import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-033import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator;
-034import org.apache.hadoop.hbase.client.RegionInfo;
-035import org.apache.hadoop.hbase.client.TableDescriptor;
-036import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-037import org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility.StepHook;
-038import org.apache.hadoop.hbase.procedure2.Procedure;
-039import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-040import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-041import org.apache.hadoop.hbase.testclassification.MasterTests;
-042import org.apache.hadoop.hbase.testclassification.MediumTests;
-043import org.apache.hadoop.hbase.util.Bytes;
-044import org.apache.hadoop.hbase.util.NonceKey;
-045import org.junit.Assert;
-046import org.junit.ClassRule;
-047import org.junit.Rule;
-048import org.junit.Test;
-049import org.junit.experimental.categories.Category;
-050import org.junit.rules.TestName;
-051
-052@Category({MasterTests.class, MediumTests.class})
-053public class TestModifyTableProcedure extends TestTableDDLProcedureBase {
-054
-055  @ClassRule
-056  public static final HBaseClassTestRule CLASS_RULE =
-057      HBaseClassTestRule.forClass(TestModifyTableProcedure.class);
-058
-059  @Rule public TestName name = new TestName();
-060
-061  @Test
-062  public void testModifyTable() throws Exception {
-063    final TableName tableName = TableName.valueOf(name.getMethodName());
-064    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-065
-066    MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf");
-067    UTIL.getAdmin().disableTable(tableName);
-068
-069    // Modify the table descriptor
-070    HTableDescriptor htd = new HTableDescriptor(UTIL.getAdmin().getTableDescriptor(tableName));
-071
-072    // Test 1: Modify 1 property
-073    long newMaxFileSize = htd.getMaxFileSize() * 2;
-074    htd.setMaxFileSize(newMaxFileSize);
-075    htd.setRegionReplication(3);
-076
-077    long procId1 = ProcedureTestingUtility.submitAndWait(
-078        procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
-079    ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1));
-080
-081    HTableDescriptor currentHtd = UTIL.getAdmin().getTableDescriptor(tableName);
-082    assertEquals(newMaxFileSize, currentHtd.getMaxFileSize());
-083
-084    // Test 2: Modify multiple properties
-085    boolean newReadOnlyOption = htd.isReadOnly() ? false : true;
-086    long newMemStoreFlushSize = htd.getMemStoreFlushSize() * 2;
-087    htd.setReadOnly(newReadOnlyOption);
-088    htd.setMemStoreFlushSize(newMemStoreFlushSize);
-089
-090    long procId2 = ProcedureTestingUtility.submitAndWait(
-091        procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
-092    ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
-093
-094    currentHtd = UTIL.getAdmin().getTableDescriptor(tableName);
-095    assertEquals(newReadOnlyOption, currentHtd.isReadOnly());
-096    assertEquals(newMemStoreFlushSize, currentHtd.getMemStoreFlushSize());
-097  }
-098
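The test above drives ModifyTableProcedure directly through the master's procedure executor. For orientation, the client-facing equivalent is a single Admin call; a sketch, assuming a hypothetical org.apache.hadoop.hbase.client.Admin handle named admin and the builder-style descriptor API:

    // Sketch only: admin.modifyTable runs a ModifyTableProcedure on the master.
    TableDescriptor current = admin.getDescriptor(tableName);
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .setMaxFileSize(current.getMaxFileSize() * 2)  // same property the test doubles
        .build();
    admin.modifyTable(modified);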
-099  @Test
-100  public void testModifyTableAddCF() throws Exception {
-101    final TableName tableName = TableName.valueOf(name.getMethodName());
-102    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-103
-104    MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1");
-105    HTableDescriptor currentHtd = UTIL.getAdmin().getTableDescriptor(tableName);
-106    assertEquals(1, currentHtd.getFamiliesKeys().size());
-107
-108    // Test 1: Modify the table descriptor online
-109    String cf2 = "cf2";
-110    HTableDescriptor htd = 

[04/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/license.html
--
diff --git a/license.html b/license.html
index 22567b5..252581d 100644
--- a/license.html
+++ b/license.html
@@ -491,7 +491,7 @@
 https://www.apache.org/;>The Apache Software Foundation.
 All rights reserved.
-  Last Published: 2018-10-11
+  Last Published: 2018-10-12

[The same one-line "Last Published" date bump (2018-10-11 -> 2018-10-12) repeats
verbatim in mail-lists.html, metrics.html, old_news.html, plugin-management.html,
plugins.html, poweredbyhbase.html and project-info.html (diff truncated).]

[04/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureSkipPersistence.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureSkipPersistence.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureSkipPersistence.html
new file mode 100644
index 000..e4812fc
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestProcedureSkipPersistence.html
@@ -0,0 +1,125 @@
+[Generated Javadoc page: DOCTYPE, scripts and the standard navigation chrome omitted.]
+Uses of Class org.apache.hadoop.hbase.procedure2.TestProcedureSkipPersistence (Apache HBase 3.0.0-SNAPSHOT Test API)
+No usage of org.apache.hadoop.hbase.procedure2.TestProcedureSkipPersistence
+Copyright 2007-2018 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
index ec42355..93c7697 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
@@ -60,6 +60,8 @@
 TestProcedureReplayOrder.TestTwoStepProcedure
 TestProcedureSchedulerConcurrency
 TestProcedureSchedulerConcurrency.TestProcedureWithEvent
+TestProcedureSkipPersistence
+TestProcedureSkipPersistence.TestProcedure
 TestProcedureSuspended
 TestProcedureSuspended.TestLockProcedure
 TestProcedureSuspended.TestProcEnv

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-summary.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-summary.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-summary.html
index 689909e..6d9932a 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-summary.html
@@ -280,94 +280,102 @@
 [Summary-table row markup omitted; the only content change is the two new
 TestProcedureSkipPersistence entries. Entry list after the change, with
 descriptions indented:]
+TestProcedureSkipPersistence
+TestProcedureSkipPersistence.TestProcedure
 TestProcedureSuspended
 TestProcedureSuspended.TestLockProcedure
 TestProcedureSuspended.TestProcEnv
 TestProcedureToString
 TestProcedureToString.BasicProcedure
   A do-nothing basic procedure just for testing toString.
 TestProcedureToString.BasicProcedureEnv
   A do-nothing environment for BasicProcedure.
 TestProcedureToString.DoublingStateStringBasicProcedure
   A do-nothing basic procedure that overrides the toStringState method.
 TestProcedureToString.SimpleStateMachineProcedure
   Do-nothing SimpleMachineProcedure for checking its toString.
 TestProcedureUtil
 TestProcedureUtil.TestProcedureNoDefaultConstructor
 TestStateMachineProcedure
 TestStateMachineProcedure.SimpleChildProcedure
 TestStateMachineProcedure.TestProcEnv
 TestStateMachineProcedure.TestSMProcedure
 TestStateMachineProcedure.TestSMProcedureBadRollback
 TestYieldProcedures
 TestYieldProcedures.TestProcEnv
 TestYieldProcedures.TestScheduler
 TestYieldProcedures.TestStateMachineProcedure

[04/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 566f410..da040ad 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -341,8361 +341,8425 @@
 333  private final int rowLockWaitDuration;
 334  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
 335
-336  // The internal wait duration to acquire a lock before read/update
-337  // from the region. It is not per row. The purpose of this wait time
-338  // is to avoid waiting a long time while the region is busy, so that
-339  // we can release the IPC handler soon enough to improve the
-340  // availability of the region server. It can be adjusted by
-341  // tuning configuration "hbase.busy.wait.duration".
-342  final long busyWaitDuration;
-343  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-344
-345  // If updating multiple rows in one call, wait longer,
-346  // i.e. waiting for busyWaitDuration * # of rows. However,
-347  // we can limit the max multiplier.
-348  final int maxBusyWaitMultiplier;
-349
-350  // Max busy wait duration. There is no point to wait longer than the RPC
-351  // purge timeout, when a RPC call will be terminated by the RPC engine.
-352  final long maxBusyWaitDuration;
-353
-354  // Max cell size. If nonzero, the maximum allowed size for any given cell
-355  // in bytes
-356  final long maxCellSize;
-357
-358  // Number of mutations for minibatch processing.
-359  private final int miniBatchSize;
+336  private Path regionDir;
+337  private FileSystem walFS;
+338
+339  // The internal wait duration to acquire a lock before read/update
+340  // from the region. It is not per row. The purpose of this wait time
+341  // is to avoid waiting a long time while the region is busy, so that
+342  // we can release the IPC handler soon enough to improve the
+343  // availability of the region server. It can be adjusted by
+344  // tuning configuration "hbase.busy.wait.duration".
+345  final long busyWaitDuration;
+346  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+347
+348  // If updating multiple rows in one call, wait longer,
+349  // i.e. waiting for busyWaitDuration * # of rows. However,
+350  // we can limit the max multiplier.
+351  final int maxBusyWaitMultiplier;
+352
+353  // Max busy wait duration. There is no point to wait longer than the RPC
+354  // purge timeout, when a RPC call will be terminated by the RPC engine.
+355  final long maxBusyWaitDuration;
+356
+357  // Max cell size. If nonzero, the maximum allowed size for any given cell
+358  // in bytes
+359  final long maxCellSize;
 360
-361  // negative number indicates infinite timeout
-362  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-363  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
-364
-365  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
-366
-367  /**
-368   * The sequence ID that was encountered when this region was opened.
-369   */
-370  private long openSeqNum = HConstants.NO_SEQNUM;
-371
-372  /**
-373   * The default setting for whether to enable on-demand CF loading for
-374   * scan requests to this region. Requests can override it.
-375   */
-376  private boolean isLoadingCfsOnDemandDefault = false;
-377
-378  private final AtomicInteger majorInProgress = new AtomicInteger(0);
-379  private final AtomicInteger minorInProgress = new AtomicInteger(0);
+361  // Number of mutations for minibatch processing.
+362  private final int miniBatchSize;
+363
+364  // negative number indicates infinite timeout
+365  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+366  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
+367
+368  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+369
+370  /**
+371   * The sequence ID that was encountered when this region was opened.
+372   */
+373  private long openSeqNum = HConstants.NO_SEQNUM;
+374
+375  /**
+376   * The default setting for whether to enable on-demand CF loading for
+377   * scan requests to this region. Requests can override it.
+378   */
+379  private boolean isLoadingCfsOnDemandDefault = false;
 380
-381  //
-382  // Context: During replay we want to ensure that we do not lose any data. So, we
-383  // have to be conservative in how we replay wals. For each store, we calculate
-384  // the maxSeqId up to which the store was flushed. 
[04/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.PostOpenDeployTasksThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.PostOpenDeployTasksThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.PostOpenDeployTasksThread.html
index 4a11f27..7c7966d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.PostOpenDeployTasksThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.PostOpenDeployTasksThread.html
@@ -49,287 +49,290 @@
 041 * Handles opening of a region on a 
region server.
 042 * p
 043 * This is executed after receiving an 
OPEN RPC from the master or client.
-044 */
-045@InterfaceAudience.Private
-046public class OpenRegionHandler extends 
EventHandler {
-047  private static final Logger LOG = 
LoggerFactory.getLogger(OpenRegionHandler.class);
-048
-049  protected final RegionServerServices 
rsServices;
-050
-051  private final RegionInfo regionInfo;
-052  private final TableDescriptor htd;
-053  private final long masterSystemTime;
-054
-055  public OpenRegionHandler(final Server 
server,
-056  final RegionServerServices 
rsServices, RegionInfo regionInfo,
-057  TableDescriptor htd, long 
masterSystemTime) {
-058this(server, rsServices, regionInfo, 
htd, masterSystemTime, EventType.M_RS_OPEN_REGION);
-059  }
-060
-061  protected OpenRegionHandler(final 
Server server,
-062  final 
RegionServerServices rsServices, final RegionInfo regionInfo,
-063  final 
TableDescriptor htd, long masterSystemTime, EventType eventType) {
-064super(server, eventType);
-065this.rsServices = rsServices;
-066this.regionInfo = regionInfo;
-067this.htd = htd;
-068this.masterSystemTime = 
masterSystemTime;
-069  }
-070
-071  public RegionInfo getRegionInfo() {
-072return regionInfo;
-073  }
-074
-075  @Override
-076  public void process() throws 
IOException {
-077boolean openSuccessful = false;
-078final String regionName = 
regionInfo.getRegionNameAsString();
-079HRegion region = null;
-080
-081try {
-082  if (this.server.isStopped() || 
this.rsServices.isStopping()) {
-083return;
-084  }
-085  final String encodedName = 
regionInfo.getEncodedName();
-086
-087  // 2 different difficult situations 
can occur
-088  // 1) The opening was cancelled. 
This is an expected situation
-089  // 2) The region is now marked as 
online while we're suppose to open. This would be a bug.
-090
-091  // Check that this region is not 
already online
-092  if 
(this.rsServices.getRegion(encodedName) != null) {
-093LOG.error("Region " + encodedName 
+
-094" was already online when we 
started processing the opening. " +
-095"Marking this new attempt as 
failed");
-096return;
-097  }
-098
-099  // Check that we're still supposed 
to open the region.
-100  // If fails, just return.  Someone 
stole the region from under us.
-101  if (!isRegionStillOpening()){
-102LOG.error("Region " + encodedName 
+ " opening cancelled");
-103return;
-104  }
-105
-106  // Open region.  After a successful 
open, failures in subsequent
-107  // processing needs to do a close 
as part of cleanup.
-108  region = openRegion();
-109  if (region == null) {
-110return;
-111  }
-112
-113  if (!updateMeta(region, 
masterSystemTime) || this.server.isStopped() ||
-114  this.rsServices.isStopping()) 
{
-115return;
-116  }
-117
-118  if (!isRegionStillOpening()) {
-119return;
-120  }
-121
-122  // Successful region open, and add 
it to MutableOnlineRegions
-123  
this.rsServices.addRegion(region);
-124  openSuccessful = true;
-125
-126  // Done!  Successful region open
-127  LOG.debug("Opened " + regionName + 
" on " + this.server.getServerName());
-128} finally {
-129  // Do all clean up here
-130  if (!openSuccessful) {
-131doCleanUpOnFailedOpen(region);
-132  }
-133  final Boolean current = 
this.rsServices.getRegionsInTransitionInRS().
-134  
remove(this.regionInfo.getEncodedNameAsBytes());
-135
-136  // Let's check if we have met a 
race condition on open cancellation
-137  // A better solution would be to 
not have any race condition.
-138  // 
this.rsServices.getRegionsInTransitionInRS().remove(
-139  //  
this.regionInfo.getEncodedNameAsBytes(), Boolean.TRUE);
-140  // would help.
-141  if (openSuccessful) {
-142if (current == null) { // Should 
NEVER happen, but let's be paranoid.
-143  LOG.error("Bad state: we've 
just 
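For context, OpenRegionHandler is an EventHandler run by the region server's event pipeline once the OPEN RPC arrives. A sketch of the submission step, using the public constructor shown above; the executorService name is a hypothetical stand-in for that pipeline, not an API from this diff:

    // Sketch only: queue the open; process() then runs the sequence above.
    OpenRegionHandler handler =
        new OpenRegionHandler(server, rsServices, regionInfo, htd, masterSystemTime);
    executorService.submit(handler);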

[04/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index 2c14c50..43c66a8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -46,2104 +46,2113 @@
 038import java.util.concurrent.atomic.AtomicLong;
 039import java.util.stream.Collectors;
 040import java.util.stream.Stream;
-041import org.apache.hadoop.conf.Configuration;
-042import org.apache.hadoop.hbase.HConstants;
-043import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-044import org.apache.hadoop.hbase.log.HBaseMarkers;
-045import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-046import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-047import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-048import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-049import org.apache.hadoop.hbase.security.User;
-050import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-051import org.apache.hadoop.hbase.util.IdLock;
-052import org.apache.hadoop.hbase.util.NonceKey;
-053import org.apache.hadoop.hbase.util.Threads;
-054import org.apache.yetus.audience.InterfaceAudience;
-055import org.slf4j.Logger;
-056import org.slf4j.LoggerFactory;
-057
-058import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-059import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-060
-061import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-062
-063/**
-064 * Thread Pool that executes the submitted procedures.
-065 * The executor has a ProcedureStore associated.
-066 * Each operation is logged and on restart the pending procedures are resumed.
-067 *
-068 * Unless the Procedure code throws an error (e.g. invalid user input)
-069 * the procedure will complete (at some point in time). On restart the pending
-070 * procedures are resumed and the ones that failed will be rolled back.
-071 *
-072 * The user can add procedures to the executor via submitProcedure(proc),
-073 * check for the finished state via isFinished(procId)
-074 * and get the result via getResult(procId).
-075 */
-076@InterfaceAudience.Private
-077public class ProcedureExecutor<TEnvironment> {
-078  private static final Logger LOG = LoggerFactory.getLogger(ProcedureExecutor.class);
-079
-080  public static final String CHECK_OWNER_SET_CONF_KEY = "hbase.procedure.check.owner.set";
-081  private static final boolean DEFAULT_CHECK_OWNER_SET = false;
-082
-083  public static final String WORKER_KEEP_ALIVE_TIME_CONF_KEY =
-084      "hbase.procedure.worker.keep.alive.time.msec";
-085  private static final long DEFAULT_WORKER_KEEP_ALIVE_TIME = TimeUnit.MINUTES.toMillis(1);
-086
-087  /**
-088   * {@link #testing} is non-null when ProcedureExecutor is being tested. Tests will try to
-089   * break PE having it fail at various junctures. When non-null, testing is set to an instance of
-090   * the below internal {@link Testing} class with flags set for the particular test.
-091   */
-092  Testing testing = null;
-093
-094  /**
-095   * Class with parameters describing how to fail/die when in testing-context.
-096   */
-097  public static class Testing {
-098    protected boolean killIfHasParent = true;
-099    protected boolean killIfSuspended = false;
-100
-101    /**
-102     * Kill the PE BEFORE we store state to the WAL. Good for figuring out if a Procedure is
-103     * persisting all the state it needs to recover after a crash.
-104     */
-105    protected boolean killBeforeStoreUpdate = false;
-106    protected boolean toggleKillBeforeStoreUpdate = false;
-107
-108    /**
-109     * Set when we want to fail AFTER state has been stored into the WAL. Rarely used. HBASE-20978
-110     * is about a case where memory-state was being set after store to WAL where a crash could
-111     * cause us to get stuck. This flag allows killing at what was a vulnerable time.
-112     */
-113    protected boolean killAfterStoreUpdate = false;
-114    protected boolean toggleKillAfterStoreUpdate = false;
-115
-116    protected boolean shouldKillBeforeStoreUpdate() {
-117      final boolean kill = this.killBeforeStoreUpdate;
-118      if (this.toggleKillBeforeStoreUpdate) {
-119        this.killBeforeStoreUpdate = !kill;
-120        LOG.warn("Toggle KILL before store update to: " + this.killBeforeStoreUpdate);
-121      }
-122      return kill;
-123    }
-124
-125    protected boolean shouldKillBeforeStoreUpdate(boolean isSuspended, boolean hasParent) {
-126      if (isSuspended  
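The class comment above spells out the client contract. A minimal sketch of that loop, assuming a hypothetical executor instance procExec and procedure proc; the polling interval is arbitrary and InterruptedException handling is elided:

    // Sketch only: submit, poll for completion, then fetch the result.
    long procId = procExec.submitProcedure(proc);
    while (!procExec.isFinished(procId)) {
      Thread.sleep(100);  // (InterruptedException handling elided)
    }
    Procedure<?> result = procExec.getResult(procId);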

[04/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.html b/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.html
index 0ba0d47..3a7cd7f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/security/visibility/VisibilityLabelFilter.html
@@ -31,77 +31,77 @@
 023
 024import org.apache.yetus.audience.InterfaceAudience;
 025import org.apache.hadoop.hbase.Cell;
-026import org.apache.hadoop.hbase.CellUtil;
-027import org.apache.hadoop.hbase.PrivateCellUtil;
-028import org.apache.hadoop.hbase.filter.FilterBase;
-029import org.apache.hadoop.hbase.util.ByteRange;
-030import org.apache.hadoop.hbase.util.SimpleMutableByteRange;
-031
-032/**
-033 * This Filter checks the visibility expression with each KV against visibility labels associated
-034 * with the scan. Based on the check the KV is included in the scan result or gets filtered out.
-035 */
-036@InterfaceAudience.Private
-037class VisibilityLabelFilter extends FilterBase {
-038
-039  private final VisibilityExpEvaluator expEvaluator;
-040  private final Map<ByteRange, Integer> cfVsMaxVersions;
-041  private final ByteRange curFamily;
-042  private final ByteRange curQualifier;
-043  private int curFamilyMaxVersions;
-044  private int curQualMetVersions;
-045
-046  public VisibilityLabelFilter(VisibilityExpEvaluator expEvaluator,
-047      Map<ByteRange, Integer> cfVsMaxVersions) {
-048    this.expEvaluator = expEvaluator;
-049    this.cfVsMaxVersions = cfVsMaxVersions;
-050    this.curFamily = new SimpleMutableByteRange();
-051    this.curQualifier = new SimpleMutableByteRange();
-052  }
-053
-054  @Override
-055  public boolean filterRowKey(Cell cell) throws IOException {
-056    // Impl in FilterBase might do unnecessary copy for Off heap backed Cells.
-057    return false;
-058  }
-059
-060  @Override
-061  public ReturnCode filterCell(final Cell cell) throws IOException {
-062    if (curFamily.getBytes() == null
-063        || !(PrivateCellUtil.matchingFamily(cell, curFamily.getBytes(), curFamily.getOffset(),
-064            curFamily.getLength()))) {
-065      curFamily.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
-066      // For this family, all the columns can have max of curFamilyMaxVersions versions. No need to
-067      // consider the older versions for visibility label check.
-068      // Ideally this should have been done at a lower layer by HBase (?)
-069      curFamilyMaxVersions = cfVsMaxVersions.get(curFamily);
-070      // Family is changed. Just unset curQualifier.
-071      curQualifier.unset();
-072    }
-073    if (curQualifier.getBytes() == null || !(PrivateCellUtil.matchingQualifier(cell,
-074        curQualifier.getBytes(), curQualifier.getOffset(), curQualifier.getLength()))) {
-075      curQualifier.set(cell.getQualifierArray(), cell.getQualifierOffset(),
-076          cell.getQualifierLength());
-077      curQualMetVersions = 0;
-078    }
-079    curQualMetVersions++;
-080    if (curQualMetVersions > curFamilyMaxVersions) {
-081      return ReturnCode.SKIP;
-082    }
-083
-084    return this.expEvaluator.evaluate(cell) ? ReturnCode.INCLUDE : ReturnCode.SKIP;
-085  }
-086
-087  @Override
-088  public void reset() throws IOException {
-089    this.curFamily.unset();
-090    this.curQualifier.unset();
-091    this.curFamilyMaxVersions = 0;
-092    this.curQualMetVersions = 0;
-093  }
+026import org.apache.hadoop.hbase.PrivateCellUtil;
+027import org.apache.hadoop.hbase.filter.FilterBase;
+028import org.apache.hadoop.hbase.util.ByteRange;
+029import org.apache.hadoop.hbase.util.SimpleMutableByteRange;
+030
+031/**
+032 * This Filter checks the visibility expression with each KV against visibility labels associated
+033 * with the scan. Based on the check the KV is included in the scan result or gets filtered out.
+034 */
+035@InterfaceAudience.Private
+036class VisibilityLabelFilter extends FilterBase {
+037
+038  private final VisibilityExpEvaluator expEvaluator;
+039  private final Map<ByteRange, Integer> cfVsMaxVersions;
+040  private final ByteRange curFamily;
+041  private final ByteRange curQualifier;
+042  private int curFamilyMaxVersions;
+043  private int curQualMetVersions;
+044
+045  public VisibilityLabelFilter(VisibilityExpEvaluator expEvaluator,
+046      Map<ByteRange, Integer> cfVsMaxVersions) {
+047    this.expEvaluator = expEvaluator;
+048    this.cfVsMaxVersions = cfVsMaxVersions;
+049    this.curFamily = new SimpleMutableByteRange();
+050    this.curQualifier = new SimpleMutableByteRange();
+051  }
+052
+053  @Override
+054  public boolean 
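This filter enforces per-cell visibility server-side; the client simply labels cells on write and supplies authorizations on read. A minimal sketch of that client flow, assuming visibility labels are enabled on the cluster and a hypothetical Table handle named table:

    // Sketch only: write a labelled cell, then scan with a matching label.
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    put.setCellVisibility(new CellVisibility("secret|topsecret"));
    table.put(put);

    Scan scan = new Scan();
    scan.setAuthorizations(new Authorizations("secret"));  // evaluator accepts the cell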

[04/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.CompatStateSerializer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.CompatStateSerializer.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.CompatStateSerializer.html
index 1a491f3..76e3c88 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.CompatStateSerializer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.CompatStateSerializer.html
@@ -213,122 +213,130 @@
 205    if (proc.hasLock()) {
 206      builder.setLocked(true);
 207    }
-208    return builder.build();
-209  }
-210
-211  /**
-212   * Helper to convert the protobuf procedure.
-213   * Used by ProcedureStore implementations.
-214   *
-215   * TODO: OPTIMIZATION: some of the fields never change during the execution
-216   * (e.g. className, procId, parentId, ...).
-217   * We can split in 'data' and 'state', and the store
-218   * may take advantage of it by storing the data only on insert().
-219   */
-220  public static Procedure convertToProcedure(final ProcedureProtos.Procedure proto) throws IOException {
-221    // Procedure from class name
-222    final Procedure proc = newProcedure(proto.getClassName());
-223
-224    // set fields
-225    proc.setProcId(proto.getProcId());
-226    proc.setState(proto.getState());
-227    proc.setSubmittedTime(proto.getSubmittedTime());
-228    proc.setLastUpdate(proto.getLastUpdate());
-229
-230    if (proto.hasParentId()) {
-231      proc.setParentProcId(proto.getParentId());
-232    }
+208
+209    if (proc.isBypass()) {
+210      builder.setBypass(true);
+211    }
+212    return builder.build();
+213  }
+214
+215  /**
+216   * Helper to convert the protobuf procedure.
+217   * Used by ProcedureStore implementations.
+218   *
+219   * TODO: OPTIMIZATION: some of the fields never change during the execution
+220   * (e.g. className, procId, parentId, ...).
+221   * We can split in 'data' and 'state', and the store
+222   * may take advantage of it by storing the data only on insert().
+223   */
+224  public static Procedure convertToProcedure(final ProcedureProtos.Procedure proto) throws IOException {
+225    // Procedure from class name
+226    final Procedure proc = newProcedure(proto.getClassName());
+227
+228    // set fields
+229    proc.setProcId(proto.getProcId());
+230    proc.setState(proto.getState());
+231    proc.setSubmittedTime(proto.getSubmittedTime());
+232    proc.setLastUpdate(proto.getLastUpdate());
 233
-234    if (proto.hasOwner()) {
-235      proc.setOwner(proto.getOwner());
+234    if (proto.hasParentId()) {
+235      proc.setParentProcId(proto.getParentId());
 236    }
 237
-238    if (proto.hasTimeout()) {
-239      proc.setTimeout(proto.getTimeout());
+238    if (proto.hasOwner()) {
+239      proc.setOwner(proto.getOwner());
 240    }
 241
-242    if (proto.getStackIdCount() > 0) {
-243      proc.setStackIndexes(proto.getStackIdList());
+242    if (proto.hasTimeout()) {
+243      proc.setTimeout(proto.getTimeout());
 244    }
 245
-246    if (proto.hasException()) {
-247      assert proc.getState() == ProcedureProtos.ProcedureState.FAILED ||
-248          proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK :
-249          "The procedure must be failed (waiting to rollback) or rolledback";
-250      proc.setFailure(RemoteProcedureException.fromProto(proto.getException()));
-251    }
-252
-253    if (proto.hasResult()) {
-254      proc.setResult(proto.getResult().toByteArray());
+246    if (proto.getStackIdCount() > 0) {
+247      proc.setStackIndexes(proto.getStackIdList());
+248    }
+249
+250    if (proto.hasException()) {
+251      assert proc.getState() == ProcedureProtos.ProcedureState.FAILED ||
+252          proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK :
+253          "The procedure must be failed (waiting to rollback) or rolledback";
+254      proc.setFailure(RemoteProcedureException.fromProto(proto.getException()));
 255    }
 256
-257    if (proto.getNonce() != HConstants.NO_NONCE) {
-258      proc.setNonceKey(new NonceKey(proto.getNonceGroup(), proto.getNonce()));
+257    if (proto.hasResult()) {
+258      proc.setResult(proto.getResult().toByteArray());
 259    }
 260
-261    if (proto.getLocked()) {
-262      proc.lockedWhenLoading();
+261    if (proto.getNonce() != HConstants.NO_NONCE) {
+262      proc.setNonceKey(new NonceKey(proto.getNonceGroup(), proto.getNonce()));
 263    }
 264
-265    ProcedureStateSerializer serializer = null;
-266
-267    if (proto.getStateMessageCount() > 0) {
-268      serializer = new 
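These helpers form a proto round trip used by ProcedureStore implementations. A minimal sketch, assuming a hypothetical Procedure instance proc; convertToProtoProcedure is the serializing counterpart this utility class also provides (both throw IOException, handling elided):

    // Sketch only: serialize a procedure and rebuild it from the proto.
    ProcedureProtos.Procedure proto = ProcedureUtil.convertToProtoProcedure(proc);
    Procedure restored = ProcedureUtil.convertToProcedure(proto);
    assert restored.getProcId() == proc.getProcId();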

[04/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
index 5b020ba..ccfe07f 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -873,385 +873,391 @@
 [Summary-table row markup omitted; the only content change is the new TestHbck
 entry. Entry list after the change, with descriptions indented:]
+TestHbck
+  Class to test HBaseHbck.
 TestHTableMultiplexer
 TestHTableMultiplexerFlushCache
 TestHTableMultiplexerViaMocks
 TestIllegalTableDescriptor
 TestImmutableHColumnDescriptor
 TestImmutableHRegionInfo
   Test ImmutableHRegionInfo
 TestImmutableHTableDescriptor
 TestIncrement
 TestIncrementFromClientSideWithCoprocessor
   Test all Increment client operations with a coprocessor that just implements
   the default flush/compact/scan policy.
 TestIncrementsFromClientSide
   Run Increment tests that use the HBase clients; HTable.
 TestInterfaceAlign
 TestIntraRowPagination
   Test scan/get offset and limit settings within one row through HRegion API.
 TestLeaseRenewal
 TestLimitedScanWithFilter
   With filter we may stop at a middle of row and think that we still have more
   cells for the current row but actually all the remaining cells will be
   filtered out by the filter.
 TestMalformedCellFromClient
   The purpose of this test is to ensure whether rs deals with the malformed
   cells correctly.
 TestMetaCache
 TestMetaCache.CallQueueTooBigExceptionInjector
   Throws CallQueueTooBigException for all gets.
 TestMetaCache.ExceptionInjector
 TestMetaCache.FakeRSRpcServices
 TestMetaCache.RegionServerWithFakeRpcServices
 TestMetaCache.RoundRobinExceptionInjector
   Rotates through the possible cache clearing and non-cache clearing exceptions
   for requests.
 TestMetaWithReplicas
   Tests the scenarios where replicas are enabled for the meta table
 TestMetricsConnection
 TestMobCloneSnapshotFromClient
   Test clone snapshots from the client
 TestMobCloneSnapshotFromClient.DelayFlushCoprocessor
   This coprocessor is used to delay the flush.
 TestMobRestoreSnapshotFromClient
   Test restore snapshots from the client
 TestMobSnapshotCloneIndependence
   Test to verify that the cloned table is independent of the table from which
   it was cloned
 TestMobSnapshotFromClient
   Test create/using/deleting snapshots from the client
 TestMultiParallel
 TestMultiParallel.MyMasterObserver
 TestMultipleTimestamps
   Run tests related to TimestampsFilter using HBase client APIs.
 TestMultiRespectsLimits
   This test sets the multi size WAY low and then checks to make sure that gets
   will still make progress.
 TestMutation
 TestMvccConsistentScanner
 TestOperation
   Run tests that use the functionality of the Operation superclass for Puts,
   Gets, Deletes, Scans, and MultiPuts.
 TestProcedureFuture
 TestProcedureFuture.TestFuture
 TestPutDeleteEtcCellIteration
   Test that I can Iterate Client Actions that hold Cells (Get does not have
   Cells).
 TestPutDotHas
 TestPutWithDelete
 TestPutWriteToWal
 TestQuotasShell
 TestRawAsyncScanCursor
 TestRawAsyncTableLimitedScanWithFilter
   With filter we may stop at a middle of row and think that we still have more
   cells for the current row but actually all the remaining cells will be
   filtered out by the filter.
 TestRawAsyncTablePartialScan
 TestRawAsyncTableScan
 TestRegionInfoDisplay
 TestReplicasClient
   Tests for region replicas.
 TestReplicasClient.SlowMeCopro
   This copro is used to synchronize the tests.
 TestReplicationShell
 TestReplicaWithCluster
 TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowOrStopCopro
   This copro is used to slow down the primary meta region scan a bit
 TestReplicaWithCluster.RegionServerStoppedCopro
   This copro is used to simulate region server down exception for Get and Scan
 TestReplicaWithCluster.SlowMeCopro
   This copro is used to synchronize the tests.
 TestRestoreSnapshotFromClient
   Test restore snapshots from the client
 TestRestoreSnapshotFromClientWithRegionReplicas
 TestResult
 TestResultFromCoprocessor
 TestResultFromCoprocessor.MyObserver
 TestResultScannerCursor
 TestResultSizeEstimation
 TestRetriesExhaustedWithDetailsException
 TestReversedScannerCallable
 TestRowComparator
 TestRpcControllerFactory

[04/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnValueFilter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnValueFilter.html b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnValueFilter.html
index ee5595f..961079a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnValueFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnValueFilter.html
@@ -29,224 +29,240 @@
 021
 022import java.io.IOException;
 023import java.util.ArrayList;
-024
-025import org.apache.hadoop.hbase.Cell;
-026import org.apache.hadoop.hbase.CellUtil;
-027import org.apache.hadoop.hbase.CompareOperator;
-028import org.apache.hadoop.hbase.PrivateCellUtil;
-029import org.apache.hadoop.hbase.exceptions.DeserializationException;
-030import org.apache.hadoop.hbase.util.Bytes;
-031import org.apache.yetus.audience.InterfaceAudience;
-032
-033import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-034import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-035import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-036
-037import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-038import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-039import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-040
-041/**
-042 * Different from {@link SingleColumnValueFilter} which returns an <b>entire</b> row
-043 * when specified condition is matched, {@link ColumnValueFilter} returns the matched cell only.
-044 * <p>
-045 * This filter is used to filter cells based on column and value.
-046 * It takes a {@link org.apache.hadoop.hbase.CompareOperator} operator (<, <=, =, !=, >, >=),
-047 * and a {@link ByteArrayComparable} comparator.
-048 */
-049@InterfaceAudience.Public
-050public class ColumnValueFilter extends FilterBase {
-051  private final byte[] family;
-052  private final byte[] qualifier;
-053  private final CompareOperator op;
-054  private final ByteArrayComparable comparator;
-055
-056  // This flag is used to speed up seeking cells when matched column is found, such that following
-057  // columns in the same row can be skipped faster by NEXT_ROW instead of NEXT_COL.
-058  private boolean columnFound = false;
-059
-060  public ColumnValueFilter(final byte[] family, final byte[] qualifier,
-061      final CompareOperator op, final byte[] value) {
-062    this(family, qualifier, op, new BinaryComparator(value));
-063  }
-064
-065  public ColumnValueFilter(final byte[] family, final byte[] qualifier,
-066      final CompareOperator op,
-067      final ByteArrayComparable comparator) {
-068    this.family = Preconditions.checkNotNull(family, "family should not be null.");
-069    this.qualifier = qualifier == null ? new byte[0] : qualifier;
-070    this.op = Preconditions.checkNotNull(op, "CompareOperator should not be null");
-071    this.comparator = Preconditions.checkNotNull(comparator, "Comparator should not be null");
-072  }
-073
-074  /**
-075   * @return operator
-076   */
-077  public CompareOperator getCompareOperator() {
-078    return op;
-079  }
-080
-081  /**
-082   * @return the comparator
-083   */
-084  public ByteArrayComparable getComparator() {
-085    return comparator;
-086  }
-087
-088  /**
-089   * @return the column family
-090   */
-091  public byte[] getFamily() {
-092    return family;
-093  }
-094
-095  /**
-096   * @return the qualifier
-097   */
-098  public byte[] getQualifier() {
-099    return qualifier;
-100  }
-101
-102  @Override
-103  public void reset() throws IOException {
-104    columnFound = false;
-105  }
-106
-107  @Override
-108  public boolean filterRowKey(Cell cell) throws IOException {
-109    return false;
-110  }
-111
-112  @Override
-113  public ReturnCode filterCell(Cell c) throws IOException {
-114    // 1. Check column match
-115    if (!CellUtil.matchingColumn(c, this.family, this.qualifier)) {
-116      return columnFound ? ReturnCode.NEXT_ROW : ReturnCode.NEXT_COL;
-117    }
-118    // Column found
-119    columnFound = true;
-120    // 2. Check value match:
-121    // True means filter out, just skip this cell, else include it.
-122    return compareValue(getCompareOperator(), getComparator(), c) ?
-123        ReturnCode.SKIP : ReturnCode.INCLUDE;
-124  }
-125
-126  /**
-127   * This method is used to determine a cell should be included or filtered out.
-128   * @param op one of operators {@link CompareOperator}
-129   * @param comparator comparator used to compare cells.
-130   * @param cell cell to be compared.
-131   * @return true means cell should be filtered out, included otherwise.
-132   */
-133  private boolean compareValue(final CompareOperator op, final 
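The class comment contrasts this filter with SingleColumnValueFilter: only the matched cell is returned, never the whole row. A minimal usage sketch with the four-argument constructor shown above; attaching a filter to a Scan is standard client API, and the family/qualifier/value bytes here are placeholders:

    // Sketch only: keep just the cells of f:q whose value equals "expected".
    ColumnValueFilter filter = new ColumnValueFilter(
        Bytes.toBytes("f"),          // family
        Bytes.toBytes("q"),          // qualifier
        CompareOperator.EQUAL,
        Bytes.toBytes("expected"));  // value, wrapped in a BinaryComparator
    Scan scan = new Scan();
    scan.setFilter(filter);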

[04/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
index edfb4bf..8b112b5 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":9,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":
 
10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":9,"i129":10};
+var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":9,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":
 
10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":9,"i130":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -644,235 +644,241 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 void
+testFlushAndMemstoreSizeCounting()
+A test case of HBASE-21041
+
+
+
+void
 testFlushCacheWhileScanning()
 Flushes the cache in a thread while scanning.
 
 
-
+
 void
 testFlushedFileWithNoTags()
 
-
+
 void
 testFlushMarkers()
 
-
+
 void
 testFlushMarkersWALFail()
 
-
+
 void
 testFlushResult()
 Test that we get the expected flush results back
 
 
-
+
 void
 testFlushSizeAccounting()
 Test we do not lose data if we fail a flush and then 
close.
 
 
-
+
 void
 testGet_Basic()
 
-
+
 void
 testGet_Empty()
 
-
+
 void
 testGet_FamilyChecker()
 
-
+
 void
 testgetHDFSBlocksDistribution()
 
-
+
 void
 testGetScanner_WithNoFamilies()
 
-
+
 void
 testGetScanner_WithNotOkFamilies()
 
-
+
 void
 testGetScanner_WithOkFamilies()
 
-
+
 void
 testGetScanner_WithRegionClosed()
 This method tests 
https://issues.apache.org/jira/browse/HBASE-2516.
 
 
-
+
 void
 testGetWhileRegionClose()
 
-
+
 void
 testGetWithFilter()
 
-
+
 void
 testHolesInMeta()
 
-
+
 void
 testIncrementTimestampsAreMonotonic()
 
-
+
 void
 testIncrWithReadOnlyTable()
 
-
+
 void
 testIndexesScanWithOneDeletedRow()
 
-
+
 void
 testLongQualifier()
Write an HFile block full of Cells whose qualifiers are identical, between
 0 and Short.MAX_VALUE.
 
 
-
+
 void
 testMemstoreSizeAccountingWithFailedPostBatchMutate()
 
-
+
 void
 testMemstoreSnapshotSize()
 
-
+
 void
 testMutateRow_WriteRequestCount()
 
-
+
 void
 testOpenRegionWrittenToWAL()
 
-
+
 void
 testParallelAppendWithMemStoreFlush()
 Test case to check append function with memstore 
flushing
 
 
-
+
 void
 testParallelIncrementWithMemStoreFlush()
 Test case to check increment function with memstore 
flushing
 
 
-
+
 void
 testPutWithLatestTS()
 

[04/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index 81f5178..7df71bd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -108,3669 +108,3727 @@
 100import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 101import 
org.apache.hadoop.hbase.log.HBaseMarkers;
 102import 
org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-103import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-104import 
org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-105import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-106import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-107import 
org.apache.hadoop.hbase.master.balancer.BalancerChore;
-108import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-109import 
org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-110import 
org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-111import 
org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-112import 
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-113import 
org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-114import 
org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-115import 
org.apache.hadoop.hbase.master.locking.LockManager;
-116import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-117import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-118import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-119import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-120import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
-121import 
org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
-122import 
org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
-123import 
org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
-124import 
org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-125import 
org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
-126import 
org.apache.hadoop.hbase.master.procedure.InitMetaProcedure;
-127import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-128import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-129import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-130import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-131import 
org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
-132import 
org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-133import 
org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-134import 
org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
-135import 
org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure;
-136import 
org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
-137import 
org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
-138import 
org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
-139import 
org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
-140import 
org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
-141import 
org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager;
-142import 
org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure;
-143import 
org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
-144import 
org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-145import 
org.apache.hadoop.hbase.master.zksyncer.MasterAddressSyncer;
-146import 
org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
-147import 
org.apache.hadoop.hbase.mob.MobConstants;
-148import 
org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
-149import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-150import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-151import 
org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
-152import 
org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
-153import 
org.apache.hadoop.hbase.procedure2.LockedResource;
-154import 
org.apache.hadoop.hbase.procedure2.Procedure;
-155import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-156import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-157import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-158import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureException;

[04/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.html
index 0b475e4..6b1d637 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.html
@@ -52,126 +52,130 @@
 044import 
org.apache.zookeeper.server.ZooKeeperServerMain;
 045import 
org.apache.zookeeper.server.quorum.QuorumPeerConfig;
 046import 
org.apache.zookeeper.server.quorum.QuorumPeerMain;
-047
-048/**
-049 * HBase's version of ZooKeeper's 
QuorumPeer. When HBase is set to manage
-050 * ZooKeeper, this class is used to start 
up QuorumPeer instances. By doing
-051 * things in here rather than directly 
calling to ZooKeeper, we have more
-052 * control over the process. This class 
uses {@link ZKConfig} to get settings
-053 * from the hbase-site.xml file.
-054 */
-055@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-056@InterfaceStability.Evolving
-057public final class HQuorumPeer {
-058  private HQuorumPeer() {
-059  }
-060
-061  /**
-062   * Parse ZooKeeper configuration from 
HBase XML config and run a QuorumPeer.
-063   * @param args String[] of command line 
arguments. Not used.
-064   */
-065  public static void main(String[] args) 
{
-066Configuration conf = 
HBaseConfiguration.create();
-067try {
-068  Properties zkProperties = 
ZKConfig.makeZKProps(conf);
-069  writeMyID(zkProperties);
-070  QuorumPeerConfig zkConfig = new 
QuorumPeerConfig();
-071  
zkConfig.parseProperties(zkProperties);
-072
-073  // login the zookeeper server 
principal (if using security)
-074  ZKUtil.loginServer(conf, 
HConstants.ZK_SERVER_KEYTAB_FILE,
-075
HConstants.ZK_SERVER_KERBEROS_PRINCIPAL,
-076
zkConfig.getClientPortAddress().getHostName());
-077
-078  runZKServer(zkConfig);
-079} catch (Exception e) {
-080  e.printStackTrace();
-081  System.exit(-1);
-082}
-083  }
-084
-085  private static void 
runZKServer(QuorumPeerConfig zkConfig)
-086  throws UnknownHostException, 
IOException {
-087if (zkConfig.isDistributed()) {
-088  QuorumPeerMain qp = new 
QuorumPeerMain();
-089  qp.runFromConfig(zkConfig);
-090} else {
-091  ZooKeeperServerMain zk = new 
ZooKeeperServerMain();
-092  ServerConfig serverConfig = new 
ServerConfig();
-093  serverConfig.readFrom(zkConfig);
-094  zk.runFromConfig(serverConfig);
-095}
-096  }
-097
-098  private static boolean 
addressIsLocalHost(String address) {
-099return address.equals("localhost") || 
address.equals("127.0.0.1");
+047import org.slf4j.Logger;
+048import org.slf4j.LoggerFactory;
+049
+050/**
+051 * HBase's version of ZooKeeper's 
QuorumPeer. When HBase is set to manage
+052 * ZooKeeper, this class is used to start 
up QuorumPeer instances. By doing
+053 * things in here rather than directly 
calling to ZooKeeper, we have more
+054 * control over the process. This class 
uses {@link ZKConfig} to get settings
+055 * from the hbase-site.xml file.
+056 */
+057@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
+058@InterfaceStability.Evolving
+059public final class HQuorumPeer {
+060  private static final Logger LOG = 
LoggerFactory.getLogger(HQuorumPeer.class);
+061
+062  private HQuorumPeer() {
+063  }
+064
+065  /**
+066   * Parse ZooKeeper configuration from 
HBase XML config and run a QuorumPeer.
+067   * @param args String[] of command line 
arguments. Not used.
+068   */
+069  public static void main(String[] args) 
{
+070Configuration conf = 
HBaseConfiguration.create();
+071try {
+072  Properties zkProperties = 
ZKConfig.makeZKProps(conf);
+073  writeMyID(zkProperties);
+074  QuorumPeerConfig zkConfig = new 
QuorumPeerConfig();
+075  
zkConfig.parseProperties(zkProperties);
+076
+077  // login the zookeeper server 
principal (if using security)
+078  ZKUtil.loginServer(conf, 
HConstants.ZK_SERVER_KEYTAB_FILE,
+079
HConstants.ZK_SERVER_KERBEROS_PRINCIPAL,
+080
zkConfig.getClientPortAddress().getHostName());
+081
+082  runZKServer(zkConfig);
+083} catch (Exception e) {
+084  LOG.error("Failed to start 
ZKServer", e);
+085  System.exit(-1);
+086}
+087  }
+088
+089  private static void 
runZKServer(QuorumPeerConfig zkConfig)
+090  throws UnknownHostException, 
IOException {
+091if (zkConfig.isDistributed()) {
+092  QuorumPeerMain qp = new 
QuorumPeerMain();
+093  qp.runFromConfig(zkConfig);
+094} else {
+095  ZooKeeperServerMain zk = new 
ZooKeeperServerMain();
+096  ServerConfig serverConfig = new 
ServerConfig();
+097  serverConfig.readFrom(zkConfig);
+098  
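
The change above swaps e.printStackTrace() for SLF4J logging. A minimal sketch of
the same pattern (the class name and startup stub are placeholders):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class StartupLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(StartupLoggingSketch.class);

  public static void main(String[] args) {
    try {
      doStartup(); // stand-in for the real ZooKeeper startup work
    } catch (Exception e) {
      // Passing the exception as the last argument makes the logging backend
      // print the full stack trace with the configured layout and appenders.
      LOG.error("Failed to start ZKServer", e);
      System.exit(-1);
    }
  }

  private static void doStartup() throws Exception {
  }
}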

[04/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/testdevapidocs/allclasses-frame.html
--
diff --git a/testdevapidocs/allclasses-frame.html 
b/testdevapidocs/allclasses-frame.html
index 1b24e50..b9a38d0 100644
--- a/testdevapidocs/allclasses-frame.html
+++ b/testdevapidocs/allclasses-frame.html
@@ -490,6 +490,7 @@
 RESTApiClusterManager.Service
 RestartActionBaseAction
 RestartActiveMasterAction
+RestartActiveNameNodeAction
 RestartMetaTest
 RestartRandomDataNodeAction
 RestartRandomRsAction

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/testdevapidocs/allclasses-noframe.html
--
diff --git a/testdevapidocs/allclasses-noframe.html 
b/testdevapidocs/allclasses-noframe.html
index 7b96d5d..8092c7f 100644
--- a/testdevapidocs/allclasses-noframe.html
+++ b/testdevapidocs/allclasses-noframe.html
@@ -490,6 +490,7 @@
 RESTApiClusterManager.Service
 RestartActionBaseAction
 RestartActiveMasterAction
+RestartActiveNameNodeAction
 RestartMetaTest
 RestartRandomDataNodeAction
 RestartRandomRsAction

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/testdevapidocs/constant-values.html
--
diff --git a/testdevapidocs/constant-values.html 
b/testdevapidocs/constant-values.html
index 92fdfe5..6491347 100644
--- a/testdevapidocs/constant-values.html
+++ b/testdevapidocs/constant-values.html
@@ -2220,6 +2220,20 @@
 "hbase.chaosmonkey.action.killmastertimeout"
 
 
+
+
+protected static final long
+KILL_NAMENODE_TIMEOUT_DEFAULT
+6L
+
+
+
+
+publicstaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+KILL_NAMENODE_TIMEOUT_KEY
+"hbase.chaosmonkey.action.killnamenodetimeout"
+
+
 
 
 protectedstaticfinallong
@@ -2276,6 +2290,20 @@
 "hbase.chaosmonkey.action.startmastertimeout"
 
 
+
+
+protected static final long
+START_NAMENODE_TIMEOUT_DEFAULT
+6L
+
+
+
+
+publicstaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+START_NAMENODE_TIMEOUT_KEY
+"hbase.chaosmonkey.action.startnamenodetimeout"
+
+
 
 
 protectedstaticfinallong
@@ -2327,6 +2355,39 @@
 
 
 
+org.apache.hadoop.hbase.chaos.actions.RestartActiveNameNodeAction
+
+Modifier and Type
+Constant Field
+Value
+
+
+
+
+
+privatestaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ACTIVE_NN_LOCK_NAME
+"ActiveStandbyElectorLock"
+
+
+
+
+privatestaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ZK_PARENT_ZNODE_DEFAULT
+"/hadoop-ha"
+
+
+
+
+privatestaticfinalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+ZK_PARENT_ZNODE_KEY
+"ha.zookeeper.parent-znode"
+
+
+
+
+
+
 org.apache.hadoop.hbase.chaos.actions.SplitAllRegionOfTableAction
 
 Modifier and Type

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 8c8b1c3..41763ca 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -498,6 +498,8 @@
 
 activateFailure
 - Static variable in class org.apache.hadoop.hbase.regionserver.wal.InstrumentedLogWriter
 
+ACTIVE_NN_LOCK_NAME
 - Static variable in class org.apache.hadoop.hbase.chaos.actions.RestartActiveNameNodeAction
+
 activeMasterManager
 - Variable in class org.apache.hadoop.hbase.master.TestActiveMasterManager.DummyMaster
 
 ACTOR_PATTERN
 - Static variable in class org.apache.hadoop.hbase.mapred.TestTableMapReduceUtil
@@ -22257,6 +22259,10 @@
 
 KILL_MASTER_TIMEOUT_KEY
 - Static variable in class org.apache.hadoop.hbase.chaos.actions.Action
 
+KILL_NAMENODE_TIMEOUT_DEFAULT
 - Static variable in class org.apache.hadoop.hbase.chaos.actions.Action
+
+KILL_NAMENODE_TIMEOUT_KEY
 - Static variable in class org.apache.hadoop.hbase.chaos.actions.Action
+
 KILL_RS_TIMEOUT_DEFAULT
 - Static variable in class org.apache.hadoop.hbase.chaos.actions.Action
 
 KILL_RS_TIMEOUT_KEY
 - Static variable in class org.apache.hadoop.hbase.chaos.actions.Action
@@ -22319,6 +22325,19 @@
 
 killMetaRs
 - Variable in class org.apache.hadoop.hbase.chaos.factories.UnbalanceMonkeyFactory
 
+killNameNode(ServerName)
 - Method in class org.apache.hadoop.hbase.chaos.actions.Action
+
+killNameNode(ServerName)
 - Method in class org.apache.hadoop.hbase.DistributedHBaseCluster
+
+killNameNode(ServerName)
 - Method in class org.apache.hadoop.hbase.HBaseCluster
+
+Kills the namenode process if this is a distributed 
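
The keys listed in the constant-values table above can be tuned like any other
HBase configuration. A hedged sketch (the timeout values are arbitrary examples):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ChaosNameNodeTimeoutsSketch {
  // Override the chaos-monkey NameNode kill/start timeouts; the defaults live
  // in org.apache.hadoop.hbase.chaos.actions.Action per the index above.
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.chaosmonkey.action.killnamenodetimeout", 120000L);
    conf.setLong("hbase.chaosmonkey.action.startnamenodetimeout", 120000L);
    // RestartActiveNameNodeAction locates the active NN under this HA znode:
    conf.set("ha.zookeeper.parent-znode", "/hadoop-ha");
    return conf;
  }
}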

[04/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
index dd41a30..624224f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
@@ -90,768 +90,758 @@
 082extends AbstractStateMachineTableProcedure<MergeTableRegionsState> {
 083  private static final Logger LOG = 
LoggerFactory.getLogger(MergeTableRegionsProcedure.class);
 084  private Boolean traceEnabled;
-085  private volatile boolean lock = 
false;
-086  private ServerName regionLocation;
-087  private RegionInfo[] regionsToMerge;
-088  private RegionInfo mergedRegion;
-089  private boolean forcible;
-090
-091  public MergeTableRegionsProcedure() {
-092// Required by the Procedure 
framework to create the procedure on replay
-093  }
-094
-095  public MergeTableRegionsProcedure(final 
MasterProcedureEnv env,
-096  final RegionInfo regionToMergeA, 
final RegionInfo regionToMergeB) throws IOException {
-097this(env, regionToMergeA, 
regionToMergeB, false);
-098  }
-099
-100  public MergeTableRegionsProcedure(final 
MasterProcedureEnv env,
-101  final RegionInfo regionToMergeA, 
final RegionInfo regionToMergeB,
-102  final boolean forcible) throws 
IOException {
-103this(env, new RegionInfo[] 
{regionToMergeA, regionToMergeB}, forcible);
-104  }
-105
-106  public MergeTableRegionsProcedure(final 
MasterProcedureEnv env,
-107  final RegionInfo[] regionsToMerge, 
final boolean forcible)
-108  throws IOException {
-109super(env);
-110
-111// Check daughter regions and make 
sure that we have valid daughter regions
-112// before doing the real work. This 
check calls the super method #checkOnline also.
-113checkRegionsToMerge(env, 
regionsToMerge, forcible);
-114
-115// WARN: make sure there is no parent 
region of the two merging regions in
-116// hbase:meta If exists, fixing up 
daughters would cause daughter regions(we
-117// have merged one) online again when 
we restart master, so we should clear
-118// the parent region to prevent the 
above case
-119// Since HBASE-7721, we don't need to fix up daughters any more, so here do nothing
-120this.regionsToMerge = 
regionsToMerge;
-121this.mergedRegion = 
createMergedRegionInfo(regionsToMerge);
-122preflightChecks(env, true);
-123this.forcible = forcible;
-124  }
-125
-126  private static void 
checkRegionsToMerge(MasterProcedureEnv env, final RegionInfo[] 
regionsToMerge,
-127  final boolean forcible) throws 
MergeRegionException {
-128// For now, we only merge 2 
regions.
-129// It could be extended to more than 
2 regions in the future.
-130if (regionsToMerge == null || 
regionsToMerge.length != 2) {
-131  throw new 
MergeRegionException("Expected to merge 2 regions, got: " +
-132
Arrays.toString(regionsToMerge));
-133}
-134
-135checkRegionsToMerge(env, 
regionsToMerge[0], regionsToMerge[1], forcible);
-136  }
-137
-138  /**
-139   * One time checks.
-140   */
-141  private static void 
checkRegionsToMerge(MasterProcedureEnv env, final RegionInfo regionToMergeA,
-142  final RegionInfo regionToMergeB, 
final boolean forcible) throws MergeRegionException {
-143if 
(!regionToMergeA.getTable().equals(regionToMergeB.getTable())) {
-144  throw new 
MergeRegionException("Can't merge regions from two different tables: " +
-145regionToMergeA + ", " + 
regionToMergeB);
-146}
-147
-148if (regionToMergeA.getReplicaId() != 
RegionInfo.DEFAULT_REPLICA_ID ||
-149regionToMergeB.getReplicaId() != 
RegionInfo.DEFAULT_REPLICA_ID) {
-150  throw new 
MergeRegionException("Can't merge non-default replicas");
-151}
-152
-153try {
-154  checkOnline(env, regionToMergeA);
-155  checkOnline(env, regionToMergeB);
-156} catch (DoNotRetryRegionException 
dnrre) {
-157  throw new 
MergeRegionException(dnrre);
-158}
-159
-160if 
(!RegionInfo.areAdjacent(regionToMergeA, regionToMergeB)) {
-161  String msg = "Unable to merge 
non-adjacent regions " + regionToMergeA.getShortNameToLog() +
-162  ", " + 
regionToMergeB.getShortNameToLog() + " where forcible = " + forcible;
-163  LOG.warn(msg);
-164  if (!forcible) {
-165throw new 
MergeRegionException(msg);
-166  }
-167}
-168  }
+085  private ServerName regionLocation;
+086  private RegionInfo[] regionsToMerge;
+087  private RegionInfo mergedRegion;
+088  private boolean forcible;
+089
+090  public MergeTableRegionsProcedure() {
+091// 
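
Client-side, a merge that goes through the checks above is requested via Admin;
a hedged sketch (the encoded region names are placeholders, and forcible=false
keeps the adjacency check fatal rather than a warning):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MergeRegionsSketch {
  // Ask the master to merge two adjacent regions of the same table; this is
  // what ultimately schedules a MergeTableRegionsProcedure.
  public static void merge(byte[] regionA, byte[] regionB) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.mergeRegionsAsync(regionA, regionB, false).get();
    }
  }
}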

[04/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
index 7766509..1b983c2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureDeque.html
@@ -39,7 +39,11 @@
 031 */
 032@InterfaceAudience.Private
 033public class ProcedureDeque extends ArrayDeque<Procedure> {
-034}
+034  public ProcedureDeque() {
+035// Default is 16 for a list that is 
rarely used; elements will resize if too small.
+036super(2);
+037  }
+038}
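
The constructor added above shrinks the deque's initial backing array from
ArrayDeque's default of 16 down to a hint of 2, on the theory that most
procedure queues stay tiny. A small illustration of the trade-off:

import java.util.ArrayDeque;

public class SmallDequeSketch {
  public static void main(String[] args) {
    // Request a small initial capacity instead of ArrayDeque's default of 16.
    // The requested size is only a hint (the deque may round it up
    // internally), and the backing array doubles whenever it fills up.
    ArrayDeque<String> deque = new ArrayDeque<>(2);
    deque.add("proc-1");
    deque.add("proc-2");
    deque.add("proc-3"); // growing past the hint is handled transparently
    System.out.println(deque.size()); // prints 3
  }
}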
 
 
 



[04/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html 
b/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
index 03beb7e..9b34e4d 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
@@ -1,10 +1,10 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
-类 org.apache.hadoop.hbase.HColumnDescriptor的使用 (Apache HBase 
3.0.0-SNAPSHOT API)
+Uses of Class org.apache.hadoop.hbase.HColumnDescriptor (Apache HBase 
3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个
-下一个
+Prev
+Next
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 
 
-

类的使用
org.apache.hadoop.hbase.HColumnDescriptor

+

Uses of Class
org.apache.hadoop.hbase.HColumnDescriptor

  • - - +
    使用HColumnDescriptor的程序包  
    + - - + + @@ -94,70 +94,70 @@
  • -

    org.apache.hadoop.hbase中HColumnDescriptor的使用

    -
  • Packages that use HColumnDescriptor 
    程序包说明PackageDescription
    - +

    Uses of HColumnDescriptor in org.apache.hadoop.hbase

    +
    返回HColumnDescriptor的org.apache.hadoop.hbase中的方法 
    + - - + + - +
    Methods in org.apache.hadoop.hbase that return HColumnDescriptor 
    限定符和类型方法和说明Modifier and TypeMethod and Description
    HColumnDescriptor[]

    [04/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html 
    b/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
    index 3a6e532..c8387a5 100644
    --- a/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
    +++ b/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
    @@ -1,10 +1,10 @@
     http://www.w3.org/TR/html4/loose.dtd;>
     
    -
    +
     
     
     
    -Uses of Class org.apache.hadoop.hbase.HBaseIOException (Apache HBase 
    3.0.0-SNAPSHOT API)
    +类 org.apache.hadoop.hbase.HBaseIOException的使用 (Apache HBase 
    3.0.0-SNAPSHOT API)
     
     
     
    @@ -12,7 +12,7 @@
     
     
     
    -JavaScript is disabled on your browser.
    +您的浏览器已禁用 JavaScript。
     
     
     
     
     
    -Skip navigation links
    +跳过导航链接
     
     
     
    -
    -Overview
    -Package
    -Class
    -Use
    -Tree
    -Deprecated
    -Index
    -Help
    +
    +概览
    +程序包
+类
    +使用
    +树
    +已过时
    +索引
    +帮助
     
     
     
     
    -Prev
    -Next
    +上一个
    +下一个
     
     
    -Frames
    -NoFrames
    +框架
    +无框架
     
     
    -AllClasses
    +所有类
     
     
     
     
    -

    Uses of Class
    org.apache.hadoop.hbase.HBaseIOException

    +

    类的使用
    org.apache.hadoop.hbase.HBaseIOException

    • - - +
      Packages that use HBaseIOException 
      + - - + + @@ -89,13 +89,33 @@ @@ -136,54 +156,54 @@
    • -

      Uses of HBaseIOException in

      [04/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.EvictionThread.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.EvictionThread.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.EvictionThread.html
      index 67f4551..017124c 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.EvictionThread.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.EvictionThread.html
      @@ -387,817 +387,804 @@
       379}
       380
       381LruCachedBlock cb = 
      map.get(cacheKey);
      -382if (cb != null) {
      -383  int comparison = 
      BlockCacheUtil.validateBlockAddition(cb.getBuffer(), buf, cacheKey);
      -384  if (comparison != 0) {
-385if (comparison < 0) {
      -386  LOG.warn("Cached block contents 
      differ by nextBlockOnDiskSize. Keeping cached block.");
      -387  return;
      -388} else {
      -389  LOG.warn("Cached block contents 
      differ by nextBlockOnDiskSize. Caching new block.");
      -390}
      -391  } else {
      -392String msg = "Cached an already 
      cached block: " + cacheKey + " cb:" + cb.getCacheKey();
      -393msg += ". This is harmless and 
      can happen in rare cases (see HBASE-8547)";
      -394LOG.debug(msg);
      -395return;
      -396  }
      -397}
      -398long currentSize = size.get();
      -399long currentAcceptableSize = 
      acceptableSize();
      -400long hardLimitSize = (long) 
      (hardCapacityLimitFactor * currentAcceptableSize);
-401if (currentSize >= hardLimitSize) {
      -402  stats.failInsert();
      -403  if (LOG.isTraceEnabled()) {
      -404LOG.trace("LruBlockCache current 
      size " + StringUtils.byteDesc(currentSize)
      -405  + " has exceeded acceptable 
      size " + StringUtils.byteDesc(currentAcceptableSize) + "."
      -406  + " The hard limit size is " + 
      StringUtils.byteDesc(hardLimitSize)
      -407  + ", failed to put cacheKey:" + 
      cacheKey + " into LruBlockCache.");
      -408  }
      -409  if (!evictionInProgress) {
      -410runEviction();
      -411  }
      -412  return;
      -413}
      -414cb = new LruCachedBlock(cacheKey, 
      buf, count.incrementAndGet(), inMemory);
      -415long newSize = updateSizeMetrics(cb, 
      false);
      -416map.put(cacheKey, cb);
      -417long val = 
      elements.incrementAndGet();
      -418if (buf.getBlockType().isData()) {
      -419   dataBlockElements.increment();
      -420}
      -421if (LOG.isTraceEnabled()) {
      -422  long size = map.size();
      -423  assertCounterSanity(size, val);
      -424}
-425if (newSize > currentAcceptableSize && !evictionInProgress) {
      -426  runEviction();
      -427}
      -428  }
      -429
      -430  /**
      -431   * Sanity-checking for parity between 
      actual block cache content and metrics.
      -432   * Intended only for use with TRACE 
      level logging and -ea JVM.
      -433   */
      -434  private static void 
      assertCounterSanity(long mapSize, long counterVal) {
-435if (counterVal < 0) {
      -436  LOG.trace("counterVal overflow. 
      Assertions unreliable. counterVal=" + counterVal +
      -437", mapSize=" + mapSize);
      -438  return;
      -439}
-440if (mapSize < Integer.MAX_VALUE) {
-441  double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.);
-442  if (pct_diff > 0.05) {
      -443LOG.trace("delta between reported 
      and actual size  5%. counterVal=" + counterVal +
      -444  ", mapSize=" + mapSize);
      -445  }
      -446}
      -447  }
      -448
      -449  /**
      -450   * Cache the block with the specified 
      name and buffer.
-451   * <p>
      -452   *
      -453   * @param cacheKey block's cache key
      -454   * @param buf  block buffer
      -455   */
      -456  @Override
      -457  public void cacheBlock(BlockCacheKey 
      cacheKey, Cacheable buf) {
      -458cacheBlock(cacheKey, buf, false);
      -459  }
      -460
      -461  /**
      -462   * Helper function that updates the 
      local size counter and also updates any
      -463   * per-cf or per-blocktype metrics it 
      can discern from given
      -464   * {@link LruCachedBlock}
      -465   */
      -466  private long 
      updateSizeMetrics(LruCachedBlock cb, boolean evict) {
      -467long heapsize = cb.heapSize();
      -468BlockType bt = 
      cb.getBuffer().getBlockType();
      -469if (evict) {
      -470  heapsize *= -1;
      -471}
-472if (bt != null && bt.isData()) {
      -473   dataBlockSize.add(heapsize);
      -474}
      -475return size.addAndGet(heapsize);
      -476  }
      -477
      -478  /**
      -479   * Get the buffer of the block with the 
      specified name.
      -480   *
      -481   * @param cacheKey   block's 
      cache key
      -482   * @param cachingtrue if 
      the caller caches blocks on cache misses
      -483   * @param repeat Whether 
      this is a repeat lookup for the same block
      -484   *   (used to 
      avoid double counting cache misses when doing double-check
      -485   *   locking)
      -486   * @param updateCacheMetrics Whether to 
      update cache metrics or not
      -487   *
      -488   * @return 
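
A toy condensation of the admission logic above (not the real LruBlockCache;
the budget and the eviction stub are placeholders): duplicate inserts are
tolerated, and new blocks are refused once the cache crosses
hardCapacityLimitFactor times the acceptable size, triggering eviction instead.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public class ToyBlockCache {
  private final Map<String, byte[]> map = new ConcurrentHashMap<>();
  private final AtomicLong size = new AtomicLong();
  private final long acceptableSize = 8L * 1024 * 1024; // example budget
  private final float hardCapacityLimitFactor = 1.2f;

  public void cacheBlock(String key, byte[] buf) {
    if (map.containsKey(key)) {
      return; // double-caching is harmless (compare HBASE-8547 above)
    }
    long hardLimit = (long) (hardCapacityLimitFactor * acceptableSize);
    if (size.get() >= hardLimit) {
      runEviction(); // fail this insert and free space instead
      return;
    }
    map.put(key, buf);
    size.addAndGet(buf.length);
  }

  private void runEviction() {
    // evict least-recently-used entries; elided in this sketch
  }
}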

      [04/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationFuture.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationFuture.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationFuture.html
      index c10cfbf..a3e2f4a 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationFuture.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationFuture.html
      @@ -3371,7 +3371,7 @@
       3363private V result = null;
       3364
       3365private final HBaseAdmin admin;
      -3366private final Long procId;
      +3366protected final Long procId;
       3367
       3368public ProcedureFuture(final 
      HBaseAdmin admin, final Long procId) {
       3369  this.admin = admin;
      @@ -3653,653 +3653,651 @@
       3645 * @return a description of the 
      operation
       3646 */
       3647protected String getDescription() 
      {
      -3648  return "Operation: " + 
      getOperationType() + ", "
      -3649  + "Table Name: " + 
      tableName.getNameWithNamespaceInclAsString();
      -3650
      -3651}
      -3652
      -3653protected abstract class 
      TableWaitForStateCallable implements WaitForStateCallable {
      -3654  @Override
      -3655  public void 
      throwInterruptedException() throws InterruptedIOException {
      -3656throw new 
      InterruptedIOException("Interrupted while waiting for operation: "
      -3657+ getOperationType() + " on 
      table: " + tableName.getNameWithNamespaceInclAsString());
      -3658  }
      -3659
      -3660  @Override
      -3661  public void 
      throwTimeoutException(long elapsedTime) throws TimeoutException {
      -3662throw new TimeoutException("The 
      operation: " + getOperationType() + " on table: " +
      -3663tableName.getNameAsString() 
      + " has not completed after " + elapsedTime + "ms");
      -3664  }
      -3665}
      -3666
      -3667@Override
      -3668protected V 
      postOperationResult(final V result, final long deadlineTs)
      -3669throws IOException, 
      TimeoutException {
      -3670  LOG.info(getDescription() + " 
      completed");
      -3671  return 
      super.postOperationResult(result, deadlineTs);
      -3672}
      -3673
      -3674@Override
      -3675protected V 
      postOperationFailure(final IOException exception, final long deadlineTs)
      -3676throws IOException, 
      TimeoutException {
      -3677  LOG.info(getDescription() + " 
      failed with " + exception.getMessage());
      -3678  return 
      super.postOperationFailure(exception, deadlineTs);
      -3679}
      -3680
      -3681protected void 
      waitForTableEnabled(final long deadlineTs)
      -3682throws IOException, 
      TimeoutException {
      -3683  waitForState(deadlineTs, new 
      TableWaitForStateCallable() {
      -3684@Override
      -3685public boolean checkState(int 
      tries) throws IOException {
      -3686  try {
      -3687if 
      (getAdmin().isTableAvailable(tableName)) {
      -3688  return true;
      -3689}
      -3690  } catch 
      (TableNotFoundException tnfe) {
      -3691LOG.debug("Table " + 
      tableName.getNameWithNamespaceInclAsString()
      -3692+ " was not enabled, 
      sleeping. tries=" + tries);
      -3693  }
      -3694  return false;
      -3695}
      -3696  });
      -3697}
      -3698
      -3699protected void 
      waitForTableDisabled(final long deadlineTs)
      -3700throws IOException, 
      TimeoutException {
      -3701  waitForState(deadlineTs, new 
      TableWaitForStateCallable() {
      -3702@Override
      -3703public boolean checkState(int 
      tries) throws IOException {
      -3704  return 
      getAdmin().isTableDisabled(tableName);
      -3705}
      -3706  });
      -3707}
      -3708
      -3709protected void 
      waitTableNotFound(final long deadlineTs)
      -3710throws IOException, 
      TimeoutException {
      -3711  waitForState(deadlineTs, new 
      TableWaitForStateCallable() {
      -3712@Override
      -3713public boolean checkState(int 
      tries) throws IOException {
      -3714  return 
      !getAdmin().tableExists(tableName);
      -3715}
      -3716  });
      -3717}
      -3718
      -3719protected void 
      waitForSchemaUpdate(final long deadlineTs)
      -3720throws IOException, 
      TimeoutException {
      -3721  waitForState(deadlineTs, new 
      TableWaitForStateCallable() {
      -3722@Override
      -3723public boolean checkState(int 
      tries) throws IOException {
      -3724  return 
      getAdmin().getAlterStatus(tableName).getFirst() == 0;
      -3725}
      -3726  });
      -3727}
      -3728
      -3729protected void 
      waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
      -3730throws IOException, 
      TimeoutException {
      -3731  final TableDescriptor desc = 
      getTableDescriptor();
      -3732  final AtomicInteger actualRegCount 
      = new AtomicInteger(0);
      -3733  final MetaTableAccessor.Visitor 
      visitor = new MetaTableAccessor.Visitor() {
      -3734@Override
      -3735public boolean visit(Result 
      rowResult) throws IOException {
      -3736  
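
The wait helpers above all share one polling shape: retry a boolean state check
until it passes or a deadline timestamp elapses. A hedged sketch of that shape
(names and backoff policy are placeholders):

import java.io.IOException;
import java.util.concurrent.TimeoutException;

public final class WaitForStateSketch {
  public interface StateCheck {
    boolean checkState(int tries) throws IOException;
  }

  public static void waitForState(long deadlineTs, StateCheck check)
      throws IOException, TimeoutException, InterruptedException {
    for (int tries = 0; System.currentTimeMillis() < deadlineTs; tries++) {
      if (check.checkState(tries)) {
        return; // desired state reached (enabled, disabled, gone, ...)
      }
      Thread.sleep(Math.min(1000L, 100L * (tries + 1))); // simple backoff
    }
    throw new TimeoutException("state not reached before deadline");
  }
}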

      [04/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.html
      index 3c3b9b1..b7f19fe 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.html
      @@ -18,7 +18,7 @@
       catch(err) {
       }
       //-->
      -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
      +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
       var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
      Methods"],8:["t4","Concrete Methods"]};
       var altColor = "altColor";
       var rowColor = "rowColor";
      @@ -272,13 +272,17 @@ extends 
      +private void
+removeRemoteWALs(MasterProcedureEnv env)
      +
      +
       protected void
serializeStateData(ProcedureStateSerializer serializer)
       The user-level code of the procedure may have some state to
        persist (e.g.
       
       
      -
      +
       protected void
updatePeerStorage(MasterProcedureEnv env)
       
      @@ -288,14 +292,14 @@ extends ModifyPeerProcedure
      -enablePeerBeforeFinish,
       executeFromState,
       getInitialState,
       getNewPeerConfig,
       getOldPeerConfig,
       getState,
       getStateId,
       nextStateAfterRefresh,
       rollbackState,
       setLastPushedSequenceId,
       setLastPushedSequenceIdForTable,
       updateLastPushedSequenceIdForSerialPeer
      +enablePeerBeforeFinish,
       executeFromState,
       getInitialState,
       getNewPeerConfig,
       getOldPeerConfig,
       getState,
       getStateId,
       nextStateAfterRefresh,
       setLastPushedSequenceId,
       setLastPushedSequenceIdForTable,
       updateLastPushedSequenceIdForSerialPeer
       
       
       
       
       
       Methods inherited from 
class org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure
      -acquireLock,
       getLatch,
       getPeerId,
       hasLock,
       holdLock,
       releaseLock
       
      +acquireLock,
       getLatch,
       getPeerId,
       hasLock,
       holdLock,
       refreshPeer, 
      releaseLock,
       rollbackState
       
       
       
      @@ -431,13 +435,27 @@ extends 
      +
      +
      +
      +
      +removeRemoteWALs
private void removeRemoteWALs(MasterProcedureEnv env)
      +   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
       title="class or interface in java.io">IOException
      +
      +Throws:
      +https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
       title="class or interface in java.io">IOException
      +
      +
      +
       
       
       
       
       
       postPeerModification
-protected void postPeerModification(MasterProcedureEnv env)
+protected void postPeerModification(MasterProcedureEnv env)
        throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
       title="class or interface in java.io">IOException,
       ReplicationException
       Description copied from 
      class:ModifyPeerProcedure
      @@ -463,7 +481,7 @@ extends 
       
       serializeStateData
-protected void serializeStateData(ProcedureStateSerializer serializer)
+protected void serializeStateData(ProcedureStateSerializer serializer)
      throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
       title="class or interface in java.io">IOException
       Description copied from 
      class:Procedure
       The user-level code of the procedure may have some state to
      @@ -485,7 +503,7 @@ extends 
       
       deserializeStateData
-protected void deserializeStateData(ProcedureStateSerializer serializer)
+protected void deserializeStateData(ProcedureStateSerializer serializer)
        throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
       title="class or interface in java.io">IOException
       Description copied from 
      class:Procedure
       Called on store load to allow the user to decode the 
      previously serialized
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html
      index 4807474..71aa91b 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/replication/ReplicationPeerManager.html
      @@ -18,7 +18,7 @@
       catch(err) {
       }
       //-->
      -var methods = 
      {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10};
      +var methods = 
      

      [04/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/src-html/org/apache/hadoop/hbase/RegionLoad.html
      --
      diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/RegionLoad.html 
      b/devapidocs/src-html/org/apache/hadoop/hbase/RegionLoad.html
      index b5f6437..49c2b92 100644
      --- a/devapidocs/src-html/org/apache/hadoop/hbase/RegionLoad.html
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/RegionLoad.html
      @@ -102,318 +102,323 @@
       094  }
       095
       096  @Override
      -097  public long 
      getFilteredReadRequestCount() {
      -098return 
      metrics.getFilteredReadRequestCount();
      +097  public long getCpRequestCount() {
      +098return metrics.getCpRequestCount();
       099  }
       100
       101  @Override
      -102  public Size getStoreFileIndexSize() {
      -103return 
      metrics.getStoreFileIndexSize();
      +102  public long 
      getFilteredReadRequestCount() {
      +103return 
      metrics.getFilteredReadRequestCount();
       104  }
       105
       106  @Override
      -107  public long getWriteRequestCount() {
      -108return 
      metrics.getWriteRequestCount();
      +107  public Size getStoreFileIndexSize() {
      +108return 
      metrics.getStoreFileIndexSize();
       109  }
       110
       111  @Override
      -112  public Size 
      getStoreFileRootLevelIndexSize() {
      -113return 
      metrics.getStoreFileRootLevelIndexSize();
      +112  public long getWriteRequestCount() {
      +113return 
      metrics.getWriteRequestCount();
       114  }
       115
       116  @Override
      -117  public Size 
      getStoreFileUncompressedDataIndexSize() {
      -118return 
      metrics.getStoreFileUncompressedDataIndexSize();
      +117  public Size 
      getStoreFileRootLevelIndexSize() {
      +118return 
      metrics.getStoreFileRootLevelIndexSize();
       119  }
       120
       121  @Override
      -122  public Size getBloomFilterSize() {
      -123return 
      metrics.getBloomFilterSize();
      +122  public Size 
      getStoreFileUncompressedDataIndexSize() {
      +123return 
      metrics.getStoreFileUncompressedDataIndexSize();
       124  }
       125
       126  @Override
      -127  public long getCompactingCellCount() 
      {
      -128return 
      metrics.getCompactingCellCount();
      +127  public Size getBloomFilterSize() {
      +128return 
      metrics.getBloomFilterSize();
       129  }
       130
       131  @Override
      -132  public long getCompactedCellCount() {
      -133return 
      metrics.getCompactedCellCount();
      +132  public long getCompactingCellCount() 
      {
      +133return 
      metrics.getCompactingCellCount();
       134  }
       135
       136  @Override
      -137  public long getCompletedSequenceId() 
      {
      -138return 
      metrics.getCompletedSequenceId();
      +137  public long getCompactedCellCount() {
      +138return 
      metrics.getCompactedCellCount();
       139  }
       140
       141  @Override
-142  public Map<byte[], Long> getStoreSequenceId() {
      -143return 
      metrics.getStoreSequenceId();
      +142  public long getCompletedSequenceId() 
      {
      +143return 
      metrics.getCompletedSequenceId();
       144  }
       145
       146  @Override
      -147  public Size 
      getUncompressedStoreFileSize() {
      -148return 
      metrics.getUncompressedStoreFileSize();
+147  public Map<byte[], Long> getStoreSequenceId() {
      +148return 
      metrics.getStoreSequenceId();
       149  }
       150
      -151  /**
      -152   * @return the number of stores
      -153   * @deprecated As of release 2.0.0, 
      this will be removed in HBase 3.0.0
      -154   * Use {@link 
      #getStoreCount} instead.
      -155   */
      -156  @Deprecated
      -157  public int getStores() {
      -158return metrics.getStoreCount();
      -159  }
      -160
      -161  /**
      -162   * @return the number of storefiles
      -163   * @deprecated As of release 2.0.0, 
      this will be removed in HBase 3.0.0
      -164   * Use {@link 
      #getStoreFileCount} instead.
      -165   */
      -166  @Deprecated
      -167  public int getStorefiles() {
      -168return metrics.getStoreFileCount();
      -169  }
      -170
      -171  /**
      -172   * @return the total size of the 
      storefiles, in MB
      -173   * @deprecated As of release 2.0.0, 
      this will be removed in HBase 3.0.0
      -174   * Use {@link 
      #getStoreFileSize} instead.
      -175   */
      -176  @Deprecated
      -177  public int getStorefileSizeMB() {
      -178return (int) 
      metrics.getStoreFileSize().get(Size.Unit.MEGABYTE);
      -179  }
      -180
      -181  /**
      -182   * @return the memstore size, in MB
      -183   * @deprecated As of release 2.0.0, 
      this will be removed in HBase 3.0.0
      -184   * Use {@link 
      #getMemStoreSize} instead.
      -185   */
      -186  @Deprecated
      -187  public int getMemStoreSizeMB() {
      -188return (int) 
      metrics.getMemStoreSize().get(Size.Unit.MEGABYTE);
      -189  }
      -190
      -191  /**
      -192   * @deprecated As of release 2.0.0, 
      this will be removed in HBase 3.0.0
-193   * ((<a href="https://issues.apache.org/jira/browse/HBASE-3935">HBASE-3935</a>)).
      -194   * Use {@link 
      #getStoreFileRootLevelIndexSize} instead.
      -195   */
      -196  @Deprecated
      -197  public int getStorefileIndexSizeMB() 
      {
      -198// Return value divided by 1024
-199return (getRootIndexSizeKB() >> 10);
      -200  }
      -201
      -202  /**
      -203   * @deprecated As of release 2.0.0, 
      this will be removed in HBase 3.0.0
      -204   * Use {@link 
      #getStoreFileRootLevelIndexSize()} instead.
      -205   */
      -206  @Deprecated
      -207  public 
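
With the MB-typed getters deprecated above, callers convert explicitly from the
Size-typed metrics instead; a brief sketch:

import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.Size;

public class RegionSizeSketch {
  // Preferred replacement for the deprecated getStorefileSizeMB(): read the
  // Size-typed metric and pick the unit at the call site.
  public static double storeFileSizeMb(RegionMetrics metrics) {
    return metrics.getStoreFileSize().get(Size.Unit.MEGABYTE);
  }
}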

      [04/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
      index 541beed..1100e95 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionFailedOpen.html
      @@ -42,1015 +42,1038 @@
       034import 
      java.util.concurrent.ConcurrentHashMap;
       035import 
      java.util.concurrent.ConcurrentSkipListMap;
       036import 
      java.util.concurrent.atomic.AtomicInteger;
      -037
      -038import 
      org.apache.hadoop.hbase.HConstants;
      -039import 
      org.apache.hadoop.hbase.ServerName;
      -040import 
      org.apache.hadoop.hbase.TableName;
      -041import 
      org.apache.hadoop.hbase.client.RegionInfo;
      -042import 
      org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
      -043import 
      org.apache.hadoop.hbase.master.RegionState;
      -044import 
      org.apache.hadoop.hbase.master.RegionState.State;
      -045import 
      org.apache.hadoop.hbase.procedure2.ProcedureEvent;
      -046import 
      org.apache.hadoop.hbase.util.Bytes;
      -047import 
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
      -048import 
      org.apache.yetus.audience.InterfaceAudience;
      -049import org.slf4j.Logger;
      -050import org.slf4j.LoggerFactory;
      -051import 
      org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
      -052
      -053/**
      -054 * RegionStates contains a set of Maps 
      that describes the in-memory state of the AM, with
      -055 * the regions available in the system, 
      the region in transition, the offline regions and
      -056 * the servers holding regions.
      -057 */
      -058@InterfaceAudience.Private
      -059public class RegionStates {
      -060  private static final Logger LOG = 
      LoggerFactory.getLogger(RegionStates.class);
      -061
      -062  protected static final State[] 
      STATES_EXPECTED_ON_OPEN = new State[] {
      -063State.OPEN, // State may already be 
      OPEN if we died after receiving the OPEN from regionserver
      -064// but before complete 
      finish of AssignProcedure. HBASE-20100.
      -065State.OFFLINE, State.CLOSED,  // 
      disable/offline
      -066State.SPLITTING, State.SPLIT, // 
      ServerCrashProcedure
      -067State.OPENING, State.FAILED_OPEN, // 
      already in-progress (retrying)
      -068  };
      -069
      -070  protected static final State[] 
      STATES_EXPECTED_ON_CLOSE = new State[] {
      -071State.SPLITTING, State.SPLIT, 
      State.MERGING, // ServerCrashProcedure
      -072State.OPEN,   // 
      enabled/open
      -073State.CLOSING // 
      already in-progress (retrying)
      -074  };
      -075
-076  private static class AssignmentProcedureEvent extends ProcedureEvent<RegionInfo> {
      -077public AssignmentProcedureEvent(final 
      RegionInfo regionInfo) {
      -078  super(regionInfo);
      -079}
      -080  }
      -081
-082  private static class ServerReportEvent extends ProcedureEvent<ServerName> {
      -083public ServerReportEvent(final 
      ServerName serverName) {
      -084  super(serverName);
      -085}
      -086  }
      -087
      -088  /**
      -089   * Current Region State.
      -090   * In-memory only. Not persisted.
      -091   */
      -092  // Mutable/Immutable? Changes have to 
      be synchronized or not?
      -093  // Data members are volatile which 
      seems to say multi-threaded access is fine.
      -094  // In the below we do check and set but 
      the check state could change before
-095  // we do the set because no synchronization, which seems dodgy. Clear up
      -096  // understanding here... how many 
      threads accessing? Do locks make it so one
      -097  // thread at a time working on a single 
      Region's RegionStateNode? Lets presume
      -098  // so for now. Odd is that elsewhere in 
      this RegionStates, we synchronize on
      -099  // the RegionStateNode instance. 
      TODO.
-100  public static class RegionStateNode implements Comparable<RegionStateNode> {
-101private final RegionInfo regionInfo;
-102private final ProcedureEvent<?> event;
      -103
      -104private volatile 
      RegionTransitionProcedure procedure = null;
      -105private volatile ServerName 
      regionLocation = null;
      -106private volatile ServerName lastHost 
      = null;
      -107/**
      -108 * A Region-in-Transition (RIT) moves 
      through states.
      -109 * See {@link State} for complete 
      list. A Region that
-110 * is opened moves from OFFLINE => OPENING => OPENED.
      -111 */
      -112private volatile State state = 
      State.OFFLINE;
      -113
      -114/**
      -115 * Updated whenever a call to {@link 
      #setRegionLocation(ServerName)}
      -116 * or {@link #setState(State, 
      State...)}.
      -117 */
      -118private volatile long lastUpdate = 
      0;
      -119
      -120private volatile long openSeqNum = 
      HConstants.NO_SEQNUM;
      -121
      -122public RegionStateNode(final 
      RegionInfo regionInfo) {
      -123  this.regionInfo = regionInfo;
      -124  this.event = new 
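The thread-safety worry raised in the comment block above is easier to see in isolation. Below is a minimal, self-contained sketch (illustrative names only, not the HBase implementation) of why a check-then-set state transition needs a lock: volatile gives visibility, but only synchronizing makes the expected-state check and the assignment one atomic step.

import java.util.Arrays;

// Illustrative only: a stripped-down state node showing why the
// check-then-set in setState() must be guarded. Volatile alone makes
// reads/writes visible across threads but does not make the pair atomic.
class DemoRegionStateNode {
  enum State { OFFLINE, OPENING, OPEN, CLOSING, CLOSED, FAILED_OPEN }

  private volatile State state = State.OFFLINE;

  // Synchronized so "is the current state one of 'expected'" and the
  // assignment happen as a single atomic step per node.
  synchronized boolean setState(State update, State... expected) {
    if (expected.length > 0 && !Arrays.asList(expected).contains(state)) {
      return false; // a concurrent actor already moved the region on
    }
    state = update;
    return true;
  }

  State getState() {
    return state; // plain volatile read, no lock needed
  }

  public static void main(String[] args) {
    DemoRegionStateNode node = new DemoRegionStateNode();
    System.out.println(node.setState(State.OPENING, State.OFFLINE)); // true
    System.out.println(node.setState(State.OPEN, State.OPENING));    // true
    System.out.println(node.setState(State.OPEN, State.OFFLINE));    // false: precondition failed
  }
}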
      

      [04/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index 4b5d00c..96ecbf8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -6,7 +6,7 @@
       
       
       
-001/*
+001/**
 002 * Licensed to the Apache Software Foundation (ASF) under one
 003 * or more contributor license agreements.  See the NOTICE file
 004 * distributed with this work for additional information
@@ -23,1981 +23,1894 @@
 015 * See the License for the specific language governing permissions and
 016 * limitations under the License.
 017 */
-018
-019package org.apache.hadoop.hbase.master.assignment;
-020
-021import java.io.IOException;
-022import java.util.ArrayList;
-023import java.util.Arrays;
-024import java.util.Collection;
-025import java.util.Collections;
-026import java.util.HashMap;
-027import java.util.HashSet;
-028import java.util.List;
-029import java.util.Map;
-030import java.util.Set;
-031import java.util.concurrent.CopyOnWriteArrayList;
-032import java.util.concurrent.Future;
-033import java.util.concurrent.TimeUnit;
-034import java.util.concurrent.atomic.AtomicBoolean;
-035import java.util.concurrent.locks.Condition;
-036import java.util.concurrent.locks.ReentrantLock;
-037import java.util.stream.Collectors;
-038import org.apache.hadoop.conf.Configuration;
-039import org.apache.hadoop.hbase.HBaseIOException;
-040import org.apache.hadoop.hbase.HConstants;
-041import org.apache.hadoop.hbase.PleaseHoldException;
-042import org.apache.hadoop.hbase.RegionException;
-043import org.apache.hadoop.hbase.RegionStateListener;
-044import org.apache.hadoop.hbase.ServerName;
-045import org.apache.hadoop.hbase.TableName;
-046import org.apache.hadoop.hbase.YouAreDeadException;
-047import org.apache.hadoop.hbase.client.RegionInfo;
-048import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-049import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-050import org.apache.hadoop.hbase.client.Result;
-051import org.apache.hadoop.hbase.client.TableState;
-052import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-053import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-054import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-055import org.apache.hadoop.hbase.master.AssignmentListener;
-056import org.apache.hadoop.hbase.master.LoadBalancer;
-057import org.apache.hadoop.hbase.master.MasterServices;
-058import org.apache.hadoop.hbase.master.MetricsAssignmentManager;
-059import org.apache.hadoop.hbase.master.NoSuchProcedureException;
-060import org.apache.hadoop.hbase.master.RegionPlan;
-061import org.apache.hadoop.hbase.master.RegionState;
-062import org.apache.hadoop.hbase.master.RegionState.State;
-063import org.apache.hadoop.hbase.master.ServerListener;
-064import org.apache.hadoop.hbase.master.TableStateManager;
-065import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-066import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState;
-067import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode;
-068import org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
-069import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-070import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-071import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-072import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-073import org.apache.hadoop.hbase.master.procedure.ServerCrashException;
-074import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-075import org.apache.hadoop.hbase.procedure2.Procedure;
-076import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-077import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-078import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
-079import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-080import org.apache.hadoop.hbase.regionserver.SequenceId;
-081import org.apache.hadoop.hbase.util.Bytes;
-082import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-083import org.apache.hadoop.hbase.util.HasThread;
-084import org.apache.hadoop.hbase.util.Pair;
-085import org.apache.hadoop.hbase.util.Threads;
-086import org.apache.hadoop.hbase.util.VersionInfo;
-087import org.apache.yetus.audience.InterfaceAudience;
-088import org.slf4j.Logger;
-089import org.slf4j.LoggerFactory;
-090
      

      [04/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Reader.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Reader.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Reader.html
index e31f5c6..f4d1eb0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Reader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Reader.html
@@ -31,277 +31,266 @@
 023import java.util.Set;
 024import org.apache.hadoop.hbase.HConstants;
 025import org.apache.hadoop.hbase.client.RegionInfo;
-026import org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
-027import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-028import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-029import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
-030import org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider;
-031import org.apache.yetus.audience.InterfaceAudience;
-032import org.apache.yetus.audience.InterfaceStability;
-033
-034import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-035
-036/**
-037 * A Write Ahead Log (WAL) provides service for reading and writing WALEdits. This interface provides
-038 * APIs for WAL users (such as RegionServer) to use the WAL (do append, sync, etc).
-039 *
-040 * Note that some internals, such as log rolling and performance evaluation tools, will use
-041 * WAL.equals to determine if they have already seen a given WAL.
-042 */
-043@InterfaceAudience.Private
-044@InterfaceStability.Evolving
-045public interface WAL extends Closeable, WALFileLengthProvider {
-046
-047  /**
-048   * Registers a WALActionsListener.
-049   */
-050  void registerWALActionsListener(final WALActionsListener listener);
-051
-052  /**
-053   * Unregisters a WALActionsListener.
-054   */
-055  boolean unregisterWALActionsListener(final WALActionsListener listener);
-056
-057  /**
-058   * Roll the log writer. That is, start writing log messages to a new file.
-059   *
-060   * <p>
-061   * The implementation is synchronized in order to make sure there's one rollWriter
-062   * running at any given time.
-063   *
-064   * @return If lots of logs, flush the returned regions so next time through we
-065   *         can clean logs. Returns null if nothing to flush. Names are actual
-066   *         region names as returned by {@link RegionInfo#getEncodedName()}
-067   */
-068  byte[][] rollWriter() throws FailedLogCloseException, IOException;
-069
-070  /**
-071   * Roll the log writer. That is, start writing log messages to a new file.
-072   *
-073   * <p>
-074   * The implementation is synchronized in order to make sure there's one rollWriter
-075   * running at any given time.
-076   *
-077   * @param force
-078   *          If true, force creation of a new writer even if no entries have
-079   *          been written to the current writer
-080   * @return If lots of logs, flush the returned regions so next time through we
-081   *         can clean logs. Returns null if nothing to flush. Names are actual
-082   *         region names as returned by {@link RegionInfo#getEncodedName()}
-083   */
-084  byte[][] rollWriter(boolean force) throws FailedLogCloseException, IOException;
-085
-086  /**
-087   * Stop accepting new writes. If we have unsynced writes still in buffer, sync them.
-088   * Extant edits are left in place in backing storage to be replayed later.
-089   */
-090  void shutdown() throws IOException;
-091
-092  /**
-093   * Caller no longer needs any edits from this WAL. Implementers are free to reclaim
-094   * underlying resources after this call; i.e. filesystem-based WALs can archive or
-095   * delete files.
-096   */
-097  @Override
-098  void close() throws IOException;
-099
-100  /**
-101   * Append a set of edits to the WAL. The WAL is not flushed/sync'd after this transaction
-102   * completes BUT on return this edit must have its region edit/sequence id assigned
-103   * else it messes up our unification of mvcc and sequenceid. On return <code>key</code> will
-104   * have the region edit/sequence id filled in.
-105   * @param info the regioninfo associated with the append
-106   * @param key Modified by this call; we add to it this edit's region edit/sequence id.
-107   * @param edits Edits to append. MAY CONTAIN NO EDITS for the case where we want to get an edit
-108   *          sequence id that is after all currently appended edits.
-109   * @param inMemstore Always true except for the case where we are writing a compaction completion
-110   *          record into the WAL; in this case the entry is just so we can finish an unfinished compaction
-111   *          -- it is not an edit for memstore.
-112   * @return Returns a 'transaction id' and <code>key</code> will have the region edit/sequence id
-113   *         in it.
-114   */
      -115  long 
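The append javadoc above describes a two-step durability contract: append assigns a sequence id and returns a transaction id immediately, and durability comes only from a later sync barrier. A minimal, self-contained sketch of that shape (illustrative names, not the HBase WAL API):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative-only miniature of the append/sync contract: append() returns
// a transaction id right away; nothing is durable until a sync covers it.
class MiniWal {
  private final AtomicLong nextSeqId = new AtomicLong(1);
  private volatile long syncedUpTo = 0;

  // Buffer the edit, assign its sequence id, and return a "transaction id".
  long append(String edit) {
    long txid = nextSeqId.getAndIncrement();
    // ... write edit into an in-memory buffer keyed by txid ...
    return txid;
  }

  // Block until everything up to txid has reached durable storage.
  void sync(long txid) throws IOException {
    // ... flush buffered edits through txid to the underlying file ...
    syncedUpTo = Math.max(syncedUpTo, txid);
  }

  public static void main(String[] args) throws IOException {
    MiniWal wal = new MiniWal();
    long txid = wal.append("put row=r1 cf=info");
    wal.sync(txid); // only now is the edit durable
    System.out.println("durable through txid " + wal.syncedUpTo);
  }
}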

      [04/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteProcedure.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteProcedure.html
index 594ef24..17d5c40 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteProcedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteProcedure.html
      @@ -170,241 +170,242 @@
 162  }
 163
 164  /**
-165   * Add a remote rpc. Be sure to check the result for a successful add.
+165   * Add a remote rpc.
 166   * @param key the node identifier
-167   * @return True if we successfully added the operation.
-168   */
-169  public boolean addOperationToNode(final TRemote key, RemoteProcedure rp) {
+167   */
+168  public void addOperationToNode(final TRemote key, RemoteProcedure rp)
+169      throws NullTargetServerDispatchException, NoServerDispatchException, NoNodeDispatchException {
 170    if (key == null) {
-171      // Key is the remote server name. Be careful. It could have been nulled by a concurrent
-172      // ServerCrashProcedure shutting down outstanding RPC requests. See remoteCallFailed.
-173      return false;
-174    }
-175    assert key != null : "found null key for node";
-176    BufferNode node = nodeMap.get(key);
-177    if (node == null) {
-178      return false;
-179    }
-180    node.add(rp);
-181    // Check our node is still in the map; it could have been removed by #removeNode.
-182    return nodeMap.containsValue(node);
-183  }
-184
-185  /**
-186   * Remove a remote node
-187   * @param key the node identifier
-188   */
-189  public boolean removeNode(final TRemote key) {
-190    final BufferNode node = nodeMap.remove(key);
-191    if (node == null) return false;
-192    node.abortOperationsInQueue();
-193    return true;
-194  }
-195
-196  // ============================================================================================
-197  //  Task Helpers
-198  // ============================================================================================
-199  protected Future<Void> submitTask(Callable<Void> task) {
-200    return threadPool.submit(task);
-201  }
-202
-203  protected Future<Void> submitTask(Callable<Void> task, long delay, TimeUnit unit) {
-204    final FutureTask<Void> futureTask = new FutureTask<>(task);
-205    timeoutExecutor.add(new DelayedTask(futureTask, delay, unit));
-206    return futureTask;
-207  }
-208
-209  protected abstract void remoteDispatch(TRemote key, Set<RemoteProcedure> operations);
-210  protected abstract void abortPendingOperations(TRemote key, Set<RemoteProcedure> operations);
-211
-212  /**
-213   * Data structure with reference to remote operation.
-214   */
-215  public static abstract class RemoteOperation {
-216    private final RemoteProcedure remoteProcedure;
-217
-218    protected RemoteOperation(final RemoteProcedure remoteProcedure) {
-219      this.remoteProcedure = remoteProcedure;
-220    }
-221
-222    public RemoteProcedure getRemoteProcedure() {
-223      return remoteProcedure;
-224    }
-225  }
-226
-227  /**
-228   * Remote procedure reference.
-229   */
-230  public interface RemoteProcedure<TEnv, TRemote> {
-231    /**
-232     * For building the remote operation.
-233     */
-234    RemoteOperation remoteCallBuild(TEnv env, TRemote remote);
-235
-236    /**
-237     * Called when the executeProcedure call fails.
-238     */
-239    void remoteCallFailed(TEnv env, TRemote remote, IOException exception);
-240
-241    /**
-242     * Called when the RS reports that the remote procedure has succeeded, through the
-243     * {@code reportProcedureDone} method.
-244     */
-245    void remoteOperationCompleted(TEnv env);
-246
-247    /**
-248     * Called when the RS reports that the remote procedure has failed, through the
-249     * {@code reportProcedureDone} method.
-250     */
-251    void remoteOperationFailed(TEnv env, RemoteProcedureException error);
-252  }
-253
-254  /**
-255   * Account of what procedures are running on a remote node.
-256   * @param <TEnv>
-257   * @param <TRemote>
-258   */
-259  public interface RemoteNode<TEnv, TRemote> {
-260    TRemote getKey();
-261    void add(RemoteProcedure<TEnv, TRemote> operation);
-262    void dispatch();
-263  }
-264
-265  protected ArrayListMultimap<Class<?>, RemoteOperation> buildAndGroupRequestByType(final TEnv env,
-266      final TRemote remote, final Set<RemoteProcedure> remoteProcedures) {
-267    final ArrayListMultimap<Class<?>, RemoteOperation> requestByType = ArrayListMultimap.create();
-268    for (RemoteProcedure proc : remoteProcedures) {
-269      RemoteOperation operation = proc.remoteCallBuild(env, remote);
      -270  
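The API change visible in this hunk swaps silent boolean returns for distinct exceptions per failure mode, so callers cannot ignore a failed add. A short sketch of that error-signaling style, using the exception names from the diff but otherwise illustrative scaffolding (the real mapping of exception to failure mode lives in the HBase source):

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;

// Sketch only: one checked exception per failure mode instead of a boolean
// the caller might forget to check.
class DemoDispatcher<TRemote, TOp> {
  static class NullTargetServerDispatchException extends Exception {}
  static class NoNodeDispatchException extends Exception {}

  private final Map<TRemote, List<TOp>> nodeMap = new ConcurrentHashMap<>();

  void addNode(TRemote key) {
    nodeMap.putIfAbsent(key, new CopyOnWriteArrayList<>());
  }

  void addOperationToNode(TRemote key, TOp op)
      throws NullTargetServerDispatchException, NoNodeDispatchException {
    if (key == null) {
      throw new NullTargetServerDispatchException(); // key nulled concurrently
    }
    List<TOp> node = nodeMap.get(key);
    if (node == null) {
      throw new NoNodeDispatchException(); // node removed by a concurrent removeNode
    }
    node.add(op);
  }

  public static void main(String[] args) throws Exception {
    DemoDispatcher<String, String> d = new DemoDispatcher<>();
    d.addNode("rs1.example.com");
    d.addOperationToNode("rs1.example.com", "openRegion(r1)");
    System.out.println("operation queued");
  }
}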
      

      [04/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.html
      --
diff --git a/testdevapidocs/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.html b/testdevapidocs/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.html
      index d08fee2..046a589 100644
      --- a/testdevapidocs/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.html
      +++ b/testdevapidocs/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.html
      @@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10};
+var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";

-public class TestRSGroupsWithACL
+public class TestRSGroupsWithACL
 extends SecureTestUtil
 Performs authorization checks for rsgroup operations, according to different
 levels of authorized users.
@@ -152,106 +152,94 @@ extends Field and Description

-private static org.apache.hadoop.hbase.security.access.AccessController accessController
-
 static HBaseClassTestRule CLASS_RULE

 private static org.apache.hadoop.conf.Configuration conf

-private static org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment CP_ENV
-
-private static org.apache.hadoop.hbase.coprocessor.ObserverContext<org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment> CTX
-
 private static String GROUP_ADMIN

 private static String GROUP_CREATE

 private static String GROUP_READ

 private static String GROUP_WRITE

 private static org.slf4j.Logger LOG

 private static org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint rsGroupAdminEndpoint

 private static org.apache.hadoop.hbase.security.User SUPERUSER

 private static org.apache.hadoop.hbase.client.Connection systemUserConnection

 private static byte[] TEST_FAMILY

 private static org.apache.hadoop.hbase.TableName TEST_TABLE

 private static HBaseTestingUtility TEST_UTIL

 private static org.apache.hadoop.hbase.security.User USER_ADMIN

 private static org.apache.hadoop.hbase.security.User USER_CREATE

 private static org.apache.hadoop.hbase.security.User USER_GROUP_ADMIN

 private static org.apache.hadoop.hbase.security.User USER_GROUP_CREATE

 private static org.apache.hadoop.hbase.security.User USER_GROUP_READ

 private static org.apache.hadoop.hbase.security.User USER_GROUP_WRITE

 private static org.apache.hadoop.hbase.security.User USER_NONE

 private static org.apache.hadoop.hbase.security.User USER_OWNER

 private static org.apache.hadoop.hbase.security.User USER_RO

 private static org.apache.hadoop.hbase.security.User USER_RW

@@ -347,14 +335,6 @@ extends void
 testRemoveRSGroup()

-void
-testRemoveServers()
-
-private void
-validateAdminPermissions(SecureTestUtil.AccessTestAction action)
       
       
       
@@ -390,7 +370,7 @@ extends

 CLASS_RULE
-public static final HBaseClassTestRule CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE

@@ -399,7 +379,7 @@ extends

 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG

@@ -408,7 +388,7 @@ extends

 TEST_TABLE
-private static org.apache.hadoop.hbase.TableName TEST_TABLE
+private static org.apache.hadoop.hbase.TableName TEST_TABLE

@@ -417,7 +397,7 @@ extends

 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL

@@ -426,7 +406,7 @@ extends

 conf
-private static org.apache.hadoop.conf.Configuration conf
+private static org.apache.hadoop.conf.Configuration conf

@@ -435,7 +415,7 @@ extends

 systemUserConnection
-private static org.apache.hadoop.hbase.client.Connection systemUserConnection
+private static org.apache.hadoop.hbase.client.Connection systemUserConnection

@@ -444,7 +424,7 @@ extends

 SUPERUSER
-private static org.apache.hadoop.hbase.security.User SUPERUSER
+private 

      [04/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/testdevapidocs/index-all.html
      --
      diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
      index bacc42a..9d38150 100644
      --- a/testdevapidocs/index-all.html
      +++ b/testdevapidocs/index-all.html
      @@ -3329,6 +3329,8 @@
       
 CF - Static variable in class org.apache.hadoop.hbase.regionserver.TestStoreScanner

+CF - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestRaceWhenCreatingReplicationSource
+
 CF - Static variable in class org.apache.hadoop.hbase.replication.SerialReplicationTestBase

 CF - Static variable in class org.apache.hadoop.hbase.security.visibility.TestDefaultScanLabelGeneratorStack
@@ -5115,6 +5117,8 @@

 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.master.TestMasterTransitions

+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.master.TestMetaAssignmentWithStopMaster
+
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.master.TestMetaShutdownHandler

 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.master.TestMetricsMasterProcSourceImpl
@@ -5737,6 +5741,8 @@

 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestMetricsReplicationSourceImpl

+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestRaceWhenCreatingReplicationSource
+
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestRegionReplicaReplicationEndpoint

 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestRegionReplicaReplicationEndpointNoMaster
@@ -6844,12 +6850,6 @@

 closeCompactedFile(int) - Method in class org.apache.hadoop.hbase.regionserver.TestHStore

-closeConnection() - Method in class org.apache.hadoop.hbase.PerformanceEvaluation.AsyncTest
-
-closeConnection() - Method in class org.apache.hadoop.hbase.PerformanceEvaluation.Test
-
-closeConnection() - Method in class org.apache.hadoop.hbase.PerformanceEvaluation.TestBase
-
 closeConnection() - Method in class org.apache.hadoop.hbase.test.IntegrationTestReplication.ClusterID

 closeCount - Variable in class org.apache.hadoop.hbase.master.TestAssignmentListener.DummyAssignmentListener
@@ -8386,6 +8386,8 @@

 CONN - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestSerialReplicationEndpoint

+connCount - Variable in class org.apache.hadoop.hbase.PerformanceEvaluation.TestOptions
+
 connection - Variable in class org.apache.hadoop.hbase.AcidGuaranteesTestTool.AtomicGetReader

 connection - Variable in class org.apache.hadoop.hbase.AcidGuaranteesTestTool.AtomicityWriter
@@ -8974,6 +8976,8 @@

 CPMasterObserver() - Constructor for class org.apache.hadoop.hbase.namespace.TestNamespaceAuditor.CPMasterObserver

+CPMasterObserver() - Constructor for class org.apache.hadoop.hbase.rsgroup.TestRSGroups.CPMasterObserver
+
 cpName1 - Static variable in class org.apache.hadoop.hbase.coprocessor.TestClassLoading

 cpName2 - Static variable in class org.apache.hadoop.hbase.coprocessor.TestClassLoading
@@ -9010,6 +9014,8 @@

 cq - Variable in class org.apache.hadoop.hbase.regionserver.TestHRegionReplayEvents

+CQ - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestRaceWhenCreatingReplicationSource
+
 CQ - Static variable in class org.apache.hadoop.hbase.replication.SerialReplicationTestBase

 CQ - Static variable in class org.apache.hadoop.hbase.TestSequenceIdMonotonicallyIncreasing
@@ -9202,12 +9208,6 @@

 createConnection(Configuration) - Method in class org.apache.hadoop.hbase.MockRegionServerServices

-createConnection() - Method in class org.apache.hadoop.hbase.PerformanceEvaluation.AsyncTest
-
-createConnection() - Method in class org.apache.hadoop.hbase.PerformanceEvaluation.Test
-
-createConnection() - Method in class org.apache.hadoop.hbase.PerformanceEvaluation.TestBase
-
 createConnection(Configuration) - Method in class org.apache.hadoop.hbase.regionserver.TestHeapMemoryManager.RegionServerStub

 createConnection(Configuration) - Method in class org.apache.hadoop.hbase.regionserver.TestSplitLogWorker.DummyServer
@@ -12071,12 +12071,16 @@

 doStart() - Method in class org.apache.hadoop.hbase.client.replication.TestReplicationAdminWithClusters.TestUpdatableReplicationEndpoint

+doStart() - Method in class org.apache.hadoop.hbase.replication.regionserver.TestRaceWhenCreatingReplicationSource.LocalReplicationEndpoint
+
 doStart() - Method in class org.apache.hadoop.hbase.replication.SerialReplicationTestBase.LocalReplicationEndpoint

 doStart() - Method in class org.apache.hadoop.hbase.replication.TestReplicationEndpoint.ReplicationEndpointForTest

 doStop() - Method in class org.apache.hadoop.hbase.client.replication.TestReplicationAdminWithClusters.TestUpdatableReplicationEndpoint
       

      [04/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
      --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
index 3f8844b..cdb9398 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
      @@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
 134  static final String RANDOM_READ = "randomRead";
-135  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper MAPPER = new ObjectMapper();
-137  static {
-138    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = "TestTable";
-142  public static final String FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO = Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO = Bytes.toBytes("" + 0);
-145  public static final int DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 26;
-147
-148  private static final int ONE_GB = 1024 * 1024 * 1000;
-149  private static final int DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this configurable
-151  private static final int TAG_LENGTH = 256;
-152  private static final DecimalFormat FMT = new DecimalFormat("0.##");
-153  private static final MathContext CXT = MathContext.DECIMAL64;
-154  private static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions DEFAULT_OPTS = new TestOptions();
-157
-158  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
-159  private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
-160
-161  static {
-162    addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163        "Run async random read test");
-164    addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165        "Run async random write test");
-166    addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167        "Run async sequential read test");
-168    addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169        "Run async sequential write test");
-170    addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171        "Run async scan test (read every row)");
-172    addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173      "Run random read test");
-174    addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175      "Run random seek and scan 100 test");
-176    addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177      "Run random seek scan with both start and stop row (max 10 rows)");
-178    addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179      "Run random seek scan with both start and stop row (max 100 rows)");
-180    addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181      "Run random seek scan with both start and stop row (max 1000 rows)");
-182    addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183      "Run random seek scan with both start and stop row (max 1 rows)");
-184    addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185      "Run random write test");
-186    addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187      "Run sequential read test");
-188    addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189      "Run sequential write test");
-190    addCommandDescriptor(ScanTest.class, "scan",
-191      "Run scan test (read every row)");
-192    addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193      "Run scan test using a filter to find a specific row based on its value " +
-194      "(make sure to use --rows=20)");
-195    addCommandDescriptor(IncrementTest.class, "increment",
-196      "Increment on each row; clients overlap on keyspace so some concurrent operations");
-197    addCommandDescriptor(AppendTest.class, "append",
-198      "Append on each row; clients overlap on keyspace so some concurrent operations");
-199    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200      "CheckAndMutate on each row; clients overlap on keyspace so some concurrent operations");
      -201
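The static block above is a straightforward command-registry pattern: each test registers a name, class, and description in a sorted map, and the driver dispatches on the name. A compact, self-contained sketch of the same shape (illustrative names; not the PerformanceEvaluation source):

import java.util.Map;
import java.util.TreeMap;

// Sketch of the command-registry pattern: register name -> descriptor,
// dispatch by name. A TreeMap keeps --help output sorted by command name.
public class CommandRegistryDemo {
  static final class CmdDescriptor {
    final Class<?> cmdClass;
    final String description;
    CmdDescriptor(Class<?> cmdClass, String description) {
      this.cmdClass = cmdClass;
      this.description = description;
    }
  }

  private static final Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();

  static void addCommandDescriptor(Class<?> cmdClass, String name, String description) {
    COMMANDS.put(name, new CmdDescriptor(cmdClass, description));
  }

  public static void main(String[] args) {
    addCommandDescriptor(Object.class, "randomRead", "Run random read test");
    addCommandDescriptor(Object.class, "sequentialWrite", "Run sequential write test");
    // A real driver would look up args[0] here and instantiate cmdClass.
    COMMANDS.forEach((name, cmd) -> System.out.println(name + ": " + cmd.description));
  }
}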
      

      [04/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
index 03a0b2a..cabb570 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.html
      @@ -561,209 +561,206 @@
 553
 554    /**
 555     * Set all fields together.
-556     * @param batch
-557     * @param sizeScope
-558     * @param dataSize
-559     */
-560    void setFields(int batch, LimitScope sizeScope, long dataSize, long heapSize,
-561        LimitScope timeScope, long time) {
-562      setBatch(batch);
-563      setSizeScope(sizeScope);
-564      setDataSize(dataSize);
-565      setHeapSize(heapSize);
-566      setTimeScope(timeScope);
-567      setTime(time);
-568    }
-569
-570    int getBatch() {
-571      return this.batch;
-572    }
-573
-574    void setBatch(int batch) {
-575      this.batch = batch;
-576    }
-577
-578    /**
-579     * @param checkerScope
-580     * @return true when the limit can be enforced from the scope of the checker
-581     */
-582    boolean canEnforceBatchLimitFromScope(LimitScope checkerScope) {
-583      return LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(checkerScope);
-584    }
-585
-586    long getDataSize() {
-587      return this.dataSize;
-588    }
-589
-590    long getHeapSize() {
-591      return this.heapSize;
-592    }
-593
-594    void setDataSize(long dataSize) {
-595      this.dataSize = dataSize;
-596    }
-597
-598    void setHeapSize(long heapSize) {
-599      this.heapSize = heapSize;
-600    }
-601
-602    /**
-603     * @return {@link LimitScope} indicating scope in which the size limit is enforced
-604     */
-605    LimitScope getSizeScope() {
-606      return this.sizeScope;
-607    }
-608
-609    /**
-610     * Change the scope in which the size limit is enforced
-611     */
-612    void setSizeScope(LimitScope scope) {
-613      this.sizeScope = scope;
-614    }
-615
-616    /**
-617     * @param checkerScope
-618     * @return true when the limit can be enforced from the scope of the checker
-619     */
-620    boolean canEnforceSizeLimitFromScope(LimitScope checkerScope) {
-621      return this.sizeScope.canEnforceLimitFromScope(checkerScope);
-622    }
-623
-624    long getTime() {
-625      return this.time;
-626    }
-627
-628    void setTime(long time) {
-629      this.time = time;
-630    }
-631
-632    /**
-633     * @return {@link LimitScope} indicating scope in which the time limit is enforced
-634     */
-635    LimitScope getTimeScope() {
-636      return this.timeScope;
-637    }
-638
-639    /**
-640     * Change the scope in which the time limit is enforced
-641     */
-642    void setTimeScope(LimitScope scope) {
-643      this.timeScope = scope;
-644    }
-645
-646    /**
-647     * @param checkerScope
-648     * @return true when the limit can be enforced from the scope of the checker
-649     */
-650    boolean canEnforceTimeLimitFromScope(LimitScope checkerScope) {
-651      return this.timeScope.canEnforceLimitFromScope(checkerScope);
-652    }
-653
-654    @Override
-655    public String toString() {
-656      StringBuilder sb = new StringBuilder();
-657      sb.append("{");
+556     */
+557    void setFields(int batch, LimitScope sizeScope, long dataSize, long heapSize,
+558        LimitScope timeScope, long time) {
+559      setBatch(batch);
+560      setSizeScope(sizeScope);
+561      setDataSize(dataSize);
+562      setHeapSize(heapSize);
+563      setTimeScope(timeScope);
+564      setTime(time);
+565    }
+566
+567    int getBatch() {
+568      return this.batch;
+569    }
+570
+571    void setBatch(int batch) {
+572      this.batch = batch;
+573    }
+574
+575    /**
+576     * @param checkerScope
+577     * @return true when the limit can be enforced from the scope of the checker
+578     */
+579    boolean canEnforceBatchLimitFromScope(LimitScope checkerScope) {
+580      return LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(checkerScope);
+581    }
+582
+583    long getDataSize() {
+584      return this.dataSize;
+585    }
+586
+587    long getHeapSize() {
+588      return this.heapSize;
+589    }
+590
+591    void setDataSize(long dataSize) {
+592      this.dataSize = dataSize;
+593    }
+594
+595    void setHeapSize(long heapSize) {
+596      this.heapSize = heapSize;
+597    }
+598
+599    /**
+600     * @return {@link LimitScope} indicating scope in which the size limit is enforced
+601     */
+602    LimitScope getSizeScope() {
+603      return this.sizeScope;
+604    }
+605
+606    /**
+607     * Change the scope in which the size limit is enforced
+608     */
      +609void 
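The recurring canEnforce*LimitFromScope checks above all delegate to a single scope comparison: a limit declared at some scope may only be enforced by a checker running at that scope or deeper. A minimal sketch of that comparison, assuming a depth ordering like the one the real LimitScope enum uses (treat the exact depth values as illustrative):

// Sketch only: a limit is enforceable when the checker sits at least as
// deep as the scope in which the limit was declared.
public class LimitScopeDemo {
  enum LimitScope {
    BETWEEN_ROWS(0), BETWEEN_CELLS(1);

    private final int depth;
    LimitScope(int depth) { this.depth = depth; }

    boolean canEnforceLimitFromScope(LimitScope checkerScope) {
      return checkerScope != null && checkerScope.depth >= this.depth;
    }
  }

  public static void main(String[] args) {
    // A cell-level checker can enforce a row-scoped limit...
    System.out.println(LimitScope.BETWEEN_ROWS.canEnforceLimitFromScope(LimitScope.BETWEEN_CELLS)); // true
    // ...but a row-level checker cannot enforce a cell-scoped limit.
    System.out.println(LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(LimitScope.BETWEEN_ROWS)); // false
  }
}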

      [04/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
      --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
index 2510283..418c60c 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialReadTest.html
      @@ -77,77 +77,77 @@
 069import org.apache.hadoop.hbase.client.RowMutations;
 070import org.apache.hadoop.hbase.client.Scan;
 071import org.apache.hadoop.hbase.client.Table;
-072import org.apache.hadoop.hbase.filter.BinaryComparator;
-073import org.apache.hadoop.hbase.filter.Filter;
-074import org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import org.apache.hadoop.hbase.filter.FilterList;
-076import org.apache.hadoop.hbase.filter.PageFilter;
-077import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import org.apache.hadoop.hbase.io.compress.Compression;
-080import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import org.apache.hadoop.hbase.regionserver.BloomType;
-084import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import org.apache.hadoop.hbase.trace.TraceUtil;
-088import org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import org.apache.hadoop.hbase.util.Bytes;
-090import org.apache.hadoop.hbase.util.Hash;
-091import org.apache.hadoop.hbase.util.MurmurHash;
-092import org.apache.hadoop.hbase.util.Pair;
-093import org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import org.apache.hadoop.mapreduce.Mapper;
-098import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import org.apache.hadoop.util.ToolRunner;
-103import org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import org.apache.htrace.core.TraceScope;
-106import org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script for evaluating HBase performance and scalability.  Runs an HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the
-121 * <a href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132  static final String RANDOM_READ = "randomRead";
-133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper MAPPER = new ObjectMapper();
-135  static {
-136    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = "TestTable";
-140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import org.apache.hadoop.hbase.filter.Filter;
      +075import 
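Since PerformanceEvaluation implements Hadoop's Tool, it can also be driven from code rather than the shell. A hedged sketch of such an invocation follows; the single-Configuration constructor and the flag/argument order ("[opts] command nclients") are read from the source above, but verify them against your HBase version before relying on this.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.util.ToolRunner;

// Sketch only: runs the randomRead experiment with one client in the
// non-MapReduce, multithreaded mode selected by --nomapred.
public class PerfEvalDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int ret = ToolRunner.run(conf, new PerformanceEvaluation(conf),
        new String[] { "--nomapred", "--rows=1000", "randomRead", "1" });
    System.exit(ret);
  }
}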
      

      [04/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilterSubclass.html
      --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilterSubclass.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilterSubclass.html
index 7a938de..43a87b6 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilterSubclass.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilterSubclass.html
      @@ -33,539 +33,515 @@
 025import java.util.ArrayList;
 026import java.util.List;
 027import java.util.UUID;
-028import java.util.concurrent.atomic.AtomicBoolean;
-029import java.util.concurrent.atomic.AtomicInteger;
-030import java.util.concurrent.atomic.AtomicReference;
-031import org.apache.hadoop.hbase.Cell;
-032import org.apache.hadoop.hbase.HBaseClassTestRule;
-033import org.apache.hadoop.hbase.Waiter;
-034import org.apache.hadoop.hbase.client.Connection;
-035import org.apache.hadoop.hbase.client.ConnectionFactory;
-036import org.apache.hadoop.hbase.client.Put;
-037import org.apache.hadoop.hbase.client.RegionInfo;
-038import org.apache.hadoop.hbase.client.Table;
-039import org.apache.hadoop.hbase.regionserver.HRegion;
-040import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
-041import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource;
-042import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
-043import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource;
-044import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl;
-045import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
-046import org.apache.hadoop.hbase.testclassification.MediumTests;
-047import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-048import org.apache.hadoop.hbase.util.Bytes;
-049import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-050import org.apache.hadoop.hbase.util.Threads;
-051import org.apache.hadoop.hbase.wal.WAL.Entry;
-052import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-053import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
-054import org.junit.AfterClass;
-055import org.junit.Assert;
-056import org.junit.Before;
-057import org.junit.BeforeClass;
-058import org.junit.ClassRule;
-059import org.junit.Test;
-060import org.junit.experimental.categories.Category;
-061import org.slf4j.Logger;
-062import org.slf4j.LoggerFactory;
-063
-064/**
-065 * Tests ReplicationSource and ReplicationEndpoint interactions
-066 */
-067@Category({ ReplicationTests.class, MediumTests.class })
-068public class TestReplicationEndpoint extends TestReplicationBase {
-069
-070  @ClassRule
-071  public static final HBaseClassTestRule CLASS_RULE =
-072      HBaseClassTestRule.forClass(TestReplicationEndpoint.class);
-073
-074  private static final Logger LOG = LoggerFactory.getLogger(TestReplicationEndpoint.class);
-075
-076  static int numRegionServers;
-077
-078  @BeforeClass
-079  public static void setUpBeforeClass() throws Exception {
-080    TestReplicationBase.setUpBeforeClass();
-081    numRegionServers = utility1.getHBaseCluster().getRegionServerThreads().size();
-082  }
-083
-084  @AfterClass
-085  public static void tearDownAfterClass() throws Exception {
-086    TestReplicationBase.tearDownAfterClass();
-087    // check stop is called
-088    Assert.assertTrue(ReplicationEndpointForTest.stoppedCount.get() > 0);
-089  }
-090
-091  @Before
-092  public void setup() throws Exception {
-093    ReplicationEndpointForTest.contructedCount.set(0);
-094    ReplicationEndpointForTest.startedCount.set(0);
-095    ReplicationEndpointForTest.replicateCount.set(0);
-096    ReplicationEndpointReturningFalse.replicated.set(false);
-097    ReplicationEndpointForTest.lastEntries = null;
-098    final List<RegionServerThread> rsThreads =
-099        utility1.getMiniHBaseCluster().getRegionServerThreads();
-100    for (RegionServerThread rs : rsThreads) {
-101      utility1.getAdmin().rollWALWriter(rs.getRegionServer().getServerName());
-102    }
-103    // Wait for all log rolls to finish
-104    utility1.waitFor(3000, new Waiter.ExplainingPredicate<Exception>() {
-105      @Override
-106      public boolean evaluate() throws Exception {
-107        for (RegionServerThread rs : rsThreads) {
-108          if (!rs.getRegionServer().walRollRequestFinished()) {
-109            return false;
-110          }
-111        }
-112        return true;
-113      }
-114
      -115  
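The setup() method above uses the poll-until-true idiom (utility1.waitFor with a predicate) to block until every region server finishes its WAL roll. A self-contained sketch of that idiom; the HBase Waiter utility adds failure explanations and configurable intervals, so treat this as the bare shape only:

import java.util.concurrent.Callable;

// Sketch: poll a predicate until it holds or the timeout elapses.
public class WaitForDemo {
  static boolean waitFor(long timeoutMs, long intervalMs, Callable<Boolean> predicate)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (predicate.call()) {
        return true;
      }
      Thread.sleep(intervalMs);
    }
    return false; // timed out without the condition becoming true
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    // Trivial predicate: becomes true after roughly 200 ms.
    boolean ok = waitFor(3000, 50, () -> System.currentTimeMillis() - start > 200);
    System.out.println("condition met: " + ok);
  }
}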

      [04/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
      @@ -2113,3031 +2113,3033 @@
 2105          errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106              tableName + " unable to delete dangling table state " + tableState);
 2107        }
-2108      } else {
-2109        errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110            tableName + " has dangling table state " + tableState);
-2111      }
-2112    }
-2113  }
-2114  // check that all tables have states
-2115  for (TableName tableName : tablesInfo.keySet()) {
-2116    if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-2117      if (fixMeta) {
-2118        MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED);
-2119        TableState newState = MetaTableAccessor.getTableState(connection, tableName);
-2120        if (newState == null) {
-2121          errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122              "Unable to change state for table " + tableName + " in meta ");
-2123        }
-2124      } else {
-2125        errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126            tableName + " has no state in meta ");
-2127      }
-2128    }
-2129  }
-2130  }
-2131
-2132  private void preCheckPermission() throws IOException, AccessDeniedException {
-2133    if (shouldIgnorePreCheckPermission()) {
-2134      return;
-2135    }
-2136
-2137    Path hbaseDir = FSUtils.getRootDir(getConf());
-2138    FileSystem fs = hbaseDir.getFileSystem(getConf());
-2139    UserProvider userProvider = UserProvider.instantiate(getConf());
-2140    UserGroupInformation ugi = userProvider.getCurrent().getUGI();
-2141    FileStatus[] files = fs.listStatus(hbaseDir);
-2142    for (FileStatus file : files) {
-2143      try {
-2144        FSUtils.checkAccess(ugi, file, FsAction.WRITE);
-2145      } catch (AccessDeniedException ace) {
-2146        LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
-2147        errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
-2148            + " does not have write perms to " + file.getPath()
-2149            + ". Please rerun hbck as hdfs user " + file.getOwner());
-2150        throw ace;
-2151      }
-2152    }
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo hi) throws IOException {
-2159    deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] metaKey) throws IOException {
-2166    Delete d = new Delete(metaKey);
-2167    meta.delete(d);
-2168    LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo hi) throws IOException {
-2175    RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
-2176    Delete d = new Delete(hi.metaEntry.getRegionName());
-2177    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179    mutations.add(d);
-2180
-2181    RegionInfo hri = RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182        .setOffline(false)
-2183        .setSplit(false)
-2184        .build();
-2185    Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
-2186    mutations.add(p);
-2187
-2188    meta.mutateRow(mutations);
-2189    LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This backwards-compatibility wrapper is for permanently offlining a region
-2194   * that should not be alive.  If the region server does not support the
-2195   * "offline" method, it will use the closest unassign method instead.  This
-2196   * will basically work until one attempts to disable or delete the affected
-2197   * table.  The problem has to do with in-memory-only master state, so
-2198   * restarting the HMaster or failing over to another should fix this.
-2199   */
-2200  private void offline(byte[] regionName) throws IOException {
-2201    String regionString = Bytes.toStringBinary(regionName);
-2202    if (!rsSupportsOffline) {
      -2203  

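The resetSplitParent() body in the hunk above bundles a Delete and a Put into one RowMutations so both catalog-row changes apply atomically. A minimal, self-contained sketch of that pattern against the public HBase client API follows; the table name and column qualifiers here are illustrative stand-ins, not the real hbase:meta qualifiers.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ResetSplitParentSketch {
  // Atomically delete two qualifiers and rewrite an info cell on a single
  // row, mirroring the shape of resetSplitParent() in the diff above.
  static void resetRow(Connection conn, byte[] row) throws IOException {
    try (Table meta = conn.getTable(TableName.valueOf("demo:catalog"))) {
      byte[] fam = Bytes.toBytes("info");
      Delete d = new Delete(row);
      d.addColumn(fam, Bytes.toBytes("splitA"));
      d.addColumn(fam, Bytes.toBytes("splitB"));
      Put p = new Put(row);
      p.addColumn(fam, Bytes.toBytes("regioninfo"), Bytes.toBytes("serialized-info"));
      RowMutations mutations = new RowMutations(row);
      mutations.add(d);
      mutations.add(p);
      meta.mutateRow(mutations); // both changes commit together on the row
    }
  }
}

Because mutateRow is atomic per row, a reader never observes the split qualifiers deleted without the rewritten region info, which is exactly why hbck uses this shape rather than two separate calls.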
      [04/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/master/HMasterCommandLine.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMasterCommandLine.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMasterCommandLine.html
      index 64b9ab5..30fe780 100644
      --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMasterCommandLine.html
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMasterCommandLine.html
      @@ -30,308 +30,309 @@
       022import java.io.IOException;
       023import java.util.List;
       024
-025import org.apache.commons.cli.CommandLine;
-026import org.apache.commons.cli.GnuParser;
-027import org.apache.commons.cli.Options;
-028import org.apache.commons.cli.ParseException;
-029import org.apache.hadoop.conf.Configuration;
-030import org.apache.hadoop.hbase.HConstants;
-031import org.apache.hadoop.hbase.LocalHBaseCluster;
-032import org.apache.hadoop.hbase.MasterNotRunningException;
-033import org.apache.hadoop.hbase.ZNodeClearer;
-034import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-035import org.apache.hadoop.hbase.trace.TraceUtil;
-036import org.apache.yetus.audience.InterfaceAudience;
-037import org.apache.hadoop.hbase.client.Admin;
-038import org.apache.hadoop.hbase.client.Connection;
-039import org.apache.hadoop.hbase.client.ConnectionFactory;
-040import org.apache.hadoop.hbase.regionserver.HRegionServer;
-041import org.apache.hadoop.hbase.util.JVMClusterUtil;
-042import org.apache.hadoop.hbase.util.ServerCommandLine;
-043import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-044import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-045import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-046import org.apache.zookeeper.KeeperException;
-047import org.slf4j.Logger;
-048import org.slf4j.LoggerFactory;
-049
-050@InterfaceAudience.Private
-051public class HMasterCommandLine extends ServerCommandLine {
-052  private static final Logger LOG = LoggerFactory.getLogger(HMasterCommandLine.class);
-053
-054  private static final String USAGE =
-055    "Usage: Master [opts] start|stop|clear\n" +
-056    " start  Start Master. If local mode, start Master and RegionServer in same JVM\n" +
-057    " stop   Start cluster shutdown; Master signals RegionServer shutdown\n" +
-058    " clear  Delete the master znode in ZooKeeper after a master crashes\n "+
-059    " where [opts] are:\n" +
-060    "   --minRegionServers=<servers>   Minimum RegionServers needed to host user tables.\n" +
-061    "   --localRegionServers=<servers> " +
-062      "RegionServers to start in master process when in standalone mode.\n" +
-063    "   --masters=<servers>            Masters to start in this process.\n" +
-064    "   --backup                       Master should start in backup mode";
-065
-066  private final Class<? extends HMaster> masterClass;
-067
-068  public HMasterCommandLine(Class<? extends HMaster> masterClass) {
-069    this.masterClass = masterClass;
-070  }
-071
-072  @Override
-073  protected String getUsage() {
-074    return USAGE;
-075  }
-076
-077  @Override
-078  public int run(String args[]) throws Exception {
-079    Options opt = new Options();
-080    opt.addOption("localRegionServers", true,
-081      "RegionServers to start in master process when running standalone");
-082    opt.addOption("masters", true, "Masters to start in this process");
-083    opt.addOption("minRegionServers", true, "Minimum RegionServers needed to host user tables");
-084    opt.addOption("backup", false, "Do not try to become HMaster until the primary fails");
-085
-086    CommandLine cmd;
-087    try {
-088      cmd = new GnuParser().parse(opt, args);
-089    } catch (ParseException e) {
-090      LOG.error("Could not parse: ", e);
-091      usage(null);
-092      return 1;
-093    }
-094
+025import org.apache.hadoop.conf.Configuration;
+026import org.apache.hadoop.hbase.HConstants;
+027import org.apache.hadoop.hbase.LocalHBaseCluster;
+028import org.apache.hadoop.hbase.MasterNotRunningException;
+029import org.apache.hadoop.hbase.ZNodeClearer;
+030import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+031import org.apache.hadoop.hbase.trace.TraceUtil;
+032import org.apache.yetus.audience.InterfaceAudience;
+033import org.apache.hadoop.hbase.client.Admin;
+034import org.apache.hadoop.hbase.client.Connection;
+035import org.apache.hadoop.hbase.client.ConnectionFactory;
+036import org.apache.hadoop.hbase.regionserver.HRegionServer;
+037import org.apache.hadoop.hbase.util.JVMClusterUtil;
+038import org.apache.hadoop.hbase.util.ServerCommandLine;
+039import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+040import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+041import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+042import org.apache.zookeeper.KeeperException;
+043import org.slf4j.Logger;
      
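The removed run() body above is a standard commons-cli 1.x parsing loop. A minimal, self-contained sketch of that pattern, with the option names copied from the usage string in the diff and the handling logic purely illustrative:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class CommandLineSketch {
  public static int run(String[] args) {
    Options opt = new Options();
    opt.addOption("localRegionServers", true, "RegionServers to start in master process");
    opt.addOption("masters", true, "Masters to start in this process");
    opt.addOption("backup", false, "Do not try to become HMaster until the primary fails");
    CommandLine cmd;
    try {
      cmd = new GnuParser().parse(opt, args);
    } catch (ParseException e) {
      // a parse failure prints usage and exits non-zero, as in the diff
      System.err.println("Could not parse: " + e.getMessage());
      return 1;
    }
    if (cmd.hasOption("backup")) {
      System.out.println("starting in backup mode");
    }
    return 0;
  }

  public static void main(String[] args) {
    System.exit(run(args));
  }
}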

      [04/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
      index a74a6ca..48004c8 100644
      --- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
      +++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.html
      @@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
       
       
       @InterfaceAudience.Private
-class RawAsyncTableImpl
+class RawAsyncTableImpl
 extends Object
 implements AsyncTable<AdvancedScanResultConsumer>
 The implementation of RawAsyncTable.
@@ -552,7 +552,7 @@

 conn
-private final AsyncConnectionImpl conn
+private final AsyncConnectionImpl conn

@@ -561,7 +561,7 @@

 tableName
-private final TableName tableName
+private final TableName tableName

@@ -570,7 +570,7 @@

 defaultScannerCaching
-private final int defaultScannerCaching
+private final int defaultScannerCaching

@@ -579,7 +579,7 @@

 defaultScannerMaxResultSize
-private final long defaultScannerMaxResultSize
+private final long defaultScannerMaxResultSize

@@ -588,7 +588,7 @@

 rpcTimeoutNs
-private final long rpcTimeoutNs
+private final long rpcTimeoutNs

@@ -597,7 +597,7 @@

 readRpcTimeoutNs
-private final long readRpcTimeoutNs
+private final long readRpcTimeoutNs

@@ -606,7 +606,7 @@

 writeRpcTimeoutNs
-private final long writeRpcTimeoutNs
+private final long writeRpcTimeoutNs

@@ -615,7 +615,7 @@

 operationTimeoutNs
-private final long operationTimeoutNs
+private final long operationTimeoutNs

@@ -624,7 +624,7 @@

 scanTimeoutNs
-private final long scanTimeoutNs
+private final long scanTimeoutNs

@@ -633,7 +633,7 @@

 pauseNs
-private final long pauseNs
+private final long pauseNs

@@ -642,7 +642,7 @@

 maxAttempts
-private final int maxAttempts
+private final int maxAttempts

@@ -651,7 +651,7 @@

 startLogErrorsCnt
-private final int startLogErrorsCnt
+private final int startLogErrorsCnt
       
       
       
@@ -668,7 +668,7 @@

 RawAsyncTableImpl
-RawAsyncTableImpl(AsyncConnectionImpl conn,
+RawAsyncTableImpl(AsyncConnectionImpl conn,
                   AsyncTableBuilderBase<?> builder)

@@ -686,7 +686,7 @@

 getName
-public TableName getName()
+public TableName getName()
 Description copied from interface: AsyncTable
 Gets the fully qualified table name instance of this table.

@@ -701,7 +701,7 @@

 getConfiguration
-public org.apache.hadoop.conf.Configuration getConfiguration()
+public org.apache.hadoop.conf.Configuration getConfiguration()
 Description copied from interface: AsyncTable
 Returns the Configuration object used by this instance.

@@ -720,7 +720,7 @@

 call
-private static <REQ,PREQ,PRESP,RESP> CompletableFuture<RESP> call(HBaseRpcController controller,
+private static <REQ,PREQ,PRESP,RESP> CompletableFuture<RESP> call(HBaseRpcController controller,
     HRegionLocation loc,
     org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.Interface stub,
     REQ req,

@@ -737,7 +737,7 @@

 mutate
-private static <REQ,RESP> CompletableFuture<RESP> mutate(HBaseRpcController controller,
+private static <REQ,RESP> CompletableFuture<RESP> mutate(HBaseRpcController controller,
     HRegionLocation loc,
     org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.Interface stub,
     REQ req,

@@ -753,7 +753,7 @@

 voidMutate
-private static <REQ> CompletableFuture

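The generic static "call" helper listed above is the usual shape for turning a callback-style RPC into a CompletableFuture while converting the wire response type. A self-contained sketch of that shape, with the RPC interface and converter as stand-ins rather than the real protobuf stub types:

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;
import java.util.function.Function;

public class AsyncCallSketch {
  // Stand-in for a protobuf stub call that reports (response, error) to a callback.
  interface RpcCall<REQ, RESP> {
    void call(REQ req, BiConsumer<RESP, Throwable> done);
  }

  // Mirrors the <REQ,PREQ,PRESP,RESP> helper: wrap the callback in a future
  // and map the raw protocol response to the user-visible type before completing.
  static <REQ, PRESP, RESP> CompletableFuture<RESP> call(
      RpcCall<REQ, PRESP> rpc, REQ req, Function<PRESP, RESP> respConverter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    rpc.call(req, (resp, error) -> {
      if (error != null) {
        future.completeExceptionally(error);
      } else {
        future.complete(respConverter.apply(resp));
      }
    });
    return future;
  }

  public static void main(String[] args) {
    RpcCall<String, Integer> lengthRpc = (req, done) -> done.accept(req.length(), null);
    call(lengthRpc, "hello", Object::toString).thenAccept(System.out::println); // prints "5"
  }
}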
      [04/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/balancer/class-use/BaseLoadBalancer.Cluster.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/balancer/class-use/BaseLoadBalancer.Cluster.html b/devapidocs/org/apache/hadoop/hbase/master/balancer/class-use/BaseLoadBalancer.Cluster.html
index 361d69b..6997988 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/class-use/BaseLoadBalancer.Cluster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/class-use/BaseLoadBalancer.Cluster.html
@@ -157,14 +157,6 @@

-protected BaseLoadBalancer.Cluster.Action
-FavoredStochasticBalancer.FavoredNodeLocalityPicker.generate(BaseLoadBalancer.Cluster cluster)
-
-(package private) BaseLoadBalancer.Cluster.Action
-FavoredStochasticBalancer.FavoredNodeLoadPicker.generate(BaseLoadBalancer.Cluster cluster)
-
 (package private) abstract BaseLoadBalancer.Cluster.Action
 StochasticLoadBalancer.CandidateGenerator.generate(BaseLoadBalancer.Cluster cluster)

@@ -189,6 +181,14 @@
 StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.generate(BaseLoadBalancer.Cluster cluster)

+protected BaseLoadBalancer.Cluster.Action
+FavoredStochasticBalancer.FavoredNodeLocalityPicker.generate(BaseLoadBalancer.Cluster cluster)
+
+(package private) BaseLoadBalancer.Cluster.Action
+FavoredStochasticBalancer.FavoredNodeLoadPicker.generate(BaseLoadBalancer.Cluster cluster)
+
 private int
 FavoredStochasticBalancer.FavoredNodeLocalityPicker.getDifferentFavoredNode(BaseLoadBalancer.Cluster cluster,
     List<ServerName> favoredNodes,
@@ -247,12 +247,12 @@

 private int
-FavoredStochasticBalancer.FavoredNodeLoadPicker.pickLeastLoadedServer(BaseLoadBalancer.Cluster cluster,
+StochasticLoadBalancer.LoadCandidateGenerator.pickLeastLoadedServer(BaseLoadBalancer.Cluster cluster,
     int thisServer)

 private int
-StochasticLoadBalancer.LoadCandidateGenerator.pickLeastLoadedServer(BaseLoadBalancer.Cluster cluster,
+FavoredStochasticBalancer.FavoredNodeLoadPicker.pickLeastLoadedServer(BaseLoadBalancer.Cluster cluster,
     int thisServer)
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
      index ad40e69..eb945fb 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
      @@ -197,8 +197,8 @@
       
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
       
      -org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType
       org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type
      +org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/class-use/CatalogJanitor.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/class-use/CatalogJanitor.html b/devapidocs/org/apache/hadoop/hbase/master/class-use/CatalogJanitor.html
      index d29d09b..ed61e0f 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/class-use/CatalogJanitor.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/CatalogJanitor.html
      @@ -117,11 +117,11 @@
       
       
       CatalogJanitor
      -MasterServices.getCatalogJanitor()
      +HMaster.getCatalogJanitor()
       
       
       CatalogJanitor
      -HMaster.getCatalogJanitor()
      +MasterServices.getCatalogJanitor()
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterSchema.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterSchema.html b/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterSchema.html
      index fb802a0..fcc4f20 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterSchema.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterSchema.html
      @@ -132,11 +132,11 @@
       
       
       ClusterSchema
      

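The class-use index above revolves around the generate(cluster) contract: each candidate generator proposes one balancing Action, which the stochastic balancer keeps only if it lowers the computed cost. A self-contained sketch of that contract, with all types as simplified stand-ins for the real balancer classes:

public class CandidateGeneratorSketch {
  static class Cluster { int[] regionsPerServer = {5, 1, 3}; }

  static class Action {
    final int fromServer, toServer;
    Action(int from, int to) { this.fromServer = from; this.toServer = to; }
  }

  abstract static class CandidateGenerator {
    // Propose one candidate move; the balancer evaluates its cost afterwards.
    abstract Action generate(Cluster cluster);
  }

  // A load-based picker: move a region from the most to the least loaded server.
  static class LoadCandidateGenerator extends CandidateGenerator {
    @Override
    Action generate(Cluster c) {
      int most = 0, least = 0;
      for (int i = 0; i < c.regionsPerServer.length; i++) {
        if (c.regionsPerServer[i] > c.regionsPerServer[most]) most = i;
        if (c.regionsPerServer[i] < c.regionsPerServer[least]) least = i;
      }
      return new Action(most, least);
    }
  }

  public static void main(String[] args) {
    Action a = new LoadCandidateGenerator().generate(new Cluster());
    System.out.println("move a region from server " + a.fromServer + " to " + a.toServer);
  }
}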
      [04/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
index abeccf1..ef30022 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
@@ -162,11 +162,11 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 ImmutableBytesWritable
-TableRecordReader.createKey()
+TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()

 ImmutableBytesWritable
-TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
+TableRecordReader.createKey()

 ImmutableBytesWritable
@@ -183,11 +183,9 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
-TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
     org.apache.hadoop.mapred.JobConf job,
-    org.apache.hadoop.mapred.Reporter reporter)
-Builds a TableRecordReader.
-
+    org.apache.hadoop.mapred.Reporter reporter)

 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
@@ -197,9 +195,11 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
-TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
     org.apache.hadoop.mapred.JobConf job,
-    org.apache.hadoop.mapred.Reporter reporter)
+    org.apache.hadoop.mapred.Reporter reporter)
+Builds a TableRecordReader.
+

@@ -218,12 +218,10 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 void
-IdentityTableMap.map(ImmutableBytesWritable key,
-    Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+    Result values,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+    org.apache.hadoop.mapred.Reporter reporter)

 void
@@ -236,19 +234,21 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-    Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+    Result value,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
+    org.apache.hadoop.mapred.Reporter reporter)
+Pass the key, value to reduce
+

 boolean
-TableRecordReader.next(ImmutableBytesWritable key,
+TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
     Result value)

 boolean
-TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
+TableRecordReader.next(ImmutableBytesWritable key,
     Result value)

@@ -281,12 +281,10 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 void
-IdentityTableMap.map(ImmutableBytesWritable key,
-    Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+    Result values,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+    org.apache.hadoop.mapred.Reporter reporter)

 void
@@ -299,10 +297,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-    Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+    Result value,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
+    org.apache.hadoop.mapred.Reporter reporter)
+Pass the key, value to reduce
+

 void
@@ -349,7 +349,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 private ImmutableBytesWritable
-MultithreadedTableMapper.SubMapRecordReader.key
+TableRecordReaderImpl.key

 private ImmutableBytesWritable
@@ -357,7 +357,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 private ImmutableBytesWritable
-TableRecordReaderImpl.key
+MultithreadedTableMapper.SubMapRecordReader.key

 (package private) ImmutableBytesWritable
@@ -427,33 +427,33 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 ImmutableBytesWritable
-MultithreadedTableMapper.SubMapRecordReader.getCurrentKey()
      

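The IdentityTableMap entry above ("Pass the key, value to reduce") is the old mapred-style map(key, value, output, reporter) signature with a pass-through body. A minimal sketch of that shape, with a stand-in for the org.apache.hadoop.mapred.OutputCollector interface so the snippet runs on its own:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class IdentityMapSketch {
  interface OutputCollector<K, V> { void collect(K key, V value) throws IOException; }

  // The identity mapper: forward each (row key, row result) pair unchanged.
  static <K, V> void map(K key, V value, OutputCollector<K, V> output) throws IOException {
    output.collect(key, value); // pass the key, value straight to reduce
  }

  public static void main(String[] args) throws IOException {
    List<String> sink = new ArrayList<>();
    OutputCollector<byte[], String> out = (k, v) -> sink.add(new String(k) + "=" + v);
    map("row1".getBytes(), "cell-data", out);
    System.out.println(sink); // [row1=cell-data]
  }
}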
      [04/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 9189760..b6baff7 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
 Apache HBase - Export Control
@@ -324,7 +324,7 @@
 The Apache Software Foundation. All rights reserved.
-  Last Published: 2018-03-20
+  Last Published: 2018-03-21

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/index.html
--
diff --git a/index.html b/index.html
index 53b0f07..97d9a58 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
 Apache HBase - Apache HBase™ Home
@@ -426,7 +426,7 @@
 The Apache Software Foundation. All rights reserved.
-  Last Published: 2018-03-20
+  Last Published: 2018-03-21

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/integration.html
--
diff --git a/integration.html b/integration.html
index f0ababb..8795959 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
 Apache HBase - CI Management
@@ -284,7 +284,7 @@
 The Apache Software Foundation. All rights reserved.
-  Last Published: 2018-03-20
+  Last Published: 2018-03-21

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/issue-tracking.html
--
diff --git a/issue-tracking.html b/issue-tracking.html
index 35e08e5..b8dd965 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
 Apache HBase - Issue Management
@@ -281,7 +281,7 @@
 The Apache Software Foundation. All rights reserved.
-  Last Published: 2018-03-20
+  Last Published: 2018-03-21

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/license.html
--
diff --git a/license.html b/license.html
index 368210a..71fce42 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
 Apache HBase - Project Licenses
@@ -484,7 +484,7 @@
 The Apache Software Foundation. All rights reserved.
-  Last Published: 2018-03-20
+  Last Published: 2018-03-21

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/mail-lists.html
--
diff --git a/mail-lists.html b/mail-lists.html
index 0954698..b5d366e 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
 Apache HBase - Project Mailing Lists
@@ -334,7 +334,7 @@
 The Apache Software Foundation. All rights reserved.
-  Last Published: 2018-03-20
+  Last Published: 2018-03-21

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/metrics.html
--
diff --git a/metrics.html b/metrics.html
index 8f29865..7ed0623 100644
--- a/metrics.html
+++ b/metrics.html
@@ -7,7 +7,7 @@
 Apache HBase - Apache HBase (TM) Metrics
@@ -452,7 +452,7 @@ export HBASE_REGIONSERVER_OPTS=$HBASE_JMX_OPTS -Dcom.sun.management.jmxrem
 The Apache Software Foundation. All rights reserved.
-  Last Published: 2018-03-20
+  Last Published: 2018-03-21

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/old_news.html
--
diff --git a/old_news.html b/old_news.html
index 

      [04/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
index 3bc66bb..97aa79c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
      @@ -1435,459 +1435,460 @@
 1427   */
 1428  private void execProcedure(final RootProcedureState procStack,
 1429      final Procedure<TEnvironment> procedure) {
-1430    Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432    // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException.
-1433    // The exception is caught below and then we hurry to the exit without disturbing state. The
-1434    // idea is that the processing of this procedure will be unsuspended later by an external event
-1435    // such the report of a region open. TODO: Currently, its possible for two worker threads
-1436    // to be working on the same procedure concurrently (locking in procedures is NOT about
-1437    // concurrency but about tying an entity to a procedure; i.e. a region to a particular
-1438    // procedure instance). This can make for issues if both threads are changing state.
-1439    // See env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440    // in RegionTransitionProcedure#reportTransition for example of Procedure putting
-1441    // itself back on the scheduler making it possible for two threads running against
-1442    // the one Procedure. Might be ok if they are both doing different, idempotent sections.
-1443    boolean suspended = false;
-1444
-1445    // Whether to 're-' -execute; run through the loop again.
-1446    boolean reExecute = false;
-1447
-1448    Procedure<TEnvironment>[] subprocs = null;
-1449    do {
-1450      reExecute = false;
-1451      try {
-1452        subprocs = procedure.doExecute(getEnvironment());
-1453        if (subprocs != null && subprocs.length == 0) {
-1454          subprocs = null;
-1455        }
-1456      } catch (ProcedureSuspendedException e) {
-1457        if (LOG.isTraceEnabled()) {
-1458          LOG.trace("Suspend " + procedure);
-1459        }
-1460        suspended = true;
-1461      } catch (ProcedureYieldException e) {
-1462        if (LOG.isTraceEnabled()) {
-1463          LOG.trace("Yield " + procedure + ": " + e.getMessage(), e);
-1464        }
-1465        scheduler.yield(procedure);
-1466        return;
-1467      } catch (InterruptedException e) {
-1468        if (LOG.isTraceEnabled()) {
-1469          LOG.trace("Yield interrupt " + procedure + ": " + e.getMessage(), e);
-1470        }
-1471        handleInterruptedException(procedure, e);
-1472        scheduler.yield(procedure);
-1473        return;
-1474      } catch (Throwable e) {
-1475        // Catch NullPointerExceptions or similar errors...
-1476        String msg = "CODE-BUG: Uncaught runtime exception: " + procedure;
-1477        LOG.error(msg, e);
-1478        procedure.setFailure(new RemoteProcedureException(msg, e));
-1479      }
-1480
-1481      if (!procedure.isFailed()) {
-1482        if (subprocs != null) {
-1483          if (subprocs.length == 1 && subprocs[0] == procedure) {
-1484            // Procedure returned itself. Quick-shortcut for a state machine-like procedure;
-1485            // i.e. we go around this loop again rather than go back out on the scheduler queue.
-1486            subprocs = null;
-1487            reExecute = true;
-1488            if (LOG.isTraceEnabled()) {
-1489              LOG.trace("Short-circuit to next step on pid=" + procedure.getProcId());
-1490            }
-1491          } else {
-1492            // Yield the current procedure, and make the subprocedure runnable
-1493            // subprocs may come back 'null'.
-1494            subprocs = initializeChildren(procStack, procedure, subprocs);
-1495            LOG.info("Initialized subprocedures=" +
-1496              (subprocs == null? null:
-1497                Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-1498                collect(Collectors.toList()).toString()));
-1499          }
-1500        } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
-1501          if (LOG.isTraceEnabled()) {
-1502            LOG.trace("Added to timeoutExecutor " + procedure);
-1503          }
-1504          timeoutExecutor.add(procedure);
-1505        } else if (!suspended) {
-1506          // No subtask, so we are done
-1507          procedure.setState(ProcedureState.SUCCESS);
-1508        }
-1509      }
-1510
-1511

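The comments in the execProcedure body above describe a re-execute loop: a step that "returns itself" short-circuits back into the loop instead of going out through the scheduler queue, and a suspension exits quietly until an external event requeues the procedure. A self-contained sketch of just that control flow, with the exception and procedure types simplified to stand-ins:

public class ExecLoopSketch {
  static class ProcedureSuspendedException extends Exception {}

  interface Procedure {
    // Returns this to request another step in the same worker, or null when done.
    Procedure doExecute() throws ProcedureSuspendedException;
  }

  static void execProcedure(Procedure procedure) {
    boolean reExecute;
    do {
      reExecute = false;
      try {
        Procedure next = procedure.doExecute();
        if (next == procedure) {
          reExecute = true; // short-circuit to the next step, skip the scheduler queue
        }
      } catch (ProcedureSuspendedException e) {
        return; // an external event will make the procedure runnable again later
      }
    } while (reExecute);
  }

  public static void main(String[] args) {
    int[] steps = {0};
    Procedure p = new Procedure() {
      @Override public Procedure doExecute() {
        steps[0]++;
        return steps[0] < 3 ? this : null; // three state-machine steps, then done
      }
    };
    execProcedure(p);
    System.out.println("executed steps: " + steps[0]); // 3
  }
}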
      [04/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
      --
diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
      index f7d6df6..84112c6 100644
      --- a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
      +++ b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
      @@ -139,8 +139,8 @@
       
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
       
      -org.apache.hadoop.hbase.wal.IOTestProvider.AllowedOperations
       org.apache.hadoop.hbase.wal.TestWALSplit.Corruptions
      +org.apache.hadoop.hbase.wal.IOTestProvider.AllowedOperations
       org.apache.hadoop.hbase.wal.FaultyFSLog.FailureType
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/overview-tree.html
      --
diff --git a/testdevapidocs/overview-tree.html b/testdevapidocs/overview-tree.html
      index 368fe98..1f25bf6 100644
      --- a/testdevapidocs/overview-tree.html
      +++ b/testdevapidocs/overview-tree.html
@@ -2462,6 +2462,7 @@
 org.apache.hadoop.hbase.coprocessor.TestCoprocessorConfiguration
 org.apache.hadoop.hbase.coprocessor.TestCoprocessorConfiguration.SystemCoprocessor (implements org.apache.hadoop.hbase.coprocessor.MasterCoprocessor, org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor)
 org.apache.hadoop.hbase.coprocessor.TestCoprocessorConfiguration.TableCoprocessor (implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor)
+org.apache.hadoop.hbase.client.TestCoprocessorDescriptor
 org.apache.hadoop.hbase.coprocessor.TestCoprocessorEndpoint
 org.apache.hadoop.hbase.coprocessor.TestCoprocessorHost
 org.apache.hadoop.hbase.coprocessor.TestCoprocessorHost.TestAbortable (implements org.apache.hadoop.hbase.Abortable)
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestBase.html
      --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestBase.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestBase.html
index 6944c54..05638d9 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestBase.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestBase.html
      @@ -84,7 +84,7 @@
 076    builder.setValue(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, "0.9");
 077  }
 078  Stream.of(FAMILIES).map(ColumnFamilyDescriptorBuilder::of)
-079    .forEachOrdered(builder::addColumnFamily);
+079    .forEachOrdered(builder::setColumnFamily);
 080  UTIL.getAdmin().createTable(builder.build());
 081  tool.setConf(UTIL.getConfiguration());
 082  }
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestTool.AtomicGetReader.html
      --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestTool.AtomicGetReader.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestTool.AtomicGetReader.html
index e7b37cc..f64dba2 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestTool.AtomicGetReader.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestTool.AtomicGetReader.html
      @@ -328,7 +328,7 @@
 320    if (!admin.tableExists(TABLE_NAME)) {
 321      TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLE_NAME);
 322      Stream.of(FAMILIES).map(ColumnFamilyDescriptorBuilder::of)
-323        .forEachOrdered(builder::addColumnFamily);
+323        .forEachOrdered(builder::setColumnFamily);
 324      admin.createTable(builder.build());
 325    }
 326    ColumnFamilyDescriptor cfd = admin.getDescriptor(TABLE_NAME).getColumnFamilies()[0];
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestTool.AtomicScanReader.html
      --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestTool.AtomicScanReader.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/AcidGuaranteesTestTool.AtomicScanReader.html
index e7b37cc..f64dba2 100644
--- 

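The rename in the hunks above (addColumnFamily to setColumnFamily) is a builder-API change on TableDescriptorBuilder. A minimal sketch of the newer call, mirroring the stream pattern in the diff; the table and family names here are illustrative:

import java.util.stream.Stream;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class DescriptorSketch {
  static TableDescriptor build() {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"));
    // attach each family with setColumnFamily(...), the post-rename method
    Stream.of("f1", "f2")
        .map(ColumnFamilyDescriptorBuilder::of)
        .forEachOrdered(builder::setColumnFamily);
    return builder.build();
  }
}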
      [04/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironment.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironment.html
index 3a60d1e..f2ada44 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironment.html
@@ -126,7 +126,7 @@ var activeTableTab = "activeTableTab";

-private static class RegionServerCoprocessorHost.RegionServerEnvironment
+private static class RegionServerCoprocessorHost.RegionServerEnvironment
 extends BaseEnvironment<RegionServerCoprocessor>
 implements RegionServerCoprocessorEnvironment
 Coprocessor environment extension providing access to region server
@@ -274,7 +274,7 @@

 metricRegistry
-private final MetricRegistry metricRegistry
+private final MetricRegistry metricRegistry

@@ -283,7 +283,7 @@

 services
-private final RegionServerServices services
+private final RegionServerServices services

@@ -300,7 +300,7 @@

 RegionServerEnvironment
-public RegionServerEnvironment(RegionServerCoprocessor impl,
+public RegionServerEnvironment(RegionServerCoprocessor impl,
     int priority,
     int seq,
     org.apache.hadoop.conf.Configuration conf,
@@ -321,7 +321,7 @@

 getOnlineRegions
-public OnlineRegions getOnlineRegions()
+public OnlineRegions getOnlineRegions()

 Specified by:
 getOnlineRegions in interface RegionServerCoprocessorEnvironment
@@ -336,7 +336,7 @@

 getServerName
-public ServerName getServerName()
+public ServerName getServerName()

 Specified by:
 getServerName in interface RegionServerCoprocessorEnvironment
@@ -351,7 +351,7 @@

 getConnection
-public Connection getConnection()
+public Connection getConnection()
 Description copied from interface: RegionServerCoprocessorEnvironment
 Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection
 with the hosting server. Throws UnsupportedOperationException if you try to close
@@ -390,7 +390,7 @@

 createConnection
-public Connection createConnection(org.apache.hadoop.conf.Configuration conf)
+public Connection createConnection(org.apache.hadoop.conf.Configuration conf)
     throws IOException
 Description copied from interface: RegionServerCoprocessorEnvironment
 Creates a cluster connection using the passed Configuration.
@@ -426,7 +426,7 @@

 getMetricRegistryForRegionServer
-public MetricRegistry getMetricRegistryForRegionServer()
+public MetricRegistry getMetricRegistryForRegionServer()
 Description copied from interface: RegionServerCoprocessorEnvironment
 Returns a MetricRegistry that can be used to track metrics at the region server level.

@@ -446,7 +446,7 @@

 shutdown
-public void shutdown()
+public void shutdown()
 Description copied from class: BaseEnvironment
 Clean up the environment
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironmentForCoreCoprocessors.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironmentForCoreCoprocessors.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironmentForCoreCoprocessors.html
index a070751..5d82f45 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironmentForCoreCoprocessors.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.RegionServerEnvironmentForCoreCoprocessors.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";

-private static class RegionServerCoprocessorHost.RegionServerEnvironmentForCoreCoprocessors
+private static class RegionServerCoprocessorHost.RegionServerEnvironmentForCoreCoprocessors
 extends RegionServerCoprocessorHost.RegionServerEnvironment
 implements HasRegionServerServices
 Special version of RegionServerEnvironment that exposes RegionServerServices for Core
@@ -250,7 +250,7 @@

 regionServerServices
 final RegionServerServices 

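The javadoc quoted above draws a sharp line between the environment's shared connection (never close it) and a connection the coprocessor creates for itself (close it when done). A sketch of both sides of that contract; the class and method names follow the javadoc in the diff, while the surrounding coprocessor wiring is illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;

public class EnvConnectionSketch {
  static void useSharedConnection(RegionServerCoprocessorEnvironment env) throws IOException {
    Connection shared = env.getConnection(); // shared with the host server: never close it
    try (Table t = shared.getTable(TableName.valueOf("demo"))) {
      // short-lived Table handles are fine to open and close
    }
  }

  static void usePrivateConnection(RegionServerCoprocessorEnvironment env) throws IOException {
    // a connection we created ourselves is ours to manage, so close it when done
    try (Connection own = env.createConnection(env.getConfiguration())) {
      // heavier, isolated work goes here
    }
  }
}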
      [04/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index 85217f8..8caa693 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -133,11 +133,11 @@

 ProcedureExecutor<MasterProcedureEnv>
-MasterServices.getMasterProcedureExecutor()
+HMaster.getMasterProcedureExecutor()

 ProcedureExecutor<MasterProcedureEnv>
-HMaster.getMasterProcedureExecutor()
+MasterServices.getMasterProcedureExecutor()

 private RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv,?>
@@ -190,27 +190,27 @@

 protected boolean
-SplitTableRegionProcedure.abort(MasterProcedureEnv env)
+RegionTransitionProcedure.abort(MasterProcedureEnv env)

 protected boolean
-MergeTableRegionsProcedure.abort(MasterProcedureEnv env)
+SplitTableRegionProcedure.abort(MasterProcedureEnv env)

 protected boolean
-RegionTransitionProcedure.abort(MasterProcedureEnv env)
+MergeTableRegionsProcedure.abort(MasterProcedureEnv env)

 protected Procedure.LockState
-GCRegionProcedure.acquireLock(MasterProcedureEnv env)
+RegionTransitionProcedure.acquireLock(MasterProcedureEnv env)

 protected Procedure.LockState
-MergeTableRegionsProcedure.acquireLock(MasterProcedureEnv env)
+GCRegionProcedure.acquireLock(MasterProcedureEnv env)

 protected Procedure.LockState
-RegionTransitionProcedure.acquireLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.acquireLock(MasterProcedureEnv env)

 protected boolean
@@ -303,7 +303,7 @@

 protected void
-AssignProcedure.finishTransition(MasterProcedureEnv env,
+UnassignProcedure.finishTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode)

@@ -313,7 +313,7 @@

 protected void
-UnassignProcedure.finishTransition(MasterProcedureEnv env,
+AssignProcedure.finishTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode)

@@ -322,7 +322,7 @@

 protected ProcedureMetrics
-AssignProcedure.getProcedureMetrics(MasterProcedureEnv env)
+UnassignProcedure.getProcedureMetrics(MasterProcedureEnv env)

 protected ProcedureMetrics
@@ -334,7 +334,7 @@

 protected ProcedureMetrics
-UnassignProcedure.getProcedureMetrics(MasterProcedureEnv env)
+AssignProcedure.getProcedureMetrics(MasterProcedureEnv env)

 (package private) static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
@@ -365,7 +365,7 @@

 ServerName
-AssignProcedure.getServer(MasterProcedureEnv env)
+UnassignProcedure.getServer(MasterProcedureEnv env)

 abstract ServerName
@@ -375,7 +375,7 @@

 ServerName
-UnassignProcedure.getServer(MasterProcedureEnv env)
+AssignProcedure.getServer(MasterProcedureEnv env)

 private ServerName
@@ -392,19 +392,19 @@

 protected boolean
-MergeTableRegionsProcedure.hasLock(MasterProcedureEnv env)
+RegionTransitionProcedure.hasLock(MasterProcedureEnv env)

 protected boolean
-RegionTransitionProcedure.hasLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.hasLock(MasterProcedureEnv env)

 protected boolean
-MergeTableRegionsProcedure.holdLock(MasterProcedureEnv env)
+RegionTransitionProcedure.holdLock(MasterProcedureEnv env)

 protected boolean
-RegionTransitionProcedure.holdLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.holdLock(MasterProcedureEnv env)

 private boolean
@@ -518,15 +518,15 @@

 protected void
-MergeTableRegionsProcedure.releaseLock(MasterProcedureEnv env)
+RegionTransitionProcedure.releaseLock(MasterProcedureEnv env)

 protected void
-RegionTransitionProcedure.releaseLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.releaseLock(MasterProcedureEnv env)

 RemoteProcedureDispatcher.RemoteOperation
-AssignProcedure.remoteCallBuild(MasterProcedureEnv env,
+UnassignProcedure.remoteCallBuild(MasterProcedureEnv env,
     ServerName serverName)

@@ -536,12 +536,12 @@

 RemoteProcedureDispatcher.RemoteOperation
-UnassignProcedure.remoteCallBuild(MasterProcedureEnv env,
+AssignProcedure.remoteCallBuild(MasterProcedureEnv env,
     ServerName serverName)

 protected boolean
-AssignProcedure.remoteCallFailed(MasterProcedureEnv env,
+UnassignProcedure.remoteCallFailed(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode,
     IOException exception)

@@ -553,7 +553,7 @@

 protected boolean
-UnassignProcedure.remoteCallFailed(MasterProcedureEnv env,

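The index above repeatedly lists the locking hooks a procedure overrides: acquireLock before each step, releaseLock afterwards, and holdLock to keep the lock across steps. A self-contained sketch of that protocol, with all types as simplified stand-ins for the procedure framework classes:

public class ProcedureLockSketch {
  enum LockState { LOCK_ACQUIRED, LOCK_EVENT_WAIT }

  abstract static class Procedure<TEnv> {
    protected abstract LockState acquireLock(TEnv env);
    protected abstract void releaseLock(TEnv env);
    // When true, the framework keeps the lock between execution steps.
    protected boolean holdLock(TEnv env) { return false; }
  }

  static class Env { boolean tableLockFree = true; }

  static class DemoRegionProcedure extends Procedure<Env> {
    @Override protected LockState acquireLock(Env env) {
      if (!env.tableLockFree) {
        return LockState.LOCK_EVENT_WAIT; // requeue; the framework retries when woken
      }
      env.tableLockFree = false;
      return LockState.LOCK_ACQUIRED;
    }

    @Override protected void releaseLock(Env env) {
      env.tableLockFree = true;
    }

    @Override protected boolean holdLock(Env env) {
      return true; // keep the lock across steps, as the region procedures above do
    }
  }

  public static void main(String[] args) {
    Env env = new Env();
    DemoRegionProcedure p = new DemoRegionProcedure();
    System.out.println(p.acquireLock(env)); // LOCK_ACQUIRED
    p.releaseLock(env);
  }
}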
      [04/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
      index cd5eaa6..2c04026 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
      @@ -18,7 +18,7 @@
       catch(err) {
       }
       //-->
      -var methods = 
      {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":9,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":9,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":9,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":9,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
       
      :10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":9,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":9,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":9};
      +var methods = 
      {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":9,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":9,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":9,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":9,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
       
      :10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":9,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":9,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":9};
       var tabs = {65535:["t0","All Methods"],1:["t1","Static 
      Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
       var altColor = "altColor";
       var rowColor = "rowColor";
@@ -999,22 +999,26 @@
 getSnapshotManager()

+SnapshotQuotaObserverChore
+getSnapshotQuotaObserverChore()
+
 SpaceQuotaSnapshotNotifier
 getSpaceQuotaSnapshotNotifier()

 SplitOrMergeTracker
 getSplitOrMergeTracker()

 long
 getSplitPlanCount()

 TableDescriptors
 getTableDescriptors()

 private List<TableDescriptor>
 getTableDescriptors(List<TableDescriptor> htds,
     String namespace,
@@ -1022,7 +1026,7 @@
     List<TableName> tableNameList,
     boolean includeSysTables)

 (package private) 
      [04/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
      index 3563b1c..0cc71bf 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -1577,387 +1577,386 @@
  }

  public void markRegionAsSplit(final RegionInfo parent, final ServerName serverName,
      final RegionInfo daughterA, final RegionInfo daughterB) throws IOException {
    // Update hbase:meta. Parent will be marked offline and split up in hbase:meta.
    // The parent stays in regionStates until cleared when removed by CatalogJanitor.
    // Update its state in regionStates so it shows as offline and split when read
    // later figuring what regions are in a table and what are not: see
    // regionStates#getRegionsOfTable
    final RegionStateNode node = regionStates.getOrCreateRegionStateNode(parent);
    node.setState(State.SPLIT);
    final RegionStateNode nodeA = regionStates.getOrCreateRegionStateNode(daughterA);
    nodeA.setState(State.SPLITTING_NEW);
    final RegionStateNode nodeB = regionStates.getOrCreateRegionStateNode(daughterB);
    nodeB.setState(State.SPLITTING_NEW);

    regionStateStore.splitRegion(parent, daughterA, daughterB, serverName);
    if (shouldAssignFavoredNodes(parent)) {
      List<ServerName> onlineServers = this.master.getServerManager().getOnlineServersList();
      ((FavoredNodesPromoter) getBalancer())
          .generateFavoredNodesForDaughter(onlineServers, parent, daughterA, daughterB);
    }
  }

  /**
   * When called here, the merge has happened. The two merged regions have been
   * unassigned and the above markRegionClosed has been called on each so they have been
   * disassociated from a hosting Server. The merged region will be open after this call. The
   * merged regions are removed from hbase:meta below. Later they are deleted from the filesystem
   * by the catalog janitor running against hbase:meta. It notices when the merged region no
   * longer holds references to the old regions.
   */
  public void markRegionAsMerged(final RegionInfo child, final ServerName serverName,
      final RegionInfo mother, final RegionInfo father) throws IOException {
    final RegionStateNode node = regionStates.getOrCreateRegionStateNode(child);
    node.setState(State.MERGED);
    regionStates.deleteRegion(mother);
    regionStates.deleteRegion(father);
    regionStateStore.mergeRegions(child, mother, father, serverName);
    if (shouldAssignFavoredNodes(child)) {
      ((FavoredNodesPromoter) getBalancer())
          .generateFavoredNodesForMergedRegion(child, mother, father);
    }
  }

  /*
   * Favored nodes should be applied only when FavoredNodes balancer is configured and the region
   * belongs to a non-system table.
   */
  private boolean shouldAssignFavoredNodes(RegionInfo region) {
    return this.shouldAssignRegionsWithFavoredNodes &&
        FavoredNodesManager.isFavoredNodeApplicable(region);
  }

  // ============================================================================================
  //  Assign Queue (Assign/Balance)
  // ============================================================================================
  private final ArrayList<RegionStateNode> pendingAssignQueue = new ArrayList<RegionStateNode>();
  private final ReentrantLock assignQueueLock = new ReentrantLock();
  private final Condition assignQueueFullCond = assignQueueLock.newCondition();

  /**
   * Add the assign operation to the assignment queue.
   * The pending assignment operation will be processed,
   * and each region will be assigned by a server using the balancer.
   */
  protected void queueAssign(final RegionStateNode regionNode) {
    regionNode.getProcedureEvent().suspend();

    // TODO: quick-start for meta and the other sys-tables?
    assignQueueLock.lock();
    try {
      pendingAssignQueue.add(regionNode);
      if (regionNode.isSystemTable() ||
          pendingAssignQueue.size() == 1 ||
          pendingAssignQueue.size() >= assignDispatchWaitQueueMaxSize) {
        assignQueueFullCond.signal();
      }
    } finally {
      assignQueueLock.unlock();
    }
  }
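The `queueAssign` above batches assignments behind a `ReentrantLock`/`Condition` pair, waking the dispatcher when the first element arrives, when a system-table region is queued, or when the batch threshold is reached. A self-contained sketch of that producer/consumer shape (generic element type and the `maxBatch` knob stand in for the HBase-specific ones):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class AssignQueueSketch<T> {
  private final List<T> pending = new ArrayList<>();
  private final ReentrantLock lock = new ReentrantLock();
  private final Condition notEmptyOrFull = lock.newCondition();
  private final int maxBatch; // stands in for assignDispatchWaitQueueMaxSize

  public AssignQueueSketch(int maxBatch) {
    this.maxBatch = maxBatch;
  }

  /** Producer side: mirrors the three signal conditions in queueAssign above. */
  public void enqueue(T item, boolean highPriority) {
    lock.lock();
    try {
      pending.add(item);
      if (highPriority || pending.size() == 1 || pending.size() >= maxBatch) {
        notEmptyOrFull.signal();
      }
    } finally {
      lock.unlock();
    }
  }

  /** Consumer side: block until woken, then drain the whole batch at once. */
  public List<T> drain() throws InterruptedException {
    lock.lock();
    try {
      while (pending.isEmpty()) {
        notEmptyOrFull.await();
      }
      List<T> batch = new ArrayList<>(pending);
      pending.clear();
      return batch;
    } finally {
      lock.unlock();
    }
  }
}

Signaling on `size() == 1` wakes an idle consumer immediately, while the `maxBatch` check keeps a busy consumer from being signaled on every single add.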
      

      [04/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/TableDescriptor.html
      --
      diff --git a/apidocs/org/apache/hadoop/hbase/client/TableDescriptor.html 
      b/apidocs/org/apache/hadoop/hbase/client/TableDescriptor.html
      index b6aac56..62edd2e 100644
      --- a/apidocs/org/apache/hadoop/hbase/client/TableDescriptor.html
      +++ b/apidocs/org/apache/hadoop/hbase/client/TableDescriptor.html
      @@ -18,7 +18,7 @@
       catch(err) {
       }
       //-->
      -var methods = 
      {"i0":6,"i1":6,"i2":6,"i3":6,"i4":17,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":38,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":18};
      +var methods = 
      {"i0":6,"i1":6,"i2":6,"i3":6,"i4":17,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":38,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":18};
       var tabs = {65535:["t0","All Methods"],1:["t1","Static 
      Methods"],2:["t2","Instance Methods"],4:["t3","Abstract 
      Methods"],16:["t5","Default Methods"],32:["t6","Deprecated Methods"]};
       var altColor = "altColor";
       var rowColor = "rowColor";
@@ -150,17 +150,17 @@ public interface

 Set<byte[]>
 getColumnFamilyNames()
 Returns all the column family names of the current table.

 static Comparator<TableDescriptor>
 getComparator(Comparator<ColumnFamilyDescriptor> cfComparator)

 Collection<String>
 getCoprocessors()
 Return the list of attached co-processor represented by their name className

@@ -173,7 +173,7 @@ public interface

 String
 getFlushPolicyClassName()
 This gets the class associated with the flush policy which determines the
 stores need to be flushed when flushing a region.

@@ -194,7 +194,7 @@ public interface

 String
 getOwnerString()
 Deprecated.

@@ -208,7 +208,7 @@ public interface getRegionReplication()

 String
 getRegionSplitPolicyClassName()
 This gets the class associated with the region split policy which
 determines when a region split should occur.

@@ -233,13 +233,13 @@ public interface

 String
 getValue(String key)
 Getter for accessing the metadata associated with the key.

 Map

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
      index 7edb3ff..665071c 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -1221,2378 +1221,2377 @@
    configurationManager.registerObserver(procEnv);

    int cpus = Runtime.getRuntime().availableProcessors();
    final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
        Math.max((cpus > 0 ? cpus / 4 : 0),
            MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
    final boolean abortOnCorruption = conf.getBoolean(
        MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
        MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
    procedureStore.start(numThreads);
    procedureExecutor.start(numThreads, abortOnCorruption);
    procEnv.getRemoteDispatcher().start();
  }

  private void stopProcedureExecutor() {
    if (procedureExecutor != null) {
      configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
      procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
      procedureExecutor.stop();
      procedureExecutor.join();
      procedureExecutor = null;
    }

    if (procedureStore != null) {
      procedureStore.stop(isAborted());
      procedureStore = null;
    }
  }

  private void stopChores() {
    if (this.expiredMobFileCleanerChore != null) {
      this.expiredMobFileCleanerChore.cancel(true);
    }
    if (this.mobCompactChore != null) {
      this.mobCompactChore.cancel(true);
    }
    if (this.balancerChore != null) {
      this.balancerChore.cancel(true);
    }
    if (this.normalizerChore != null) {
      this.normalizerChore.cancel(true);
    }
    if (this.clusterStatusChore != null) {
      this.clusterStatusChore.cancel(true);
    }
    if (this.catalogJanitorChore != null) {
      this.catalogJanitorChore.cancel(true);
    }
    if (this.clusterStatusPublisherChore != null) {
      clusterStatusPublisherChore.cancel(true);
    }
    if (this.mobCompactThread != null) {
      this.mobCompactThread.close();
    }

    if (this.quotaObserverChore != null) {
      quotaObserverChore.cancel();
    }
    if (this.snapshotQuotaChore != null) {
      snapshotQuotaChore.cancel();
    }
  }

  /**
   * @return Get remote side's InetAddress
   */
  InetAddress getRemoteInetAddress(final int port,
      final long serverStartCode) throws UnknownHostException {
    // Do it out here in its own little method so can fake an address when
    // mocking up in tests.
    InetAddress ia = RpcServer.getRemoteIp();

    // The call could be from the local regionserver,
    // in which case, there is no remote address.
    if (ia == null && serverStartCode == startcode) {
      InetSocketAddress isa = rpcServices.getSocketAddress();
      if (isa != null && isa.getPort() == port) {
        ia = isa.getAddress();
      }
    }
    return ia;
  }

  /**
   * @return Maximum time we should run balancer for
   */
  private int getMaxBalancingTime() {
    int maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, -1);
    if (maxBalancingTime == -1) {
      // if max balancing time isn't set, defaulting it to period time
      maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_PERIOD,
          HConstants.DEFAULT_HBASE_BALANCER_PERIOD);
    }
    return maxBalancingTime;
  }

  /**
   * @return Maximum number of regions in transition
   */
  private int getMaxRegionsInTransition() {
    int numRegions = this.assignmentManager.getRegionStates().getRegionAssignments().size();
    return Math.max((int) Math.floor(numRegions * this.maxRitPercent), 1);
  }

  /**
   * It first sleeps until the next balance plan start time. Meanwhile, it throttles by the max
   * number of regions in transition to protect availability.
   * @param nextBalanceStartTime The next balance plan start time
   * @param maxRegionsInTransition max number of
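The two getters above drive balancer throttling: the run budget falls back to the balancer period when the max-balancing setting is unset (`-1`), and the regions-in-transition cap is a percentage of the current region count floored at 1. A small worked sketch of that arithmetic, with the constant names inlined and the sample numbers chosen only for illustration:

public class BalancerThrottleSketch {
  /** Mirrors getMaxBalancingTime(): -1 means "not set", fall back to the period. */
  static int maxBalancingTime(int configuredMax, int balancerPeriod) {
    return configuredMax == -1 ? balancerPeriod : configuredMax;
  }

  /** Mirrors getMaxRegionsInTransition(): a percentage of all regions, never below 1. */
  static int maxRegionsInTransition(int numRegions, double maxRitPercent) {
    return Math.max((int) Math.floor(numRegions * maxRitPercent), 1);
  }

  public static void main(String[] args) {
    // With 1200 regions and a 1% RIT cap, at most 12 regions move at once.
    System.out.println(maxRegionsInTransition(1200, 0.01)); // 12
    // With 10 regions the floor kicks in and 1 move is still allowed.
    System.out.println(maxRegionsInTransition(10, 0.01));   // 1
    // No explicit max balancing time: reuse the period (300000 ms here).
    System.out.println(maxBalancingTime(-1, 300_000));      // 300000
  }
}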

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
      index 802b925..a3e80ab 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -73,229 +73,229 @@
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.CompoundConfiguration;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.FilterWrapper;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcCall;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/hbase-build-configuration/hbase-archetypes/source-repository.html
      --
      diff --git a/hbase-build-configuration/hbase-archetypes/source-repository.html 
      b/hbase-build-configuration/hbase-archetypes/source-repository.html
      index c67c3a7..53230a9 100644
      --- a/hbase-build-configuration/hbase-archetypes/source-repository.html
      +++ b/hbase-build-configuration/hbase-archetypes/source-repository.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase - Archetypes  Source Code Management
       
      @@ -134,7 +134,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2018-02-16
      +  Last Published: 
      2018-02-17
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/hbase-build-configuration/hbase-archetypes/team-list.html
      --
      diff --git a/hbase-build-configuration/hbase-archetypes/team-list.html 
      b/hbase-build-configuration/hbase-archetypes/team-list.html
      index 54c6d81..19b0e10 100644
      --- a/hbase-build-configuration/hbase-archetypes/team-list.html
      +++ b/hbase-build-configuration/hbase-archetypes/team-list.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase - Archetypes  Project Team
       
      @@ -553,7 +553,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2018-02-16
      +  Last Published: 
      2018-02-17
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/hbase-build-configuration/hbase-spark/checkstyle.html
      --
      diff --git a/hbase-build-configuration/hbase-spark/checkstyle.html 
      b/hbase-build-configuration/hbase-spark/checkstyle.html
      index 67be5d9..ac5579a 100644
      --- a/hbase-build-configuration/hbase-spark/checkstyle.html
      +++ b/hbase-build-configuration/hbase-spark/checkstyle.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase - Spark  Checkstyle Results
       
      @@ -150,7 +150,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2018-02-16
      +  Last Published: 
      2018-02-17
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/hbase-build-configuration/hbase-spark/dependencies.html
      --
      diff --git a/hbase-build-configuration/hbase-spark/dependencies.html 
      b/hbase-build-configuration/hbase-spark/dependencies.html
      index ac824e2..7c328de 100644
      --- a/hbase-build-configuration/hbase-spark/dependencies.html
      +++ b/hbase-build-configuration/hbase-spark/dependencies.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase - Spark  Project Dependencies
       
      @@ -5692,7 +5692,7 @@ file comparators, endian transformation classes, and much 
      more.
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2018-02-16
      +  Last Published: 
      2018-02-17
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/hbase-build-configuration/hbase-spark/dependency-convergence.html
      --
      diff --git a/hbase-build-configuration/hbase-spark/dependency-convergence.html 
      b/hbase-build-configuration/hbase-spark/dependency-convergence.html
      index 058089f..c60df6d 100644
      --- a/hbase-build-configuration/hbase-spark/dependency-convergence.html
      +++ b/hbase-build-configuration/hbase-spark/dependency-convergence.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase - Spark  Reactor Dependency Convergence
       
      @@ -865,7 +865,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2018-02-16
      +  Last Published: 
      2018-02-17
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/hbase-build-configuration/hbase-spark/dependency-info.html
      --
      diff --git a/hbase-build-configuration/hbase-spark/dependency-info.html 
      b/hbase-build-configuration/hbase-spark/dependency-info.html
      index 753e1b8..b045d7a 100644
      --- a/hbase-build-configuration/hbase-spark/dependency-info.html
      +++ 

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html 
      b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
      index 9856943..36977cd 100644
      --- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
      +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
      @@ -257,31 +257,23 @@
       
       
       private HRegion
      -MemStoreFlusher.FlushRegionEntry.region
      +MetricsRegionWrapperImpl.region
       
       
       private HRegion
      -RegionServerServices.PostOpenDeployContext.region
      +RegionServicesForStores.region
       
       
       private HRegion
       CompactSplit.CompactionRunner.region
       
       
      -protected HRegion
      -FlushPolicy.region
      -The region configured for this flush policy.
      +(package private) HRegion
      +RegionCoprocessorHost.region
      +The region
       
       
       
      -private HRegion
      -RegionServicesForStores.region
      -
      -
      -private HRegion
      -MetricsRegionWrapperImpl.region
      -
      -
       protected HRegion
       RegionSplitPolicy.region
       The region configured for this split policy.
      @@ -296,19 +288,27 @@
       HRegion.RegionScannerImpl.region
       
       
      -(package private) HRegion
      -RegionCoprocessorHost.region
      -The region
      -
      +private HRegion
      +RegionServerServices.PostOpenDeployContext.region
       
       
      -protected HRegion
      -HStore.region
      +private HRegion
      +MemStoreFlusher.FlushRegionEntry.region
       
       
      +protected HRegion
      +FlushPolicy.region
      +The region configured for this flush policy.
      +
      +
      +
       private HRegion
       BusyRegionSplitPolicy.region
       
      +
      +protected HRegion
      +HStore.region
      +
       
       
       
@@ -563,14 +563,14 @@

 void
-HRegionServer.addRegion(HRegion region)
-
-
-void
 MutableOnlineRegions.addRegion(HRegion r)
 Add to online regions.

+
+void
+HRegionServer.addRegion(HRegion region)
+

 private RSRpcServices.RegionScannerHolder
 RSRpcServices.addScanner(String scannerName,
@@ -624,57 +624,57 @@

 protected void
-FlushPolicy.configureForRegion(HRegion region)
-Upon construction, this method will be called with the region to be governed.
-
+KeyPrefixRegionSplitPolicy.configureForRegion(HRegion region)

 protected void
-FlushAllLargeStoresPolicy.configureForRegion(HRegion region)
+RegionSplitPolicy.configureForRegion(HRegion region)
+Upon construction, this method will be called with the region
+ to be governed.
+

 protected void
-ConstantSizeRegionSplitPolicy.configureForRegion(HRegion region)
+DelimitedKeyPrefixRegionSplitPolicy.configureForRegion(HRegion region)

 protected void
-RegionSplitPolicy.configureForRegion(HRegion region)
-Upon construction, this method will be called with the region
- to be governed.
-
+IncreasingToUpperBoundRegionSplitPolicy.configureForRegion(HRegion region)

 protected void
-FlushNonSloppyStoresFirstPolicy.configureForRegion(HRegion region)
+FlushAllLargeStoresPolicy.configureForRegion(HRegion region)

 protected void
-DelimitedKeyPrefixRegionSplitPolicy.configureForRegion(HRegion region)
+FlushPolicy.configureForRegion(HRegion region)
+Upon construction, this method will be called with the region to be governed.
+

 protected void
-KeyPrefixRegionSplitPolicy.configureForRegion(HRegion region)
+ConstantSizeRegionSplitPolicy.configureForRegion(HRegion region)

 protected void
-IncreasingToUpperBoundRegionSplitPolicy.configureForRegion(HRegion region)
+FlushNonSloppyStoresFirstPolicy.configureForRegion(HRegion region)

 protected void
 BusyRegionSplitPolicy.configureForRegion(HRegion region)

-static RegionSplitPolicy
-RegionSplitPolicy.create(HRegion region,
+static FlushPolicy
+FlushPolicyFactory.create(HRegion region,
    org.apache.hadoop.conf.Configuration conf)
-Create the RegionSplitPolicy configured for the given table.
+Create the FlushPolicy configured for the given table.

-static FlushPolicy
-FlushPolicyFactory.create(HRegion region,
+static RegionSplitPolicy
+RegionSplitPolicy.create(HRegion region,
    org.apache.hadoop.conf.Configuration conf)
-Create the FlushPolicy configured for the given table.
+Create the RegionSplitPolicy configured for the given table.

@@ -766,13 +766,13 @@

 protected void
-ReversedRegionScannerImpl.initializeKVHeap(List<KeyValueScanner> scanners,
+HRegion.RegionScannerImpl.initializeKVHeap(List<KeyValueScanner> scanners,
    List<KeyValueScanner> joinedScanners,
    HRegion region)

 protected void
      
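The `configureForRegion`/`create` rows above describe the plug-in contract for split and flush policies: a factory instantiates the configured class, then hands it the region to govern. A hedged sketch of a custom split policy under that contract; everything beyond `RegionSplitPolicy`, its protected `region` field, and the two overridden methods (all listed in the table above) is an assumption for illustration:

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;

/**
 * Illustrative only: split once any store grows past a fixed threshold.
 * Real policies (ConstantSize..., IncreasingToUpperBound..., KeyPrefix...)
 * follow the same configureForRegion/shouldSplit contract.
 */
public class FixedThresholdSplitPolicy extends RegionSplitPolicy {
  private long threshold;

  @Override
  protected void configureForRegion(HRegion region) {
    super.configureForRegion(region); // remembers the governed region
    this.threshold = 10L * 1024 * 1024 * 1024; // 10 GB, picked for the demo
  }

  @Override
  protected boolean shouldSplit() {
    // 'region' is the protected field the class-use table above lists for
    // RegionSplitPolicy. The size accessors are version-dependent;
    // getStores()/getStorefilesSize() here are assumptions, not a pinned API.
    return region.getStores().stream()
        .anyMatch(s -> s.getStorefilesSize() > threshold);
  }
}

A policy like this would typically be wired in through the table's or cluster's split-policy class setting, mirroring the `create(HRegion, Configuration)` factory rows above.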

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html 
      b/devapidocs/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
      index fe02ff7..8e50bd1 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
@@ -417,7 +417,7 @@ extends

executeFromState
protected StateMachineProcedure.Flow executeFromState(MasterProcedureEnv env,
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState state)
    throws InterruptedException
Description copied from class: StateMachineProcedure

@@ -440,7 +440,7 @@ extends

rollbackState
protected void rollbackState(MasterProcedureEnv env,
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState state)
    throws IOException
Description copied from class: StateMachineProcedure

@@ -460,7 +460,7 @@ extends

abort
public boolean abort(MasterProcedureEnv env)
Description copied from class: Procedure
The abort() call is asynchronous and each procedure must decide how to deal
with it, if they want to be abortable. The simplest implementation

@@ -483,7 +483,7 @@ extends

toStringClassDetails
public void toStringClassDetails(StringBuilder sb)
Description copied from class: Procedure
Extend the toString() information with the procedure details
e.g. className and parameters

@@ -501,7 +501,7 @@ extends

getInitialState
protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState getInitialState()
Description copied from class: StateMachineProcedure
Return the initial state object that will be used for the first call to executeFromState().

@@ -518,7 +518,7 @@ extends

getStateId
protected int getStateId(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState state)
Description copied from class: StateMachineProcedure
Convert the Enum (or more descriptive) state object to an ordinal (or state id).

@@ -537,7 +537,7 @@ extends

getState
protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState getState(int stateId)
Description copied from class: StateMachineProcedure
Convert an ordinal (or state id) to an Enum (or more descriptive) state object.

@@ -556,7 +556,7 @@ extends

getTableName
public TableName getTableName()
Specified by:
getTableName in interface TableProcedureInterface

@@ -573,7 +573,7 @@ extends

getTableOperationType
public TableProcedureInterface.TableOperationType getTableOperationType()
Description copied from interface: TableProcedureInterface
Given an operation type we can take decisions about what to do with pending operations.
e.g. if we get a delete and we have some table operation pending (e.g. add column)

@@ -594,7 +594,7 @@ extends

serializeStateData
protected void serializeStateData(ProcedureStateSerializer serializer)
    throws IOException
Description copied from class: Procedure
The user-level code of the procedure may have some state to

@@ -616,7 +616,7 @@ extends

deserializeStateData

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
       
      b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
      index 01a50f5..733e376 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
      @@ -120,19 +120,19 @@
       
       
       protected void
      -MoveRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +UnassignProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -GCMergedRegionsProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +MoveRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -AssignProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +GCRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -GCRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +GCMergedRegionsProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      @@ -144,23 +144,23 @@
       
       
       protected void
      -UnassignProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +AssignProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -MoveRegionProcedure.serializeStateData(ProcedureStateSerializerserializer)
      +UnassignProcedure.serializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -GCMergedRegionsProcedure.serializeStateData(ProcedureStateSerializerserializer)
      +MoveRegionProcedure.serializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -AssignProcedure.serializeStateData(ProcedureStateSerializerserializer)
      +GCRegionProcedure.serializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -GCRegionProcedure.serializeStateData(ProcedureStateSerializerserializer)
      +GCMergedRegionsProcedure.serializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      @@ -172,7 +172,7 @@
       
       
       protected void
      -UnassignProcedure.serializeStateData(ProcedureStateSerializerserializer)
      +AssignProcedure.serializeStateData(ProcedureStateSerializerserializer)
       
       
       
      @@ -212,115 +212,115 @@
       
       
       protected void
      -CloneSnapshotProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +DeleteTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -DeleteTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +DisableTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -TruncateTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +DeleteNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -DeleteNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +CreateNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -ServerCrashProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +EnableTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -DisableTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +CreateTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -RecoverMetaProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +ModifyNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -CreateNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +AbstractStateMachineRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -AbstractStateMachineRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +CloneSnapshotProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -EnableTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +ServerCrashProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -CreateTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +ModifyTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -ModifyNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +RecoverMetaProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -ModifyTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      +RestoreSnapshotProcedure.deserializeStateData(ProcedureStateSerializerserializer)
       
       
       protected void
      -RestoreSnapshotProcedure.deserializeStateData(ProcedureStateSerializerserializer)
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html 
      b/devapidocs/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
      index 816baaa..d47b71e 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
@@ -295,7 +295,7 @@ extends java.lang.Object

processDeadServers
public void processDeadServers()

@@ -304,7 +304,7 @@ extends java.lang.Object

assignMetaReplicas
protected void assignMetaReplicas()
    throws IOException,
        InterruptedException,
        org.apache.zookeeper.KeeperException

@@ -325,7 +325,7 @@ extends java.lang.Object

unassignExcessMetaReplica
private void unassignExcessMetaReplica(int numMetaReplicasConfigured)

@@ -334,7 +334,7 @@ extends java.lang.Object

enableCrashedServerProcessing
private void enableCrashedServerProcessing(boolean waitForMeta)
    throws InterruptedException

Throws:
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.html 
      b/devapidocs/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.html
      index b520d91..85c5e17 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.html
      @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
       
       
       PrevClass
      -NextClass
      +NextClass
       
       
       Frames
      @@ -414,7 +414,7 @@ implements 
       
       PrevClass
      -NextClass
      +NextClass
       
       
       Frames
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/master/MirroringTableStateManager.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/MirroringTableStateManager.html 
      b/devapidocs/org/apache/hadoop/hbase/master/MirroringTableStateManager.html
      new file mode 100644
      index 000..f657bc4
      --- /dev/null
      +++ b/devapidocs/org/apache/hadoop/hbase/master/MirroringTableStateManager.html
      @@ -0,0 +1,465 @@
      +http://www.w3.org/TR/html4/loose.dtd;>
      +
      +
      +
      +
      +
      +MirroringTableStateManager (Apache HBase 3.0.0-SNAPSHOT API)
      +
      +
      +
      +
      +
      +var methods = {"i0":42,"i1":42,"i2":42,"i3":42};
      +var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
      Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
      +var altColor = "altColor";
      +var rowColor = "rowColor";
      +var tableTab = "tableTab";
      +var activeTableTab = "activeTableTab";
      +
      +org.apache.hadoop.hbase.master
+Class MirroringTableStateManager
      +
      +
      +
+java.lang.Object
+  org.apache.hadoop.hbase.master.TableStateManager
+    org.apache.hadoop.hbase.master.MirroringTableStateManager
      +
      +
      +
      +
      +
      +
+Deprecated. Since 2.0.0. To be removed in 3.0.0.
      +
      +
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
       
      b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      index 3d03e17..ed15d9b 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      @@ -248,7 +248,7 @@ the order they are declared.
       
       
       values
-public static WALProcedureStore.PushType[] values()
+public static WALProcedureStore.PushType[] values()
       Returns an array containing the constants of this enum 
      type, in
       the order they are declared.  This method may be used to iterate
       over the constants as follows:
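The iteration the sentence above refers to is the standard enum idiom, completed here for clarity:

    for (WALProcedureStore.PushType c : WALProcedureStore.PushType.values()) {
        System.out.println(c);
    }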
      @@ -268,7 +268,7 @@ for (WALProcedureStore.PushType c : 
      WALProcedureStore.PushType.values())
       
       
       valueOf
-public static WALProcedureStore.PushType valueOf(String name)
+public static WALProcedureStore.PushType valueOf(String name)
       Returns the enum constant of this type with the specified 
      name.
       The string must match exactly an identifier used to declare an
       enum constant in this type.  (Extraneous whitespace characters are 
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
       
      b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      index 5bd2115..c6f6a46 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      @@ -141,11 +141,11 @@
       
       
 private DelayQueue<DelayedUtil.DelayedWithTimeout>
-RemoteProcedureDispatcher.TimeoutExecutorThread.queue
+ProcedureExecutor.TimeoutExecutorThread.queue
 
 
 private DelayQueue<DelayedUtil.DelayedWithTimeout>
-ProcedureExecutor.TimeoutExecutorThread.queue
+RemoteProcedureDispatcher.TimeoutExecutorThread.queue
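Both fields follow the standard java.util.concurrent.DelayQueue pattern: a timeout thread blocks on take() until the earliest deadline expires. A generic, self-contained sketch of that pattern (the TimeoutTask type is illustrative, not HBase's DelayedUtil):

    import java.util.concurrent.DelayQueue;
    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    class TimeoutTask implements Delayed {
      private final long deadlineNanos;

      TimeoutTask(long delayMillis) {
        this.deadlineNanos = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(delayMillis);
      }

      @Override
      public long getDelay(TimeUnit unit) {
        return unit.convert(deadlineNanos - System.nanoTime(), TimeUnit.NANOSECONDS);
      }

      @Override
      public int compareTo(Delayed other) {
        return Long.compare(getDelay(TimeUnit.NANOSECONDS), other.getDelay(TimeUnit.NANOSECONDS));
      }
    }

    // The consuming thread wakes only when a deadline has passed:
    DelayQueue<TimeoutTask> queue = new DelayQueue<>();
    queue.add(new TimeoutTask(500));
    TimeoutTask expired = queue.take(); // returns after roughly 500 ms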
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html 
      b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      index dd6045b..934c2fa 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      @@ -125,11 +125,11 @@
       
       
       MasterQuotaManager
      -HMaster.getMasterQuotaManager()
      +MasterServices.getMasterQuotaManager()
       
       
       MasterQuotaManager
      -MasterServices.getMasterQuotaManager()
      +HMaster.getMasterQuotaManager()
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html 
      b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      index d81fa5e..a495cd1 100644
      --- a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      +++ b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      @@ -110,7 +110,9 @@
       
       
 CompletableFuture<List<QuotaSettings>>
-AsyncHBaseAdmin.getQuota(QuotaFilter filter)
+AsyncAdmin.getQuota(QuotaFilter filter)
+List the quotas based on the filter.
+
       
       
 List<QuotaSettings>
      @@ -119,18 +121,16 @@
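For reference, a sketch of the asynchronous call documented in the first row above, assuming an AsyncAdmin handle named admin; the user filter value is illustrative:

    CompletableFuture<List<QuotaSettings>> quotas =
        admin.getQuota(new QuotaFilter().setUserFilter("bob"));
    quotas.thenAccept(list -> list.forEach(System.out::println));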
       
       
       
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TaskBatch.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TaskBatch.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TaskBatch.html
      index 2939a56..681e263 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TaskBatch.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TaskBatch.html
      @@ -61,602 +61,608 @@
 053import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 054import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 055import org.apache.hadoop.hbase.util.FSUtils;
-056import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-057import org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-061
-062/**
-063 * Distributes the task of log splitting to the available region servers.
-064 * Coordination happens via coordination engine. For every log file that has to be split a
-065 * task is created. SplitLogWorkers race to grab a task.
-066 *
-067 * <p>SplitLogManager monitors the tasks that it creates using the
-068 * timeoutMonitor thread. If a task's progress is slow then
-069 * {@link SplitLogManagerCoordination#checkTasks} will take away the
-070 * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}
-071 * and the task will be up for grabs again. When the task is done then it is
-072 * deleted by SplitLogManager.
-073 *
-074 * <p>Clients call {@link #splitLogDistributed(Path)} to split a region server's
-075 * log files. The caller thread waits in this method until all the log files
-076 * have been split.
-077 *
-078 * <p>All the coordination calls made by this class are asynchronous. This is mainly
-079 * to help reduce response time seen by the callers.
-080 *
-081 * <p>There is race in this design between the SplitLogManager and the
-082 * SplitLogWorker. SplitLogManager might re-queue a task that has in reality
-083 * already been completed by a SplitLogWorker. We rely on the idempotency of
-084 * the log splitting task for correctness.
-085 *
-086 * <p>It is also assumed that every log splitting task is unique and once
-087 * completed (either with success or with error) it will be not be submitted
-088 * again. If a task is resubmitted then there is a risk that old "delete task"
-089 * can delete the re-submission.
-090 */
-091@InterfaceAudience.Private
-092public class SplitLogManager {
-093  private static final Logger LOG = LoggerFactory.getLogger(SplitLogManager.class);
-094
-095  private final MasterServices server;
-096
-097  private final Configuration conf;
-098  private final ChoreService choreService;
-099
-100  public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min
-101
-102  private long unassignedTimeout;
-103  private long lastTaskCreateTime = Long.MAX_VALUE;
-104
-105  @VisibleForTesting
-106  final ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<>();
-107  private TimeoutMonitor timeoutMonitor;
-108
-109  private volatile Set<ServerName> deadWorkers = null;
-110  private final Object deadWorkersLock = new Object();
-111
-112  /**
-113   * Its OK to construct this object even when region-servers are not online. It does lookup the
-114   * orphan tasks in coordination engine but it doesn't block waiting for them to be done.
-115   * @param master the master services
-116   * @param conf the HBase configuration
-117   * @throws IOException
-118   */
-119  public SplitLogManager(MasterServices master, Configuration conf)
-120      throws IOException {
-121    this.server = master;
-122    this.conf = conf;
-123    this.choreService = new ChoreService(master.getServerName() + "_splitLogManager_");
-124    if (server.getCoordinatedStateManager() != null) {
-125      SplitLogManagerCoordination coordination = getSplitLogManagerCoordination();
-126      Set<String> failedDeletions = Collections.synchronizedSet(new HashSet<String>());
-127      SplitLogManagerDetails details = new SplitLogManagerDetails(tasks, master, failedDeletions);
-128      coordination.setDetails(details);
-129      coordination.init();
-130    }
-131    this.unassignedTimeout =
-132        conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT);
-133    this.timeoutMonitor =
-134        new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000),
-135            master);
-136    choreService.scheduleChore(timeoutMonitor);
-137  }
-138
-139  private SplitLogManagerCoordination getSplitLogManagerCoordination() {
-140    return server.getCoordinatedStateManager().getSplitLogManagerCoordination();
-141  }
-142
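The class comment centers on splitLogDistributed(Path): the caller blocks until every WAL under the given directory has been split. A minimal sketch of that entry point, assuming master-side code that already holds a MasterServices instance and a Configuration (the directory name is hypothetical):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.master.SplitLogManager;

    SplitLogManager slm = new SplitLogManager(masterServices, conf);
    // Blocks until all WAL files under the dead server's -splitting directory are split.
    long bytesOfLogSplit = slm.splitLogDistributed(
        new Path("/hbase/WALs/rs1.example.com,16020,1514764800000-splitting"));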
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CustomizedScanInfoBuilder.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CustomizedScanInfoBuilder.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CustomizedScanInfoBuilder.html
      index 0bbc1ee..67151bd 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CustomizedScanInfoBuilder.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CustomizedScanInfoBuilder.html
      @@ -25,56 +25,70 @@
 017 */
 018package org.apache.hadoop.hbase.regionserver;
 019
-020import org.apache.yetus.audience.InterfaceAudience;
-021
-022/**
-023 * Helper class for CP hooks to change max versions and TTL.
-024 */
-025@InterfaceAudience.Private
-026public class CustomizedScanInfoBuilder implements ScanOptions {
-027
-028  private final ScanInfo scanInfo;
-029
-030  private Integer maxVersions;
-031
-032  private Long ttl;
-033
-034  public CustomizedScanInfoBuilder(ScanInfo scanInfo) {
-035    this.scanInfo = scanInfo;
-036  }
-037
-038  @Override
-039  public int getMaxVersions() {
-040    return maxVersions != null ? maxVersions.intValue() : scanInfo.getMaxVersions();
-041  }
-042
-043  @Override
-044  public void setMaxVersions(int maxVersions) {
-045    this.maxVersions = maxVersions;
-046  }
-047
-048  @Override
-049  public long getTTL() {
-050    return ttl != null ? ttl.longValue() : scanInfo.getTtl();
-051  }
-052
-053  @Override
-054  public void setTTL(long ttl) {
-055    this.ttl = ttl;
-056  }
-057
-058  public ScanInfo build() {
-059    if (maxVersions == null && ttl == null) {
-060      return scanInfo;
-061    }
-062    return scanInfo.customize(getMaxVersions(), getTTL());
-063  }
-064
-065  @Override
-066  public String toString() {
-067    return "ScanOptions [maxVersions=" + getMaxVersions() + ", TTL=" + getTTL() + "]";
-068  }
-069}
+020import org.apache.hadoop.hbase.KeepDeletedCells;
+021import org.apache.yetus.audience.InterfaceAudience;
+022
+023/**
+024 * Helper class for CP hooks to change max versions and TTL.
+025 */
+026@InterfaceAudience.Private
+027public class CustomizedScanInfoBuilder implements ScanOptions {
+028
+029  private final ScanInfo scanInfo;
+030
+031  private Integer maxVersions;
+032
+033  private Long ttl;
+034
+035  private KeepDeletedCells keepDeletedCells = null;
+036
+037  public CustomizedScanInfoBuilder(ScanInfo scanInfo) {
+038    this.scanInfo = scanInfo;
+039  }
+040
+041  @Override
+042  public int getMaxVersions() {
+043    return maxVersions != null ? maxVersions.intValue() : scanInfo.getMaxVersions();
+044  }
+045
+046  @Override
+047  public void setMaxVersions(int maxVersions) {
+048    this.maxVersions = maxVersions;
+049  }
+050
+051  @Override
+052  public long getTTL() {
+053    return ttl != null ? ttl.longValue() : scanInfo.getTtl();
+054  }
+055
+056  @Override
+057  public void setTTL(long ttl) {
+058    this.ttl = ttl;
+059  }
+060
+061  public ScanInfo build() {
+062    if (maxVersions == null && ttl == null && keepDeletedCells == null) {
+063      return scanInfo;
+064    }
+065    return scanInfo.customize(getMaxVersions(), getTTL(), getKeepDeletedCells());
+066  }
+067
+068  @Override
+069  public String toString() {
+070    return "ScanOptions [maxVersions=" + getMaxVersions() + ", TTL=" + getTTL() + "]";
+071  }
+072
+073  @Override
+074  public void setKeepDeletedCells(KeepDeletedCells keepDeletedCells) {
+075    this.keepDeletedCells = keepDeletedCells;
+076  }
+077
+078  @Override
+079  public KeepDeletedCells getKeepDeletedCells() {
+080    return keepDeletedCells != null ? keepDeletedCells : scanInfo.getKeepDeletedCells();
+081  }
+082
+083}
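Since CustomizedScanInfoBuilder exists so coprocessor hooks can override scan-time settings, here is a hedged sketch of a RegionObserver using the ScanOptions interface it implements; preFlushScannerOpen is one of the hooks that receives ScanOptions, and the override values are illustrative:

    import org.apache.hadoop.hbase.KeepDeletedCells;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;
    import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
    import org.apache.hadoop.hbase.regionserver.ScanOptions;
    import org.apache.hadoop.hbase.regionserver.Store;

    public class KeepEverythingObserver implements RegionObserver {
      @Override
      public void preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
          Store store, ScanOptions options, FlushLifeCycleTracker tracker) {
        // Retain all versions and deleted cells for this flush.
        options.setMaxVersions(Integer.MAX_VALUE);
        options.setTTL(Long.MAX_VALUE);
        options.setKeepDeletedCells(KeepDeletedCells.TRUE);
      }
    }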
       
       
       
      
      
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestFlushFromClient.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/client/TestFlushFromClient.html 
      b/testdevapidocs/org/apache/hadoop/hbase/client/TestFlushFromClient.html
      index 3394f53..b44aeb1 100644
      --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestFlushFromClient.html
      +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestFlushFromClient.html
@@ -134,30 +134,34 @@ extends Object
       asyncConn
       
       
      +static HBaseClassTestRule
      +CLASS_RULE
      +
      +
       private static byte[]
       FAMILY
       
      -
      +
       private static 
      org.apache.commons.logging.Log
       LOG
       
      -
      +
       org.junit.rules.TestName
       name
       
      -
      +
 private static List<byte[]>
 ROWS
       
      -
      +
       private static byte[][]
       SPLITS
       
      -
      +
       org.apache.hadoop.hbase.TableName
       tableName
       
      -
      +
       private static HBaseTestingUtility
       TEST_UTIL
       
@@ -263,13 +267,22 @@ extends Object
 
 
 Field Detail
+
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
      +
       
       
       
       
       
       LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
       
       
       
@@ -278,7 +291,7 @@ extends Object
 
 
 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL
       
       
       
@@ -287,7 +300,7 @@ extends Object
 
 
 asyncConn
-private static org.apache.hadoop.hbase.client.AsyncConnection asyncConn
+private static org.apache.hadoop.hbase.client.AsyncConnection asyncConn
       
       
       
@@ -296,7 +309,7 @@ extends Object
 
 
 SPLITS
-private static final byte[][] SPLITS
+private static final byte[][] SPLITS
       
       
       
@@ -305,7 +318,7 @@ extends Object
 
 
 ROWS
-private static final List<byte[]> ROWS
+private static final List<byte[]> ROWS
       
       
       
@@ -314,7 +327,7 @@ extends Object
 
 
 FAMILY
-private static final byte[] FAMILY
+private static final byte[] FAMILY
       
       
       
@@ -323,7 +336,7 @@ extends Object
 
 
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
       
       
       
@@ -332,7 +345,7 @@ extends Object
 
 
 tableName
-public org.apache.hadoop.hbase.TableName tableName
+public org.apache.hadoop.hbase.TableName tableName
       
       
       
@@ -366,7 +379,7 @@ extends Object
 
 
 setUpBeforeClass
-public static void setUpBeforeClass()
+public static void setUpBeforeClass()
                               throws Exception
       
       Throws:
@@ -380,7 +393,7 @@ extends Object
 
 
 tearDownAfterClass
-public static void tearDownAfterClass()
+public static void tearDownAfterClass()
                                throws Exception
       
       Throws:
@@ -394,7 +407,7 @@ extends Object
 
 
 setUp
-public void setUp()
+public void setUp()
            throws Exception
       
       Throws:
@@ -408,7 +421,7 @@ extends Object
 
 
 tearDown
-public void tearDown()
+public void tearDown()
               throws Exception
       
       Throws:
@@ -422,7 +435,7 @@ extends Object
 
 
 testFlushTable
-public void testFlushTable()
+public void testFlushTable()
             throws Exception
       
       Throws:
@@ -436,7 +449,7 @@ extends Object
 
 
 testAsyncFlushTable
-public void testAsyncFlushTable()
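The two tests above exercise the synchronous and asynchronous flush entry points; a sketch of what each client call looks like, assuming an Admin handle admin and an AsyncAdmin handle asyncAdmin (the table name is illustrative):

    import org.apache.hadoop.hbase.TableName;

    // Synchronous flush through Admin:
    admin.flush(TableName.valueOf("t1"));
    // Asynchronous flush through AsyncAdmin; returns a CompletableFuture<Void>:
    asyncAdmin.flush(TableName.valueOf("t1")).join();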
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/BackupUtils.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/BackupUtils.html 
      b/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/BackupUtils.html
      index 252efa5..c2de678 100644
      --- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/BackupUtils.html
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/BackupUtils.html
      @@ -92,667 +92,663 @@
 084   */
 085  public static HashMap<String, Long> getRSLogTimestampMins(
 086      HashMap<TableName, HashMap<String, Long>> rsLogTimestampMap) {
-087
-088    if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
-089      return null;
-090    }
-091
-092    HashMap<String, Long> rsLogTimestampMins = new HashMap<String, Long>();
-093    HashMap<String, HashMap<TableName, Long>> rsLogTimestampMapByRS =
-094        new HashMap<String, HashMap<TableName, Long>>();
-095
-096    for (Entry<TableName, HashMap<String, Long>> tableEntry : rsLogTimestampMap.entrySet()) {
-097      TableName table = tableEntry.getKey();
-098      HashMap<String, Long> rsLogTimestamp = tableEntry.getValue();
-099      for (Entry<String, Long> rsEntry : rsLogTimestamp.entrySet()) {
-100        String rs = rsEntry.getKey();
-101        Long ts = rsEntry.getValue();
-102        if (!rsLogTimestampMapByRS.containsKey(rs)) {
-103          rsLogTimestampMapByRS.put(rs, new HashMap<TableName, Long>());
+087    if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) {
+088      return null;
+089    }
+090
+091    HashMap<String, Long> rsLogTimestampMins = new HashMap<>();
+092    HashMap<String, HashMap<TableName, Long>> rsLogTimestampMapByRS = new HashMap<>();
+093
+094    for (Entry<TableName, HashMap<String, Long>> tableEntry : rsLogTimestampMap.entrySet()) {
+095      TableName table = tableEntry.getKey();
+096      HashMap<String, Long> rsLogTimestamp = tableEntry.getValue();
+097      for (Entry<String, Long> rsEntry : rsLogTimestamp.entrySet()) {
+098        String rs = rsEntry.getKey();
+099        Long ts = rsEntry.getValue();
+100        if (!rsLogTimestampMapByRS.containsKey(rs)) {
+101          rsLogTimestampMapByRS.put(rs, new HashMap<>());
+102          rsLogTimestampMapByRS.get(rs).put(table, ts);
+103        } else {
 104          rsLogTimestampMapByRS.get(rs).put(table, ts);
-105        } else {
-106          rsLogTimestampMapByRS.get(rs).put(table, ts);
-107        }
-108      }
-109    }
-110
-111    for (Entry<String, HashMap<TableName, Long>> entry : rsLogTimestampMapByRS.entrySet()) {
-112      String rs = entry.getKey();
-113      rsLogTimestampMins.put(rs, BackupUtils.getMinValue(entry.getValue()));
-114    }
-115
-116    return rsLogTimestampMins;
-117  }
-118
-119  /**
-120   * copy out Table RegionInfo into incremental backup image need to consider move this logic into
-121   * HBackupFileSystem
-122   * @param conn connection
-123   * @param backupInfo backup info
-124   * @param conf configuration
-125   * @throws IOException exception
-126   * @throws InterruptedException exception
-127   */
-128  public static void
-129      copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
-130          throws IOException, InterruptedException {
-131    Path rootDir = FSUtils.getRootDir(conf);
-132    FileSystem fs = rootDir.getFileSystem(conf);
-133
-134    // for each table in the table set, copy out the table info and region
-135    // info files in the correct directory structure
-136    for (TableName table : backupInfo.getTables()) {
-137
-138      if (!MetaTableAccessor.tableExists(conn, table)) {
-139        LOG.warn("Table " + table + " does not exists, skipping it.");
-140        continue;
-141      }
-142      TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
-143
-144      // write a copy of descriptor to the target directory
-145      Path target = new Path(backupInfo.getTableBackupDir(table));
-146      FileSystem targetFs = target.getFileSystem(conf);
-147      FSTableDescriptors descriptors =
-148          new FSTableDescriptors(conf, targetFs, FSUtils.getRootDir(conf));
-149      descriptors.createTableDescriptorForTableDirectory(target, orig, false);
-150      LOG.debug("Attempting to copy table info for:" + table + " target: " + target
-151          + " descriptor: " + orig);
-152      LOG.debug("Finished copying tableinfo.");
-153      List<RegionInfo> regions = null;
-154      regions = MetaTableAccessor.getTableRegions(conn, table);
-155      // For each region, write the region info to disk
-156      LOG.debug("Starting to write region info for table " + table);
-157      for (RegionInfo regionInfo : regions) {
-158        Path regionDir =
-159            HRegion.getRegionDir(new Path(backupInfo.getTableBackupDir(table)), regionInfo);
-160
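To make the aggregation above concrete, a small hedged usage sketch: the method collapses a per-table map of region-server timestamps into the minimum timestamp seen per region server (all values illustrative):

    HashMap<TableName, HashMap<String, Long>> byTable = new HashMap<>();
    HashMap<String, Long> t1 = new HashMap<>();
    t1.put("rs1", 100L);
    byTable.put(TableName.valueOf("t1"), t1);
    HashMap<String, Long> t2 = new HashMap<>();
    t2.put("rs1", 80L);
    byTable.put(TableName.valueOf("t2"), t2);
    // For rs1 the result keeps the minimum of 100 and 80, i.e. 80.
    HashMap<String, Long> mins = BackupUtils.getRSLogTimestampMins(byTable);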

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatHRegionServer.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatHRegionServer.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatHRegionServer.html
      index 281c243..1a84ee1 100644
      --- 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatHRegionServer.html
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatHRegionServer.html
      @@ -152,433 +152,461 @@
       144
       145  /**
 146   * Make puts to put the input value into each combination of row, family, and qualifier
-147   * @param rows
-148   * @param families
-149   * @param qualifiers
-150   * @param value
-151   * @return
-152   * @throws IOException
-153   */
-154  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
-155      byte[] value) throws IOException {
-156    Put put;
-157    ArrayList<Put> puts = new ArrayList<>();
-158
-159    for (int row = 0; row < rows.length; row++) {
-160      put = new Put(rows[row]);
-161      for (int fam = 0; fam < families.length; fam++) {
-162        for (int qual = 0; qual < qualifiers.length; qual++) {
-163          KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
-164          put.add(kv);
-165        }
-166      }
-167      puts.add(put);
-168    }
-169
-170    return puts;
-171  }
-172
-173  @AfterClass
-174  public static void tearDownAfterClass() throws Exception {
-175    TEST_UTIL.shutdownMiniCluster();
-176  }
-177
-178  @Before
-179  public void setupBeforeTest() throws Exception {
-180    disableSleeping();
-181  }
-182
-183  @After
-184  public void teardownAfterTest() throws Exception {
-185    disableSleeping();
-186  }
-187
-188  /**
-189   * Run the test callable when heartbeats are enabled/disabled. We expect all tests to only pass
-190   * when heartbeat messages are enabled (otherwise the test is pointless). When heartbeats are
-191   * disabled, the test should throw an exception.
-192   * @param testCallable
-193   * @throws InterruptedException
-194   */
-195  private void testImportanceOfHeartbeats(Callable<Void> testCallable) throws InterruptedException {
-196    HeartbeatRPCServices.heartbeatsEnabled = true;
-197
+147   */
+148  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
+149      byte[] value) throws IOException {
+150    Put put;
+151    ArrayList<Put> puts = new ArrayList<>();
+152
+153    for (int row = 0; row < rows.length; row++) {
+154      put = new Put(rows[row]);
+155      for (int fam = 0; fam < families.length; fam++) {
+156        for (int qual = 0; qual < qualifiers.length; qual++) {
+157          KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
+158          put.add(kv);
+159        }
+160      }
+161      puts.add(put);
+162    }
+163
+164    return puts;
+165  }
+166
+167  @AfterClass
+168  public static void tearDownAfterClass() throws Exception {
+169    TEST_UTIL.shutdownMiniCluster();
+170  }
+171
+172  @Before
+173  public void setupBeforeTest() throws Exception {
+174    disableSleeping();
+175  }
+176
+177  @After
+178  public void teardownAfterTest() throws Exception {
+179    disableSleeping();
+180  }
+181
+182  /**
+183   * Run the test callable when heartbeats are enabled/disabled. We expect all tests to only pass
+184   * when heartbeat messages are enabled (otherwise the test is pointless). When heartbeats are
+185   * disabled, the test should throw an exception.
+186   */
+187  private void testImportanceOfHeartbeats(Callable<Void> testCallable) throws InterruptedException {
+188    HeartbeatRPCServices.heartbeatsEnabled = true;
+189
+190    try {
+191      testCallable.call();
+192    } catch (Exception e) {
+193      fail("Heartbeat messages are enabled, exceptions should NOT be thrown. Exception trace:"
+194          + ExceptionUtils.getStackTrace(e));
+195    }
+196
+197    HeartbeatRPCServices.heartbeatsEnabled = false;
 198    try {
 199      testCallable.call();
 200    } catch (Exception e) {
-201      fail("Heartbeat messages are enabled, exceptions should NOT be thrown. Exception trace:"
-202          + ExceptionUtils.getStackTrace(e));
-203    }
-204
-205    HeartbeatRPCServices.heartbeatsEnabled = false;
-206    try {
-207      testCallable.call();
-208    } catch (Exception e) {
-209      return;
-210    } finally {
-211      HeartbeatRPCServices.heartbeatsEnabled = true;
-212    }
-213    fail("Heartbeats messages are disabled, an exception should be thrown. If an exception "
-214        + " is not thrown, the test case is not

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
      b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
      index 82c1efb..6796a10 100644
      --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
      +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
      @@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
       
       
       @InterfaceAudience.Private
      -public class HStore
      +public class HStore
 extends Object
       implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver
       A Store holds a column family in a Region.  Its a memstore 
      and a set of zero
      @@ -218,11 +218,11 @@ implements COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
       
       
-private long
+private AtomicLong
 compactedCellsCount
 
 
-private long
+private AtomicLong
 compactedCellsSize
       
       
      @@ -278,15 +278,15 @@ implements FIXED_OVERHEAD
       
       
-private long
+private AtomicLong
 flushedCellsCount
 
 
-private long
+private AtomicLong
 flushedCellsSize
 
 
-private long
+private AtomicLong
 flushedOutputFileSize
       
       
      @@ -316,11 +316,11 @@ implements LOG
       
       
-private long
+private AtomicLong
 majorCompactedCellsCount
 
 
-private long
+private AtomicLong
 majorCompactedCellsSize
       
       
      @@ -356,11 +356,11 @@ implements storeEngine
       
       
-private long
+private AtomicLong
 storeSize
 
 
-private long
+private AtomicLong
 totalUncompressedBytes
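The pattern in the hunks above is uniform: metric counters that were plain long fields become java.util.concurrent.atomic.AtomicLong so that concurrent flush and compaction threads can update them without locking. A minimal sketch of the idiom (names illustrative, not copied from HStore):

    import java.util.concurrent.atomic.AtomicLong;

    class CounterExample {
      // Safe for lock-free updates from multiple threads.
      private final AtomicLong flushedCellsCount = new AtomicLong();

      void onFlushCompleted(long cellsFlushed) {
        flushedCellsCount.addAndGet(cellsFlushed);
      }

      long getFlushedCellsCount() {
        return flushedCellsCount.get();
      }
    }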
       
       
      @@ -1226,7 +1226,7 @@ implements 
       
       MEMSTORE_CLASS_NAME
-public static final String MEMSTORE_CLASS_NAME
+public static final String MEMSTORE_CLASS_NAME
       
       See Also:
       Constant
       Field Values
      @@ -1239,7 +1239,7 @@ implements 
       
       COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
-public static final String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
+public static final String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
       
       See Also:
       Constant
       Field Values
      @@ -1252,7 +1252,7 @@ implements 
       
       BLOCKING_STOREFILES_KEY
-public static final String BLOCKING_STOREFILES_KEY
+public static final String BLOCKING_STOREFILES_KEY
       
       See Also:
       Constant
       Field Values
      @@ -1265,7 +1265,7 @@ implements 
       
       BLOCK_STORAGE_POLICY_KEY
-public static final String BLOCK_STORAGE_POLICY_KEY
+public static final String BLOCK_STORAGE_POLICY_KEY
       
       See Also:
       Constant
       Field Values
      @@ -1278,7 +1278,7 @@ implements 
       
       

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.NoopRsExecutor.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.NoopRsExecutor.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.NoopRsExecutor.html
      index f1db5ca..d8515d7 100644
      --- 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.NoopRsExecutor.html
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.NoopRsExecutor.html
      @@ -32,813 +32,820 @@
       024import static org.junit.Assert.fail;
       025
       026import java.io.IOException;
-027import java.net.SocketTimeoutException;
-028import java.util.NavigableMap;
-029import java.util.Random;
-030import java.util.Set;
-031import java.util.SortedSet;
-032import java.util.concurrent.ConcurrentSkipListMap;
-033import java.util.concurrent.ConcurrentSkipListSet;
-034import java.util.concurrent.ExecutionException;
-035import java.util.concurrent.Executors;
-036import java.util.concurrent.Future;
-037import java.util.concurrent.ScheduledExecutorService;
-038import java.util.concurrent.TimeUnit;
-039
-040import org.apache.hadoop.conf.Configuration;
-041import org.apache.hadoop.hbase.CategoryBasedTimeout;
-042import org.apache.hadoop.hbase.DoNotRetryIOException;
-043import org.apache.hadoop.hbase.HBaseTestingUtility;
-044import org.apache.hadoop.hbase.NotServingRegionException;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.TableName;
-047import org.apache.hadoop.hbase.client.RegionInfo;
-048import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-049import org.apache.hadoop.hbase.client.RetriesExhaustedException;
-050import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-051import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-052import org.apache.hadoop.hbase.master.MasterServices;
-053import org.apache.hadoop.hbase.master.RegionState.State;
-054import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-055import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-056import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
-057import org.apache.hadoop.hbase.procedure2.Procedure;
-058import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-059import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-060import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-061import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-062import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-063import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-064import org.apache.hadoop.hbase.testclassification.MasterTests;
-065import org.apache.hadoop.hbase.testclassification.MediumTests;
-066import org.apache.hadoop.hbase.util.Bytes;
-067import org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.ipc.RemoteException;
-069import org.junit.After;
-070import org.junit.Before;
-071import org.junit.Ignore;
-072import org.junit.Rule;
-073import org.junit.Test;
-074import org.junit.experimental.categories.Category;
-075import org.junit.rules.ExpectedException;
-076import org.junit.rules.TestName;
-077import org.junit.rules.TestRule;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-092
-093@Category({MasterTests.class, MediumTests.class})
-094public class TestAssignmentManager {
-095  private static final Logger LOG = LoggerFactory.getLogger(TestAssignmentManager.class);
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.html
      new file mode 100644
      index 000..22bbba1
      --- /dev/null
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIDeleteRpcTimeout.html
      @@ -0,0 +1,104 @@
+Source code
+
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 *     http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.client;
+019
+020import java.io.IOException;
+021import org.apache.hadoop.hbase.testclassification.ClientTests;
+022import org.apache.hadoop.hbase.testclassification.MediumTests;
+023import org.junit.experimental.categories.Category;
+024
+025@Category({ ClientTests.class, MediumTests.class })
+026public class TestCIDeleteRpcTimeout extends AbstractTestCIRpcTimeout {
+027
+028  @Override
+029  protected void execute(Table table) throws IOException {
+030    table.delete(new Delete(FAM_NAM));
+031  }
+032}
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.html
      new file mode 100644
      index 000..7f4d982
      --- /dev/null
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetOperationTimeout.html
      @@ -0,0 +1,104 @@
+Source code
+
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 *     http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.client;
+019
+020import java.io.IOException;
+021import org.apache.hadoop.hbase.testclassification.ClientTests;
+022import org.apache.hadoop.hbase.testclassification.LargeTests;
+023import org.junit.experimental.categories.Category;
+024
+025@Category({ ClientTests.class, LargeTests.class })
+026public class TestCIGetOperationTimeout extends AbstractTestCIOperationTimeout {
+027
+028  @Override
+029  protected void execute(Table table) throws IOException {
+030    table.get(new Get(FAM_NAM));
+031  }
+032}
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html
      new file mode 100644
      index 000..9c835ce
      --- /dev/null
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestCIGetRpcTimeout.html
      @@ -0,0 +1,104 @@
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html 
      b/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
      index 6066c36..84a95a3 100644
      --- 
      a/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
      +++ 
      b/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.IOTestWriter.html
      @@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -private static class IOTestProvider.IOTestWriter
      +private static class IOTestProvider.IOTestWriter
       extends org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
       Presumes init will be called by a single thread prior to 
      any access of other methods.
       
      @@ -272,7 +272,7 @@ extends 
      org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
       
       
       doAppends
-private boolean doAppends
+private boolean doAppends
       
       
       
      @@ -281,7 +281,7 @@ extends 
      org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
       
       
       doSyncs
-private boolean doSyncs
+private boolean doSyncs
       
       
       
      @@ -298,7 +298,7 @@ extends 
      org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
       
       
       IOTestWriter
-private IOTestWriter()
+private IOTestWriter()
       
       
       
      @@ -315,7 +315,7 @@ extends 
      org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
       
       
       init
-public void init(org.apache.hadoop.fs.FileSystem fs,
+public void init(org.apache.hadoop.fs.FileSystem fs,
                  org.apache.hadoop.fs.Path path,
                  org.apache.hadoop.conf.Configuration conf,
                  boolean overwritable)
      @@ -338,7 +338,7 @@ extends 
      org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
       
       
       getWriterClassName
-protected String getWriterClassName()
+protected String getWriterClassName()
       
       Overrides:
 getWriterClassName in class org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter
      @@ -351,7 +351,7 @@ extends 
      org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
       
       
       append
-public void append(org.apache.hadoop.hbase.wal.WAL.Entry entry)
+public void append(org.apache.hadoop.hbase.wal.WAL.Entry entry)
             throws IOException
       
       Specified by:
      @@ -369,7 +369,7 @@ extends 
      org.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
       
       
       sync
-public void sync()
+public void sync()
      throws IOException
       
       Specified by:
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.html
      --
      diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.html 
      b/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.html
      index 4d872c1..724f442 100644
      --- a/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.html
      +++ b/testdevapidocs/org/apache/hadoop/hbase/wal/IOTestProvider.html
      @@ -114,30 +114,29 @@ var activeTableTab = "activeTableTab";
       
       
       @InterfaceAudience.Private
-public class IOTestProvider
+public class IOTestProvider
 extends Object
 implements org.apache.hadoop.hbase.wal.WALProvider
-A WAL Provider that returns a single thread safe WAL that optionally can skip parts of our
- normal interactions with HDFS.
-
- This implementation picks a directory in HDFS based on the same mechanisms as the
- FSHLogProvider. Users can configure how much interaction
- we have with HDFS with the configuration property "hbase.wal.iotestprovider.operations".
- The value should be a comma separated list of allowed operations:
+A WAL Provider that returns a single thread safe WAL that optionally can skip parts of our normal
+ interactions with HDFS.
+
+ This implementation picks a directory in HDFS based on the same mechanisms as the
+ FSHLogProvider. Users can configure how much interaction we have with HDFS with the
+ configuration property "hbase.wal.iotestprovider.operations". The value should be a comma
+ separated list of allowed operations:
 
-   append   : edits will be written to the underlying filesystem
-   sync     : wal syncs will result in hflush calls
-   fileroll : roll requests will result in creating a new file on the underlying
-              filesystem.
+ append : edits will be written to the underlying filesystem
+ sync : wal
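A sketch of how the property described above would be set from client or test code; the property key is taken from the Javadoc, while selecting the provider by class name through the hbase.wal.provider key is an assumption about how WALFactory resolves providers:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    // Assumed wiring: point the WAL provider at IOTestProvider by class name.
    conf.set("hbase.wal.provider", "org.apache.hadoop.hbase.wal.IOTestProvider");
    // Allow only appends and syncs; file rolls become no-ops under this provider.
    conf.set("hbase.wal.iotestprovider.operations", "append,sync");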

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html 
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
      index 799d977..bcbef4c 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.html
      @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
       
       
       PrevClass
      -NextClass
      +NextClass
       
       
       Frames
      @@ -985,7 +985,7 @@ extends 
       
       PrevClass
      -NextClass
      +NextClass
       
       
       Frames
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.PeerOperationType.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.PeerOperationType.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.PeerOperationType.html
      new file mode 100644
      index 000..034eb1a
      --- /dev/null
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.PeerOperationType.html
      @@ -0,0 +1,394 @@
      +http://www.w3.org/TR/html4/loose.dtd;>
      +
      +
      +
      +
      +
      +PeerProcedureInterface.PeerOperationType (Apache HBase 3.0.0-SNAPSHOT 
      API)
      +
      +
      +
      +
      +
      +var methods = {"i0":9,"i1":9};
      +var tabs = {65535:["t0","All Methods"],1:["t1","Static 
      Methods"],8:["t4","Concrete Methods"]};
      +var altColor = "altColor";
      +var rowColor = "rowColor";
      +var tableTab = "tableTab";
      +var activeTableTab = "activeTableTab";
      +
      +
      +JavaScript is disabled on your browser.
      +
      +
      +
      +
      +
      +Skip navigation links
      +
      +
      +
      +
      +Overview
      +Package
      +Class
      +Use
      +Tree
      +Deprecated
      +Index
      +Help
      +
      +
      +
      +
      +PrevClass
      +NextClass
      +
      +
      +Frames
      +NoFrames
      +
      +
      +AllClasses
      +
      +
      +
      +
      +
      +
      +
      +Summary:
      +Nested|
      +Enum Constants|
      +Field|
      +Method
      +
      +
      +Detail:
      +Enum Constants|
      +Field|
      +Method
      +
      +
      +
      +
      +
      +
      +
      +
      +org.apache.hadoop.hbase.master.procedure
      +Enum 
      PeerProcedureInterface.PeerOperationType
      +
      +
      +
      +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">java.lang.Object
      +
      +
      +http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">java.lang.Enum<PeerProcedureInterface.PeerOperationType>
      +
      +
      +org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +All Implemented Interfaces:
      +http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
       title="class or interface in java.io">Serializable, http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">Comparable<PeerProcedureInterface.PeerOperationType>
      +
      +
      +Enclosing interface:
      +PeerProcedureInterface
      +
      +
      +
      +public static enum PeerProcedureInterface.PeerOperationType
      +extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">Enum<PeerProcedureInterface.PeerOperationType>
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +Enum Constant Summary
      +
      +Enum Constants
      +
      +Enum Constant and Description
      +
      +
      +ADD
      +
      +
      +DISABLE
      +
      +
      +ENABLE
      +
      +
      +REFRESH
      +
      +
      +REMOVE
      +
      +
      +UPDATE_CONFIG
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +Method Summary
      +
+All Methods  Static Methods  Concrete Methods
      +
      +Modifier and Type
      +Method and Description
      +
      +
      +static PeerProcedureInterface.PeerOperationType
      +valueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String name)
      +Returns the enum constant of this type with the specified 
      name.
      +
      +
      +
      +static PeerProcedureInterface.PeerOperationType[]
      +values()
      +Returns an array containing the constants of this enum 
      type, in
      +the order they are declared.
      +
      +
      +
      +
      +
      +
      +
+Methods inherited from class java.lang.Enum
      +http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true#clone--;
       title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true#compareTo-E-;
       title="class or interface in java.lang">compareTo, 
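A quick sketch of the two generated helpers summarized above:

    // Look up a peer operation by name; throws IllegalArgumentException if unknown.
    PeerProcedureInterface.PeerOperationType op =
        PeerProcedureInterface.PeerOperationType.valueOf("ADD");

    // Iterate the constants in declaration order.
    for (PeerProcedureInterface.PeerOperationType t
        : PeerProcedureInterface.PeerOperationType.values()) {
      System.out.println(t);
    }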

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/tool/Canary.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/tool/Canary.html 
      b/devapidocs/org/apache/hadoop/hbase/tool/Canary.html
      index eaf6bcb..1873694 100644
      --- a/devapidocs/org/apache/hadoop/hbase/tool/Canary.html
      +++ b/devapidocs/org/apache/hadoop/hbase/tool/Canary.html
      @@ -848,7 +848,7 @@ implements org.apache.hadoop.util.Tool
       
       
       sniff
-private static List<Future<Void>> sniff(Admin admin,
+private static List<Future<Void>> sniff(Admin admin,
 Canary.Sink sink,
 String tableName,
 ExecutorService executor,
      @@ -869,7 +869,7 @@ implements org.apache.hadoop.util.Tool
       
       
       sniff
-private static List<Future<Void>> sniff(Admin admin,
+private static List<Future<Void>> sniff(Admin admin,
 Canary.Sink sink,
 HTableDescriptor tableDesc,
 ExecutorService executor,
      @@ -889,7 +889,7 @@ implements org.apache.hadoop.util.Tool
       
       
       main
-public static void main(String[] args)
+public static void main(String[] args)
  throws Exception
       
       Throws:
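For orientation, a sketch of driving the entry point above; passing a single table name is one common invocation, and calling main directly rather than going through the hbase launcher script is purely illustrative:

    // Sniff one table; roughly what running the Canary tool with one table argument does.
    org.apache.hadoop.hbase.tool.Canary.main(new String[] { "myTable" });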
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
       
      b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
      index fb13dad..c86800e 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
      @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -public static enum HBaseFsck.ErrorReporter.ERROR_CODE
      +public static enum HBaseFsck.ErrorReporter.ERROR_CODE
       extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">Enum<HBaseFsck.ErrorReporter.ERROR_CODE>
       
       
      @@ -309,7 +309,7 @@ the order they are declared.
       
       
       UNKNOWN
-public static final HBaseFsck.ErrorReporter.ERROR_CODE UNKNOWN
+public static final HBaseFsck.ErrorReporter.ERROR_CODE UNKNOWN
       
       
       
      @@ -318,7 +318,7 @@ the order they are declared.
       
       
       NO_META_REGION
-public static final HBaseFsck.ErrorReporter.ERROR_CODE NO_META_REGION
      +public static 

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
      index 1318b95..841130a 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
      @@ -55,1647 +55,1615 @@
       047import 
      org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
       048import 
      org.apache.hadoop.hbase.coprocessor.MasterObserver;
       049import 
      org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
      -050import 
      org.apache.hadoop.hbase.coprocessor.ObserverContext;
      -051import 
      org.apache.hadoop.hbase.master.locking.LockProcedure;
      -052import 
      org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
      -053import 
      org.apache.hadoop.hbase.metrics.MetricRegistry;
      -054import 
      org.apache.hadoop.hbase.net.Address;
      -055import 
      org.apache.hadoop.hbase.procedure2.LockType;
      -056import 
      org.apache.hadoop.hbase.procedure2.LockedResource;
      -057import 
      org.apache.hadoop.hbase.procedure2.Procedure;
      -058import 
      org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
      -059import 
      org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
      -060import 
      org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
      -061import 
      org.apache.hadoop.hbase.security.User;
      -062import 
      org.apache.yetus.audience.InterfaceAudience;
      -063import org.slf4j.Logger;
      -064import org.slf4j.LoggerFactory;
      -065
      -066/**
      -067 * Provides the coprocessor framework and 
      environment for master oriented
      -068 * operations.  {@link HMaster} interacts 
      with the loaded coprocessors
      -069 * through this class.
      -070 */
      -071@InterfaceAudience.Private
      -072public class MasterCoprocessorHost
      -073extends 
CoprocessorHost<MasterCoprocessor, MasterCoprocessorEnvironment> {
      -074
      -075  private static final Logger LOG = 
      LoggerFactory.getLogger(MasterCoprocessorHost.class);
      -076
      -077  /**
      -078   * Coprocessor environment extension 
      providing access to master related
      -079   * services.
      -080   */
      -081  private static class MasterEnvironment 
extends BaseEnvironment<MasterCoprocessor>
      -082  implements 
      MasterCoprocessorEnvironment {
      -083private final boolean 
      supportGroupCPs;
      -084private final MetricRegistry 
      metricRegistry;
      -085private final MasterServices 
      services;
      -086
      -087public MasterEnvironment(final 
      MasterCoprocessor impl, final int priority, final int seq,
      -088final Configuration conf, final 
      MasterServices services) {
      -089  super(impl, priority, seq, conf);
      -090  this.services = services;
      -091  supportGroupCPs = 
      !useLegacyMethod(impl.getClass(),
      -092  "preBalanceRSGroup", 
      ObserverContext.class, String.class);
      -093  this.metricRegistry =
      -094  
      MetricsCoprocessor.createRegistryForMasterCoprocessor(impl.getClass().getName());
      -095}
      -096
      -097@Override
      -098public ServerName getServerName() {
      -099  return 
      this.services.getServerName();
      -100}
      -101
      -102@Override
      -103public Connection getConnection() {
      -104  return new 
      SharedConnection(this.services.getConnection());
      -105}
      -106
      -107@Override
      -108public Connection 
      createConnection(Configuration conf) throws IOException {
      -109  return 
      this.services.createConnection(conf);
      -110}
      -111
      -112@Override
      -113public MetricRegistry 
      getMetricRegistryForMaster() {
      -114  return metricRegistry;
      -115}
      -116
      -117@Override
      -118public void shutdown() {
      -119  super.shutdown();
      -120  
      MetricsCoprocessor.removeRegistry(this.metricRegistry);
      -121}
      -122  }
      -123
      -124  /**
      -125   * Special version of MasterEnvironment 
      that exposes MasterServices for Core Coprocessors only.
      -126   * Temporary hack until Core 
      Coprocessors are integrated into Core.
      -127   */
      -128  private static class 
      MasterEnvironmentForCoreCoprocessors extends MasterEnvironment
      -129  implements HasMasterServices {
      -130private final MasterServices 
      masterServices;
      -131
      -132public 
      MasterEnvironmentForCoreCoprocessors(final MasterCoprocessor impl, final int 
      priority,
      -133final int seq, final 
      Configuration conf, final MasterServices services) {
      -134  super(impl, priority, seq, conf, 
      services);
      -135  this.masterServices = services;
      -136}
      -137
      -138/**
      -139 * @return An instance of 
      MasterServices, an object NOT for general user-space Coprocessor
      -140 * consumption.
      -141 */
      -142public MasterServices 
      getMasterServices() {
      -143  return this.masterServices;
      -144}
      -145  }
      -146
      -147  private 
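To make the environment plumbing above concrete, a hedged sketch, not part of this patch, of a master coprocessor that pulls its per-coprocessor MetricRegistry from the MasterCoprocessorEnvironment:

    import java.io.IOException;
    import java.util.Optional;

    import org.apache.hadoop.hbase.CoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.metrics.Counter;
    import org.apache.hadoop.hbase.metrics.MetricRegistry;

    public class ExampleMasterObserver implements MasterCoprocessor, MasterObserver {
      private Counter starts;

      @Override
      public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
      }

      @Override
      public void start(CoprocessorEnvironment env) throws IOException {
        // The host hands master coprocessors the MasterEnvironment shown above.
        MetricRegistry registry =
            ((MasterCoprocessorEnvironment) env).getMetricRegistryForMaster();
        starts = registry.counter("exampleObserverStarts"); // hypothetical metric name
        starts.increment();
      }
    }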

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/package-tree.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/package-tree.html 
      b/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/package-tree.html
      index 39ac5fa..a3f 100644
      --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/package-tree.html
      +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/package-tree.html
      @@ -158,6 +158,6 @@
       
       
       
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/package-use.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/package-use.html 
      b/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/package-use.html
      index 88d171b..59a2967 100644
      --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/package-use.html
      +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/replication/package-use.html
      @@ -153,6 +153,6 @@
       
       
       
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/ActiveMasterManager.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/master/ActiveMasterManager.html 
      b/devapidocs/org/apache/hadoop/hbase/master/ActiveMasterManager.html
      index fa6137e..8cff6b0 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/ActiveMasterManager.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/ActiveMasterManager.html
@@ -557,6 +557,6 @@ extends
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/AssignmentListener.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/master/AssignmentListener.html 
      b/devapidocs/org/apache/hadoop/hbase/master/AssignmentListener.html
      index d4267dc..d3dbbca 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/AssignmentListener.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/AssignmentListener.html
@@ -250,6 +250,6 @@ public interface
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/AssignmentVerificationReport.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/AssignmentVerificationReport.html 
      b/devapidocs/org/apache/hadoop/hbase/master/AssignmentVerificationReport.html
      index e552205..a352b65 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/AssignmentVerificationReport.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/AssignmentVerificationReport.html
      @@ -836,6 +836,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/CatalogJanitor.SplitParentFirstComparator.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/CatalogJanitor.SplitParentFirstComparator.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/CatalogJanitor.SplitParentFirstComparator.html
      index 959496c..6213383 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/CatalogJanitor.SplitParentFirstComparator.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/CatalogJanitor.SplitParentFirstComparator.html
      @@ -327,6 +327,6 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
       
       
       
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
       
       
      
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.html
      index bbd91b8..4f76302 100644
      --- 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.html
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.html
      @@ -56,1641 +56,1753 @@
       048import 
      java.util.concurrent.atomic.AtomicBoolean;
       049import 
      java.util.concurrent.atomic.AtomicInteger;
       050import 
      java.util.concurrent.atomic.AtomicLong;
      -051
      -052import 
      org.apache.hadoop.conf.Configuration;
      -053import 
      org.apache.hadoop.hbase.CallQueueTooBigException;
      -054import 
      org.apache.hadoop.hbase.CategoryBasedTimeout;
      -055import org.apache.hadoop.hbase.Cell;
      -056import 
      org.apache.hadoop.hbase.HConstants;
      -057import 
      org.apache.hadoop.hbase.HRegionInfo;
      -058import 
      org.apache.hadoop.hbase.HRegionLocation;
      -059import 
      org.apache.hadoop.hbase.RegionLocations;
      -060import 
      org.apache.hadoop.hbase.ServerName;
      -061import 
      org.apache.hadoop.hbase.TableName;
      -062import 
      org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
      -063import 
      org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
      -064import 
      org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
      -065import 
      org.apache.hadoop.hbase.client.backoff.ServerStatistics;
      -066import 
      org.apache.hadoop.hbase.client.coprocessor.Batch;
      -067import 
      org.apache.hadoop.hbase.ipc.RpcControllerFactory;
      -068import 
      org.apache.hadoop.hbase.testclassification.ClientTests;
      -069import 
      org.apache.hadoop.hbase.testclassification.MediumTests;
      -070import 
      org.apache.hadoop.hbase.util.Bytes;
      -071import 
      org.apache.hadoop.hbase.util.Threads;
      -072import org.junit.Assert;
      -073import org.junit.BeforeClass;
      -074import org.junit.Ignore;
      -075import org.junit.Rule;
      -076import org.junit.Test;
      -077import 
      org.junit.experimental.categories.Category;
      -078import org.junit.rules.TestRule;
      -079import org.mockito.Mockito;
      -080import org.slf4j.Logger;
      -081import org.slf4j.LoggerFactory;
      -082
      -083@Category({ClientTests.class, 
      MediumTests.class})
      -084public class TestAsyncProcess {
      -085  @Rule public final TestRule timeout = 
      CategoryBasedTimeout.builder().withTimeout(this.getClass()).
      -086  
      withLookingForStuckThread(true).build();
      -087  private static final Logger LOG = 
      LoggerFactory.getLogger(TestAsyncProcess.class);
      -088  private static final TableName 
      DUMMY_TABLE =
      -089  TableName.valueOf("DUMMY_TABLE");
      -090  private static final byte[] 
      DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
      -091  private static final byte[] 
      DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
      -092  private static final byte[] 
      DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
      -093  private static final byte[] FAILS = 
      Bytes.toBytes("FAILS");
      -094  private static final Configuration CONF 
      = new Configuration();
      -095  private static final 
      ConnectionConfiguration CONNECTION_CONFIG =
      -096  new 
      ConnectionConfiguration(CONF);
      -097  private static final ServerName sn = 
      ServerName.valueOf("s1,1,1");
      -098  private static final ServerName sn2 = 
      ServerName.valueOf("s2,2,2");
      -099  private static final ServerName sn3 = 
      ServerName.valueOf("s3,3,3");
      -100  private static final HRegionInfo hri1 
      =
      -101  new HRegionInfo(DUMMY_TABLE, 
      DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
      -102  private static final HRegionInfo hri2 
      =
      -103  new HRegionInfo(DUMMY_TABLE, 
      DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
      -104  private static final HRegionInfo hri3 
      =
      -105  new HRegionInfo(DUMMY_TABLE, 
      DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
      -106  private static final HRegionLocation 
      loc1 = new HRegionLocation(hri1, sn);
      -107  private static final HRegionLocation 
      loc2 = new HRegionLocation(hri2, sn);
      -108  private static final HRegionLocation 
      loc3 = new HRegionLocation(hri3, sn2);
      -109
      -110  // Replica stuff
      -111  private static final RegionInfo hri1r1 
      = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
      -112  private static final RegionInfo hri1r2 
      = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
      -113  private static final RegionInfo hri2r1 
      = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
      -114  private static final RegionLocations 
      hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
      -115  new HRegionLocation(hri1r1, sn2), 
      new HRegionLocation(hri1r2, sn3));
      -116  private static final RegionLocations 
      hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
      -117  new HRegionLocation(hri2r1, 
      sn3));
      -118  private static final RegionLocations 
      hrls3 =
      -119  new RegionLocations(new 
      HRegionLocation(hri3, sn3), null);
      -120
      -121  private static final String 

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html 
      b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
      index cd38b83..097bcb1 100644
      --- a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
      +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
      @@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
       
       
       @InterfaceAudience.Private
      -public class RSRpcServices
      +public class RSRpcServices
       extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">Object
       implements HBaseRPCErrorHandler, 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface,
       
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface,
       PriorityFunction, ConfigurationObserver
       Implements the regionserver RPC services.
      @@ -207,7 +207,7 @@ implements clearCompactionQueues
       
       
-private org.apache.hadoop.hbase.shaded.com.google.common.cache.Cache<String,String>
+private org.apache.hbase.thirdparty.com.google.common.cache.Cache<String,String>
       closedScanners
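The only change to this field is the Guava relocation from the old internal shading to hbase-thirdparty; a sketch of building such a cache against the relocated package, with an illustrative retention period:

    import java.util.concurrent.TimeUnit;
    import org.apache.hbase.thirdparty.com.google.common.cache.Cache;
    import org.apache.hbase.thirdparty.com.google.common.cache.CacheBuilder;

    // Scanner name -> error detail for recently closed scanners; the 2-minute
    // expiry is illustrative, not taken from the patch.
    Cache<String, String> closedScanners = CacheBuilder.newBuilder()
        .expireAfterAccess(2, TimeUnit.MINUTES)
        .build();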
       
       
      @@ -416,7 +416,7 @@ implements 
       org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse
-bulkLoadHFile(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+bulkLoadHFile(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
  org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest request)
       Atomically bulk load several HFiles into an open 
      region
       
      @@ -472,17 +472,17 @@ implements 
       org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse
-cleanupBulkLoad(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+cleanupBulkLoad(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
 org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest request)
       
       
       org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse
-clearCompactionQueues(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+clearCompactionQueues(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
  org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest request)
       
       
       org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse
-clearRegionBlockCache(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+clearRegionBlockCache(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
  org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest request)
       
       
      @@ -491,7 +491,7 @@ implements 
       org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse
-closeRegion(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+closeRegion(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
 org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest request)
       Close a region on the region server.
       
      @@ -505,7 +505,7 @@ implements 
       org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse
-compactRegion(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+compactRegion(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
  org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest request)
       Compact a region on the region server.
       
      @@ -568,12 +568,12 @@ implements 
       org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse
-execRegionServerService(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+execRegionServerService(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
 org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest request)
       
       
       org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/master/RegionState.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/master/RegionState.html 
      b/devapidocs/org/apache/hadoop/hbase/master/RegionState.html
      index 1a0cb4e..c90c499 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/RegionState.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/RegionState.html
      @@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
       
       
       @InterfaceAudience.Private
      -public class RegionState
      +public class RegionState
       extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">Object
       State of a Region while undergoing transitions.
        This class is immutable.
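A short sketch of consuming this immutable holder; the regionInfo variable is assumed, while createForTesting and the predicates appear in the method detail below:

    // States are immutable; a transition yields a new RegionState instance.
    RegionState state = RegionState.createForTesting(regionInfo, RegionState.State.OPEN);
    assert state.isOpened();
    assert !state.isOffline();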
      @@ -441,7 +441,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       stamp
-private final long stamp
+private final long stamp
       
       
       
      @@ -450,7 +450,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       hri
-private final RegionInfo hri
+private final RegionInfo hri
       
       
       
      @@ -459,7 +459,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       serverName
-private final ServerName serverName
+private final ServerName serverName
       
       
       
      @@ -468,7 +468,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       state
-private final RegionState.State state
+private final RegionState.State state
       
       
       
      @@ -477,7 +477,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       ritDuration
-private long ritDuration
+private long ritDuration
       
       
       
      @@ -494,7 +494,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       RegionState
-public RegionState(RegionInfo region,
+public RegionState(RegionInfo region,
 RegionState.State state,
 ServerName serverName)
       
      @@ -505,7 +505,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       RegionState
-public RegionState(RegionInfo region,
+public RegionState(RegionInfo region,
 RegionState.State state,
 long stamp,
 ServerName serverName)
      @@ -517,7 +517,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       RegionState
-public RegionState(RegionInfo region,
+public RegionState(RegionInfo region,
 RegionState.State state,
 long stamp,
 ServerName serverName,
      @@ -538,7 +538,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       createForTesting
-public static RegionState createForTesting(RegionInfo region,
+public static RegionState createForTesting(RegionInfo region,
 RegionState.State state)
       
       
      @@ -548,7 +548,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       getState
-public RegionState.State getState()
+public RegionState.State getState()
       
       
       
      @@ -557,7 +557,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       getStamp
-public long getStamp()
+public long getStamp()
       
       
       
      @@ -566,7 +566,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       getRegion
-public RegionInfo getRegion()
+public RegionInfo getRegion()
       
       
       
      @@ -575,7 +575,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       getServerName
-public ServerName getServerName()
+public ServerName getServerName()
       
       
       
      @@ -584,7 +584,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       getRitDuration
-public long getRitDuration()
+public long getRitDuration()
       
       
       
      @@ -594,7 +594,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       updateRitDuration
       @InterfaceAudience.Private
-void updateRitDuration(long previousStamp)
+void updateRitDuration(long previousStamp)
       Update the duration of region in transition
       
       Parameters:
      @@ -608,7 +608,7 @@ void
       
       isClosing
-public boolean isClosing()
+public boolean isClosing()
       
       
       
      @@ -617,7 +617,7 @@ void
       
       isClosed
-public boolean isClosed()
+public boolean isClosed()
       
       
       
      @@ -626,7 +626,7 @@ void
       
       isOpening
-public boolean isOpening()
+public boolean isOpening()
       
       
       
      @@ -635,7 +635,7 @@ void
       
       isOpened
-public boolean isOpened()
+public boolean isOpened()
       
       
       
      @@ -644,7 +644,7 @@ void
       
       isOffline
-public boolean isOffline()
+public boolean isOffline()
       
       
       
      @@ -653,7 +653,7 @@ void
       
       isSplitting
-public boolean isSplitting()
+public boolean isSplitting()
       
       
       
      @@ -662,7 +662,7 @@ void
       
       isSplit
-public boolean isSplit()
+public boolean isSplit()
       
       
       
      @@ -671,7 +671,7 @@ void
       
       isSplittingNew
-public boolean isSplittingNew()
+public boolean isSplittingNew()
       
       
       
      @@ -680,7 +680,7 @@ void
       
       isFailedOpen
-public boolean isFailedOpen()
+public boolean isFailedOpen()
       
       
       
      @@ -689,7 +689,7 @@ void
       
       isFailedClose
-public boolean isFailedClose()
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
      index 3400507..2baa140 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowDeleteFamilyCell.html
      @@ -28,3034 +28,2926 @@
       020import static 
      org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
       021import static 
      org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
       022
      -023import 
      com.google.common.annotations.VisibleForTesting;
      -024
      -025import java.io.DataOutput;
      -026import java.io.DataOutputStream;
      -027import java.io.IOException;
      -028import java.io.OutputStream;
      -029import java.math.BigDecimal;
      -030import java.nio.ByteBuffer;
      -031import java.util.ArrayList;
      -032import java.util.Iterator;
      -033import java.util.List;
      -034import java.util.Optional;
      -035
      -036import 
      org.apache.hadoop.hbase.KeyValue.Type;
      -037import 
      org.apache.hadoop.hbase.filter.ByteArrayComparable;
      -038import 
      org.apache.hadoop.hbase.io.HeapSize;
      -039import 
      org.apache.hadoop.hbase.io.TagCompressionContext;
      -040import 
      org.apache.hadoop.hbase.io.util.Dictionary;
      -041import 
      org.apache.hadoop.hbase.io.util.StreamUtils;
      -042import 
      org.apache.hadoop.hbase.util.ByteBufferUtils;
      -043import 
      org.apache.hadoop.hbase.util.ByteRange;
      -044import 
      org.apache.hadoop.hbase.util.Bytes;
      -045import 
      org.apache.hadoop.hbase.util.ClassSize;
      -046import 
      org.apache.yetus.audience.InterfaceAudience;
      -047
      -048
      -049/**
      -050 * Utility methods helpful slinging 
      {@link Cell} instances. It has more powerful and
      -051 * rich set of APIs than those in {@link 
      CellUtil} for internal usage.
      -052 */
      -053@InterfaceAudience.Private
      -054public final class PrivateCellUtil {
      -055
      -056  /**
      -057   * Private constructor to keep this 
      class from being instantiated.
      -058   */
      -059  private PrivateCellUtil() {
      -060  }
      +023import java.io.DataOutput;
      +024import java.io.DataOutputStream;
      +025import java.io.IOException;
      +026import java.io.OutputStream;
      +027import java.math.BigDecimal;
      +028import java.nio.ByteBuffer;
      +029import java.util.ArrayList;
      +030import java.util.Iterator;
      +031import java.util.List;
      +032import java.util.Optional;
      +033import 
      org.apache.hadoop.hbase.KeyValue.Type;
      +034import 
      org.apache.hadoop.hbase.filter.ByteArrayComparable;
      +035import 
      org.apache.hadoop.hbase.io.HeapSize;
      +036import 
      org.apache.hadoop.hbase.io.TagCompressionContext;
      +037import 
      org.apache.hadoop.hbase.io.util.Dictionary;
      +038import 
      org.apache.hadoop.hbase.io.util.StreamUtils;
      +039import 
      org.apache.hadoop.hbase.util.ByteBufferUtils;
      +040import 
      org.apache.hadoop.hbase.util.ByteRange;
      +041import 
      org.apache.hadoop.hbase.util.Bytes;
      +042import 
      org.apache.hadoop.hbase.util.ClassSize;
      +043import 
      org.apache.yetus.audience.InterfaceAudience;
      +044
      +045import 
      org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
      +046
      +047/**
      +048 * Utility methods helpful slinging 
      {@link Cell} instances. It has more powerful and
      +049 * rich set of APIs than those in {@link 
      CellUtil} for internal usage.
      +050 */
      +051@InterfaceAudience.Private
      +052public final class PrivateCellUtil {
      +053
      +054  /**
      +055   * Private constructor to keep this 
      class from being instantiated.
      +056   */
      +057  private PrivateCellUtil() {
      +058  }
      +059
      +060  /*** ByteRange 
      ***/
       061
      -062  /*** ByteRange 
      ***/
      -063
      -064  public static ByteRange 
      fillRowRange(Cell cell, ByteRange range) {
      -065return range.set(cell.getRowArray(), 
      cell.getRowOffset(), cell.getRowLength());
      -066  }
      -067
      -068  public static ByteRange 
      fillFamilyRange(Cell cell, ByteRange range) {
      -069return 
      range.set(cell.getFamilyArray(), cell.getFamilyOffset(), 
      cell.getFamilyLength());
      -070  }
      -071
      -072  public static ByteRange 
      fillQualifierRange(Cell cell, ByteRange range) {
      -073return 
      range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
      -074  cell.getQualifierLength());
      -075  }
      -076
      -077  public static ByteRange 
      fillValueRange(Cell cell, ByteRange range) {
      -078return 
      range.set(cell.getValueArray(), cell.getValueOffset(), 
      cell.getValueLength());
      -079  }
      -080
      -081  public static ByteRange 
      fillTagRange(Cell cell, ByteRange range) {
      -082return range.set(cell.getTagsArray(), 
      cell.getTagsOffset(), cell.getTagsLength());
      -083  }
      +062  public static ByteRange 
      fillRowRange(Cell cell, ByteRange range) {
      +063return range.set(cell.getRowArray(), 
      cell.getRowOffset(), cell.getRowLength());
      +064  }
      +065
      +066  public static ByteRange 
      fillFamilyRange(Cell cell, ByteRange range) {
      +067
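For context, a sketch of using the fill helpers above; SimpleMutableByteRange is one concrete ByteRange implementation, and the cell variable is assumed:

    import org.apache.hadoop.hbase.util.ByteRange;
    import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

    // Point a reusable range at the cell's row bytes without copying them.
    ByteRange range = new SimpleMutableByteRange();
    PrivateCellUtil.fillRowRange(cell, range);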

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationFactory.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationFactory.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationFactory.html
      index 5272542..929de17 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationFactory.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationFactory.html
      @@ -39,39 +39,34 @@
       031@InterfaceAudience.Private
       032public class ReplicationFactory {
       033
      -034  public static final Class 
      defaultReplicationQueueClass = ReplicationQueuesZKImpl.class;
      -035
      -036  public static ReplicationQueues 
      getReplicationQueues(ReplicationQueuesArguments args)
      -037  throws Exception {
-038Class<?> classToBuild = 
      args.getConf().getClass("hbase.region.replica." +
      -039
      "replication.replicationQueues.class", defaultReplicationQueueClass);
      -040return (ReplicationQueues) 
      ConstructorUtils.invokeConstructor(classToBuild, args);
      -041  }
      -042
      -043  public static ReplicationQueuesClient 
      getReplicationQueuesClient(
      -044  ReplicationQueuesClientArguments 
      args) throws Exception {
-045Class<?> classToBuild = 
      args.getConf().getClass(
      -046  
      "hbase.region.replica.replication.replicationQueuesClient.class",
      -047  
      ReplicationQueuesClientZKImpl.class);
      -048return (ReplicationQueuesClient) 
      ConstructorUtils.invokeConstructor(classToBuild, args);
      +034  public static ReplicationQueues 
      getReplicationQueues(ReplicationQueuesArguments args)
      +035  throws Exception {
      +036return (ReplicationQueues) 
      ConstructorUtils.invokeConstructor(ReplicationQueuesZKImpl.class,
      +037  args);
      +038  }
      +039
      +040  public static ReplicationQueuesClient
      +041  
      getReplicationQueuesClient(ReplicationQueuesClientArguments args) throws 
      Exception {
      +042return (ReplicationQueuesClient) 
      ConstructorUtils
      +043
      .invokeConstructor(ReplicationQueuesClientZKImpl.class, args);
      +044  }
      +045
      +046  public static ReplicationPeers 
      getReplicationPeers(final ZKWatcher zk, Configuration conf,
      +047  
         Abortable abortable) {
      +048return getReplicationPeers(zk, conf, 
      null, abortable);
       049  }
       050
       051  public static ReplicationPeers 
      getReplicationPeers(final ZKWatcher zk, Configuration conf,
      -052  
         Abortable abortable) {
      -053return getReplicationPeers(zk, conf, 
      null, abortable);
      +052  
         final ReplicationQueuesClient queuesClient, Abortable abortable) {
      +053return new ReplicationPeersZKImpl(zk, 
      conf, queuesClient, abortable);
       054  }
       055
      -056  public static ReplicationPeers 
      getReplicationPeers(final ZKWatcher zk, Configuration conf,
      -057  
         final ReplicationQueuesClient queuesClient, Abortable abortable) {
      -058return new ReplicationPeersZKImpl(zk, 
      conf, queuesClient, abortable);
      -059  }
      -060
      -061  public static ReplicationTracker 
      getReplicationTracker(ZKWatcher zookeeper,
      -062  final ReplicationPeers 
      replicationPeers, Configuration conf, Abortable abortable,
      -063  Stoppable stopper) {
      -064return new 
      ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, abortable, 
      stopper);
      -065  }
      -066}
      +056  public static ReplicationTracker 
      getReplicationTracker(ZKWatcher zookeeper,
      +057  final ReplicationPeers 
      replicationPeers, Configuration conf, Abortable abortable,
      +058  Stoppable stopper) {
      +059return new 
      ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, abortable, 
      stopper);
      +060  }
      +061}
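The refactoring above keeps the factory signatures; a sketch of a caller, assuming zkWatcher, conf, abortable and stopper are already set up:

    // Obtain the ZK-backed peers tracker and change tracker via the factory.
    ReplicationPeers peers =
        ReplicationFactory.getReplicationPeers(zkWatcher, conf, abortable);
    ReplicationTracker tracker =
        ReplicationFactory.getReplicationTracker(zkWatcher, peers, conf, abortable, stopper);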
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl.html
      index 3fae067..fc0477c 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl.html
      @@ -227,7 +227,7 @@
       219ReplicationPeerConfigBuilderImpl 
      builder = new ReplicationPeerConfigBuilderImpl();
       220
      builder.setClusterKey(peerConfig.getClusterKey())
       221
      .setReplicationEndpointImpl(peerConfig.getReplicationEndpointImpl())
      -222
      .setPeerData(peerConfig.getPeerData()).setConfiguration(peerConfig.getConfiguration())
      +222
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
      index f7fbfbf..88ebcbc 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
      @@ -34,1583 +34,1583 @@
       026import java.io.IOException;
       027import java.util.ArrayList;
       028import java.util.Arrays;
      -029import java.util.Collection;
      -030import java.util.Collections;
      -031import java.util.EnumSet;
      -032import java.util.HashMap;
      -033import java.util.List;
      -034import java.util.Map;
      -035import java.util.Optional;
      -036import java.util.Set;
      -037import 
      java.util.concurrent.CompletableFuture;
      -038import java.util.concurrent.TimeUnit;
      -039import 
      java.util.concurrent.atomic.AtomicReference;
      -040import java.util.function.BiConsumer;
      -041import java.util.function.Function;
      -042import java.util.regex.Pattern;
      -043import java.util.stream.Collectors;
      -044import java.util.stream.Stream;
      -045import org.apache.commons.io.IOUtils;
      -046import 
      org.apache.hadoop.conf.Configuration;
      -047import 
      org.apache.hadoop.hbase.AsyncMetaTableAccessor;
      -048import 
      org.apache.hadoop.hbase.ClusterMetrics.Option;
      -049import 
      org.apache.hadoop.hbase.ClusterStatus;
      -050import 
      org.apache.hadoop.hbase.HConstants;
      -051import 
      org.apache.hadoop.hbase.HRegionLocation;
      -052import 
      org.apache.hadoop.hbase.MetaTableAccessor;
      -053import 
      org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
      -054import 
      org.apache.hadoop.hbase.NamespaceDescriptor;
      -055import 
      org.apache.hadoop.hbase.RegionLoad;
      -056import 
      org.apache.hadoop.hbase.RegionLocations;
      -057import 
      org.apache.hadoop.hbase.ServerName;
      -058import 
      org.apache.hadoop.hbase.TableExistsException;
      -059import 
      org.apache.hadoop.hbase.TableName;
      -060import 
      org.apache.hadoop.hbase.TableNotDisabledException;
      -061import 
      org.apache.hadoop.hbase.TableNotEnabledException;
      -062import 
      org.apache.hadoop.hbase.TableNotFoundException;
      -063import 
      org.apache.hadoop.hbase.UnknownRegionException;
      -064import 
      org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
      -065import 
      org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
      -066import 
      org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
      -067import 
      org.apache.hadoop.hbase.client.Scan.ReadType;
      -068import 
      org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
      -069import 
      org.apache.hadoop.hbase.client.replication.TableCFs;
      -070import 
      org.apache.hadoop.hbase.client.security.SecurityCapability;
      -071import 
      org.apache.hadoop.hbase.exceptions.DeserializationException;
      -072import 
      org.apache.hadoop.hbase.ipc.HBaseRpcController;
      -073import 
      org.apache.hadoop.hbase.quotas.QuotaFilter;
      -074import 
      org.apache.hadoop.hbase.quotas.QuotaSettings;
      -075import 
      org.apache.hadoop.hbase.quotas.QuotaTableUtil;
      -076import 
      org.apache.hadoop.hbase.replication.ReplicationException;
      -077import 
      org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
      -078import 
      org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
      -079import 
      org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
      -080import 
      org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
      -081import 
      org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
      -082import 
      org.apache.hadoop.hbase.util.Bytes;
      -083import 
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
      -084import 
      org.apache.hadoop.hbase.util.ForeignExceptionUtil;
      -085import 
      org.apache.yetus.audience.InterfaceAudience;
      -086import org.slf4j.Logger;
      -087import org.slf4j.LoggerFactory;
      -088
      -089import 
      org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
      -090import 
      org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer;
      -091import 
      org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
      -092import 
      org.apache.hadoop.hbase.shaded.io.netty.util.TimerTask;
      -093import 
      org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
      -094import 
      org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
      -095import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
      -096import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
      -097import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
      -098import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
      -099import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
      -100import 
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/org/apache/hadoop/hbase/RegionLoad.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/RegionLoad.html 
      b/devapidocs/org/apache/hadoop/hbase/RegionLoad.html
      index 34fc362..cd952b4 100644
      --- a/devapidocs/org/apache/hadoop/hbase/RegionLoad.html
      +++ b/devapidocs/org/apache/hadoop/hbase/RegionLoad.html
      @@ -18,7 +18,7 @@
       catch(err) {
       }
       //-->
      -var methods = 
      {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":42,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10};
      +var methods = 
      {"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42};
       var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
      Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
       var altColor = "altColor";
       var rowColor = "rowColor";
      @@ -107,11 +107,21 @@ var activeTableTab = "activeTableTab";
       
       
       
      +
      +All Implemented Interfaces:
      +RegionMetrics
      +
       
      +Deprecated.
      +As of release 2.0.0, this 
      will be removed in HBase 3.0.0
      + Use RegionMetrics instead.
      +
       
 @InterfaceAudience.Public
-public class RegionLoad
-extends Object
+@Deprecated
+public class RegionLoad
+extends Object
+implements RegionMetrics
       Encapsulates per-region load metrics.
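
The deprecation notes in the method table below pair each old RegionLoad getter with its RegionMetrics replacement. A minimal migration sketch, assuming a RegionMetrics instance obtained elsewhere (for example from ClusterMetrics); the getter names come from the listing on this page, everything else is illustrative:

    // Hedged sketch: old RegionLoad getters -> RegionMetrics getters.
    static void report(org.apache.hadoop.hbase.RegionMetrics metrics) {
      long seqId = metrics.getCompletedSequenceId();              // was getCompleteSequenceId()
      long compacted = metrics.getCompactedCellCount();           // was getCurrentCompactedKVs()
      long filtered = metrics.getFilteredReadRequestCount();      // was getFilteredReadRequestsCount()
      long lastMajor = metrics.getLastMajorCompactionTimestamp(); // was getLastMajorCompactionTs()
      org.apache.hadoop.hbase.Size memstore = metrics.getMemStoreSize(); // was getMemStoreSizeMB()
      System.out.println(seqId + " " + compacted + " " + filtered
          + " " + lastMajor + " " + memstore);
    }
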
       
       
@@ -132,8 +142,16 @@ extends Object
       Field and Description
       
       
      +private RegionMetrics
      +metrics
      +Deprecated.
      +
      +
      +
       protected 
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad
      -regionLoadPB
      +regionLoadPB
      +Deprecated.
      +
       
       
       
@@ -150,7 +168,14 @@ extends Object
       Constructor and Description
       
       
-RegionLoad(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad regionLoadPB)
+RegionLoad(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad regionLoadPB)
      +Deprecated.
      +
      +
      +
+RegionLoad(RegionMetrics metrics)
      +Deprecated.
      +
       
       
       
@@ -168,104 +193,309 @@ extends Object
       Method and Description
       
       
      -long
      -getCompleteSequenceId()
-This does not really belong inside RegionLoad but it's being done in the name of expediency.
      -
      +Size
      +getBloomFilterSize()
      +Deprecated.
      +
       
       
       long
      -getCurrentCompactedKVs()
      +getCompactedCellCount()
      +Deprecated.
      +
       
       
      -float
      -getDataLocality()
      +long
      +getCompactingCellCount()
      +Deprecated.
      +
       
       
       long
      -getFilteredReadRequestsCount()
      +getCompletedSequenceId()
      +Deprecated.
+This does not really belong inside RegionLoad but it's being done in the name of expediency.
      +
       
       
       long
      -getLastMajorCompactionTs()
      +getCompleteSequenceId()
      +Deprecated.
      +As of release 2.0.0, this 
      will be removed in HBase 3.0.0
      + Use getCompletedSequenceId()
       instead.
      +
      +
       
       
      -int
      -getMemStoreSizeMB()
      +long
      +getCurrentCompactedKVs()
      +Deprecated.
      +As of release 2.0.0, this 
      will be removed in HBase 3.0.0
      + Use getCompactedCellCount()
       instead.
      +
      +
       
       
      -byte[]
      -getName()
      +float
      +getDataLocality()
      +Deprecated.
      +
       
       
-String
-getNameAsString()
      +long
      +getFilteredReadRequestCount()
      +Deprecated.
      +
       
       
       long
      -getReadRequestsCount()
      +getFilteredReadRequestsCount()
      +Deprecated.
      +As of release 2.0.0, this 
      will be removed in HBase 3.0.0
      + Use getFilteredReadRequestCount()
       instead.
      +
      +
       
       
       long
      -getRequestsCount()
      +getLastMajorCompactionTimestamp()
      +Deprecated.
      +
       
       
      -int
      -getRootIndexSizeKB()
      +long
      +getLastMajorCompactionTs()
      +Deprecated.
      +As of release 2.0.0, this 
      will be removed in HBase 3.0.0
      + Use getLastMajorCompactionTimestamp()
       instead.
      +
      +
       
       
-List<org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId>
-getStoreCompleteSequenceId()
      +Size
      +getMemStoreSize()
      +Deprecated.
      +
       
       
      -long
      -getStorefileIndexSizeKB()
      +int
      +getMemStoreSizeMB()
      +Deprecated.
      +As of release 2.0.0, this 
      will be removed in HBase 3.0.0
      + Use 

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
       
      b/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
      index 9078098..eb1d1fe 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.ShortCircuitingClusterConnection.html
      @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -public static class ConnectionUtils.ShortCircuitingClusterConnection
      +public static class ConnectionUtils.ShortCircuitingClusterConnection
       extends ConnectionImplementation
       A ClusterConnection that will short-circuit RPC making 
      direct invocations against the
        localhost if the invocation target is 'this' server; save on network and 
      protobuf
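
A hedged sketch of the short-circuit decision this class implements, using the serverName and localHostAdmin members documented below; this shows the pattern, not the verbatim HBase source:

    // If the target is this server, hand back the cached in-process stub
    // instead of building a remote RPC stub.
    public AdminService.BlockingInterface getAdmin(ServerName sn) throws IOException {
      return serverName.equals(sn) ? localHostAdmin : super.getAdmin(sn);
    }
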
      @@ -287,7 +287,7 @@ extends 
       
       serverName
-private final ServerName serverName
+private final ServerName serverName
       
       
       
      @@ -296,7 +296,7 @@ extends 
       
       localHostAdmin
-private final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface localHostAdmin
+private final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface localHostAdmin
       
       
       
      @@ -305,7 +305,7 @@ extends 
       
       localHostClient
-private final org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface localHostClient
+private final org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface localHostClient
       
       
       
      @@ -322,7 +322,7 @@ extends 
       
       ShortCircuitingClusterConnection
-private ShortCircuitingClusterConnection(org.apache.hadoop.conf.Configuration conf,
+private ShortCircuitingClusterConnection(org.apache.hadoop.conf.Configuration conf,
  ExecutorService pool,
  User user,
  ServerName serverName,
      @@ -349,7 +349,7 @@ extends 
       
       getAdmin
-public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface getAdmin(ServerName sn)
+public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface getAdmin(ServerName sn)
   throws IOException
 Description copied from interface: ClusterConnection
       Establishes a connection to the region server at the 
      specified address.
      @@ -371,7 +371,7 @@ extends 
       
       getClient
-public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface getClient(ServerName sn)
+public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface getClient(ServerName sn)
  throws IOException
 Description copied from interface: ClusterConnection
       Establishes a connection to the region server at the 
      specified address, and returns
      @@ -394,7 +394,7 @@ extends 
       
       getKeepAliveMasterService
-public MasterKeepAliveConnection getKeepAliveMasterService()
+public MasterKeepAliveConnection getKeepAliveMasterService()
       throws MasterNotRunningException
 Description copied from interface: ClusterConnection
       This function allows HBaseAdmin and potentially others to 
      get a shared MasterService
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.html 
      b/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.html
      index 0468b33..d77d8ec 100644
      --- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.html
      +++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionUtils.html
      @@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
       
       
       @InterfaceAudience.Private
      -public final class ConnectionUtils
      +public final class ConnectionUtils
 extends Object
       Utility used by client connections.
       
      @@ -161,7 +161,7 @@ 

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
      --
      diff --git 
      a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html 
      b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
      index 6ab259f..51d92c2 100644
      --- 
      a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
      +++ 
      b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.RowEndKeyComparator.html
      @@ -44,240 +44,240 @@
       036import java.util.Iterator;
       037import java.util.List;
       038
      -039import org.apache.commons.logging.Log;
      -040import 
      org.apache.commons.logging.LogFactory;
      -041import org.apache.hadoop.hbase.Cell;
      -042import 
      org.apache.hadoop.hbase.CellComparator;
      -043import 
      org.apache.hadoop.hbase.KeyValue;
      -044import 
      org.apache.yetus.audience.InterfaceAudience;
      +039import com.google.protobuf.ByteString;
      +040import org.apache.commons.logging.Log;
      +041import 
      org.apache.commons.logging.LogFactory;
      +042import org.apache.hadoop.hbase.Cell;
      +043import 
      org.apache.hadoop.hbase.CellComparator;
      +044import 
      org.apache.hadoop.hbase.KeyValue;
       045import 
      org.apache.hadoop.io.RawComparator;
       046import 
      org.apache.hadoop.io.WritableComparator;
       047import 
      org.apache.hadoop.io.WritableUtils;
      -048
      +048import 
      org.apache.yetus.audience.InterfaceAudience;
       049import sun.misc.Unsafe;
       050
       051import 
      org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
       052import 
      org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
      -053import com.google.protobuf.ByteString;
      -054
      -055/**
      -056 * Utility class that handles byte 
      arrays, conversions to/from other types,
      -057 * comparisons, hash code generation, 
      manufacturing keys for HashMaps or
      -058 * HashSets, and can be used as key in 
      maps or trees.
      -059 */
      -060@SuppressWarnings("restriction")
      -061@InterfaceAudience.Public
      -062@edu.umd.cs.findbugs.annotations.SuppressWarnings(
      -063
      value="EQ_CHECK_FOR_OPERAND_NOT_COMPATIBLE_WITH_THIS",
      -064justification="It has been like this 
      forever")
-065public class Bytes implements Comparable<Bytes> {
      -066
      -067  // Using the charset canonical name for 
      String/byte[] conversions is much
      -068  // more efficient due to use of cached 
      encoders/decoders.
      -069  private static final String UTF8_CSN = 
      StandardCharsets.UTF_8.name();
      -070
      -071  //HConstants.EMPTY_BYTE_ARRAY should be 
      updated if this changed
      -072  private static final byte [] 
      EMPTY_BYTE_ARRAY = new byte [0];
      -073
      -074  private static final Log LOG = 
      LogFactory.getLog(Bytes.class);
      -075
      -076  /**
      -077   * Size of boolean in bytes
      -078   */
      -079  public static final int SIZEOF_BOOLEAN 
      = Byte.SIZE / Byte.SIZE;
      -080
      -081  /**
      -082   * Size of byte in bytes
      -083   */
      -084  public static final int SIZEOF_BYTE = 
      SIZEOF_BOOLEAN;
      -085
      -086  /**
      -087   * Size of char in bytes
      -088   */
      -089  public static final int SIZEOF_CHAR = 
      Character.SIZE / Byte.SIZE;
      -090
      -091  /**
      -092   * Size of double in bytes
      -093   */
      -094  public static final int SIZEOF_DOUBLE = 
      Double.SIZE / Byte.SIZE;
      -095
      -096  /**
      -097   * Size of float in bytes
      -098   */
      -099  public static final int SIZEOF_FLOAT = 
      Float.SIZE / Byte.SIZE;
      -100
      -101  /**
      -102   * Size of int in bytes
      -103   */
      -104  public static final int SIZEOF_INT = 
      Integer.SIZE / Byte.SIZE;
      -105
      -106  /**
      -107   * Size of long in bytes
      -108   */
      -109  public static final int SIZEOF_LONG = 
      Long.SIZE / Byte.SIZE;
      -110
      -111  /**
      -112   * Size of short in bytes
      -113   */
      -114  public static final int SIZEOF_SHORT = 
      Short.SIZE / Byte.SIZE;
      -115
      -116  /**
      -117   * Mask to apply to a long to reveal 
      the lower int only. Use like this:
-118   * int i = (int)(0xFFFFFFFF00000000L ^ some_long_value);
-119   */
-120  public static final long MASK_FOR_LOWER_INT_IN_LONG = 0xFFFFFFFF00000000L;
      -121
      -122  /**
      -123   * Estimate of size cost to pay beyond 
      payload in jvm for instance of byte [].
      -124   * Estimate based on study of jhat and 
      jprofiler numbers.
      -125   */
      -126  // JHat says BU is 56 bytes.
      -127  // SizeOf which uses 
      java.lang.instrument says 24 bytes. (3 longs?)
      -128  public static final int 
      ESTIMATED_HEAP_TAX = 16;
      -129
      -130  private static final boolean 
      UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
      -131
      -132  /**
      -133   * Returns length of the byte array, 
      returning 0 if the array is null.
      -134   * Useful for calculating sizes.
      -135   * @param b byte array, which can be 
      null
      -136   * @return 0 if b is null, otherwise 
      returns length
      -137   */
      -138  final public static int len(byte[] b) 
      {
      -139return b == null ? 0 : b.length;
      -140  }
      -141
      -142  private byte[] bytes;
      -143  private int offset;
      -144  private int length;
      -145
      -146  /**
      -147   * Create a zero-size sequence.
      -148   */
      -149  public Bytes() {
      -150super();
      -151  }
      -152
      -153  /**
      -154   * Create a Bytes using the byte array 
      as the initial value.
      -155   * 
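
The fragment cuts off mid-javadoc here. As a small, hedged illustration of the utility surface shown above (null-safe length and the SIZEOF_* constants), assuming the standard org.apache.hadoop.hbase.util.Bytes API:

    import org.apache.hadoop.hbase.util.Bytes;

    class BytesDemo {
      public static void main(String[] args) {
        byte[] row = Bytes.toBytes("row-1");
        System.out.println(Bytes.len(row));    // 5
        System.out.println(Bytes.len(null));   // 0 rather than a NullPointerException
        System.out.println(Bytes.SIZEOF_INT);  // 4, i.e. Integer.SIZE / Byte.SIZE
      }
    }
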

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServerFactory.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServerFactory.html 
      b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServerFactory.html
      index e54be64..401007f 100644
      --- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServerFactory.html
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServerFactory.html
      @@ -52,25 +52,31 @@
       044  }
       045
       046  public static RpcServer 
      createRpcServer(final Server server, final String name,
-047  final List<BlockingServiceAndInterface> services,
      -048  final InetSocketAddress 
      bindAddress, Configuration conf,
      -049  RpcScheduler scheduler) throws 
      IOException {
      -050String rpcServerClass = 
      conf.get(CUSTOM_RPC_SERVER_IMPL_CONF_KEY,
      -051
      NettyRpcServer.class.getName());
      -052StringBuilder servicesList = new 
      StringBuilder();
      -053for (BlockingServiceAndInterface s: 
      services) {
      -054  ServiceDescriptor sd = 
      s.getBlockingService().getDescriptorForType();
      -055  if (sd == null) continue; // Can be 
      null for certain tests like TestTokenAuthentication
-056  if (servicesList.length() > 0) servicesList.append(", ");
      -057  
      servicesList.append(sd.getFullName());
      -058}
      -059LOG.info("Creating " + rpcServerClass 
      + " hosting " + servicesList);
      -060return 
      ReflectionUtils.instantiateWithCustomCtor(rpcServerClass,
      -061new Class[] { Server.class, 
      String.class, List.class,
      -062InetSocketAddress.class, 
      Configuration.class, RpcScheduler.class },
      -063new Object[] { server, name, 
      services, bindAddress, conf, scheduler });
      -064  }
      -065}
+047  final List<BlockingServiceAndInterface> services, final InetSocketAddress bindAddress,
      +048  Configuration conf, RpcScheduler 
      scheduler) throws IOException {
      +049return createRpcServer(server, name, 
      services, bindAddress, conf, scheduler, true);
      +050  }
      +051
      +052  public static RpcServer 
      createRpcServer(final Server server, final String name,
+053  final List<BlockingServiceAndInterface> services,
      +054  final InetSocketAddress 
      bindAddress, Configuration conf,
      +055  RpcScheduler scheduler, boolean 
      reservoirEnabled) throws IOException {
      +056String rpcServerClass = 
      conf.get(CUSTOM_RPC_SERVER_IMPL_CONF_KEY,
      +057
      NettyRpcServer.class.getName());
      +058StringBuilder servicesList = new 
      StringBuilder();
      +059for (BlockingServiceAndInterface s: 
      services) {
      +060  ServiceDescriptor sd = 
      s.getBlockingService().getDescriptorForType();
      +061  if (sd == null) continue; // Can be 
      null for certain tests like TestTokenAuthentication
+062  if (servicesList.length() > 0) servicesList.append(", ");
      +063  
      servicesList.append(sd.getFullName());
      +064}
      +065LOG.info("Creating " + rpcServerClass 
      + " hosting " + servicesList);
      +066return 
      ReflectionUtils.instantiateWithCustomCtor(rpcServerClass,
      +067new Class[] { Server.class, 
      String.class, List.class,
      +068  InetSocketAddress.class, 
      Configuration.class, RpcScheduler.class, boolean.class },
      +069new Object[] { server, name, 
      services, bindAddress, conf, scheduler, reservoirEnabled });
      +070  }
      +071}
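
The factory above reads the RpcServer implementation class from configuration and instantiates it reflectively against an explicit constructor signature. A hedged, self-contained sketch of that pattern (not HBase's ReflectionUtils, just the idea):

    import java.lang.reflect.Constructor;

    final class CustomCtor {
      // Find a constructor matching `types` on the named class and invoke it.
      static Object instantiate(String className, Class<?>[] types, Object[] args)
          throws Exception {
        Constructor<?> ctor = Class.forName(className).getDeclaredConstructor(types);
        ctor.setAccessible(true); // implementations may keep constructors non-public
        return ctor.newInstance(args);
      }
    }
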
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServerInterface.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServerInterface.html 
      b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServerInterface.html
      index 7552f17..2f2b3a6 100644
      --- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServerInterface.html
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServerInterface.html
      @@ -31,77 +31,73 @@
       023import java.net.InetSocketAddress;
       024
       025import 
      org.apache.yetus.audience.InterfaceAudience;
      -026import 
      org.apache.yetus.audience.InterfaceStability;
      -027import 
      org.apache.hadoop.hbase.CellScanner;
      -028import 
      org.apache.hadoop.hbase.HBaseInterfaceAudience;
      -029import 
      org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
      -030import 
      org.apache.hadoop.hbase.regionserver.RSRpcServices;
      -031import 
      org.apache.hadoop.hbase.util.Pair;
      -032import 
      org.apache.hadoop.security.authorize.PolicyProvider;
      -033
      -034import 
      org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
      -035import 
      org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
      -036import 
      org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
      -037import 
      org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
      -038import 
      org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
      -039
      -040@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC,
       HBaseInterfaceAudience.PHOENIX})
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
      index 219283e..2b5d70b 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
      @@ -435,1198 +435,1203 @@
       427
       428if (backingMap.containsKey(cacheKey)) 
      {
       429  Cacheable existingBlock = 
      getBlock(cacheKey, false, false, false);
      -430  if 
      (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
      -431throw new 
      RuntimeException("Cached block contents differ, which should not have 
      happened."
      -432+ "cacheKey:" + cacheKey);
      -433  }
      -434   String msg = "Caching an already 
      cached block: " + cacheKey;
      -435   msg += ". This is harmless and can 
      happen in rare cases (see HBASE-8547)";
      -436   LOG.warn(msg);
      -437  return;
      -438}
      -439
      -440/*
      -441 * Stuff the entry into the RAM cache 
      so it can get drained to the persistent store
      -442 */
      -443RAMQueueEntry re =
      -444new RAMQueueEntry(cacheKey, 
      cachedItem, accessCount.incrementAndGet(), inMemory);
      -445if (ramCache.putIfAbsent(cacheKey, 
      re) != null) {
      -446  return;
      -447}
-448int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
-449BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
      -450boolean successfulAddition = false;
      -451if (wait) {
      -452  try {
      -453successfulAddition = bq.offer(re, 
      DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
      -454  } catch (InterruptedException e) 
      {
      -455
      Thread.currentThread().interrupt();
      -456  }
      -457} else {
      -458  successfulAddition = 
      bq.offer(re);
      -459}
      -460if (!successfulAddition) {
      -461  ramCache.remove(cacheKey);
      -462  cacheStats.failInsert();
      -463} else {
      -464  this.blockNumber.increment();
      -465  
      this.heapSize.add(cachedItem.heapSize());
      -466  blocksByHFile.add(cacheKey);
      -467}
      -468  }
      -469
      -470  /**
      -471   * Get the buffer of the block with the 
      specified key.
      -472   * @param key block's cache key
      -473   * @param caching true if the caller 
      caches blocks on cache misses
      -474   * @param repeat Whether this is a 
      repeat lookup for the same block
      -475   * @param updateCacheMetrics Whether we 
      should update cache metrics or not
      -476   * @return buffer of specified cache 
      key, or null if not in cache
      -477   */
      -478  @Override
      -479  public Cacheable getBlock(BlockCacheKey 
      key, boolean caching, boolean repeat,
      -480  boolean updateCacheMetrics) {
      -481if (!cacheEnabled) {
      -482  return null;
      -483}
      -484RAMQueueEntry re = 
      ramCache.get(key);
      -485if (re != null) {
      -486  if (updateCacheMetrics) {
      -487cacheStats.hit(caching, 
      key.isPrimary(), key.getBlockType());
      -488  }
      -489  
      re.access(accessCount.incrementAndGet());
      -490  return re.getData();
      -491}
      -492BucketEntry bucketEntry = 
      backingMap.get(key);
      -493if (bucketEntry != null) {
      -494  long start = System.nanoTime();
      -495  ReentrantReadWriteLock lock = 
      offsetLock.getLock(bucketEntry.offset());
      -496  try {
      -497lock.readLock().lock();
      -498// We can not read here even if 
      backingMap does contain the given key because its offset
      -499// maybe changed. If we lock 
      BlockCacheKey instead of offset, then we can only check
      -500// existence here.
      -501if 
      (bucketEntry.equals(backingMap.get(key))) {
      -502  // TODO : change this area - 
      should be removed after server cells and
      -503  // 12295 are available
      -504  int len = 
      bucketEntry.getLength();
      -505  if (LOG.isTraceEnabled()) {
      -506LOG.trace("Read offset=" + 
      bucketEntry.offset() + ", len=" + len);
      -507  }
      -508  Cacheable cachedBlock = 
      ioEngine.read(bucketEntry.offset(), len,
      -509  
      bucketEntry.deserializerReference(this.deserialiserMap));
      -510  long timeTaken = 
      System.nanoTime() - start;
      -511  if (updateCacheMetrics) {
      -512cacheStats.hit(caching, 
      key.isPrimary(), key.getBlockType());
      -513
      cacheStats.ioHit(timeTaken);
      -514  }
      -515  if (cachedBlock.getMemoryType() 
      == MemoryType.SHARED) {
      -516
      bucketEntry.refCount.incrementAndGet();
      -517  }
      -518  
      bucketEntry.access(accessCount.incrementAndGet());
-519  if (this.ioErrorStartTime > 0) {
      -520ioErrorStartTime = -1;
      -521  }
      -522  return cachedBlock;
      -523}
      -524  } catch (IOException ioex) {
      -525LOG.error("Failed 
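
Distilled from the getBlock logic above: a hedged simplification of the two-tier lookup, checking the RAM cache first, then the backing map under a per-offset read lock, and re-checking the entry once the lock is held because the offset may have been reused in the meantime. Names mirror the fields shown; error handling and cache metrics are omitted:

    Cacheable lookup(BlockCacheKey key) throws IOException {
      RAMQueueEntry re = ramCache.get(key);     // tier 1: not yet drained to the IOEngine
      if (re != null) {
        return re.getData();
      }
      BucketEntry entry = backingMap.get(key);  // tier 2: persisted in the bucket cache
      if (entry == null) {
        return null;
      }
      ReentrantReadWriteLock lock = offsetLock.getLock(entry.offset());
      lock.readLock().lock();
      try {
        // Re-check: the offset can be recycled while the lock is being acquired.
        if (!entry.equals(backingMap.get(key))) {
          return null;
        }
        return ioEngine.read(entry.offset(), entry.getLength(),
            entry.deserializerReference(deserialiserMap));
      } finally {
        lock.readLock().unlock();
      }
    }
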

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
      index 7cece5c..6361a24 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
      @@ -248,379 +248,383 @@
       240 */
       241CheckAndMutateBuilder 
      ifNotExists();
       242
      -243default CheckAndMutateBuilder 
      ifEquals(byte[] value) {
      -244  return 
      ifMatches(CompareOperator.EQUAL, value);
      -245}
      -246
      -247/**
      -248 * @param compareOp comparison 
      operator to use
      -249 * @param value the expected value
      -250 */
      -251CheckAndMutateBuilder 
      ifMatches(CompareOperator compareOp, byte[] value);
      -252
      -253/**
      -254 * @param put data to put if check 
      succeeds
      -255 * @return {@code true} if the new 
      put was executed, {@code false} otherwise. The return value
      -256 * will be wrapped by a 
      {@link CompletableFuture}.
      -257 */
-258CompletableFuture<Boolean> thenPut(Put put);
      -259
      -260/**
      -261 * @param delete data to delete if 
      check succeeds
      -262 * @return {@code true} if the new 
      delete was executed, {@code false} otherwise. The return
      -263 * value will be wrapped by a 
      {@link CompletableFuture}.
      -264 */
-265CompletableFuture<Boolean> thenDelete(Delete delete);
      -266
      -267/**
      -268 * @param mutation mutations to 
      perform if check succeeds
      -269 * @return true if the new mutation 
      was executed, false otherwise. The return value will be
      -270 * wrapped by a {@link 
      CompletableFuture}.
      -271 */
-272CompletableFuture<Boolean> thenMutate(RowMutations mutation);
      -273  }
      -274
      -275  /**
      -276   * Performs multiple mutations 
      atomically on a single row. Currently {@link Put} and
      -277   * {@link Delete} are supported.
      -278   * @param mutation object that 
      specifies the set of mutations to perform atomically
      -279   * @return A {@link CompletableFuture} 
      that always returns null when complete normally.
      -280   */
-281  CompletableFuture<Void> mutateRow(RowMutations mutation);
      -282
      -283  /**
      -284   * The scan API uses the observer 
      pattern.
      -285   * @param scan A configured {@link 
      Scan} object.
      -286   * @param consumer the consumer used to 
      receive results.
      -287   * @see ScanResultConsumer
      -288   * @see AdvancedScanResultConsumer
      -289   */
      -290  void scan(Scan scan, C consumer);
      -291
      -292  /**
      -293   * Gets a scanner on the current table 
      for the given family.
      -294   * @param family The column family to 
      scan.
      -295   * @return A scanner.
      -296   */
      -297  default ResultScanner getScanner(byte[] 
      family) {
      -298return getScanner(new 
      Scan().addFamily(family));
      -299  }
      -300
      -301  /**
      -302   * Gets a scanner on the current table 
      for the given family and qualifier.
      -303   * @param family The column family to 
      scan.
      -304   * @param qualifier The column 
      qualifier to scan.
      -305   * @return A scanner.
      -306   */
      -307  default ResultScanner getScanner(byte[] 
      family, byte[] qualifier) {
      -308return getScanner(new 
      Scan().addColumn(family, qualifier));
      -309  }
      -310
      -311  /**
      -312   * Returns a scanner on the current 
      table as specified by the {@link Scan} object.
      -313   * @param scan A configured {@link 
      Scan} object.
      -314   * @return A scanner.
      -315   */
      -316  ResultScanner getScanner(Scan scan);
      -317
      -318  /**
      -319   * Return all the results that match 
      the given scan object.
-320   * <p>
-321   * Notice that usually you should use this method with a {@link Scan} object that has limit set.
-322   * For example, if you want to get the closest row after a given row, you could do this:
-323   * <p>
-324   *
-325   * <pre>
-326   * <code>
-327   * table.scanAll(new Scan().withStartRow(row, false).setLimit(1)).thenAccept(results -> {
-328   *   if (results.isEmpty()) {
-329   *      System.out.println("No row after " + Bytes.toStringBinary(row));
-330   *   } else {
-331   *     System.out.println("The closest row after " + Bytes.toStringBinary(row) + " is "
-332   *         + Bytes.toStringBinary(results.stream().findFirst().get().getRow()));
-333   *   }
-334   * });
-335   * </code>
-336   * </pre>
-337   * <p>
      -338   * If your result set is very large, 
      you should use other scan method to get a scanner or use
      -339   * callback to process the results. 
      They will do chunking to prevent OOM. The scanAll method will
      -340   * fetch all the results and store them 
      in a List and then return the list to you.
      +243/**
      +244 * Check for equality.
      +245 * @param value the expected value
      +246 */
      +247default CheckAndMutateBuilder 
      ifEquals(byte[] value) {
      +248  return 
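
The fragment truncates here; the replacement javadoc continues the same ifEquals -> ifMatches(CompareOperator.EQUAL, value) delegation visible in the removed lines. A hedged usage sketch of the builder documented above; the checkAndMutate(row, family) entry point and the qualifier(...) step are assumed from the surrounding AsyncTable API and are not part of this fragment:

    // Apply `put` only if the current value of the checked cell equals `expected`.
    CompletableFuture<Boolean> applied =
        table.checkAndMutate(row, family)
             .qualifier(qualifier)
             .ifEquals(expected)   // default method delegating to ifMatches(EQUAL, expected)
             .thenPut(put);
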
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
       
      b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
      index 8e60154..2b69471 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.html
      @@ -313,7 +313,7 @@ implements RegionObserver
      -postAppend,
       postBatchMutate,
       postBatchMutateIndispensably,
       postBulkLoadHFile,
       postCheckAndDelete,
       postCheckAndPut,
       postClose,
       postCloseRegionOperation, postCommitStoreFile,
       postCompact,
       postCompactSelection,
       postDelete,
       postExists,
       postFlush,
       postFlush,
       postGetOp,
       postIncrement,
       postInstantiateDeleteTracker,
       postMemStoreCompaction, postMutationBeforeWAL,
       postOpen,
       postPut,
       postReplayWALs,
       postScannerClose,
       postScannerFilterRow,
       postScannerNext,
       postScannerOpen,
       postStartRegionOperation,
       postStoreFileReaderOpen,
       postWALRestore,
       preAppend,
       preAppendAfterRowLock,
       preBatchMutate,
       preBulkLoadHFile,
       preCheckAndDelete,
       preCheckAndDeleteAfterRowLock,
       preCheckAndPut,
       preCheckAndPutAfterRowLock,
       preClose,
       preCommitStoreFile,
       preCompact,
       preCompactSelection,
       preDelete,
       preExists,
       preFlush,
       preFlush,
       preGetOp,
       preIncrement,
       preIncrementAfterRowLock,
       preMemStoreCompaction,
       preMemStoreCompactionCompact,
       preMemStoreCompactionCompactScannerOpen,
       preOpen,
       prePrepareTimeStampForDeleteVersion,
       prePut,
       preReplayWALs,
       preScannerClose,
       preScannerNext,
       preScannerOpen,
       preStoreFileReaderOpen,
       preStoreScannerOpen,
       preWALRestore
      +postAppend,
       postBatchMutate,
       postBatchMutateIndispensably,
       postBulkLoadHFile,
       postCheckAndDelete,
       postCheckAndPut,
       postClose,
       postCloseRegionOperation, postCommitStoreFile,
       postCompact,
       postCompactSelection,
       postDelete,
       postExists,
       postFlush,
       postFlush,
       postGetOp,
       postIncrement,
       postInstantiateDeleteTracker,
       postMemStoreCompaction, postMutationBeforeWAL,
       postOpen,
       postPut,
       postReplayWALs,
       postScannerClose,
       postScannerFilterRow,
       postScannerNext,
       postScannerOpen,
       postStartRegionOperation,
       postStoreFileReaderOpen,
       postWALRestore,
       preAppend,
       preAppendAfterRowLock,
       preBatchMutate,
       preBulkLoadHFile,
       preCheckAndDelete,
       preCheckAndDeleteAfterRowLock,
       preCheckAndPut,
       preCheckAndPutAfterRowLock,
       preClose,
       preCommitStoreFile,
       preCompact,
       preCompactSelection,
       preDelete,
       preExists,
       preFlush,
       preFlush,
       preGetOp,
       preIncrement,
       preIncrementAfterRowLock,
       preMemStoreCompaction,
       preMemStoreCompactionCompact,
       preMemStoreCompactionCompactScannerOpen,
       preOpen,
       prePrepareTimeStampForDeleteVersion,
       prePut,
       preReplayWALs,
       preScannerClose,
       preScannerNext,
       preScannerOpen,
       preStoreFileReaderOpen,
       preStoreScannerOpen,
       preWALRestore
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/package-frame.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/package-frame.html 
      b/devapidocs/org/apache/hadoop/hbase/coprocessor/package-frame.html
      index 4033378..5b3a1b5 100644
      --- a/devapidocs/org/apache/hadoop/hbase/coprocessor/package-frame.html
      +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/package-frame.html
      @@ -55,6 +55,7 @@
       MetricsCoprocessor
       MultiRowMutationEndpoint
       ObserverContextImpl
      +ReadOnlyConfiguration
       
       Enums
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html 
      b/devapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
      index d585d17..a48ca99 100644
      --- a/devapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
      +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
      @@ -312,6 +312,12 @@
        third-party Coprocessor developers.
       
       
      +
      +ReadOnlyConfiguration
      +
      +Wraps a Configuration to make it read-only.
      +
      +
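
A hedged sketch of the wrap-to-read-only idea named by that summary line; this is not the actual ReadOnlyConfiguration source, just the pattern of copying for reads while rejecting writes:

    // Hypothetical wrapper: copy-construct for reads, refuse mutation.
    class ReadOnlyConf extends org.apache.hadoop.conf.Configuration {
      ReadOnlyConf(org.apache.hadoop.conf.Configuration conf) {
        super(conf); // Configuration(Configuration) copy constructor
      }
      @Override
      public void set(String name, String value) {
        throw new UnsupportedOperationException("configuration is read-only");
      }
    }
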
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html 
      b/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
      index f607cda..695ea65 100644
      --- a/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
      +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
      @@ -88,6 +88,11 @@
       
       org.apache.hadoop.hbase.coprocessor.BaseEnvironmentC (implements 
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
      --
      diff --git 
      a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
       
      b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
      index e1d6267..62a21f8 100644
      --- 
      a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
      +++ 
      b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase - Exemplar for hbase-shaded-client archetype  
      Dependency Information
       
      @@ -147,7 +147,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2017-12-05
      +  Last Published: 
      2017-12-06
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
      --
      diff --git 
      a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
       
      b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
      index 3b44899..5b24842 100644
      --- 
      a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
      +++ 
      b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase - Exemplar for hbase-shaded-client archetype  
      Project Dependency Management
       
      @@ -775,18 +775,24 @@
       test-jar
       https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
      2.0
       
      +org.apache.hbase
      +http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper;>hbase-zookeeper
      +3.0.0-SNAPSHOT
      +test-jar
      +https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
      2.0
      +
       org.bouncycastle
       http://www.bouncycastle.org/java.html;>bcprov-jdk16
       1.46
       jar
       http://www.bouncycastle.org/licence.html;>Bouncy Castle 
      Licence
      -
      +
       org.hamcrest
       https://github.com/hamcrest/JavaHamcrest/hamcrest-core;>hamcrest-core
       1.3
       jar
       http://www.opensource.org/licenses/bsd-license.php;>New BSD 
      License
      -
      +
       org.mockito
       http://mockito.org;>mockito-core
       2.1.0
      @@ -804,7 +810,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2017-12-05
      +  Last Published: 
      2017-12-06
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
      --
      diff --git 
      a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
       
      b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
      index ba33fd7..c49993d 100644
      --- 
      a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
      +++ 
      b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase - Exemplar for hbase-shaded-client archetype  
      About
       
      @@ -119,7 +119,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2017-12-05
      +  Last Published: 
      2017-12-06
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html
      --
      diff --git 
      a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html
       
      b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html
      index f79eb3f..fe8398e 100644
      --- 
      a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html
      +++ 
      b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/integration.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase - Exemplar for hbase-shaded-client archetype  
      CI Management
       
      @@ -126,7 +126,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2017-12-05
      +  Last Published: 
      2017-12-06
       
        

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-shaded-check-invariants/project-reports.html
      --
      diff --git a/hbase-shaded-check-invariants/project-reports.html 
      b/hbase-shaded-check-invariants/project-reports.html
      index 9a20279..d577599 100644
      --- a/hbase-shaded-check-invariants/project-reports.html
      +++ b/hbase-shaded-check-invariants/project-reports.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase Shaded Packaging Invariants  Generated 
      Reports
       
      @@ -128,7 +128,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2017-12-02
      +  Last Published: 
      2017-12-03
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-shaded-check-invariants/project-summary.html
      --
      diff --git a/hbase-shaded-check-invariants/project-summary.html 
      b/hbase-shaded-check-invariants/project-summary.html
      index 8b2ab4c..33d0ebe 100644
      --- a/hbase-shaded-check-invariants/project-summary.html
      +++ b/hbase-shaded-check-invariants/project-summary.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase Shaded Packaging Invariants  Project 
      Summary
       
      @@ -166,7 +166,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2017-12-02
      +  Last Published: 
      2017-12-03
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-shaded-check-invariants/source-repository.html
      --
      diff --git a/hbase-shaded-check-invariants/source-repository.html 
      b/hbase-shaded-check-invariants/source-repository.html
      index 1c61673..0684443 100644
      --- a/hbase-shaded-check-invariants/source-repository.html
      +++ b/hbase-shaded-check-invariants/source-repository.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase Shaded Packaging Invariants  Source Code 
      Management
       
      @@ -134,7 +134,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2017-12-02
      +  Last Published: 
      2017-12-03
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-shaded-check-invariants/team-list.html
      --
      diff --git a/hbase-shaded-check-invariants/team-list.html 
      b/hbase-shaded-check-invariants/team-list.html
      index 0d6262f..89c71db 100644
      --- a/hbase-shaded-check-invariants/team-list.html
      +++ b/hbase-shaded-check-invariants/team-list.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase Shaded Packaging Invariants  Project 
      Team
       
      @@ -547,7 +547,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2017-12-02
      +  Last Published: 
      2017-12-03
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/index.html
      --
      diff --git a/index.html b/index.html
      index d662d56..a173730 100644
      --- a/index.html
      +++ b/index.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase  Apache HBase™ Home
       
      @@ -438,7 +438,7 @@ Apache HBase is an open-source, distributed, versioned, 
      non-relational database
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2017-12-02
      +  Last Published: 
      2017-12-03
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/integration.html
      --
      diff --git a/integration.html b/integration.html
      index 30400c2..b51bfbb 100644
      --- a/integration.html
      +++ b/integration.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
       Apache HBase  CI Management
       
      @@ -296,7 +296,7 @@
       https://www.apache.org/;>The Apache Software 
      Foundation.
       All rights reserved.  
       
      -  Last Published: 
      2017-12-02
      +  Last Published: 
      2017-12-03
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/issue-tracking.html
      --
      diff --git 

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
      index 25e368d..d0f781f 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
      @@ -25,798 +25,798 @@
       017 */
       018package 
      org.apache.hadoop.hbase.io.asyncfs;
       019
      -020import static 
      org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
      -021import static 
      org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
      -022import static 
      org.apache.hadoop.fs.CreateFlag.CREATE;
      -023import static 
      org.apache.hadoop.fs.CreateFlag.OVERWRITE;
      -024import static 
      org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
      -025import static 
      org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
      +020import static 
      org.apache.hadoop.fs.CreateFlag.CREATE;
      +021import static 
      org.apache.hadoop.fs.CreateFlag.OVERWRITE;
      +022import static 
      org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
      +023import static 
      org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
      +024import static 
      org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
      +025import static 
      org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
       026import static 
      org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
       027import static 
      org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
       028import static 
      org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
       029import static 
      org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
       030
      -031import 
      org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
      -032import 
      org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
      -033import 
      com.google.protobuf.CodedOutputStream;
      -034
      -035import 
      org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
      -036import 
      org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
      -037import 
      org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
      -038import 
      org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
      -039import 
      org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
      -040import 
      org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
      -041import 
      org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
      -042import 
      org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
      -043import 
      org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
      -044import 
      org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
      -045import 
      org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
      -046import 
      org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
      -047import 
      org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
      -048import 
      org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
      -049import 
      org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
      -050import 
      org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
      -051import 
      org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
      -052import 
      org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
      -053import 
      org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
      -054import 
      org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
      -055import 
      org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
      -056
      -057import java.io.IOException;
      -058import 
      java.lang.reflect.InvocationTargetException;
      -059import java.lang.reflect.Method;
      -060import java.util.ArrayList;
      -061import java.util.EnumSet;
      -062import java.util.List;
      -063import java.util.concurrent.TimeUnit;
      -064
      -065import org.apache.commons.logging.Log;
      -066import 
      org.apache.commons.logging.LogFactory;
      -067import 
      org.apache.hadoop.conf.Configuration;
      -068import 
      org.apache.hadoop.crypto.CryptoProtocolVersion;
      -069import 
      org.apache.hadoop.crypto.Encryptor;
      -070import org.apache.hadoop.fs.CreateFlag;
      -071import org.apache.hadoop.fs.FileSystem;
      -072import 
      org.apache.hadoop.fs.FileSystemLinkResolver;
      -073import org.apache.hadoop.fs.Path;
      -074import 
      org.apache.hadoop.fs.UnresolvedLinkException;
      -075import 
      org.apache.hadoop.fs.permission.FsPermission;
      -076import 
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
      index d438f22..7c59e27 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
      @@ -1290,8 +1290,8 @@
       1282   CompactType 
      compactType) throws IOException {
       1283switch (compactType) {
       1284  case MOB:
      -1285
      compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
      major,
      -1286  columnFamily);
      +1285
      compact(this.connection.getAdminForMaster(), 
      RegionInfo.createMobRegionInfo(tableName),
      +1286major, columnFamily);
       1287break;
       1288  case NORMAL:
       1289checkTableExists(tableName);
      @@ -3248,7 +3248,7 @@
 3240  new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
       3241@Override
       3242public 
      AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
      -3243  RegionInfo info = 
      getMobRegionInfo(tableName);
      +3243  RegionInfo info = 
      RegionInfo.createMobRegionInfo(tableName);
       3244  GetRegionInfoRequest 
      request =
       3245
      RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
       3246  GetRegionInfoResponse 
      response = masterAdmin.getRegionInfo(rpcController, request);
      @@ -3312,7 +3312,7 @@
       3304}
       3305break;
       3306  default:
      -3307throw new 
      IllegalArgumentException("Unknowne compactType: " + compactType);
      +3307throw new 
      IllegalArgumentException("Unknown compactType: " + compactType);
       3308}
       3309if (state != null) {
       3310  return 
      ProtobufUtil.createCompactionState(state);
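The same substitution appears where getCompactionState resolves the MOB region, next to the "Unknowne" → "Unknown" message fix. A hedged sketch of polling that state from a client; the poll interval is arbitrary:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactType;
    import org.apache.hadoop.hbase.client.CompactionState;

    final class MobCompactionWait {
      // Blocks until no MOB compaction is running for the table.
      static void await(Admin admin, TableName table) throws Exception {
        while (admin.getCompactionState(table, CompactType.MOB)
            != CompactionState.NONE) {
          Thread.sleep(1000L);  // simple poll; a real caller would bound this
        }
      }
    }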
      @@ -3847,325 +3847,320 @@
       3839});
       3840  }
       3841
      -3842  private RegionInfo 
      getMobRegionInfo(TableName tableName) {
      -3843return 
      RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
      -3844.build();
      -3845  }
      -3846
      -3847  private RpcControllerFactory 
      getRpcControllerFactory() {
      -3848return this.rpcControllerFactory;
      -3849  }
      -3850
      -3851  @Override
      -3852  public void addReplicationPeer(String 
      peerId, ReplicationPeerConfig peerConfig, boolean enabled)
      -3853  throws IOException {
      -3854executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
      -3855  @Override
      -3856  protected Void rpcCall() throws 
      Exception {
      -3857
      master.addReplicationPeer(getRpcController(),
      -3858  
      RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
      enabled));
      -3859return null;
      -3860  }
      -3861});
      -3862  }
      -3863
      -3864  @Override
      -3865  public void 
      removeReplicationPeer(String peerId) throws IOException {
      -3866executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
      -3867  @Override
      -3868  protected Void rpcCall() throws 
      Exception {
      -3869
      master.removeReplicationPeer(getRpcController(),
      -3870  
      RequestConverter.buildRemoveReplicationPeerRequest(peerId));
      -3871return null;
      -3872  }
      -3873});
      -3874  }
      -3875
      -3876  @Override
      -3877  public void 
      enableReplicationPeer(final String peerId) throws IOException {
      -3878executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
      -3879  @Override
      -3880  protected Void rpcCall() throws 
      Exception {
      -3881
      master.enableReplicationPeer(getRpcController(),
      -3882  
      RequestConverter.buildEnableReplicationPeerRequest(peerId));
      -3883return null;
      -3884  }
      -3885});
      -3886  }
      -3887
      -3888  @Override
      -3889  public void 
      disableReplicationPeer(final String peerId) throws IOException {
      -3890executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
      -3891  @Override
      -3892  protected Void rpcCall() throws 
      Exception {
      -3893
      master.disableReplicationPeer(getRpcController(),
      -3894  
      RequestConverter.buildDisableReplicationPeerRequest(peerId));
      -3895return null;
      -3896  }
      -3897});
      -3898  }
      -3899
      -3900  @Override
      -3901  public ReplicationPeerConfig 
      getReplicationPeerConfig(final String peerId) throws IOException {
      -3902return executeCallable(new 
MasterCallable<ReplicationPeerConfig>(getConnection(),
      -3903getRpcControllerFactory()) {
      -3904  @Override
      -3905  protected ReplicationPeerConfig 
      rpcCall() throws Exception {
      -3906GetReplicationPeerConfigResponse 
      response = master.getReplicationPeerConfig(
      -3907  getRpcController(), 
      
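(The getReplicationPeerConfig hunk is truncated in the archive.) Every replication-peer method above follows the same shape: wrap the master RPC in a MasterCallable<...>, implement rpcCall(), and let executeCallable supply the controller and retry handling. A minimal client-side sketch of the corresponding public Admin API — the peer id and cluster key are placeholders:

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    final class PeerAdminSketch {
      static void managePeer(Admin admin) throws Exception {
        ReplicationPeerConfig peerConfig = new ReplicationPeerConfig()
            .setClusterKey("zk1:2181:/hbase");    // hypothetical peer cluster
        admin.addReplicationPeer("peer1", peerConfig, true); // add, enabled
        admin.disableReplicationPeer("peer1");    // pause shipping edits
        admin.enableReplicationPeer("peer1");     // resume
        admin.removeReplicationPeer("peer1");     // tear down
      }
    }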

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
      index 29ea7b3..6ed75c9 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
      @@ -1313,7093 +1313,7082 @@
       1305
       1306  @Override
       1307  public boolean isSplittable() {
-1308boolean result = isAvailable() && !hasReferences();
      -1309LOG.info("ASKED IF SPLITTABLE " + 
      result + " " + getRegionInfo().getShortNameToLog(),
      -1310  new Throwable("LOGGING: 
      REMOVE"));
      -1311// REMOVE BELOW
      -1312LOG.info("DEBUG LIST ALL FILES");
      -1313for (HStore store : 
      this.stores.values()) {
      -1314  LOG.info("store " + 
      store.getColumnFamilyName());
      -1315  for (HStoreFile sf : 
      store.getStorefiles()) {
      -1316
      LOG.info(sf.toStringDetailed());
      -1317  }
      -1318}
      -1319return result;
      -1320  }
      -1321
      -1322  @Override
      -1323  public boolean isMergeable() {
      -1324if (!isAvailable()) {
      -1325  LOG.debug("Region " + this
      -1326  + " is not mergeable because 
      it is closing or closed");
      -1327  return false;
      -1328}
      -1329if (hasReferences()) {
      -1330  LOG.debug("Region " + this
      -1331  + " is not mergeable because 
      it has references");
      -1332  return false;
      -1333}
      -1334
      -1335return true;
+1308return isAvailable() && !hasReferences();
      +1309  }
      +1310
      +1311  @Override
      +1312  public boolean isMergeable() {
      +1313if (!isAvailable()) {
      +1314  LOG.debug("Region " + this
      +1315  + " is not mergeable because 
      it is closing or closed");
      +1316  return false;
      +1317}
      +1318if (hasReferences()) {
      +1319  LOG.debug("Region " + this
      +1320  + " is not mergeable because 
      it has references");
      +1321  return false;
      +1322}
      +1323
      +1324return true;
      +1325  }
      +1326
      +1327  public boolean areWritesEnabled() {
      +1328synchronized(this.writestate) {
      +1329  return 
      this.writestate.writesEnabled;
      +1330}
      +1331  }
      +1332
      +1333  @VisibleForTesting
      +1334  public MultiVersionConcurrencyControl 
      getMVCC() {
      +1335return mvcc;
       1336  }
       1337
      -1338  public boolean areWritesEnabled() {
      -1339synchronized(this.writestate) {
      -1340  return 
      this.writestate.writesEnabled;
      -1341}
      -1342  }
      -1343
      -1344  @VisibleForTesting
      -1345  public MultiVersionConcurrencyControl 
      getMVCC() {
      -1346return mvcc;
      -1347  }
      -1348
      -1349  @Override
      -1350  public long getMaxFlushedSeqId() {
      -1351return maxFlushedSeqId;
      +1338  @Override
      +1339  public long getMaxFlushedSeqId() {
      +1340return maxFlushedSeqId;
      +1341  }
      +1342
      +1343  /**
      +1344   * @return readpoint considering given 
      IsolationLevel. Pass {@code null} for default
      +1345   */
      +1346  public long 
      getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
      +1348  // This scan can read even 
      uncommitted transactions
      +1349  return Long.MAX_VALUE;
      +1350}
      +1351return mvcc.getReadPoint();
       1352  }
       1353
      -1354  /**
      -1355   * @return readpoint considering given 
      IsolationLevel. Pass {@code null} for default
      -1356   */
      -1357  public long 
      getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
      -1359  // This scan can read even 
      uncommitted transactions
      -1360  return Long.MAX_VALUE;
      -1361}
      -1362return mvcc.getReadPoint();
      -1363  }
      -1364
      -1365  public boolean 
      isLoadingCfsOnDemandDefault() {
      -1366return 
      this.isLoadingCfsOnDemandDefault;
      -1367  }
      -1368
      -1369  /**
      -1370   * Close down this HRegion.  Flush the 
      cache, shut down each HStore, don't
      -1371   * service any more calls.
      -1372   *
-1373   * <p>This method could take 
      some time to execute, so don't call it from a
      -1374   * time-sensitive thread.
      -1375   *
      -1376   * @return Vector of all the storage 
      files that the HRegion's component
      -1377   * HStores make use of.  It's a list 
      of all StoreFile objects. Returns empty
      -1378   * vector if already closed and null 
      if judged that it should not close.
      -1379   *
      -1380   * @throws IOException e
      -1381   * @throws DroppedSnapshotException 
      Thrown when replay of wal is required
      -1382   * because a Snapshot was not properly 
      persisted. The region is put in closing mode, and the
      -1383   * caller MUST abort after this.
      -1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
      -1386return close(false);
      -1387  }
      -1388
      -1389  private final Object closeLock = new 
      Object();
      -1390
      -1391  /** Conf key for the periodic flush 
      interval */
      -1392  public static final String 
      

      [04/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html
      index 9098105..b05691f 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowByteBufferCell.html
      @@ -37,1514 +37,1514 @@
       029import java.util.ArrayList;
       030import java.util.Iterator;
       031import java.util.List;
      -032
      -033import 
      org.apache.hadoop.hbase.KeyValue.Type;
      -034import 
      org.apache.hadoop.hbase.filter.ByteArrayComparable;
      -035import 
      org.apache.hadoop.hbase.io.HeapSize;
      -036import 
      org.apache.hadoop.hbase.io.TagCompressionContext;
      -037import 
      org.apache.hadoop.hbase.io.util.Dictionary;
      -038import 
      org.apache.hadoop.hbase.io.util.StreamUtils;
      -039import 
      org.apache.hadoop.hbase.util.ByteBufferUtils;
      -040import 
      org.apache.hadoop.hbase.util.ByteRange;
      -041import 
      org.apache.hadoop.hbase.util.Bytes;
      -042import 
      org.apache.hadoop.hbase.util.ClassSize;
      -043import 
      org.apache.yetus.audience.InterfaceAudience;
      -044
      -045import 
      com.google.common.annotations.VisibleForTesting;
      -046
      -047/**
      -048 * Utility methods helpful slinging 
      {@link Cell} instances. It has more powerful and
      -049 * rich set of APIs than those in {@link 
      CellUtil} for internal usage.
      -050 */
      -051@InterfaceAudience.Private
      -052// TODO : Make Tag IA.LimitedPrivate and 
      move some of the Util methods to CP exposed Util class
      -053public class PrivateCellUtil {
      +032import java.util.Optional;
      +033
      +034import 
      org.apache.hadoop.hbase.KeyValue.Type;
      +035import 
      org.apache.hadoop.hbase.filter.ByteArrayComparable;
      +036import 
      org.apache.hadoop.hbase.io.HeapSize;
      +037import 
      org.apache.hadoop.hbase.io.TagCompressionContext;
      +038import 
      org.apache.hadoop.hbase.io.util.Dictionary;
      +039import 
      org.apache.hadoop.hbase.io.util.StreamUtils;
      +040import 
      org.apache.hadoop.hbase.util.ByteBufferUtils;
      +041import 
      org.apache.hadoop.hbase.util.ByteRange;
      +042import 
      org.apache.hadoop.hbase.util.Bytes;
      +043import 
      org.apache.hadoop.hbase.util.ClassSize;
      +044import 
      org.apache.yetus.audience.InterfaceAudience;
      +045
      +046import 
      com.google.common.annotations.VisibleForTesting;
      +047
      +048/**
      +049 * Utility methods helpful slinging 
      {@link Cell} instances. It has more powerful and
      +050 * rich set of APIs than those in {@link 
      CellUtil} for internal usage.
      +051 */
      +052@InterfaceAudience.Private
      +053public final class PrivateCellUtil {
       054
       055  /**
       056   * Private constructor to keep this 
      class from being instantiated.
       057   */
       058  private PrivateCellUtil() {
      -059
      -060  }
      -061
      -062  /*** ByteRange 
      ***/
      -063
      -064  public static ByteRange 
      fillRowRange(Cell cell, ByteRange range) {
      -065return range.set(cell.getRowArray(), 
      cell.getRowOffset(), cell.getRowLength());
      -066  }
      -067
      -068  public static ByteRange 
      fillFamilyRange(Cell cell, ByteRange range) {
      -069return 
      range.set(cell.getFamilyArray(), cell.getFamilyOffset(), 
      cell.getFamilyLength());
      -070  }
      -071
      -072  public static ByteRange 
      fillQualifierRange(Cell cell, ByteRange range) {
      -073return 
      range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
      -074  cell.getQualifierLength());
      -075  }
      -076
      -077  public static ByteRange 
      fillValueRange(Cell cell, ByteRange range) {
      -078return 
      range.set(cell.getValueArray(), cell.getValueOffset(), 
      cell.getValueLength());
      -079  }
      -080
      -081  public static ByteRange 
      fillTagRange(Cell cell, ByteRange range) {
      -082return range.set(cell.getTagsArray(), 
      cell.getTagsOffset(), cell.getTagsLength());
      -083  }
      -084
      -085  /**
      -086   * Returns tag value in a new byte 
      array. If server-side, use {@link Tag#getValueArray()} with
      -087   * appropriate {@link 
      Tag#getValueOffset()} and {@link Tag#getValueLength()} instead to save on
      -088   * allocations.
      -089   * @param cell
      -090   * @return tag value in a new byte 
      array.
      -091   */
      -092  public static byte[] getTagsArray(Cell 
      cell) {
      -093byte[] output = new 
      byte[cell.getTagsLength()];
      -094copyTagsTo(cell, output, 0);
      -095return output;
      -096  }
      -097
      -098  public static byte[] cloneTags(Cell 
      cell) {
      -099byte[] output = new 
      byte[cell.getTagsLength()];
      -100copyTagsTo(cell, output, 0);
      -101return output;
      -102  }
      -103
      -104  /**
      -105   * Copies the tags info into the tag 
      portion of the cell
      -106   * @param cell
      -107   * @param destination
      -108   * @param destinationOffset
      -109   * @return position after tags
      +059  }
      +060
      +061  /*** ByteRange 
      ***/
      +062
      +063  public static ByteRange 
      fillRowRange(Cell cell, ByteRange range) {
      +064return 
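(Truncated at the fillRowRange body.) The fill*Range helpers above re-point a caller-supplied ByteRange at the cell's backing arrays instead of copying bytes. A minimal sketch, assuming the SimpleMutableByteRange implementation from org.apache.hadoop.hbase.util; PrivateCellUtil is @InterfaceAudience.Private, so this pattern is for internal code only:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.PrivateCellUtil;
    import org.apache.hadoop.hbase.util.ByteRange;
    import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

    final class RowRangeSketch {
      // Points the range at cell.getRowArray() over
      // [getRowOffset(), getRowOffset() + getRowLength()); no copy is made.
      static ByteRange rowOf(Cell cell) {
        return PrivateCellUtil.fillRowRange(cell, new SimpleMutableByteRange());
      }
    }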


• Packages that use HBaseIOException
  Package | Description
  org.apache.hadoop.hbase.client
- Provides HBase Client
+ Provides HBase Client
+ Table of Contents: Overview; Example API Usage
+ Overview: To administer HBase, create and drop tables, list and alter tables, use Admin.
  org.apache.hadoop.hbase.coprocessor
- Table of Contents
+ Table of Contents: Overview; Coprocessor; RegionObserver; Endpoint; Coprocessor loading
+ Overview: Coprocessors are code that runs in-process on each region server.
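Since the coprocessor overview names RegionObserver, here is a skeleton under the HBase 2.x coprocessor API (an assumption: in that API the class implements RegionCoprocessor and exposes its observer via getRegionObserver(); the hook body is illustrative only):

    import java.io.IOException;
    import java.util.List;
    import java.util.Optional;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;

    public class NoopObserver implements RegionCoprocessor, RegionObserver {
      @Override
      public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);   // hand the framework this observer
      }

      @Override
      public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx,
          Get get, List<Cell> results) throws IOException {
        // Runs in-process on the region server before every Get.
      }
    }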