[02/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 98e959f..505c4a5 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -6675,6 +6675,8 @@
 
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.thrift2.TestThrift2ServerCmdLine
 
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftHBaseServiceHandler
 
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftHBaseServiceHandlerWithLabels
@@ -9900,6 +9902,8 @@
 
 createConnection(Configuration) - Method in class org.apache.hadoop.hbase.security.token.TestTokenAuthentication.TokenServer
 
+createConnection(int, boolean) - Static method in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
 createConnection(Configuration) - Method in class org.apache.hadoop.hbase.util.MockServer
 
 createCredentialEntryMethod - Static variable in class org.apache.hadoop.hbase.TestHBaseConfiguration.ReflectiveCredentialProviderClient
@@ -10962,6 +10966,8 @@
 
 createTable(boolean) - Method in class org.apache.hadoop.hbase.TestSequenceIdMonotonicallyIncreasing
 
+createTable(Admin, String) - Method in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
 createTable() - Method in class org.apache.hadoop.hbase.trace.IntegrationTestSendTraceRequests
 
 createTable(HBaseTestingUtility, HTableDescriptor, byte[][]) - Static method in class org.apache.hadoop.hbase.util.BaseTestHBaseFsck
@@ -14957,6 +14963,8 @@
 
 FAMILYA - Static variable in class org.apache.hadoop.hbase.mapreduce.TestImportExport
 
+FAMILYA - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
 FAMILYA_STRING - Static variable in class org.apache.hadoop.hbase.coprocessor.TestSecureExport
 
 FAMILYA_STRING - Static variable in class org.apache.hadoop.hbase.mapreduce.TestImportExport
@@ -14971,6 +14979,8 @@
 
 FAMILYB - Static variable in class org.apache.hadoop.hbase.mapreduce.TestImportExport
 
+FAMILYB - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
 FAMILYB_STRING - Static variable in class org.apache.hadoop.hbase.coprocessor.TestSecureExport
 
 FAMILYB_STRING - Static variable in class org.apache.hadoop.hbase.mapreduce.TestImportExport
@@ -14981,6 +14991,10 @@
 
 familyBname - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftHBaseServiceHandlerWithReadOnly
 
+FAMILYC - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
+FAMILYD - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
 familyName - Variable in class org.apache.hadoop.hbase.chaos.util.ChaosMonkeyRunner
 
 FAMILYNAME - Static variable in class org.apache.hadoop.hbase.client.TestEnableTable
@@ -21007,6 +21021,8 @@
 
 HTTP_PRINCIPAL - Static variable in class org.apache.hadoop.hbase.security.token.SecureTestCluster
 
+httpPort - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
 httpServerException - Variable in class org.apache.hadoop.hbase.thrift.TestThriftHttpServer
 
 HttpServerFunctionalTest - Class in org.apache.hadoop.hbase.http
@@ -25226,6 +25242,8 @@
 
 LOG - Static variable in class org.apache.hadoop.hbase.thrift.TestThriftSpnegoHttpServer
 
+LOG - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
 LOG - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftHBaseServiceHandler.DelayingRegionObserver
 
 LOG - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftHBaseServiceHandler
@@ -29382,6 +29400,8 @@
 
 ONE_HOUR - Static variable in class org.apache.hadoop.hbase.rest.client.TestRemoteTable
 
+ONE_HOUR - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
 ONE_KILOBYTE - Static variable in class org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests
 
 ONE_MEGABYTE - Static variable in class org.apache.hadoop.hbase.quotas.SpaceQuotaHelperForTests
@@ -33011,8 +33031,12 @@
 
 QUALIFIER_1 - Static variable in class org.apache.hadoop.hbase.rest.client.TestRemoteTable
 
+QUALIFIER_1 - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
 QUALIFIER_2 - Static variable in class org.apache.hadoop.hbase.rest.client.TestRemoteTable
 
+QUALIFIER_2 - Static variable in class org.apache.hadoop.hbase.thrift2.TestThriftConnection
+
 QUALIFIER_BAR - Static variable in class org.apache.hadoop.hbase.regionserver.TestSCVFWithMiniCluster
 
 QUALIFIER_COUNT - Static variable in class org.apache.hadoop.hbase.regionserver.TestDefaultMemStore
@@ -35531,6 +3,8 @@
 
 ROW_1 - Static variable in class 
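
All of the new index entries above belong to a single new test class. For orientation, a hedged sketch of the class shape they imply -- the member names come from the index, while the types, values, and bodies are illustrative assumptions (the CLASS_RULE pattern follows the HBaseClassTestRule usage visible in other test sources later in this thread):

// Minimal sketch of the test-class shape implied by the new index entries.
// Member names are taken from the index; everything else is an assumption.
package org.apache.hadoop.hbase.thrift2;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.ClassRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TestThriftConnection {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestThriftConnection.class);

  private static final Logger LOG = LoggerFactory.getLogger(TestThriftConnection.class);

  // Column families and qualifiers used by the tests (values are illustrative).
  private static final byte[] FAMILYA = Bytes.toBytes("fa");
  private static final byte[] FAMILYB = Bytes.toBytes("fb");
  private static final byte[] FAMILYC = Bytes.toBytes("fc");
  private static final byte[] FAMILYD = Bytes.toBytes("fd");
  private static final byte[] QUALIFIER_1 = Bytes.toBytes("1");
  private static final byte[] QUALIFIER_2 = Bytes.toBytes("2");
  private static final long ONE_HOUR = 60 * 60 * 1000L;

  // Port of the embedded Thrift HTTP server the tests talk to.
  private static int httpPort;
}
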
[02/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
index 5e990f8..5c5b981 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
@@ -255,7 +255,7 @@ implements MasterObserver
-postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer, postGetRSGroupInfoOfTable, postGetTableDescriptors, postGetTableNames, postListDecommissionedRegionServers, postListNamespaceDescriptors, postListReplicationPeers, postListRSGroups, postListSnapshot, postLockHeartbeat, postMergeRegions, postMergeRegionsCommitAction, postModifyNamespace, postModifyNamespace, postModifyTable, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, postMoveTables, postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, postRollBackSplitRegionAction, postSetNamespaceQuota, postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, postSetUserQuota, postSetUserQuota, postSnapshot, postStartMaster, postTableFlush, postTransitReplicationPeerSyncReplicationState, postTruncateTable, postUnassign, postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer, preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, preClearDeadServers, preCloneSnapshot, preCreateNamespace, preCreateTableAction, preCreateTableRegionsInfos, preDecommissionRegionServers, preDeleteNamespace, preDeleteSnapshot, preDeleteTable, preDeleteTableAction, preDisableReplicationPeer, preDisableTable, preDisableTableAction, preEnableReplicationPeer, preEnableTable, preEnableTableAction, preGetClusterMetrics, preGetLocks, preGetNamespaceDescriptor, preGetProcedures, preGetReplicationPeerConfig, preGetRSGroupInfo, preGetRSGroupInfoOfServer, preGetRSGroupInfoOfTable, preGetTableDescriptors, preGetTableNames, preListDecommissionedRegionServers, preListNamespaceDescriptors, preListReplicationPeers, preListRSGroups, preListSnapshot, preLockHeartbeat, preMasterInitialization, preMergeRegions, preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, preModifyNamespace, preModifyTable, preModifyTableAction, preModifyTableAction, preMove, preMoveServers, preMoveServersAndTables, preMoveTables, preRecommissionRegionServer, preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, preRequestLock, preRestoreSnapshot, preSetNamespaceQuota, preSetSplitOrMergeEnabled, preSetTableQuota, preSetUserQuota, preSetUserQuota, preSetUserQuota, preShutdown, preSnapshot, preSplitRegion, preSplitRegionAction, preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction, preStopMaster, preTableFlush, preTransitReplicationPeerSyncReplicationState, preTruncateTable, preTruncateTableAction, preUnassign, preUpdateReplicationPeerConfig
+postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot, postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction, postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction, postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, postDeleteTable, postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, postGetLocks, 

[02/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
index 736388b..197b99d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
@@ -26,3624 +26,3599 @@
 018package org.apache.hadoop.hbase.client;
 019
 020import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-021
-022import com.google.protobuf.Message;
-023import com.google.protobuf.RpcChannel;
-024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.Arrays;
-027import java.util.Collections;
-028import java.util.EnumSet;
-029import java.util.HashMap;
-030import java.util.List;
-031import java.util.Map;
-032import java.util.Optional;
-033import java.util.Set;
-034import java.util.concurrent.CompletableFuture;
-035import java.util.concurrent.ConcurrentHashMap;
-036import java.util.concurrent.TimeUnit;
-037import java.util.concurrent.atomic.AtomicReference;
-038import java.util.function.BiConsumer;
-039import java.util.function.Function;
-040import java.util.function.Supplier;
-041import java.util.regex.Pattern;
-042import java.util.stream.Collectors;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import org.apache.hadoop.conf.Configuration;
-046import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-047import org.apache.hadoop.hbase.CacheEvictionStats;
-048import org.apache.hadoop.hbase.CacheEvictionStatsAggregator;
-049import org.apache.hadoop.hbase.ClusterMetrics;
-050import org.apache.hadoop.hbase.ClusterMetrics.Option;
-051import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-052import org.apache.hadoop.hbase.HConstants;
-053import org.apache.hadoop.hbase.HRegionLocation;
-054import org.apache.hadoop.hbase.MetaTableAccessor;
-055import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-056import org.apache.hadoop.hbase.NamespaceDescriptor;
-057import org.apache.hadoop.hbase.RegionLocations;
-058import org.apache.hadoop.hbase.RegionMetrics;
-059import org.apache.hadoop.hbase.RegionMetricsBuilder;
-060import org.apache.hadoop.hbase.ServerName;
-061import org.apache.hadoop.hbase.TableExistsException;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.Scan.ReadType;
-071import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-072import org.apache.hadoop.hbase.client.replication.TableCFs;
-073import org.apache.hadoop.hbase.client.security.SecurityCapability;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.replication.SyncReplicationState;
-083import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-084import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-085import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-086import org.apache.hadoop.hbase.util.Bytes;
-087import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-088import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-089import org.apache.yetus.audience.InterfaceAudience;
-090import org.slf4j.Logger;
-091import org.slf4j.LoggerFactory;
-092
-093import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-094import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-095import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
-096import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-097import org.apache.hbase.thirdparty.io.netty.util.Timeout;
-098import 

[02/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/testdevapidocs/src-html/org/apache/hadoop/hbase/thrift/TestThriftServer.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/thrift/TestThriftServer.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/thrift/TestThriftServer.html
index 60f13e3..ec94f7a 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/thrift/TestThriftServer.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/thrift/TestThriftServer.html
@@ -25,36 +25,36 @@
 017 */
 018package org.apache.hadoop.hbase.thrift;
 019
-020import static org.junit.Assert.assertArrayEquals;
-021import static org.junit.Assert.assertEquals;
-022import static org.junit.Assert.assertFalse;
-023import static org.junit.Assert.assertTrue;
-024import static org.junit.Assert.fail;
-025
-026import java.io.IOException;
-027import java.nio.ByteBuffer;
-028import java.util.ArrayList;
-029import java.util.Collection;
-030import java.util.HashMap;
-031import java.util.List;
-032import java.util.Map;
-033import org.apache.hadoop.conf.Configuration;
-034import org.apache.hadoop.hbase.CompatibilityFactory;
-035import org.apache.hadoop.hbase.HBaseClassTestRule;
-036import org.apache.hadoop.hbase.HBaseTestingUtility;
-037import org.apache.hadoop.hbase.HColumnDescriptor;
-038import org.apache.hadoop.hbase.HConstants;
-039import org.apache.hadoop.hbase.HRegionInfo;
-040import org.apache.hadoop.hbase.HTableDescriptor;
-041import org.apache.hadoop.hbase.TableName;
-042import org.apache.hadoop.hbase.client.Put;
-043import org.apache.hadoop.hbase.client.Table;
-044import org.apache.hadoop.hbase.filter.ParseFilter;
-045import org.apache.hadoop.hbase.security.UserProvider;
-046import org.apache.hadoop.hbase.test.MetricsAssertHelper;
-047import org.apache.hadoop.hbase.testclassification.ClientTests;
-048import org.apache.hadoop.hbase.testclassification.LargeTests;
-049import org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler;
+020import static org.apache.hadoop.hbase.thrift.Constants.COALESCE_INC_KEY;
+021import static org.junit.Assert.assertArrayEquals;
+022import static org.junit.Assert.assertEquals;
+023import static org.junit.Assert.assertFalse;
+024import static org.junit.Assert.assertTrue;
+025import static org.junit.Assert.fail;
+026
+027import java.io.IOException;
+028import java.nio.ByteBuffer;
+029import java.util.ArrayList;
+030import java.util.Collection;
+031import java.util.HashMap;
+032import java.util.List;
+033import java.util.Map;
+034import org.apache.hadoop.conf.Configuration;
+035import org.apache.hadoop.hbase.CompatibilityFactory;
+036import org.apache.hadoop.hbase.HBaseClassTestRule;
+037import org.apache.hadoop.hbase.HBaseTestingUtility;
+038import org.apache.hadoop.hbase.HColumnDescriptor;
+039import org.apache.hadoop.hbase.HConstants;
+040import org.apache.hadoop.hbase.HRegionInfo;
+041import org.apache.hadoop.hbase.HTableDescriptor;
+042import org.apache.hadoop.hbase.TableName;
+043import org.apache.hadoop.hbase.client.Put;
+044import org.apache.hadoop.hbase.client.Table;
+045import org.apache.hadoop.hbase.filter.ParseFilter;
+046import org.apache.hadoop.hbase.security.UserProvider;
+047import org.apache.hadoop.hbase.test.MetricsAssertHelper;
+048import org.apache.hadoop.hbase.testclassification.ClientTests;
+049import org.apache.hadoop.hbase.testclassification.LargeTests;
 050import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
 051import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
 052import org.apache.hadoop.hbase.thrift.generated.Hbase;
@@ -79,7 +79,7 @@
 071import org.slf4j.LoggerFactory;
 072
 073/**
-074 * Unit testing for ThriftServerRunner.HBaseHandler, a part of the
+074 * Unit testing for ThriftServerRunner.HBaseServiceHandler, a part of the
 075 * org.apache.hadoop.hbase.thrift package.
 076 */
 077@Category({ClientTests.class, LargeTests.class})
@@ -121,7 +121,7 @@
 113
 114  @BeforeClass
 115  public static void beforeClass() throws Exception {
-116    UTIL.getConfiguration().setBoolean(ThriftServerRunner.COALESCE_INC_KEY, true);
+116    UTIL.getConfiguration().setBoolean(COALESCE_INC_KEY, true);
 117    UTIL.getConfiguration().setBoolean("hbase.table.sanity.checks", false);
 118    UTIL.getConfiguration().setInt("hbase.client.retries.number", 3);
 119    UTIL.startMiniCluster();
@@ -160,8 +160,8 @@
 152   * IllegalArgument exception.
 153   */
 154  public void doTestTableCreateDrop() throws Exception {
-155    ThriftServerRunner.HBaseHandler handler =
-156      new ThriftServerRunner.HBaseHandler(UTIL.getConfiguration(),
+155    ThriftHBaseServiceHandler handler =
+156      new ThriftHBaseServiceHandler(UTIL.getConfiguration(),
 157        UserProvider.instantiate(UTIL.getConfiguration()));
 158    doTestTableCreateDrop(handler);
 159  }
@@ -171,7 +171,7 @@
 163

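The hunks above record two related changes: the thrift handler moved out of ThriftServerRunner into a top-level ThriftHBaseServiceHandler, and COALESCE_INC_KEY moved to the new Constants class. A hedged sketch of code affected by the rename -- only the names shown in the diff are taken from it; the surrounding setup is illustrative:

// Sketch of the migration this diff documents. ThriftHBaseServiceHandler
// and Constants.COALESCE_INC_KEY come from the diff; the rest is assumed.
import static org.apache.hadoop.hbase.thrift.Constants.COALESCE_INC_KEY;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.thrift.ThriftHBaseServiceHandler;

public class HandlerMigrationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Before: ThriftServerRunner.COALESCE_INC_KEY; after: Constants.COALESCE_INC_KEY.
    conf.setBoolean(COALESCE_INC_KEY, true);
    // Before: new ThriftServerRunner.HBaseHandler(conf, provider);
    // After, as the updated test does:
    ThriftHBaseServiceHandler handler =
        new ThriftHBaseServiceHandler(conf, UserProvider.instantiate(conf));
  }
}
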
[02/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.MulticastPublisher.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.MulticastPublisher.html b/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.MulticastPublisher.html
index baee068..09345c0 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.MulticastPublisher.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.MulticastPublisher.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.Publisher.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.Publisher.html b/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.Publisher.html
index bc75f4d..1849975 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.Publisher.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.Publisher.html
@@ -199,6 +199,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.html b/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.html
index b25c107..b15e345 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/ClusterStatusPublisher.html
@@ -160,6 +160,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/class-use/DeadServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/class-use/DeadServer.html b/devapidocs/org/apache/hadoop/hbase/master/class-use/DeadServer.html
index cecba52..e4aee88 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/DeadServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/DeadServer.html
@@ -173,6 +173,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/class-use/DrainingServerTracker.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/class-use/DrainingServerTracker.html b/devapidocs/org/apache/hadoop/hbase/master/class-use/DrainingServerTracker.html
index 8bcd9be..565eab8 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/DrainingServerTracker.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/DrainingServerTracker.html
@@ -160,6 +160,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/class-use/ExpiredMobFileCleanerChore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/class-use/ExpiredMobFileCleanerChore.html b/devapidocs/org/apache/hadoop/hbase/master/class-use/ExpiredMobFileCleanerChore.html
index 14edf04..3eecc37 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/ExpiredMobFileCleanerChore.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/ExpiredMobFileCleanerChore.html
@@ -160,6 +160,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 

[02/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 0f5a095..50bf692 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -78,8712 +78,8714 @@
 070import java.util.concurrent.locks.ReadWriteLock;
 071import java.util.concurrent.locks.ReentrantReadWriteLock;
 072import java.util.function.Function;
-073import org.apache.hadoop.conf.Configuration;
-074import org.apache.hadoop.fs.FileStatus;
-075import org.apache.hadoop.fs.FileSystem;
-076import org.apache.hadoop.fs.LocatedFileStatus;
-077import org.apache.hadoop.fs.Path;
-078import org.apache.hadoop.hbase.Cell;
-079import org.apache.hadoop.hbase.CellBuilderType;
-080import org.apache.hadoop.hbase.CellComparator;
-081import org.apache.hadoop.hbase.CellComparatorImpl;
-082import org.apache.hadoop.hbase.CellScanner;
-083import org.apache.hadoop.hbase.CellUtil;
-084import org.apache.hadoop.hbase.CompareOperator;
-085import org.apache.hadoop.hbase.CompoundConfiguration;
-086import org.apache.hadoop.hbase.DoNotRetryIOException;
-087import org.apache.hadoop.hbase.DroppedSnapshotException;
-088import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-089import org.apache.hadoop.hbase.HConstants;
-090import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-091import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-092import org.apache.hadoop.hbase.KeyValue;
-093import org.apache.hadoop.hbase.KeyValueUtil;
-094import org.apache.hadoop.hbase.NamespaceDescriptor;
-095import org.apache.hadoop.hbase.NotServingRegionException;
-096import org.apache.hadoop.hbase.PrivateCellUtil;
-097import org.apache.hadoop.hbase.RegionTooBusyException;
-098import org.apache.hadoop.hbase.Tag;
-099import org.apache.hadoop.hbase.TagUtil;
-100import org.apache.hadoop.hbase.UnknownScannerException;
-101import org.apache.hadoop.hbase.client.Append;
-102import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-103import org.apache.hadoop.hbase.client.CompactionState;
-104import org.apache.hadoop.hbase.client.Delete;
-105import org.apache.hadoop.hbase.client.Durability;
-106import org.apache.hadoop.hbase.client.Get;
-107import org.apache.hadoop.hbase.client.Increment;
-108import org.apache.hadoop.hbase.client.IsolationLevel;
-109import org.apache.hadoop.hbase.client.Mutation;
-110import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-111import org.apache.hadoop.hbase.client.Put;
-112import org.apache.hadoop.hbase.client.RegionInfo;
-113import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-114import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-115import org.apache.hadoop.hbase.client.Result;
-116import org.apache.hadoop.hbase.client.RowMutations;
-117import org.apache.hadoop.hbase.client.Scan;
-118import org.apache.hadoop.hbase.client.TableDescriptor;
-119import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-120import org.apache.hadoop.hbase.conf.ConfigurationManager;
-121import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-122import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-123import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-124import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-125import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-126import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-127import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-128import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-129import org.apache.hadoop.hbase.filter.FilterWrapper;
-130import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-131import org.apache.hadoop.hbase.io.HFileLink;
-132import org.apache.hadoop.hbase.io.HeapSize;
-133import org.apache.hadoop.hbase.io.TimeRange;
-134import org.apache.hadoop.hbase.io.hfile.BlockCache;
-135import org.apache.hadoop.hbase.io.hfile.HFile;
-136import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import org.apache.hadoop.hbase.ipc.RpcCall;
-139import org.apache.hadoop.hbase.ipc.RpcServer;
-140import org.apache.hadoop.hbase.mob.MobFileCache;
-141import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-142import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-143import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-144import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-145import 

[02/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.FlushedSequenceIdFlusher.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.FlushedSequenceIdFlusher.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.FlushedSequenceIdFlusher.html
index f1c0bc3..88147e4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.FlushedSequenceIdFlusher.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.FlushedSequenceIdFlusher.html
@@ -610,585 +610,588 @@
 602      return false;
 603    }
 604    LOG.info("Processing expiration of " + serverName + " on " + this.master.getServerName());
-605    master.getAssignmentManager().submitServerCrash(serverName, true);
-606
-607    // Tell our listeners that a server was removed
-608    if (!this.listeners.isEmpty()) {
-609      for (ServerListener listener : this.listeners) {
-610        listener.serverRemoved(serverName);
-611      }
-612    }
-613    // trigger a persist of flushedSeqId
-614    if (flushedSeqIdFlusher != null) {
-615      flushedSeqIdFlusher.triggerNow();
-616    }
-617    return true;
-618  }
-619
-620  @VisibleForTesting
-621  public void moveFromOnlineToDeadServers(final ServerName sn) {
-622    synchronized (onlineServers) {
-623      if (!this.onlineServers.containsKey(sn)) {
-624        LOG.trace("Expiration of {} but server not online", sn);
-625      }
-626      // Remove the server from the known servers lists and update load info BUT
-627      // add to deadservers first; do this so it'll show in dead servers list if
-628      // not in online servers list.
-629      this.deadservers.add(sn);
-630      this.onlineServers.remove(sn);
-631      onlineServers.notifyAll();
-632    }
-633    this.rsAdmins.remove(sn);
-634  }
-635
-636  /*
-637   * Remove the server from the drain list.
-638   */
-639  public synchronized boolean removeServerFromDrainList(final ServerName sn) {
-640    // Warn if the server (sn) is not online.  ServerName is of the form:
-641    // <hostname> , <port> , <startcode>
-642
-643    if (!this.isServerOnline(sn)) {
-644      LOG.warn("Server " + sn + " is not currently online. " +
-645               "Removing from draining list anyway, as requested.");
-646    }
-647    // Remove the server from the draining servers lists.
-648    return this.drainingServers.remove(sn);
-649  }
-650
-651  /**
-652   * Add the server to the drain list.
-653   * @param sn
-654   * @return True if the server is added or the server is already on the drain list.
-655   */
-656  public synchronized boolean addServerToDrainList(final ServerName sn) {
-657    // Warn if the server (sn) is not online.  ServerName is of the form:
-658    // <hostname> , <port> , <startcode>
-659
-660    if (!this.isServerOnline(sn)) {
-661      LOG.warn("Server " + sn + " is not currently online. " +
-662               "Ignoring request to add it to draining list.");
-663      return false;
-664    }
-665    // Add the server to the draining servers lists, if it's not already in
-666    // it.
-667    if (this.drainingServers.contains(sn)) {
-668      LOG.warn("Server " + sn + " is already in the draining server list." +
-669               "Ignoring request to add it again.");
-670      return true;
-671    }
-672    LOG.info("Server " + sn + " added to draining server list.");
-673    return this.drainingServers.add(sn);
-674  }
-675
-676  // RPC methods to region servers
-677
-678  private HBaseRpcController newRpcController() {
-679    return rpcControllerFactory == null ? null : rpcControllerFactory.newController();
-680  }
-681
-682  /**
-683   * Sends a WARMUP RPC to the specified server to warmup the specified region.
-684   * <p>
-685   * A region server could reject the close request because it either does not
-686   * have the specified region or the region is being split.
-687   * @param server server to warmup a region
-688   * @param region region to  warmup
-689   */
-690  public void sendRegionWarmup(ServerName server,
-691      RegionInfo region) {
-692    if (server == null) return;
-693    try {
-694      AdminService.BlockingInterface admin = getRsAdmin(server);
-695      HBaseRpcController controller = newRpcController();
-696      ProtobufUtil.warmupRegion(controller, admin, region);
-697    } catch (IOException e) {
-698      LOG.error("Received exception in RPC for warmup server:" +
-699        server + "region: " + region +
-700        "exception: " + e);
-701    }
-702  }
-703
-704  /**
-705   * Contacts a region server and waits up to timeout ms
-706   * to close the region.  This bypasses the active hmaster.
-707   */
-708  public static void closeRegionSilentlyAndWait(ClusterConnection connection,
-709    ServerName server, RegionInfo region, long timeout) throws 

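The drain-list methods above are synchronized and deliberately lenient: adding an already-draining server warns and returns true, and removing an offline server is allowed with a warning. A hedged sketch of that contract; obtaining a live ServerManager is outside the snippet's scope, so it is passed in:

// Illustrative sketch of the drain-list contract shown in the diff above.
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerManager;

public class DrainListSketch {
  static void toggleDrain(ServerManager serverManager, ServerName sn) {
    // false only when the server is not online; re-adding an already
    // draining server logs a warning and still returns true.
    boolean draining = serverManager.addServerToDrainList(sn);
    // Removal is permitted even for offline servers (with a warning).
    boolean removed = serverManager.removeServerFromDrainList(sn);
  }
}
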
[02/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import org.apache.hadoop.hbase.wal.WALFactory;
 136import org.apache.hadoop.hbase.wal.WALSplitter;
-137import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import org.apache.hadoop.ipc.RemoteException;
-143import org.apache.hadoop.security.UserGroupInformation;
-144import org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import org.apache.hadoop.util.ToolRunner;
-147import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import org.apache.yetus.audience.InterfaceAudience;
-150import org.apache.yetus.audience.InterfaceStability;
-151import org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
-168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
-171 *
-172 * <p>
-173 * Region consistency checks verify that hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS (.regioninfo files) all are in
-175 * accordance.
-176 * <p>
-177 * Table integrity checks verify that all possible row keys resolve to exactly
-178 * one region of a table.  This means there are no individual degenerate
-179 * or backwards regions; no holes between regions; and that there are no
-180 * overlapping regions.
-181 * <p>
-182 * The general repair strategy works in two phases:
-183 * <ol>
-184 * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
-185 * <li> Repair Region Consistency with hbase:meta and assignments
-186 * </ol>
-187 * <p>
-188 * For table integrity repairs, the tables' region directories are scanned
-189 * for .regioninfo files.  Each table's integrity is then verified.  If there
-190 * are any orphan regions (regions with no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
-193 * a new region is created and all data is merged into the new region.
-194 * <p>
-195 * Table integrity repairs deal solely with HDFS and could potentially be done
-196 * offline -- the hbase region servers or master do not need to be running.
-197 * This phase can eventually be used to completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * <p>
-200 * Region consistency requires three conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at the regionserver that was assigned to
-203 * with proper state in the master.
-204 * <p>
-205 * Region consistency repairs require hbase to be online so that hbck can
-206 * contact the HBase master and region servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of the region consistency information
-208 * is transient and less risky to repair.
-209 * <p>
-210 * If hbck is run from the command line, there are a handful 

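The javadoc above describes hbck's two-phase repair strategy. Since the removed imports include Hadoop's Tool and ToolRunner, a programmatic invocation presumably follows the standard Tool pattern; the sketch below is an assumption for illustration (the usual entry point is the `hbase hbck` shell command, and the -details flag and exit-code meaning are assumptions too):

// Hedged sketch: running hbck through the Hadoop Tool/ToolRunner pattern
// implied by the imports above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.HBaseFsck;
import org.apache.hadoop.util.ToolRunner;

public class HbckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "-details" asks for a full consistency report; a non-zero exit status
    // signals inconsistencies (both assumptions for illustration).
    int exitCode = ToolRunner.run(conf, new HBaseFsck(conf), new String[] { "-details" });
    System.exit(exitCode);
  }
}
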
[02/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/src-html/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.html
index 09e7d34..2149280 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.html
@@ -26,118 +26,110 @@
 018package org.apache.hadoop.hbase.master;
 019
 020import java.io.IOException;
-021import java.util.ArrayList;
-022import java.util.Collections;
-023import java.util.List;
-024import java.util.Set;
-025
-026import org.apache.hadoop.hbase.NamespaceDescriptor;
-027import org.apache.hadoop.hbase.NamespaceNotFoundException;
-028import org.apache.hadoop.hbase.ServiceNotRunningException;
-029import org.apache.yetus.audience.InterfaceAudience;
-030import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure;
-031import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
-032import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-033import org.apache.hadoop.hbase.master.procedure.ModifyNamespaceProcedure;
-034import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-035import org.apache.hadoop.hbase.procedure2.Procedure;
-036import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-037import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService;
-038import org.apache.hadoop.hbase.util.NonceKey;
-039
-040@InterfaceAudience.Private
-041class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaService {
-042  private final TableNamespaceManager tableNamespaceManager;
-043  private final MasterServices masterServices;
-044  private final static List<NamespaceDescriptor> EMPTY_NAMESPACE_LIST =
-045    Collections.unmodifiableList(new ArrayList<NamespaceDescriptor>(0));
-046
-047  ClusterSchemaServiceImpl(final MasterServices masterServices) {
-048    this.masterServices = masterServices;
-049    this.tableNamespaceManager = new TableNamespaceManager(masterServices);
-050  }
-051
-052  // All below are synchronized so consistent view on whether running or not.
-053
-054
-055  private synchronized void checkIsRunning() throws ServiceNotRunningException {
-056    if (!isRunning()) throw new ServiceNotRunningException();
-057  }
-058
-059  @Override
-060  public synchronized void doStart() {
-061    try {
-062      notifyStarted();
-063      this.tableNamespaceManager.start();
-064    } catch (IOException ioe) {
-065      notifyFailed(ioe);
-066    }
-067  }
-068
-069  @Override
-070  protected void doStop() {
-071    // This is no stop for the table manager.
-072    notifyStopped();
-073    TableNamespaceManager tnsm = getTableNamespaceManager();
-074    if (tnsm != null) {
-075      tnsm.stop("Stopping");
-076    }
-077  }
-078
-079  @Override
-080  public TableNamespaceManager getTableNamespaceManager() {
-081    return this.tableNamespaceManager;
-082  }
-083
-084  private long submitProcedure(final Procedure<MasterProcedureEnv> procedure,
-085      final NonceKey nonceKey) throws ServiceNotRunningException {
-086    checkIsRunning();
-087    ProcedureExecutor<MasterProcedureEnv> pe = this.masterServices.getMasterProcedureExecutor();
-088    return pe.submitProcedure(procedure, nonceKey);
-089  }
-090
-091  @Override
-092  public long createNamespace(NamespaceDescriptor namespaceDescriptor, final NonceKey nonceKey,
-093      final ProcedurePrepareLatch latch)
-094      throws IOException {
-095    return submitProcedure(new CreateNamespaceProcedure(
-096      this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, latch),
-097      nonceKey);
-098  }
-099
-100  @Override
-101  public long modifyNamespace(NamespaceDescriptor namespaceDescriptor, final NonceKey nonceKey,
-102      final ProcedurePrepareLatch latch) throws IOException {
-103    return submitProcedure(new ModifyNamespaceProcedure(
-104      this.masterServices.getMasterProcedureExecutor().getEnvironment(), namespaceDescriptor, latch),
-105      nonceKey);
-106  }
-107
-108  @Override
-109  public long deleteNamespace(String name, final NonceKey nonceKey, final ProcedurePrepareLatch latch)
-110      throws IOException {
-111    return submitProcedure(new DeleteNamespaceProcedure(
-112      this.masterServices.getMasterProcedureExecutor().getEnvironment(), name, latch),
-113      nonceKey);
-114  }
-115
-116  @Override
-117  public NamespaceDescriptor getNamespace(String name) throws IOException {
-118    NamespaceDescriptor nsd = getTableNamespaceManager().get(name);
-119    if (nsd == null) throw new NamespaceNotFoundException(name);
-120    return nsd;
-121  }
[02/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
index 5062e9b..23b4be7 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
@@ -282,7 +282,7 @@
 274  public static void tearDownAfterClass() throws Exception {
 275    cleanUp();
 276    TEST_UTIL.shutdownMiniCluster();
-277    int total = TableAuthManager.getTotalRefCount();
+277    int total = AuthManager.getTotalRefCount();
 278    assertTrue("Unexpected reference count: " + total, total == 0);
 279  }
 280
@@ -1642,12 +1642,12 @@
 1634  }
 1635
 1636  UserPermission ownerperm =
-1637      new UserPermission(Bytes.toBytes(USER_OWNER.getName()), tableName, null, Action.values());
+1637      new UserPermission(USER_OWNER.getName(), tableName, Action.values());
 1638  assertTrue("Owner should have all permissions on table",
 1639    hasFoundUserPermission(ownerperm, perms));
 1640
 1641  User user = User.createUserForTesting(TEST_UTIL.getConfiguration(), "user", new String[0]);
-1642  byte[] userName = Bytes.toBytes(user.getShortName());
+1642  String userName = user.getShortName();
 1643
 1644  UserPermission up =
 1645      new UserPermission(userName, tableName, family1, qualifier, Permission.Action.READ);
@@ -1733,7 +1733,7 @@
 1725  }
 1726
 1727  UserPermission newOwnerperm =
-1728      new UserPermission(Bytes.toBytes(newOwner.getName()), tableName, null, Action.values());
+1728      new UserPermission(newOwner.getName(), tableName, Action.values());
 1729  assertTrue("New owner should have all permissions on table",
 1730    hasFoundUserPermission(newOwnerperm, perms));
 1731} finally {
@@ -1757,1888 +1757,1898 @@
 1749
 1750    Collection<String> superUsers = Superusers.getSuperUsers();
 1751    List<UserPermission> adminPerms = new ArrayList<>(superUsers.size() + 1);
-1752    adminPerms.add(new UserPermission(Bytes.toBytes(USER_ADMIN.getShortName()),
-1753      AccessControlLists.ACL_TABLE_NAME, null, null, Bytes.toBytes("ACRW")));
-1754
-1755    for (String user: superUsers) {
-1756      adminPerms.add(new UserPermission(Bytes.toBytes(user), AccessControlLists.ACL_TABLE_NAME,
-1757          null, null, Action.values()));
-1758    }
-1759    assertTrue("Only super users, global users and user admin has permission on table hbase:acl " +
-1760        "per setup", perms.size() == 5 + superUsers.size() &&
-1761        hasFoundUserPermission(adminPerms, perms));
-1762  }
-1763
-1764  /** global operations */
-1765  private void verifyGlobal(AccessTestAction action) throws Exception {
-1766    verifyAllowed(action, SUPERUSER);
-1767
-1768    verifyDenied(action, USER_CREATE, USER_RW, USER_NONE, USER_RO);
-1769  }
-1770
-1771  @Test
-1772  public void testCheckPermissions() throws Exception {
-1773    // --------------------------------------
-1774    // test global permissions
-1775    AccessTestAction globalAdmin = new AccessTestAction() {
-1776      @Override
-1777      public Void run() throws Exception {
-1778        checkGlobalPerms(TEST_UTIL, Permission.Action.ADMIN);
-1779        return null;
-1780      }
-1781    };
-1782    // verify that only superuser can admin
-1783    verifyGlobal(globalAdmin);
-1784
-1785    // --------------------------------------
-1786    // test multiple permissions
-1787    AccessTestAction globalReadWrite = new AccessTestAction() {
-1788      @Override
-1789      public Void run() throws Exception {
-1790        checkGlobalPerms(TEST_UTIL, Permission.Action.READ, Permission.Action.WRITE);
-1791        return null;
-1792      }
-1793    };
+1752    adminPerms.add(new UserPermission(USER_ADMIN.getShortName(), Bytes.toBytes("ACRW")));
+1753    for (String user: superUsers) {
+1754      // Global permission
+1755      adminPerms.add(new UserPermission(user, Action.values()));
+1756    }
+1757    assertTrue("Only super users, global users and user admin has permission on table hbase:acl " +
+1758        "per setup", perms.size() == 5 + superUsers.size() &&
+1759        hasFoundUserPermission(adminPerms, perms));
+1760  }
+1761
+1762  /** global operations */
+1763  private void verifyGlobal(AccessTestAction action) throws Exception {
+1764    verifyAllowed(action, SUPERUSER);
+1765
+1766    verifyDenied(action, USER_CREATE, USER_RW, USER_NONE, USER_RO);
[02/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestTableProcedure.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestTableProcedure.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestTableProcedure.html
index c9e0e55..f1b9105 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestTableProcedure.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestTableProcedure.html
@@ -37,157 +37,157 @@
 029import java.util.Arrays;
 030import java.util.List;
 031import org.apache.hadoop.hbase.HBaseClassTestRule;
-032import org.apache.hadoop.hbase.HRegionInfo;
-033import org.apache.hadoop.hbase.ServerName;
-034import org.apache.hadoop.hbase.TableName;
-035import org.apache.hadoop.hbase.client.RegionInfo;
-036import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-037import org.apache.hadoop.hbase.master.locking.LockProcedure;
-038import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
-039import org.apache.hadoop.hbase.procedure2.LockType;
-040import org.apache.hadoop.hbase.procedure2.LockedResource;
-041import org.apache.hadoop.hbase.procedure2.LockedResourceType;
-042import org.apache.hadoop.hbase.procedure2.Procedure;
-043import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-044import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
-045import org.apache.hadoop.hbase.testclassification.MasterTests;
-046import org.apache.hadoop.hbase.testclassification.SmallTests;
-047import org.apache.hadoop.hbase.util.Bytes;
-048import org.junit.After;
-049import org.junit.Before;
-050import org.junit.ClassRule;
-051import org.junit.Rule;
-052import org.junit.Test;
-053import org.junit.experimental.categories.Category;
-054import org.junit.rules.TestName;
-055import org.slf4j.Logger;
-056import org.slf4j.LoggerFactory;
-057
-058@Category({MasterTests.class, SmallTests.class})
-059public class TestMasterProcedureScheduler {
-060
-061  @ClassRule
-062  public static final HBaseClassTestRule CLASS_RULE =
-063      HBaseClassTestRule.forClass(TestMasterProcedureScheduler.class);
-064
-065  private static final Logger LOG = LoggerFactory.getLogger(TestMasterProcedureScheduler.class);
-066
-067  private MasterProcedureScheduler queue;
-068
-069  @Rule
-070  public TestName name = new TestName();
-071
-072  @Before
-073  public void setUp() throws IOException {
-074    queue = new MasterProcedureScheduler();
-075    queue.start();
-076  }
-077
-078  @After
-079  public void tearDown() throws IOException {
-080    assertEquals("proc-queue expected to be empty", 0, queue.size());
-081    queue.stop();
-082    queue.clear();
-083  }
-084
-085  /**
-086   * Verify simple create/insert/fetch/delete of the table queue.
-087   */
-088  @Test
-089  public void testSimpleTableOpsQueues() throws Exception {
-090    final int NUM_TABLES = 10;
-091    final int NUM_ITEMS = 10;
-092
-093    int count = 0;
-094    for (int i = 1; i <= NUM_TABLES; ++i) {
-095      TableName tableName = TableName.valueOf(String.format("test-%04d", i));
-096      // insert items
-097      for (int j = 1; j <= NUM_ITEMS; ++j) {
-098        queue.addBack(new TestTableProcedure(i * 1000 + j, tableName,
-099          TableProcedureInterface.TableOperationType.EDIT));
-100        assertEquals(++count, queue.size());
-101      }
-102    }
-103    assertEquals(NUM_TABLES * NUM_ITEMS, queue.size());
-104
-105    for (int j = 1; j <= NUM_ITEMS; ++j) {
-106      for (int i = 1; i <= NUM_TABLES; ++i) {
-107        Procedure proc = queue.poll();
-108        assertTrue(proc != null);
-109        TableName tableName = ((TestTableProcedure)proc).getTableName();
-110        queue.waitTableExclusiveLock(proc, tableName);
-111        queue.wakeTableExclusiveLock(proc, tableName);
-112        queue.completionCleanup(proc);
-113        assertEquals(--count, queue.size());
-114        assertEquals(i * 1000 + j, proc.getProcId());
-115      }
-116    }
-117    assertEquals(0, queue.size());
-118
-119    for (int i = 1; i <= NUM_TABLES; ++i) {
-120      final TableName tableName = TableName.valueOf(String.format("test-%04d", i));
-121      final TestTableProcedure dummyProc = new TestTableProcedure(100, tableName,
-122        TableProcedureInterface.TableOperationType.DELETE);
-123      // complete the table deletion
-124      assertTrue(queue.markTableAsDeleted(tableName, dummyProc));
-125    }
-126  }
-127
-128  /**
-129   * Check that the table queue is not deletable until every procedure
-130   * in-progress is completed (this is a special case for write-locks).
-131   

[02/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.SecureBulkLoadListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.SecureBulkLoadListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.SecureBulkLoadListener.html
index 3ec2d53..9400177 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.SecureBulkLoadListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.SecureBulkLoadListener.html
@@ -33,390 +33,433 @@
 025import java.util.HashMap;
 026import java.util.List;
 027import java.util.Map;
-028
-029import 
org.apache.hadoop.conf.Configuration;
-030import org.apache.hadoop.fs.FileStatus;
-031import org.apache.hadoop.fs.FileSystem;
-032import org.apache.hadoop.fs.FileUtil;
-033import org.apache.hadoop.fs.Path;
-034import 
org.apache.hadoop.fs.permission.FsPermission;
-035import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.TableName;
-038import 
org.apache.hadoop.hbase.client.Connection;
-039import 
org.apache.hadoop.hbase.ipc.RpcServer;
-040import 
org.apache.hadoop.hbase.regionserver.HRegion.BulkLoadListener;
-041import 
org.apache.hadoop.hbase.security.User;
-042import 
org.apache.hadoop.hbase.security.UserProvider;
-043import 
org.apache.hadoop.hbase.security.token.FsDelegationToken;
-044import 
org.apache.hadoop.hbase.security.token.TokenUtil;
-045import 
org.apache.hadoop.hbase.util.Bytes;
-046import 
org.apache.hadoop.hbase.util.FSHDFSUtils;
-047import 
org.apache.hadoop.hbase.util.FSUtils;
-048import 
org.apache.hadoop.hbase.util.Methods;
-049import 
org.apache.hadoop.hbase.util.Pair;
-050import org.apache.hadoop.io.Text;
-051import 
org.apache.hadoop.security.UserGroupInformation;
-052import 
org.apache.hadoop.security.token.Token;
-053import 
org.apache.yetus.audience.InterfaceAudience;
-054import org.slf4j.Logger;
-055import org.slf4j.LoggerFactory;
-056import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-057import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-058import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
-059import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
-060
-061/**
-062 * Bulk loads in secure mode.
-063 *
-064 * This service addresses two issues:
-065 * <ol>
-066 * <li>Moving files in a secure filesystem wherein the HBase Client
-067 * and HBase Server are different filesystem users.</li>
-068 * <li>Does moving in a secure manner. Assuming that the filesystem
-069 * is POSIX compliant.</li>
-070 * </ol>
-071 *
-072 * The algorithm is as follows:
-073 * <ol>
-074 * <li>Create an hbase owned staging directory which is
-075 * world traversable (711): {@code /hbase/staging}</li>
-076 * <li>A user writes out data to his secure output directory: {@code /user/foo/data}</li>
-077 * <li>A call is made to hbase to create a secret staging directory
-078 * which is globally rwx (777): {@code /user/staging/averylongandrandomdirectoryname}</li>
-079 * <li>The user moves the data into the random staging directory,
-080 * then calls bulkLoadHFiles()</li>
-081 * </ol>
-082 *
-083 * Like delegation tokens the strength of 
the security lies in the length
-084 * and randomness of the secret 
directory.
-085 *
-086 */
-087@InterfaceAudience.Private
-088public class SecureBulkLoadManager {
-089
-090  public static final long VERSION = 
0L;
-091
-092  //320/5 = 64 characters
-093  private static final int RANDOM_WIDTH = 
320;
-094  private static final int RANDOM_RADIX = 
32;
+028import 
java.util.concurrent.ConcurrentHashMap;
+029import java.util.function.BiFunction;
+030import java.util.function.Consumer;
+031
+032import 
org.apache.hadoop.conf.Configuration;
+033import org.apache.hadoop.fs.FileStatus;
+034import org.apache.hadoop.fs.FileSystem;
+035import org.apache.hadoop.fs.FileUtil;
+036import org.apache.hadoop.fs.Path;
+037import 
org.apache.hadoop.fs.permission.FsPermission;
+038import 
org.apache.hadoop.hbase.DoNotRetryIOException;
+039import 
org.apache.hadoop.hbase.HConstants;
+040import 
org.apache.hadoop.hbase.TableName;
+041import 
org.apache.hadoop.hbase.client.Connection;
+042import 
org.apache.hadoop.hbase.ipc.RpcServer;
+043import 
org.apache.hadoop.hbase.regionserver.HRegion.BulkLoadListener;
+044import 
org.apache.hadoop.hbase.security.User;
+045import 
org.apache.hadoop.hbase.security.UserProvider;
+046import 
org.apache.hadoop.hbase.security.token.FsDelegationToken;
+047import 
org.apache.hadoop.hbase.security.token.TokenUtil;
+048import 
org.apache.hadoop.hbase.util.Bytes;
+049import 
org.apache.hadoop.hbase.util.FSHDFSUtils;
+050import 
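
The secret staging directory described in the javadoc above draws its strength purely from name entropy: RANDOM_WIDTH random bits printed in RANDOM_RADIX give the 64-character name the "320/5 = 64 characters" comment alludes to. A minimal, self-contained sketch of that naming scheme (an illustration, not the class's actual code):

    import java.math.BigInteger;
    import java.security.SecureRandom;

    public class StagingDirNameSketch {
      // Mirrors the constants shown in the diff: 320 random bits, radix 32.
      private static final int RANDOM_WIDTH = 320;
      private static final int RANDOM_RADIX = 32;

      // Each radix-32 digit carries 5 bits, so 320/5 = 64 characters at most.
      static String randomStagingDirName(SecureRandom rng) {
        return new BigInteger(RANDOM_WIDTH, rng).toString(RANDOM_RADIX);
      }

      public static void main(String[] args) {
        System.out.println(randomStagingDirName(new SecureRandom()));
      }
    }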

[02/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.IsFlushWALMarker.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.IsFlushWALMarker.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.IsFlushWALMarker.html
index ed3db7a..156dabb 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.IsFlushWALMarker.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.IsFlushWALMarker.html
@@ -5542,785 +5542,825 @@
 5534  }
 5535
 5536  @Test
-5537  public void testWriteRequestsCounter() 
throws IOException {
-5538byte[] fam = 
Bytes.toBytes("info");
-5539byte[][] families = { fam };
-5540this.region = initHRegion(tableName, 
method, CONF, families);
+5537  public void 
testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception {
+5538byte[] cf1 = Bytes.toBytes("CF1");
+5539byte[][] families = { cf1 };
+5540byte[] col = Bytes.toBytes("C");
 5541
-5542Assert.assertEquals(0L, 
region.getWriteRequestsCount());
-5543
-5544Put put = new Put(row);
-5545put.addColumn(fam, fam, fam);
-5546
-5547Assert.assertEquals(0L, 
region.getWriteRequestsCount());
-5548region.put(put);
-5549Assert.assertEquals(1L, 
region.getWriteRequestsCount());
-5550region.put(put);
-5551Assert.assertEquals(2L, 
region.getWriteRequestsCount());
-5552region.put(put);
-5553Assert.assertEquals(3L, 
region.getWriteRequestsCount());
-5554
-5555region.delete(new Delete(row));
-5556Assert.assertEquals(4L, 
region.getWriteRequestsCount());
-5557  }
-5558
-5559  @Test
-5560  public void 
testOpenRegionWrittenToWAL() throws Exception {
-5561final ServerName serverName = 
ServerName.valueOf(name.getMethodName(), 100, 42);
-5562final RegionServerServices rss = 
spy(TEST_UTIL.createMockRegionServerService(serverName));
-5563
-5564HTableDescriptor htd = new 
HTableDescriptor(TableName.valueOf(name.getMethodName()));
-5565htd.addFamily(new 
HColumnDescriptor(fam1));
-5566htd.addFamily(new 
HColumnDescriptor(fam2));
-5567
-5568HRegionInfo hri = new 
HRegionInfo(htd.getTableName(),
-5569  HConstants.EMPTY_BYTE_ARRAY, 
HConstants.EMPTY_BYTE_ARRAY);
-5570
-5571// open the region w/o rss and wal 
and flush some files
-5572region =
-5573 
HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), 
TEST_UTIL
-5574 .getConfiguration(), 
htd);
-5575assertNotNull(region);
-5576
-5577// create a file in fam1 for the 
region before opening in OpenRegionHandler
-5578region.put(new 
Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
-5579region.flush(true);
-5580
HBaseTestingUtility.closeRegionAndWAL(region);
+5542HBaseConfiguration conf = new 
HBaseConfiguration();
+5543this.region = initHRegion(tableName, 
method, conf, families);
+5544
+5545Put put = new 
Put(Bytes.toBytes("16"));
+5546put.addColumn(cf1, col, 
Bytes.toBytes("val"));
+5547region.put(put);
+5548Put put2 = new 
Put(Bytes.toBytes("15"));
+5549put2.addColumn(cf1, col, 
Bytes.toBytes("val"));
+5550region.put(put2);
+5551
+5552// Create a reverse scan
+5553Scan scan = new 
Scan(Bytes.toBytes("16"));
+5554scan.setReversed(true);
+5555RegionScannerImpl scanner = region.getScanner(scan);
+5556
+5557// Put a lot of cells that have sequenceIDs greater than the readPt of the reverse scan
+5558for (int i = 10; i < 20; i++) {
+5559  Put p = new Put(Bytes.toBytes("" + 
i));
+5560  p.addColumn(cf1, col, 
Bytes.toBytes("" + i));
+5561  region.put(p);
+5562}
+5563List<Cell> currRow = new ArrayList<>();
+5564boolean hasNext;
+5565do {
+5566  hasNext = scanner.next(currRow);
+5567} while (hasNext);
+5568
+5569assertEquals(2, currRow.size());
+5570assertEquals("16", 
Bytes.toString(currRow.get(0).getRowArray(),
+5571  currRow.get(0).getRowOffset(), 
currRow.get(0).getRowLength()));
+5572assertEquals("15", 
Bytes.toString(currRow.get(1).getRowArray(),
+5573  currRow.get(1).getRowOffset(), 
currRow.get(1).getRowLength()));
+5574  }
+5575
+5576  @Test
+5577  public void testWriteRequestsCounter() 
throws IOException {
+5578byte[] fam = 
Bytes.toBytes("info");
+5579byte[][] families = { fam };
+5580this.region = initHRegion(tableName, 
method, CONF, families);
 5581
-5582ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
+5582Assert.assertEquals(0L, 
region.getWriteRequestsCount());
 5583
-5584// capture append() calls
-5585WAL wal = mockWAL();
-5586when(rss.getWAL((HRegionInfo) 
any())).thenReturn(wal);
-5587
-5588region = HRegion.openHRegion(hri, 
htd, rss.getWAL(hri),
-5589  

[02/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.Order.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.Order.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.Order.html
index 3efe1f0..3b88f1d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.Order.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FuzzyRowFilter.Order.html
@@ -43,629 +43,628 @@
 035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.BytesBytesPair;
 036import 
org.apache.hadoop.hbase.util.Bytes;
 037import 
org.apache.hadoop.hbase.util.Pair;
-038import 
org.apache.hadoop.hbase.util.UnsafeAccess;
-039import 
org.apache.hadoop.hbase.util.UnsafeAvailChecker;
-040
-041import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-042
-043/**
-044 * This is an optimized version of a standard FuzzyRowFilter. Filters data based on fuzzy row key.
-045 * Performs fast-forwards during scanning. It takes pairs (row key, fuzzy info) to match row keys.
-046 * Where fuzzy info is a byte array with 0 or 1 as its values:
-047 * <ul>
-048 * <li>0 - means that this byte in provided row key is fixed, i.e. row key's byte at same position
-049 * must match</li>
-050 * <li>1 - means that this byte in provided row key is NOT fixed, i.e. row key's byte at this
-051 * position can be different from the one in provided row key</li>
-052 * </ul>
-053 * Example: Let's assume row key format is userId_actionId_year_month. Length of userId is fixed and
-054 * is 4, length of actionId is 2 and year and month are 4 and 2 bytes long respectively. Let's
-055 * assume that we need to fetch all users that performed certain action (encoded as "99") in Jan of
-056 * any year. Then the pair (row key, fuzzy info) would be the following: row key = "????_99_????_01"
-057 * (one can use any value instead of "?") fuzzy info =
-058 * "\x01\x01\x01\x01\x00\x00\x00\x00\x01\x01\x01\x01\x00\x00\x00" I.e. fuzzy info tells the matching
-059 * mask is "????_99_????_01", where at ? can be any value.
-060 */
-061@InterfaceAudience.Public
-062public class FuzzyRowFilter extends 
FilterBase {
-063  private static final boolean 
UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
-064  private List<Pair<byte[], byte[]>> fuzzyKeysData;
-065  private boolean done = false;
-066
-067  /**
-068   * The index of a last successfully 
found matching fuzzy string (in fuzzyKeysData). We will start
-069   * matching next KV with this one. If 
they do not match then we will return back to the one-by-one
-070   * iteration over fuzzyKeysData.
-071   */
-072  private int lastFoundIndex = -1;
-073
-074  /**
-075   * Row tracker (keeps all next rows 
after SEEK_NEXT_USING_HINT was returned)
-076   */
-077  private RowTracker tracker;
-078
-079  public FuzzyRowFilter(List<Pair<byte[], byte[]>> fuzzyKeysData) {
-080    List<Pair<byte[], byte[]>> fuzzyKeyDataCopy = new ArrayList<>(fuzzyKeysData.size());
-081
-082    for (Pair<byte[], byte[]> aFuzzyKeysData : fuzzyKeysData) {
-083      if (aFuzzyKeysData.getFirst().length != aFuzzyKeysData.getSecond().length) {
-084        Pair<String, String> readable =
-085          new Pair<>(Bytes.toStringBinary(aFuzzyKeysData.getFirst()), Bytes.toStringBinary(aFuzzyKeysData.getSecond()));
-086        throw new IllegalArgumentException("Fuzzy pair lengths do not match: " + readable);
-087      }
-088
-089      Pair<byte[], byte[]> p = new Pair<>();
-090      // create a copy of pair bytes so that they are not modified by the filter.
-091      p.setFirst(Arrays.copyOf(aFuzzyKeysData.getFirst(), aFuzzyKeysData.getFirst().length));
-092      p.setSecond(Arrays.copyOf(aFuzzyKeysData.getSecond(), aFuzzyKeysData.getSecond().length));
-093
-094      // update mask ( 0 -> -1 (0xff), 1 -> 2)
-095      p.setSecond(preprocessMask(p.getSecond()));
-096      preprocessSearchKey(p);
-097
-098      fuzzyKeyDataCopy.add(p);
-099    }
-100    this.fuzzyKeysData = fuzzyKeyDataCopy;
-101    this.tracker = new RowTracker();
-102  }
+038import 
org.apache.hadoop.hbase.util.UnsafeAvailChecker;
+039
+040import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+041
+042/**
+043 * This is an optimized version of a standard FuzzyRowFilter. Filters data based on fuzzy row key.
+044 * Performs fast-forwards during scanning. It takes pairs (row key, fuzzy info) to match row keys.
+045 * Where fuzzy info is a byte array with 0 or 1 as its values:
+046 * <ul>
+047 * <li>0 - means that this byte in provided row key is fixed, i.e. row key's byte at same position
+048 * must match</li>
+049 * <li>1 - means that this byte in provided row key is NOT fixed, i.e. row key's byte at this
+050 * position can be different from the one in provided row key</li>
+051 * </ul>
+052 * Example: Let's assume row key format 
is 
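
The javadoc's userId_actionId_year_month example translates directly into client code. A hedged sketch (assumes the 15-byte layout described above; not code from this diff):

    import java.util.Arrays;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.FuzzyRowFilter;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.Pair;

    public class FuzzyScanSketch {
      // Matches "????_99_????_01": any userId, actionId 99, any year, month 01.
      public static Scan actionNinetyNineInJanuary() {
        byte[] rowKey = Bytes.toBytes("????_99_????_01");
        byte[] fuzzyInfo = {1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0};
        FuzzyRowFilter filter =
            new FuzzyRowFilter(Arrays.asList(new Pair<>(rowKey, fuzzyInfo)));
        return new Scan().setFilter(filter);
      }
    }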

[02/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopStateMachineProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopStateMachineProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopStateMachineProcedure.html
new file mode 100644
index 000..eb90a1f
--- /dev/null
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.NoopStateMachineProcedure.html
@@ -0,0 +1,692 @@
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018
+019package 
org.apache.hadoop.hbase.procedure2;
+020
+021import static 
org.junit.Assert.assertEquals;
+022import static 
org.junit.Assert.assertFalse;
+023import static 
org.junit.Assert.assertTrue;
+024
+025import java.io.IOException;
+026import java.util.ArrayList;
+027import java.util.Set;
+028import java.util.concurrent.Callable;
+029import 
org.apache.hadoop.conf.Configuration;
+030import org.apache.hadoop.fs.FileSystem;
+031import org.apache.hadoop.fs.Path;
+032import 
org.apache.hadoop.hbase.HConstants;
+033import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+034import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
+035import 
org.apache.hadoop.hbase.procedure2.store.NoopProcedureStore;
+036import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+037import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+038import 
org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
+039import 
org.apache.hadoop.hbase.util.NonceKey;
+040import 
org.apache.hadoop.hbase.util.Threads;
+041import org.slf4j.Logger;
+042import org.slf4j.LoggerFactory;
+043
+044import 
org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+045import 
org.apache.hbase.thirdparty.com.google.protobuf.BytesValue;
+046
+047import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
+048
+049public class ProcedureTestingUtility {
+050  private static final Logger LOG = 
LoggerFactory.getLogger(ProcedureTestingUtility.class);
+051
+052  private ProcedureTestingUtility() {
+053  }
+054
+055  public static ProcedureStore 
createStore(final Configuration conf, final Path dir)
+056  throws IOException {
+057return createWalStore(conf, dir);
+058  }
+059
+060  public static WALProcedureStore 
createWalStore(final Configuration conf, final Path dir)
+061  throws IOException {
+062return new WALProcedureStore(conf, 
dir, null, new WALProcedureStore.LeaseRecovery() {
+063  @Override
+064  public void 
recoverFileLease(FileSystem fs, Path path) throws IOException {
+065// no-op
+066  }
+067});
+068  }
+069
+070  public static <TEnv> void restart(final ProcedureExecutor<TEnv> procExecutor) throws Exception {
+071    restart(procExecutor, false, true, null, null, null);
+072  }
+073
+074  public static void initAndStartWorkers(ProcedureExecutor<?> procExecutor, int numThreads,
+075      boolean abortOnCorruption) throws IOException {
+076    procExecutor.init(numThreads, abortOnCorruption);
+077    procExecutor.startWorkers();
+078  }
+079
+080  public static <TEnv> void restart(ProcedureExecutor<TEnv> procExecutor,
+081      boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable<Void> stopAction,
+082      Callable<Void> actionBeforeStartWorker, Callable<Void> startAction)
+083  throws Exception {
+084final ProcedureStore procStore = 
procExecutor.getStore();
+085final int storeThreads = 
procExecutor.getCorePoolSize();
+086final int execThreads = 
procExecutor.getCorePoolSize();
+087
+088final ProcedureExecutor.Testing 
testing = procExecutor.testing;
+089if (avoidTestKillDuringRestart) {
+090  procExecutor.testing = null;
+091}
+092
+093// stop
+094LOG.info("RESTART - Stop");
+095procExecutor.stop();
+096procStore.stop(false);
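
In practice restart() is called mid-test to simulate a master crash between procedure steps. A hedged usage fragment (MyProcedure is a placeholder; waitProcedure is another helper on the same utility class):

    // Submit, "crash" and restart the executor, then wait for recovery to finish.
    long procId = procExecutor.submitProcedure(new MyProcedure());
    ProcedureTestingUtility.restart(procExecutor);
    ProcedureTestingUtility.waitProcedure(procExecutor, procId);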

[02/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.html
index 2edbb7d..5348cd0 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -170,54 +170,36 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 Method and Description
 
 
-(package private) org.apache.hadoop.hbase.procedure2.store.BitSetNode
-buildBitSetNode(long[] active,
-   long[] updated,
-   long[] deleted)
-
-
-(package private) org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker
-buildTracker(long[] active,
-long[] updated,
-long[] deleted)
-
-
-(package private) boolean
-isDeleted(org.apache.hadoop.hbase.procedure2.store.BitSetNode n,
- long procId)
-
-
-(package private) boolean
-isDeleted(org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker n,
- long procId)
-
-
 void
 testBasicCRUD()
 
-
+
 void
 testDelete()
 
-
+
+void
+testGetActiveProcIds()
+
+
 void
 testLoad()
 
-
+
 void
 testPartialTracker()
 
-
+
 void
 testRandLoad()
 
-
+
 void
 testSeqInsertAndDelete()
 
-
+
 void
-testSetDeletedIfSet()
+testSetDeletedIfModified()
 
 
 
@@ -338,65 +320,22 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 public void testDelete()
 
 
-
-
-
-
-
-isDeleted
-boolean isDeleted(org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker n,
-  long procId)
-
-
-
-
-
-
-
-isDeleted
-boolean isDeleted(org.apache.hadoop.hbase.procedure2.store.BitSetNode n,
-  long procId)
-
-
-
-
-
-
-
-buildTracker
-org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker buildTracker(long[] active,
- long[] updated,
- long[] deleted)
-
-Parameters:
-active - list of active proc ids. To mark them as 
non-deleted, since by default a proc
-   id is always marked deleted.
-
-
-
-
+
 
 
 
 
-buildBitSetNode
-org.apache.hadoop.hbase.procedure2.store.BitSetNode buildBitSetNode(long[] active,
- long[] updated,
- long[] deleted)
-
-Parameters:
-active - list of active proc ids. To mark them as 
non-deleted, since by default a proc
-   id is always marked deleted.
-
+testSetDeletedIfModified
+public void testSetDeletedIfModified()
 
 
-
+
 
 
 
 
-testSetDeletedIfSet
-public void testSetDeletedIfSet()
+testGetActiveProcIds
+public void testGetActiveProcIds()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.NoSyncWalProcedureStore.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.NoSyncWalProcedureStore.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.NoSyncWalProcedureStore.html
index d65b315..f28299f 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.NoSyncWalProcedureStore.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.NoSyncWalProcedureStore.html
@@ -219,7 +219,7 @@ extends 
org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore
 
 
 Methods inherited from class org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase
-isRunning, registerListener, sendAbortProcessSignal, sendPostSyncSignal, 
setRunning, unregisterListener
+isRunning, registerListener, sendAbortProcessSignal, 
sendForceUpdateSignal, sendPostSyncSignal, setRunning, 
unregisterListener
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.Worker.html
--
diff --git 

[02/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.MaxFileDescriptorResourceAnalyzer.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.MaxFileDescriptorResourceAnalyzer.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.MaxFileDescriptorResourceAnalyzer.html
index 47aac2c..cb4c472 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.MaxFileDescriptorResourceAnalyzer.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.MaxFileDescriptorResourceAnalyzer.html
@@ -26,221 +26,179 @@
 018
 019package org.apache.hadoop.hbase;
 020
-021import 
java.lang.management.ManagementFactory;
-022import 
java.lang.management.MemoryUsage;
-023import java.util.ArrayList;
-024import java.util.HashSet;
-025import java.util.List;
-026import java.util.Map;
-027import java.util.Set;
-028import 
java.util.concurrent.ConcurrentHashMap;
-029import java.util.concurrent.TimeUnit;
-030import 
org.apache.hadoop.hbase.ResourceChecker.Phase;
-031import 
org.apache.hadoop.hbase.util.JVM;
-032import 
org.junit.runner.notification.RunListener;
-033
-034/**
-035 * Listen to the test progress and check 
the usage of:
-036 * <ul>
-037 * <li>threads</li>
-038 * <li>open file descriptor</li>
-039 * <li>max open file descriptor</li>
-040 * </ul>
-041 * <p>
-042 * When surefire 
forkMode=once/always/perthread, this code is executed on the forked process.
-043 */
-044public class ResourceCheckerJUnitListener 
extends RunListener {
-045  private Map<String, ResourceChecker> rcs = new ConcurrentHashMap<>();
-046
-047  static class ThreadResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-048    private static Set<String> initialThreadNames = new HashSet<>();
-049    private static List<String> stringsToLog = null;
-050
-051    @Override
-052    public int getVal(Phase phase) {
-053      Map<Thread, StackTraceElement[]> stackTraces = Thread.getAllStackTraces();
-054      if (phase == Phase.INITIAL) {
-055        stringsToLog = null;
-056        for (Thread t : stackTraces.keySet()) {
-057          initialThreadNames.add(t.getName());
-058        }
-059      } else if (phase == Phase.END) {
-060        if (stackTraces.size() > initialThreadNames.size()) {
-061          stringsToLog = new ArrayList<>();
-062  for (Thread t : 
stackTraces.keySet()) {
-063if 
(!initialThreadNames.contains(t.getName())) {
-064  
stringsToLog.add("\nPotentially hanging thread: " + t.getName() + "\n");
-065  StackTraceElement[] 
stackElements = stackTraces.get(t);
-066  for (StackTraceElement ele 
: stackElements) {
-067stringsToLog.add("\t" + 
ele + "\n");
-068  }
-069}
-070  }
-071}
-072  }
-073  return stackTraces.size();
-074}
-075
-076@Override
-077public int getMax() {
-078  return 500;
-079}
-080
-081@Override
-082public List<String> getStringsToLog() {
-083  return stringsToLog;
-084}
-085  }
-086
-087
-088  static class 
OpenFileDescriptorResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-089@Override
-090public int getVal(Phase phase) {
-091  if (!JVM.isUnix()) {
-092return 0;
-093  }
-094  JVM jvm = new JVM();
-095  return (int) 
jvm.getOpenFileDescriptorCount();
-096}
-097
-098@Override
-099public int getMax() {
-100  return 1024;
-101}
-102  }
-103
-104  static class 
MaxFileDescriptorResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-105@Override
-106public int getVal(Phase phase) {
-107  if (!JVM.isUnix()) {
-108return 0;
-109  }
-110  JVM jvm = new JVM();
-111  return (int) 
jvm.getMaxFileDescriptorCount();
-112}
-113  }
-114
-115  static class 
SystemLoadAverageResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-116@Override
-117public int getVal(Phase phase) {
-118  if (!JVM.isUnix()) {
-119return 0;
-120  }
-121  return (int) (new 
JVM().getSystemLoadAverage() * 100);
-122}
-123  }
-124
-125  static class 
ProcessCountResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-126@Override
-127public int getVal(Phase phase) {
-128  if (!JVM.isUnix()) {
-129return 0;
-130  }
-131  return new 
JVM().getNumberOfRunningProcess();
-132}
-133  }
-134
-135  static class 
AvailableMemoryMBResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-136@Override
-137public int getVal(Phase phase) {
-138  if (!JVM.isUnix()) {
-139return 0;
-140  }
-141  return (int) (new 
JVM().getFreeMemory() / (1024L * 1024L));
-142}
-143  }
-144
-145  static class 
MaxHeapMemoryMBResourceAnalyzer extends 
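
Wiring such a listener into a run is ordinary JUnit plumbing; a minimal sketch (MyTest is a placeholder test class; surefire achieves the same through its listener configuration):

    import org.junit.runner.JUnitCore;

    public class RunWithResourceChecker {
      public static void main(String[] args) {
        JUnitCore core = new JUnitCore();
        // Snapshots thread and file-descriptor counts at Phase.INITIAL/Phase.END.
        core.addListener(new org.apache.hadoop.hbase.ResourceCheckerJUnitListener());
        System.out.println(core.run(MyTest.class).wasSuccessful());
      }
    }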

[02/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
index 566f410..da040ad 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
@@ -341,8361 +341,8425 @@
 333  private final int 
rowLockWaitDuration;
 334  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
 335
-336  // The internal wait duration to 
acquire a lock before read/update
-337  // from the region. It is not per row. 
The purpose of this wait time
-338  // is to avoid waiting a long time 
while the region is busy, so that
-339  // we can release the IPC handler soon 
enough to improve the
-340  // availability of the region server. 
It can be adjusted by
-341  // tuning configuration 
"hbase.busy.wait.duration".
-342  final long busyWaitDuration;
-343  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-344
-345  // If updating multiple rows in one 
call, wait longer,
-346  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-347  // we can limit the max multiplier.
-348  final int maxBusyWaitMultiplier;
-349
-350  // Max busy wait duration. There is no 
point to wait longer than the RPC
-351  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-352  final long maxBusyWaitDuration;
-353
-354  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-355  // in bytes
-356  final long maxCellSize;
-357
-358  // Number of mutations for minibatch 
processing.
-359  private final int miniBatchSize;
+336  private Path regionDir;
+337  private FileSystem walFS;
+338
+339  // The internal wait duration to 
acquire a lock before read/update
+340  // from the region. It is not per row. 
The purpose of this wait time
+341  // is to avoid waiting a long time 
while the region is busy, so that
+342  // we can release the IPC handler soon 
enough to improve the
+343  // availability of the region server. 
It can be adjusted by
+344  // tuning configuration 
"hbase.busy.wait.duration".
+345  final long busyWaitDuration;
+346  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+347
+348  // If updating multiple rows in one 
call, wait longer,
+349  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+350  // we can limit the max multiplier.
+351  final int maxBusyWaitMultiplier;
+352
+353  // Max busy wait duration. There is no 
point to wait longer than the RPC
+354  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+355  final long maxBusyWaitDuration;
+356
+357  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+358  // in bytes
+359  final long maxCellSize;
 360
-361  // negative number indicates infinite 
timeout
-362  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-363  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-364
-365  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
-366
-367  /**
-368   * The sequence ID that was encountered when this region was opened.
-369   */
-370  private long openSeqNum = 
HConstants.NO_SEQNUM;
-371
-372  /**
-373   * The default setting for whether to 
enable on-demand CF loading for
-374   * scan requests to this region. 
Requests can override it.
-375   */
-376  private boolean 
isLoadingCfsOnDemandDefault = false;
-377
-378  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-379  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
+361  // Number of mutations for minibatch 
processing.
+362  private final int miniBatchSize;
+363
+364  // negative number indicates infinite 
timeout
+365  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+366  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
+367
+368  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+369
+370  /**
+371   * The sequence ID that was encountered when this region was opened.
+372   */
+373  private long openSeqNum = 
HConstants.NO_SEQNUM;
+374
+375  /**
+376   * The default setting for whether to 
enable on-demand CF loading for
+377   * scan requests to this region. 
Requests can override it.
+378   */
+379  private boolean 
isLoadingCfsOnDemandDefault = false;
 380
-381  //
-382  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-383  // have to be conservative in how we 
replay wals. For each store, we calculate
-384  // the maxSeqId up to which the store 
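
The busy-wait comments above amount to a small formula: a multi-row call may wait busyWaitDuration per row, but the per-call multiplier and the total are both capped. A hedged restatement of that arithmetic (field names from above, logic inferred from the comments, not HRegion's exact code):

    final class BusyWaitMath {
      static long waitTime(long busyWaitDuration, int maxBusyWaitMultiplier,
          long maxBusyWaitDuration, int rowCount) {
        long wait = busyWaitDuration * Math.min(rowCount, maxBusyWaitMultiplier);
        return Math.min(wait, maxBusyWaitDuration);
      }
    }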

[02/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
index 804ef45..e999ddb 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
@@ -138,2491 +138,2492 @@
 130  // SizeOf which uses 
java.lang.instrument says 24 bytes. (3 longs?)
 131  public static final int 
ESTIMATED_HEAP_TAX = 16;
 132
-133  private static final boolean 
UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
-134
-135  /**
-136   * Returns length of the byte array, 
returning 0 if the array is null.
-137   * Useful for calculating sizes.
-138   * @param b byte array, which can be 
null
-139   * @return 0 if b is null, otherwise 
returns length
-140   */
-141  final public static int len(byte[] b) 
{
-142return b == null ? 0 : b.length;
-143  }
-144
-145  private byte[] bytes;
-146  private int offset;
-147  private int length;
-148
-149  /**
-150   * Create a zero-size sequence.
-151   */
-152  public Bytes() {
-153super();
-154  }
-155
-156  /**
-157   * Create a Bytes using the byte array 
as the initial value.
-158   * @param bytes This array becomes the 
backing storage for the object.
-159   */
-160  public Bytes(byte[] bytes) {
-161this(bytes, 0, bytes.length);
-162  }
-163
-164  /**
-165   * Set the new Bytes to the contents of 
the passed
-166   * codeibw/code.
-167   * @param ibw the value to set this 
Bytes to.
-168   */
-169  public Bytes(final Bytes ibw) {
-170this(ibw.get(), ibw.getOffset(), 
ibw.getLength());
-171  }
-172
-173  /**
-174   * Set the value to a given byte 
range
-175   * @param bytes the new byte range to 
set to
-176   * @param offset the offset in newData 
to start at
-177   * @param length the number of bytes in 
the range
-178   */
-179  public Bytes(final byte[] bytes, final 
int offset,
-180  final int length) {
-181this.bytes = bytes;
-182this.offset = offset;
-183this.length = length;
-184  }
-185
-186  /**
-187   * Copy bytes from ByteString 
instance.
-188   * @param byteString copy from
-189   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0.
-190   */
-191  @Deprecated
-192  public Bytes(final ByteString 
byteString) {
-193this(byteString.toByteArray());
-194  }
-195
-196  /**
-197   * Get the data from the Bytes.
-198   * @return The data is only valid 
between offset and offset+length.
-199   */
-200  public byte [] get() {
-201if (this.bytes == null) {
-202  throw new IllegalStateException("Uninitialized. Null constructor " +
-203      "called w/o accompanying readFields invocation");
-204}
-205return this.bytes;
-206  }
-207
-208  /**
-209   * @param b Use passed bytes as backing 
array for this instance.
-210   */
-211  public void set(final byte [] b) {
-212set(b, 0, b.length);
-213  }
-214
-215  /**
-216   * @param b Use passed bytes as backing 
array for this instance.
-217   * @param offset
-218   * @param length
-219   */
-220  public void set(final byte [] b, final 
int offset, final int length) {
-221this.bytes = b;
-222this.offset = offset;
-223this.length = length;
-224  }
-225
-226  /**
-227   * @return the number of valid bytes in 
the buffer
-228   * @deprecated use {@link #getLength()} 
instead
-229   */
-230  @Deprecated
-231  public int getSize() {
-232if (this.bytes == null) {
-233  throw new IllegalStateException("Uninitialized. Null constructor " +
-234      "called w/o accompanying readFields invocation");
-235}
-236return this.length;
-237  }
-238
-239  /**
-240   * @return the number of valid bytes in 
the buffer
-241   */
-242  public int getLength() {
-243if (this.bytes == null) {
-244  throw new IllegalStateException("Uninitialized. Null constructor " +
-245      "called w/o accompanying readFields invocation");
-246}
-247return this.length;
-248  }
-249
-250  /**
-251   * @return offset
-252   */
-253  public int getOffset(){
-254return this.offset;
-255  }
-256
-257  /**
-258   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0.
-259   */
-260  @Deprecated
-261  public ByteString toByteString() {
-262return 
ByteString.copyFrom(this.bytes, this.offset, this.length);
-263  }
-264
-265  @Override
-266  public int hashCode() {
-267return Bytes.hashCode(bytes, offset, 
length);
-268  }
-269
-270  /**
-271   * Define the sort order of the 
Bytes.
-272   * @param that The other bytes 
writable
-273   * @return Positive if left is bigger 
than right, 0 if they are equal, and
-274   * negative if left is smaller 
than right.
-275   */

[02/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/testdevapidocs/org/apache/hadoop/hbase/client/TestHbck.SuspendProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestHbck.SuspendProcedure.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestHbck.SuspendProcedure.html
new file mode 100644
index 000..6c88121
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestHbck.SuspendProcedure.html
@@ -0,0 +1,383 @@
+TestHbck.SuspendProcedure (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase.client
+Class 
TestHbck.SuspendProcedure
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.procedure2.Procedure<TEnv>
+
+
+org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure<org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv>
+
+
+org.apache.hadoop.hbase.client.TestHbck.SuspendProcedure
+
+
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+java.lang.Comparable<org.apache.hadoop.hbase.procedure2.Procedure<org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv>>,
 org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+
+
+Enclosing class:
+TestHbck
+
+
+
+public static class TestHbck.SuspendProcedure
+extends ProcedureTestingUtility.NoopProcedure<org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv>
+implements 
org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+org.apache.hadoop.hbase.procedure2.Procedure.LockState
+
+
+
+
+
+Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
+org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+NO_PROC_ID, NO_TIMEOUT
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+SuspendProcedure()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+protected 
org.apache.hadoop.hbase.procedure2.Procedure[]
+execute(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv env)
+
+
+org.apache.hadoop.hbase.TableName
+getTableName()
+
+
+org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
+getTableOperationType()
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure
+abort,
 deserializeStateData,
 rollback,
 serializeStateData
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+acquireLock, addStackIndex, afterReplay, beforeReplay, compareTo, 
completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, 
getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, 
getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, 
getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, 
getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, 
hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isBypass, 
isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, 
isYieldAfterExecutionStep, releaseLock, removeStackIndex, setAbortFailure, 
setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, 
setOwner, setParentProcId, setProcId, setResult, setRootProcId, 
setStackIndexes, 

[02/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
index 1b52048..ce887a2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
@@ -594,1003 +594,1033 @@
 586  private boolean failOnError = true;
 587  private boolean regionServerMode = 
false;
 588  private boolean zookeeperMode = 
false;
-589  private boolean regionServerAllRegions 
= false;
-590  private boolean writeSniffing = 
false;
-591  private long 
configuredWriteTableTimeout = DEFAULT_TIMEOUT;
-592  private boolean treatFailureAsError = 
false;
-593  private TableName writeTableName = 
DEFAULT_WRITE_TABLE_NAME;
-594  private HashMap<String, Long> configuredReadTableTimeouts = new HashMap<>();
-595
-596  private ExecutorService executor; // 
threads to retrieve data from regionservers
-597
-598  public Canary() {
-599this(new 
ScheduledThreadPoolExecutor(1), new RegionServerStdOutSink());
-600  }
-601
-602  public Canary(ExecutorService executor, 
Sink sink) {
-603this.executor = executor;
-604this.sink = sink;
-605  }
-606
-607  @Override
-608  public Configuration getConf() {
-609return conf;
-610  }
-611
-612  @Override
-613  public void setConf(Configuration conf) 
{
-614this.conf = conf;
-615  }
-616
-617  private int parseArgs(String[] args) 
{
-618int index = -1;
-619// Process command line args
-620for (int i = 0; i < args.length; i++) {
-621  String cmd = args[i];
-622
-623  if (cmd.startsWith("-")) {
-624if (index = 0) {
-625  // command line args must be in 
the form: [opts] [table 1 [table 2 ...]]
-626  System.err.println("Invalid 
command line options");
-627  printUsageAndExit();
-628}
-629
-630if (cmd.equals("-help")) {
-631  // user asked for help, print 
the help and quit.
-632  printUsageAndExit();
-633} else if (cmd.equals("-daemon") 
 interval == 0) {
-634  // user asked for daemon mode, 
set a default interval between checks
-635  interval = DEFAULT_INTERVAL;
-636} else if 
(cmd.equals("-interval")) {
-637  // user has specified an 
interval for canary breaths (-interval N)
-638  i++;
-639
-640  if (i == args.length) {
-641System.err.println("-interval 
needs a numeric value argument.");
-642printUsageAndExit();
-643  }
-644
-645  try {
-646interval = 
Long.parseLong(args[i]) * 1000;
-647  } catch (NumberFormatException 
e) {
-648System.err.println("-interval 
needs a numeric value argument.");
-649printUsageAndExit();
-650  }
-651} else if 
(cmd.equals("-zookeeper")) {
-652  this.zookeeperMode = true;
-653} else 
if(cmd.equals("-regionserver")) {
-654  this.regionServerMode = true;
-655} else 
if(cmd.equals("-allRegions")) {
-656  this.regionServerAllRegions = 
true;
-657} else 
if(cmd.equals("-writeSniffing")) {
-658  this.writeSniffing = true;
-659} else 
if(cmd.equals("-treatFailureAsError")) {
-660  this.treatFailureAsError = 
true;
-661} else if (cmd.equals("-e")) {
-662  this.useRegExp = true;
-663} else if (cmd.equals("-t")) {
-664  i++;
-665
-666  if (i == args.length) {
-667System.err.println("-t needs 
a numeric value argument.");
-668printUsageAndExit();
-669  }
-670
-671  try {
-672this.timeout = 
Long.parseLong(args[i]);
-673  } catch (NumberFormatException 
e) {
-674System.err.println("-t needs 
a numeric value argument.");
-675printUsageAndExit();
-676  }
-677} else 
if(cmd.equals("-writeTableTimeout")) {
-678  i++;
-679
-680  if (i == args.length) {
-681
System.err.println("-writeTableTimeout needs a numeric value argument.");
-682printUsageAndExit();
-683  }
-684
-685  try {
-686
this.configuredWriteTableTimeout = Long.parseLong(args[i]);
-687  } catch (NumberFormatException 
e) {
-688
System.err.println("-writeTableTimeout needs a numeric value argument.");
-689printUsageAndExit();
-690  }
-691} else if 
(cmd.equals("-writeTable")) {
-692  i++;
-693
-694  if (i == args.length) {
-695
System.err.println("-writeTable needs a string value argument.");
-696printUsageAndExit();
-697  }
-698  this.writeTableName = 
TableName.valueOf(args[i]);
-699 
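
The flags parsed above map one-to-one onto a programmatic launch; the canary runs as a standard Hadoop Tool. A hedged sketch (flag spellings taken from the parsing code, everything else illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.tool.Canary;
    import org.apache.hadoop.util.ToolRunner;

    public class CanaryLauncher {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Daemon mode, 30s between runs (parseArgs multiplies -interval by 1000),
        // with write sniffing enabled.
        int rc = ToolRunner.run(conf, new Canary(),
            new String[] { "-daemon", "-interval", "30", "-writeSniffing" });
        System.exit(rc);
      }
    }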

[02/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index a8cb7c4..8ec6dad 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -2831,5851 +2831,5852 @@
 2823status.setStatus(msg);
 2824
 2825if (rsServices != null && rsServices.getMetrics() != null) {
-2826  
rsServices.getMetrics().updateFlush(time - startTime,
-2827  mss.getDataSize(), 
flushedOutputFileSize);
-2828}
-2829
-2830return new 
FlushResultImpl(compactionRequested ?
-2831
FlushResult.Result.FLUSHED_COMPACTION_NEEDED :
-2832  
FlushResult.Result.FLUSHED_NO_COMPACTION_NEEDED, flushOpSeqId);
-2833  }
-2834
-2835  /**
-2836   * Method to safely get the next 
sequence number.
-2837   * @return Next sequence number 
unassociated with any actual edit.
-2838   * @throws IOException
-2839   */
-2840  @VisibleForTesting
-2841  protected long getNextSequenceId(final 
WAL wal) throws IOException {
-2842WriteEntry we = mvcc.begin();
-2843mvcc.completeAndWait(we);
-2844return we.getWriteNumber();
-2845  }
-2846
-2847  
//
-2848  // get() methods for client use.
-2849  
//
-2850
-2851  @Override
-2852  public RegionScannerImpl 
getScanner(Scan scan) throws IOException {
-2853   return getScanner(scan, null);
-2854  }
-2855
-2856  @Override
-2857  public RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additionalScanners)
-2858  throws IOException {
-2859return getScanner(scan, 
additionalScanners, HConstants.NO_NONCE, HConstants.NO_NONCE);
-2860  }
-2861
-2862  private RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additionalScanners,
-2863  long nonceGroup, long nonce) 
throws IOException {
-2864
startRegionOperation(Operation.SCAN);
-2865try {
-2866  // Verify families are all valid
-2867  if (!scan.hasFamilies()) {
-2868// Adding all families to 
scanner
-2869for (byte[] family : 
this.htableDescriptor.getColumnFamilyNames()) {
-2870  scan.addFamily(family);
-2871}
-2872  } else {
-2873for (byte[] family : 
scan.getFamilyMap().keySet()) {
-2874  checkFamily(family);
-2875}
-2876  }
-2877  return 
instantiateRegionScanner(scan, additionalScanners, nonceGroup, nonce);
-2878} finally {
-2879  
closeRegionOperation(Operation.SCAN);
-2880}
-2881  }
-2882
-2883  protected RegionScanner 
instantiateRegionScanner(Scan scan,
-2884      List<KeyValueScanner> additionalScanners) throws IOException {
-2885return 
instantiateRegionScanner(scan, additionalScanners, HConstants.NO_NONCE,
-2886  HConstants.NO_NONCE);
-2887  }
-2888
-2889  protected RegionScannerImpl 
instantiateRegionScanner(Scan scan,
-2890      List<KeyValueScanner> additionalScanners, long nonceGroup, long nonce) throws IOException {
-2891if (scan.isReversed()) {
-2892  if (scan.getFilter() != null) {
-2893
scan.getFilter().setReversed(true);
-2894  }
-2895  return new 
ReversedRegionScannerImpl(scan, additionalScanners, this);
-2896}
-2897return new RegionScannerImpl(scan, 
additionalScanners, this, nonceGroup, nonce);
-2898  }
-2899
-2900  /**
-2901   * Prepare a delete for a row mutation 
processor
-2902   * @param delete The passed delete is 
modified by this method. WARNING!
-2903   * @throws IOException
-2904   */
-2905  public void prepareDelete(Delete 
delete) throws IOException {
-2906// Check to see if this is a 
deleteRow insert
-2907
if(delete.getFamilyCellMap().isEmpty()){
-2908  for(byte [] family : 
this.htableDescriptor.getColumnFamilyNames()){
-2909// Don't eat the timestamp
-2910delete.addFamily(family, 
delete.getTimestamp());
-2911  }
-2912} else {
-2913  for(byte [] family : 
delete.getFamilyCellMap().keySet()) {
-2914if(family == null) {
-2915  throw new 
NoSuchColumnFamilyException("Empty family is invalid");
-2916}
-2917checkFamily(family);
-2918  }
-2919}
-2920  }
-2921
-2922  @Override
-2923  public void delete(Delete delete) 
throws IOException {
-2924checkReadOnly();
-2925checkResources();
-2926
startRegionOperation(Operation.DELETE);
-2927try {
-2928  // All edits for the given row 
(across all column families) must happen atomically.
-2929  doBatchMutate(delete);
-2930} finally {
-2931  

[02/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testdevapidocs/org/apache/hadoop/hbase/master/AbstractTestDLS.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/master/AbstractTestDLS.html 
b/testdevapidocs/org/apache/hadoop/hbase/master/AbstractTestDLS.html
index f61f909..5980818 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/master/AbstractTestDLS.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/master/AbstractTestDLS.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public abstract class AbstractTestDLS
+public abstract class AbstractTestDLS
extends java.lang.Object
 Base class for testing distributed log splitting.
 
@@ -351,7 +351,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -360,7 +360,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL
 
 
 
@@ -369,7 +369,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 NUM_MASTERS
-private static final int NUM_MASTERS
+private static final int NUM_MASTERS
 
 See Also:
 Constant
 Field Values
@@ -382,7 +382,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 NUM_RS
-private static final int NUM_RS
+private static final int NUM_RS
 
 See Also:
 Constant
 Field Values
@@ -395,7 +395,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 COLUMN_FAMILY
-private static byte[] COLUMN_FAMILY
+private static byte[] COLUMN_FAMILY
 
 
 
@@ -404,7 +404,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 testName
-public org.junit.rules.TestName testName
+public org.junit.rules.TestName testName
 
 
 
@@ -413,7 +413,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 tableName
-private org.apache.hadoop.hbase.TableName tableName
+private org.apache.hadoop.hbase.TableName tableName
 
 
 
@@ -422,7 +422,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 cluster
-private MiniHBaseCluster cluster
+private MiniHBaseCluster cluster
 
 
 
@@ -431,7 +431,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 master
-private org.apache.hadoop.hbase.master.HMaster master
+private org.apache.hadoop.hbase.master.HMaster master
 
 
 
@@ -440,7 +440,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 conf
-private org.apache.hadoop.conf.Configuration conf
+private org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -449,7 +449,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
 
 
 
@@ -466,7 +466,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 AbstractTestDLS
-publicAbstractTestDLS()
-public AbstractTestDLS()
+public AbstractTestDLS()
 
 
@@ -483,7 +483,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 setup
-public static void setup()
+public static void setup()
   throws java.lang.Exception
 
 Throws:
@@ -497,7 +497,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 tearDown
-public static void tearDown()
+public static void tearDown()
  throws java.lang.Exception
 
 Throws:
@@ -511,7 +511,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getWalProvider
-protected abstract java.lang.String getWalProvider()
+protected abstract java.lang.String getWalProvider()
 
 
 
@@ -520,7 +520,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 startCluster
-private void startCluster(int numRS)
+private void startCluster(int numRS)
 throws java.lang.Exception
 
 Throws:
@@ -534,7 +534,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 before
-public void before()
+public void before()
 throws java.lang.Exception

[02/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.html
index ad7c82a..1dfa7b8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.html
@@ -29,307 +29,322 @@
 021
 022import java.io.IOException;
 023import java.util.ArrayList;
-024
-025import org.apache.hadoop.hbase.Cell;
-026import 
org.apache.hadoop.hbase.CompareOperator;
-027import 
org.apache.hadoop.hbase.PrivateCellUtil;
-028import 
org.apache.hadoop.hbase.util.Bytes;
-029import 
org.apache.yetus.audience.InterfaceAudience;
-030
-031import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-032
-033import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-034import 
org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-035import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-036import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType;
-037
-038/**
-039 * This is a generic filter to be used to filter by comparison.  It takes an
-040 * operator (equal, greater, not equal, etc) and a byte [] comparator.
-041 * <p>
-042 * To filter by row key, use {@link RowFilter}.
-043 * <p>
-044 * To filter by column family, use {@link FamilyFilter}.
-045 * <p>
-046 * To filter by column qualifier, use {@link QualifierFilter}.
-047 * <p>
-048 * To filter by value, use {@link ValueFilter}.
-049 * <p>
-050 * These filters can be wrapped with {@link SkipFilter} and {@link WhileMatchFilter}
-051 * to add more control.
-052 * <p>
-053 * Multiple filters can be combined using {@link FilterList}.
-054 */
-055@InterfaceAudience.Public
-056public abstract class CompareFilter 
extends FilterBase {
-057  /**
-058   * Comparison operators. For filters 
only!
-059   * Use {@link CompareOperator} 
otherwise.
-060   * It (intentionally) has at least the 
below enums with same names.
-061   * @deprecated  since 2.0.0. Will be 
removed in 3.0.0. Use {@link CompareOperator} instead.
-062   */
-063  @Deprecated
-064  @InterfaceAudience.Public
-065  public enum CompareOp {
-066/** less than */
-067LESS,
-068/** less than or equal to */
-069LESS_OR_EQUAL,
-070/** equals */
-071EQUAL,
-072/** not equal */
-073NOT_EQUAL,
-074/** greater than or equal to */
-075GREATER_OR_EQUAL,
-076/** greater than */
-077GREATER,
-078/** no operation */
-079NO_OP,
-080  }
-081
-082  protected CompareOperator op;
-083  protected ByteArrayComparable 
comparator;
-084
-085  /**
-086   * Constructor.
-087   * @param compareOp the compare op for 
row matching
-088   * @param comparator the comparator for 
row matching
-089   * @deprecated Since 2.0.0. Will be 
removed in 3.0.0. Use other constructor.
-090   */
-091  @Deprecated
-092  public CompareFilter(final CompareOp 
compareOp,
-093  final ByteArrayComparable 
comparator) {
-094
this(CompareOperator.valueOf(compareOp.name()), comparator);
-095  }
-096
-097  /**
-098   * Constructor.
-099   * @param op the compare op for row 
matching
-100   * @param comparator the comparator for 
row matching
-101   */
-102  public CompareFilter(final 
CompareOperator op,
-103   final 
ByteArrayComparable comparator) {
-104this.op = op;
-105this.comparator = comparator;
-106  }
-107
-108  /**
-109   * @return operator
-110   * @deprecated  since 2.0.0. Will be 
removed in 3.0.0. Use {@link #getCompareOperator()} instead.
-111   */
-112  @Deprecated
-113  public CompareOp getOperator() {
-114return 
CompareOp.valueOf(op.name());
-115  }
-116
-117  public CompareOperator 
getCompareOperator() {
-118return op;
-119  }
-120
-121  /**
-122   * @return the comparator
-123   */
-124  public ByteArrayComparable 
getComparator() {
-125return comparator;
-126  }
-127
-128  @Override
-129  public boolean filterRowKey(Cell cell) 
throws IOException {
-130// Impl in FilterBase might do 
unnecessary copy for Off heap backed Cells.
-131return false;
-132  }
-133
-134  /**
-135   * @deprecated Since 2.0.0. Will be 
removed in 3.0.0.
-136   * Use {@link 
#compareRow(CompareOperator, ByteArrayComparable, Cell)}
-137   */
-138  @Deprecated
-139  protected boolean compareRow(final 
CompareOp compareOp, final ByteArrayComparable comparator,
-140  final Cell cell) {
-141if (compareOp == CompareOp.NO_OP) {
-142  return true;
-143}
-144int compareResult = 
PrivateCellUtil.compareRow(cell, comparator);
-145return compare(compareOp, 
compareResult);
-146  }
-147
-148  protected boolean compareRow(final 
CompareOperator op, final ByteArrayComparable comparator,
-149   

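The javadoc above says a CompareFilter pairs an operator with a byte[] comparator, and that multiple filters combine via FilterList. A minimal sketch against the public HBase 2.x filter API (the value and qualifier bytes are made up):

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

class CompareFilterSketch {
  static Scan scanMatchingV1() {
    // keep cells whose value equals "v1" and whose qualifier is not "q2"
    ValueFilter value =
        new ValueFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("v1")));
    QualifierFilter qualifier =
        new QualifierFilter(CompareOperator.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("q2")));
    FilterList both = new FilterList(FilterList.Operator.MUST_PASS_ALL, value, qualifier);
    return new Scan().setFilter(both);
  }
}
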
[02/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
index 81f5178..7df71bd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
@@ -108,3669 +108,3727 @@
 100import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 101import 
org.apache.hadoop.hbase.log.HBaseMarkers;
 102import 
org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-103import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-104import 
org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-105import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-106import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-107import 
org.apache.hadoop.hbase.master.balancer.BalancerChore;
-108import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-109import 
org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-110import 
org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-111import 
org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-112import 
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-113import 
org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-114import 
org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-115import 
org.apache.hadoop.hbase.master.locking.LockManager;
-116import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-117import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-118import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-119import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-120import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
-121import 
org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
-122import 
org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
-123import 
org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
-124import 
org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-125import 
org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
-126import 
org.apache.hadoop.hbase.master.procedure.InitMetaProcedure;
-127import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-128import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-129import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-130import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-131import 
org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
-132import 
org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-133import 
org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-134import 
org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
-135import 
org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure;
-136import 
org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
-137import 
org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
-138import 
org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
-139import 
org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
-140import 
org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
-141import 
org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager;
-142import 
org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure;
-143import 
org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
-144import 
org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-145import 
org.apache.hadoop.hbase.master.zksyncer.MasterAddressSyncer;
-146import 
org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
-147import 
org.apache.hadoop.hbase.mob.MobConstants;
-148import 
org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
-149import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-150import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-151import 
org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
-152import 
org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
-153import 
org.apache.hadoop.hbase.procedure2.LockedResource;
-154import 
org.apache.hadoop.hbase.procedure2.Procedure;
-155import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-156import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-157import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-158import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureException;

[02/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.Corruptions.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.Corruptions.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.Corruptions.html
index ee05a1d..06f2ffa 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.Corruptions.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.Corruptions.html
@@ -138,1307 +138,1310 @@
 130  private Path OLDLOGDIR;
 131  private Path CORRUPTDIR;
 132  private Path TABLEDIR;
-133
-134  private static final int NUM_WRITERS = 
10;
-135  private static final int ENTRIES = 10; 
// entries per writer per region
-136
-137  private static final String 
FILENAME_BEING_SPLIT = "testfile";
-138  private static final TableName 
TABLE_NAME =
-139  TableName.valueOf("t1");
-140  private static final byte[] FAMILY = 
Bytes.toBytes("f1");
-141  private static final byte[] QUALIFIER = 
Bytes.toBytes("q1");
-142  private static final byte[] VALUE = 
Bytes.toBytes("v1");
-143  private static final String 
WAL_FILE_PREFIX = "wal.dat.";
-144  private static List<String> REGIONS = new ArrayList<>();
-145  private static final String 
HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors";
-146  private static String ROBBER;
-147  private static String ZOMBIE;
-148  private static String [] GROUP = new 
String [] {"supergroup"};
-149
-150  static enum Corruptions {
-151INSERT_GARBAGE_ON_FIRST_LINE,
-152INSERT_GARBAGE_IN_THE_MIDDLE,
-153APPEND_GARBAGE,
-154TRUNCATE,
-155TRUNCATE_TRAILER
-156  }
-157
-158  @BeforeClass
-159  public static void setUpBeforeClass() 
throws Exception {
-160conf = 
TEST_UTIL.getConfiguration();
-161
conf.setClass("hbase.regionserver.hlog.writer.impl",
-162InstrumentedLogWriter.class, 
Writer.class);
-163// This is how you turn off 
shortcircuit read currently.  TODO: Fix.  Should read config.
-164
System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
-165    // Create fake mapping of user to group and set it on the conf.
-166    Map<String, String[]> u2g_map = new HashMap<>(2);
-167ROBBER = User.getCurrent().getName() 
+ "-robber";
-168ZOMBIE = User.getCurrent().getName() 
+ "-zombie";
-169u2g_map.put(ROBBER, GROUP);
-170u2g_map.put(ZOMBIE, GROUP);
-171
DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
-172conf.setInt("dfs.heartbeat.interval", 
1);
-173TEST_UTIL.startMiniDFSCluster(2);
-174  }
-175
-176  @AfterClass
-177  public static void tearDownAfterClass() 
throws Exception {
-178TEST_UTIL.shutdownMiniDFSCluster();
-179  }
-180
-181  @Rule
-182  public TestName name = new 
TestName();
-183  private WALFactory wals = null;
-184
-185  @Before
-186  public void setUp() throws Exception 
{
-187LOG.info("Cleaning up cluster for new 
test.");
-188fs = 
TEST_UTIL.getDFSCluster().getFileSystem();
-189HBASEDIR = 
TEST_UTIL.createRootDir();
-190HBASELOGDIR = 
TEST_UTIL.createWALRootDir();
-191OLDLOGDIR = new Path(HBASELOGDIR, 
HConstants.HREGION_OLDLOGDIR_NAME);
-192CORRUPTDIR = new Path(HBASELOGDIR, 
HConstants.CORRUPT_DIR_NAME);
-193TABLEDIR = 
FSUtils.getTableDir(HBASEDIR, TABLE_NAME);
-194REGIONS.clear();
-195Collections.addAll(REGIONS, "bbb", 
"ccc");
-196InstrumentedLogWriter.activateFailure 
= false;
-197wals = new WALFactory(conf, 
name.getMethodName());
-198WALDIR = new Path(HBASELOGDIR,
-199
AbstractFSWALProvider.getWALDirectoryName(ServerName.valueOf(name.getMethodName(),
-20016010, 
System.currentTimeMillis()).toString()));
-201//fs.mkdirs(WALDIR);
-202  }
-203
-204  @After
-205  public void tearDown() throws Exception 
{
-206try {
-207  wals.close();
-208} catch(IOException exception) {
-209  // Some tests will move WALs out 
from under us. In those cases, we'll get an error on close.
-210  LOG.info("Ignoring an error while 
closing down our WALFactory. Fine for some tests, but if" +
-211  " you see a failure look 
here.");
-212  LOG.debug("exception details", 
exception);
-213} finally {
-214  wals = null;
-215  fs.delete(HBASEDIR, true);
-216  fs.delete(HBASELOGDIR, true);
-217}
-218  }
-219
-220  /**
-221   * Simulates splitting a WAL out from 
under a regionserver that is still trying to write it.
-222   * Ensures we do not lose edits.
-223   * @throws IOException
-224   * @throws InterruptedException
-225   */
-226  @Test
-227  public void 
testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
-228final AtomicLong counter = new 
AtomicLong(0);
-229AtomicBoolean stop = new 
AtomicBoolean(false);
-230    // Region we'll write edits to, and then later examine to make sure they all made it in.
-231final 

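The test source above enumerates WAL corruption modes and asserts the splitter loses no edits. A hedged sketch of driving those cases; the enum is copied from the listing, while applyAndVerify(...) is a hypothetical helper, not part of the real test:

enum Corruptions {
  INSERT_GARBAGE_ON_FIRST_LINE,
  INSERT_GARBAGE_IN_THE_MIDDLE,
  APPEND_GARBAGE,
  TRUNCATE,
  TRUNCATE_TRAILER
}

class CorruptionSweepSketch {
  static void sweep() {
    for (Corruptions c : Corruptions.values()) {
      applyAndVerify(c);
    }
  }

  static void applyAndVerify(Corruptions c) {
    // hypothetical: write WAL entries, corrupt the file per c, run the
    // splitter with hbase.hlog.split.skip.errors set, verify no edits lost
  }
}
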
[02/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/testdevapidocs/org/apache/hadoop/hbase/HBaseClusterManager.RemoteShell.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/HBaseClusterManager.RemoteShell.html 
b/testdevapidocs/org/apache/hadoop/hbase/HBaseClusterManager.RemoteShell.html
index 1e10092..415dcdd 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/HBaseClusterManager.RemoteShell.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/HBaseClusterManager.RemoteShell.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected class HBaseClusterManager.RemoteShell
+protected class HBaseClusterManager.RemoteShell
 extends org.apache.hadoop.util.Shell.ShellCommandExecutor
 Executes commands over SSH
 
@@ -291,7 +291,7 @@ extends 
org.apache.hadoop.util.Shell.ShellCommandExecutor
 
 
 hostname
-private String hostname
+private String hostname
 
 
 
@@ -300,7 +300,7 @@ extends 
org.apache.hadoop.util.Shell.ShellCommandExecutor
 
 
 user
-private String user
+private String user
 
 
 
@@ -317,7 +317,7 @@ extends 
org.apache.hadoop.util.Shell.ShellCommandExecutor
 
 
 RemoteShell
-public RemoteShell(String hostname,
+public RemoteShell(String hostname,
                    String[] execString,
                    File dir,
                    Map<String,String> env,
@@ -330,7 +330,7 @@ extends 
org.apache.hadoop.util.Shell.ShellCommandExecutor
 
 
 RemoteShell
-public RemoteShell(String hostname,
+public RemoteShell(String hostname,
                    String[] execString,
                    File dir,
                    Map<String,String> env)
@@ -342,7 +342,7 @@ extends 
org.apache.hadoop.util.Shell.ShellCommandExecutor
 
 
 RemoteShell
-public RemoteShell(String hostname,
+public RemoteShell(String hostname,
                    String[] execString,
                    File dir)
 
@@ -353,7 +353,7 @@ extends 
org.apache.hadoop.util.Shell.ShellCommandExecutor
 
 
 RemoteShell
-public RemoteShell(String hostname,
+public RemoteShell(String hostname,
  

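RemoteShell above executes commands over SSH by subclassing Hadoop's Shell.ShellCommandExecutor. A hedged sketch of that pattern, assuming deliberately simplified ssh handling (no options, port, or identity file):

import java.io.File;
import java.util.Map;
import org.apache.hadoop.util.Shell;

class SshShellSketch extends Shell.ShellCommandExecutor {
  private final String hostname;

  SshShellSketch(String hostname, String[] execString, File dir, Map<String, String> env) {
    super(execString, dir, env);
    this.hostname = hostname;
  }

  @Override
  public String[] getExecString() {
    // run the wrapped command on the remote host instead of locally
    String remoteCmd = String.join(" ", super.getExecString());
    return new String[] { "ssh", hostname, remoteCmd };
  }
}
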
[02/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/locking/LockProcedure.LockInterface.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/locking/LockProcedure.LockInterface.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/locking/LockProcedure.LockInterface.html
index aba85f1..a2daee8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/locking/LockProcedure.LockInterface.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/locking/LockProcedure.LockInterface.html
@@ -84,420 +84,411 @@
 076  private String description;
 077  // True when recovery of master lock 
from WALs
 078  private boolean recoveredMasterLock;
-079  // this is for internal working
-080  private boolean hasLock;
-081
-082  private final ProcedureEvent<LockProcedure> event = new ProcedureEvent<>(this);
-083  // True if this proc acquired relevant 
locks. This value is for client checks.
-084  private final AtomicBoolean locked = 
new AtomicBoolean(false);
-085  // Last system time (in ms) when client 
sent the heartbeat.
-086  // Initialize to system time for 
non-null value in case of recovery.
-087  private final AtomicLong lastHeartBeat 
= new AtomicLong();
-088  // Set to true when unlock request is 
received.
-089  private final AtomicBoolean unlock = 
new AtomicBoolean(false);
-090  // decreased when locks are acquired. 
Only used for local (with master process) purposes.
-091  // Setting latch to non-null value 
increases default timeout to
-092  // 
DEFAULT_LOCAL_MASTER_LOCKS_TIMEOUT_MS (10 min) so that there is no need to 
heartbeat.
-093  private final CountDownLatch 
lockAcquireLatch;
-094
-095  @Override
-096  public TableName getTableName() {
-097return tableName;
-098  }
-099
-100  @Override
-101  public TableOperationType 
getTableOperationType() {
-102return opType;
-103  }
-104
-105  private interface LockInterface {
-106boolean 
acquireLock(MasterProcedureEnv env);
-107void releaseLock(MasterProcedureEnv 
env);
-108  }
-109
-110  public LockProcedure() {
-111lockAcquireLatch = null;
-112  }
-113
-114  private LockProcedure(final 
Configuration conf, final LockType type,
-115  final String description, final 
CountDownLatch lockAcquireLatch) {
-116this.type = type;
-117this.description = description;
-118this.lockAcquireLatch = 
lockAcquireLatch;
-119if (lockAcquireLatch == null) {
-120  
setTimeout(conf.getInt(REMOTE_LOCKS_TIMEOUT_MS_CONF, 
DEFAULT_REMOTE_LOCKS_TIMEOUT_MS));
-121} else {
-122  
setTimeout(conf.getInt(LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF,
-123  
DEFAULT_LOCAL_MASTER_LOCKS_TIMEOUT_MS));
-124}
-125  }
-126
-127  /**
-128   * Constructor for namespace lock.
-129   * @param lockAcquireLatch if not null, 
the latch is decreased when lock is acquired.
-130   */
-131  public LockProcedure(final 
Configuration conf, final String namespace, final LockType type,
-132  final String description, final 
CountDownLatch lockAcquireLatch)
-133  throws IllegalArgumentException {
-134this(conf, type, description, 
lockAcquireLatch);
-135
-136if (namespace.isEmpty()) {
-137  throw new 
IllegalArgumentException("Empty namespace");
-138}
-139
-140this.namespace = namespace;
-141this.lock = setupNamespaceLock();
-142  }
-143
-144  /**
-145   * Constructor for table lock.
-146   * @param lockAcquireLatch if not null, 
the latch is decreased when lock is acquired.
-147   */
-148  public LockProcedure(final 
Configuration conf, final TableName tableName, final LockType type,
-149  final String description, final 
CountDownLatch lockAcquireLatch)
-150  throws IllegalArgumentException {
-151this(conf, type, description, 
lockAcquireLatch);
-152
-153this.tableName = tableName;
-154this.lock = setupTableLock();
-155  }
-156
-157  /**
-158   * Constructor for region lock(s).
-159   * @param lockAcquireLatch if not null, 
the latch is decreased when lock is acquired.
-160   *Useful for 
locks acquired locally from master process.
-161   * @throws IllegalArgumentException if 
all regions are not from same table.
-162   */
-163  public LockProcedure(final 
Configuration conf, final RegionInfo[] regionInfos,
-164  final LockType type, final String 
description, final CountDownLatch lockAcquireLatch)
-165  throws IllegalArgumentException {
-166this(conf, type, description, 
lockAcquireLatch);
-167
-168// Build RegionInfo from region 
names.
-169if (regionInfos.length == 0) {
-170  throw new 
IllegalArgumentException("No regions specified for region lock");
-171}
-172
-173// check all regions belong to same 
table.
-174final TableName regionTable = 
regionInfos[0].getTable();
-175    for (int i = 1; i < regionInfos.length; ++i) {
-176  if 

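The constructors above optionally take a CountDownLatch that is decremented when the lock is acquired; a non-null latch raises the timeout so local (in-master) callers can simply block without heartbeating. A sketch of that waiting pattern, with the 10-minute figure mirroring DEFAULT_LOCAL_MASTER_LOCKS_TIMEOUT_MS:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

class LockLatchSketch {
  static void awaitLock(CountDownLatch lockAcquireLatch) throws InterruptedException {
    // the submitted lock procedure is expected to call countDown() on acquire
    if (!lockAcquireLatch.await(10, TimeUnit.MINUTES)) {
      throw new IllegalStateException("lock not acquired within the local timeout");
    }
  }
}
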
[02/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable.html
index 3d7093a..9917ee8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable.html
@@ -39,594 +39,612 @@
 031import 
java.util.concurrent.ThreadPoolExecutor;
 032import java.util.concurrent.TimeUnit;
 033import 
java.util.concurrent.atomic.AtomicLong;
-034
-035import 
org.apache.hadoop.conf.Configuration;
-036import org.apache.hadoop.fs.Path;
-037import 
org.apache.hadoop.hbase.CellScanner;
-038import 
org.apache.hadoop.hbase.CellUtil;
-039import 
org.apache.hadoop.hbase.HBaseConfiguration;
-040import 
org.apache.hadoop.hbase.HBaseIOException;
-041import 
org.apache.hadoop.hbase.HConstants;
-042import 
org.apache.hadoop.hbase.HRegionLocation;
-043import 
org.apache.hadoop.hbase.RegionLocations;
-044import 
org.apache.hadoop.hbase.TableDescriptors;
-045import 
org.apache.hadoop.hbase.TableName;
-046import 
org.apache.hadoop.hbase.TableNotFoundException;
-047import 
org.apache.hadoop.hbase.client.ClusterConnection;
-048import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-049import 
org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
-050import 
org.apache.hadoop.hbase.client.RegionInfo;
-051import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-052import 
org.apache.hadoop.hbase.client.RetryingCallable;
-053import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-054import 
org.apache.hadoop.hbase.client.TableDescriptor;
-055import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-056import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-057import 
org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
-058import 
org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
-059import 
org.apache.hadoop.hbase.replication.WALEntryFilter;
-060import 
org.apache.hadoop.hbase.util.Bytes;
-061import 
org.apache.hadoop.hbase.util.Pair;
-062import 
org.apache.hadoop.hbase.util.Threads;
-063import 
org.apache.hadoop.hbase.wal.WAL.Entry;
-064import 
org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
-065import 
org.apache.hadoop.hbase.wal.WALSplitter.OutputSink;
-066import 
org.apache.hadoop.hbase.wal.WALSplitter.PipelineController;
-067import 
org.apache.hadoop.hbase.wal.WALSplitter.RegionEntryBuffer;
-068import 
org.apache.hadoop.hbase.wal.WALSplitter.SinkWriter;
-069import 
org.apache.hadoop.util.StringUtils;
-070import 
org.apache.yetus.audience.InterfaceAudience;
-071import org.slf4j.Logger;
-072import org.slf4j.LoggerFactory;
+034import 
org.apache.hadoop.conf.Configuration;
+035import org.apache.hadoop.fs.Path;
+036import 
org.apache.hadoop.hbase.CellScanner;
+037import 
org.apache.hadoop.hbase.CellUtil;
+038import 
org.apache.hadoop.hbase.HBaseConfiguration;
+039import 
org.apache.hadoop.hbase.HBaseIOException;
+040import 
org.apache.hadoop.hbase.HConstants;
+041import 
org.apache.hadoop.hbase.HRegionLocation;
+042import 
org.apache.hadoop.hbase.RegionLocations;
+043import 
org.apache.hadoop.hbase.TableDescriptors;
+044import 
org.apache.hadoop.hbase.TableName;
+045import 
org.apache.hadoop.hbase.TableNotFoundException;
+046import 
org.apache.hadoop.hbase.client.ClusterConnection;
+047import 
org.apache.hadoop.hbase.client.ConnectionFactory;
+048import 
org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
+049import 
org.apache.hadoop.hbase.client.RegionInfo;
+050import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
+051import 
org.apache.hadoop.hbase.client.RetryingCallable;
+052import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+053import 
org.apache.hadoop.hbase.client.TableDescriptor;
+054import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
+055import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+056import 
org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
+057import 
org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
+058import 
org.apache.hadoop.hbase.replication.WALEntryFilter;
+059import 
org.apache.hadoop.hbase.util.Bytes;
+060import 
org.apache.hadoop.hbase.util.Pair;
+061import 
org.apache.hadoop.hbase.util.Threads;
+062import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+063import 
org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
+064import 
org.apache.hadoop.hbase.wal.WALSplitter.OutputSink;
+065import 

[02/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html 
b/apidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
index cd41ac7..982d1d9 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/HTableDescriptor.html
@@ -1,10 +1,10 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
-类 org.apache.hadoop.hbase.HTableDescriptor的使用 (Apache HBase 
3.0.0-SNAPSHOT API)
+Uses of Class org.apache.hadoop.hbase.HTableDescriptor (Apache HBase 
3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个
-下一个
+Prev
+Next
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 

[02/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html 
b/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
index 1957877..8ea63cd 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
@@ -1,10 +1,10 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
-Uses of Class org.apache.hadoop.hbase.HRegionInfo (Apache HBase 
3.0.0-SNAPSHOT API)
+类 org.apache.hadoop.hbase.HRegionInfo的使用 (Apache HBase 
3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-Prev
-Next
+上一个
+下一个
 
 
-Frames
-NoFrames
+框架
+无框架
 
 
-AllClasses
+所有类
 
 
 
 
-

Uses of Class
org.apache.hadoop.hbase.HRegionInfo

+

类的使用
org.apache.hadoop.hbase.HRegionInfo

  • - - +
    Packages that use HRegionInfo 
    + - - + + @@ -89,7 +89,17 @@ @@ -100,34 +110,34 @@
  • -

    Uses of HRegionInfo in org.apache.hadoop.hbase

    -
  • 使用HRegionInfo的程序包  
Package (程序包)  Description (说明)
    org.apache.hadoop.hbase.client -
    Provides HBase Client
    +
Provides HBase Client
+
+Table of Contents
+ - Overview
+ - Example API Usage
+
+Overview: To administer HBase, create and drop tables, list and alter tables, use Admin.
    - +

    org.apache.hadoop.hbase中HRegionInfo的使用

    +
    Fields in org.apache.hadoop.hbase declared as HRegionInfo 
    +
    声明为

    [02/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
    index 67f4551..017124c 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.html
    @@ -387,817 +387,804 @@
     379}
     380
     381LruCachedBlock cb = 
    map.get(cacheKey);
    -382if (cb != null) {
    -383  int comparison = 
    BlockCacheUtil.validateBlockAddition(cb.getBuffer(), buf, cacheKey);
    -384  if (comparison != 0) {
-385        if (comparison < 0) {
    -386  LOG.warn("Cached block contents 
    differ by nextBlockOnDiskSize. Keeping cached block.");
    -387  return;
    -388} else {
    -389  LOG.warn("Cached block contents 
    differ by nextBlockOnDiskSize. Caching new block.");
    -390}
    -391  } else {
    -392String msg = "Cached an already 
    cached block: " + cacheKey + " cb:" + cb.getCacheKey();
    -393msg += ". This is harmless and 
    can happen in rare cases (see HBASE-8547)";
    -394LOG.debug(msg);
    -395return;
    -396  }
    -397}
    -398long currentSize = size.get();
    -399long currentAcceptableSize = 
    acceptableSize();
    -400long hardLimitSize = (long) 
    (hardCapacityLimitFactor * currentAcceptableSize);
-401    if (currentSize >= hardLimitSize) {
    -402  stats.failInsert();
    -403  if (LOG.isTraceEnabled()) {
    -404LOG.trace("LruBlockCache current 
    size " + StringUtils.byteDesc(currentSize)
    -405  + " has exceeded acceptable 
    size " + StringUtils.byteDesc(currentAcceptableSize) + "."
    -406  + " The hard limit size is " + 
    StringUtils.byteDesc(hardLimitSize)
    -407  + ", failed to put cacheKey:" + 
    cacheKey + " into LruBlockCache.");
    -408  }
    -409  if (!evictionInProgress) {
    -410runEviction();
    -411  }
    -412  return;
    -413}
    -414cb = new LruCachedBlock(cacheKey, 
    buf, count.incrementAndGet(), inMemory);
    -415long newSize = updateSizeMetrics(cb, 
    false);
    -416map.put(cacheKey, cb);
    -417long val = 
    elements.incrementAndGet();
    -418if (buf.getBlockType().isData()) {
    -419   dataBlockElements.increment();
    -420}
    -421if (LOG.isTraceEnabled()) {
    -422  long size = map.size();
    -423  assertCounterSanity(size, val);
    -424}
-425    if (newSize > currentAcceptableSize && !evictionInProgress) {
    -426  runEviction();
    -427}
    -428  }
    -429
    -430  /**
    -431   * Sanity-checking for parity between 
    actual block cache content and metrics.
    -432   * Intended only for use with TRACE 
    level logging and -ea JVM.
    -433   */
    -434  private static void 
    assertCounterSanity(long mapSize, long counterVal) {
-435    if (counterVal < 0) {
    -436  LOG.trace("counterVal overflow. 
    Assertions unreliable. counterVal=" + counterVal +
    -437", mapSize=" + mapSize);
    -438  return;
    -439}
-440    if (mapSize < Integer.MAX_VALUE) {
-441      double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.);
-442      if (pct_diff > 0.05) {
    -443LOG.trace("delta between reported 
    and actual size  5%. counterVal=" + counterVal +
    -444  ", mapSize=" + mapSize);
    -445  }
    -446}
    -447  }
    -448
    -449  /**
    -450   * Cache the block with the specified 
    name and buffer.
    -451   * p
    -452   *
    -453   * @param cacheKey block's cache key
    -454   * @param buf  block buffer
    -455   */
    -456  @Override
    -457  public void cacheBlock(BlockCacheKey 
    cacheKey, Cacheable buf) {
    -458cacheBlock(cacheKey, buf, false);
    -459  }
    -460
    -461  /**
    -462   * Helper function that updates the 
    local size counter and also updates any
    -463   * per-cf or per-blocktype metrics it 
    can discern from given
    -464   * {@link LruCachedBlock}
    -465   */
    -466  private long 
    updateSizeMetrics(LruCachedBlock cb, boolean evict) {
    -467long heapsize = cb.heapSize();
    -468BlockType bt = 
    cb.getBuffer().getBlockType();
    -469if (evict) {
    -470  heapsize *= -1;
    -471}
-472    if (bt != null && bt.isData()) {
    -473   dataBlockSize.add(heapsize);
    -474}
    -475return size.addAndGet(heapsize);
    -476  }
    -477
    -478  /**
    -479   * Get the buffer of the block with the 
    specified name.
    -480   *
    -481   * @param cacheKey   block's 
    cache key
-482   * @param caching true if the caller caches blocks on cache misses
    -483   * @param repeat Whether 
    this is a repeat lookup for the same block
    -484   *   (used to 
    avoid double counting cache misses when doing double-check
    -485   *   locking)
    -486   * @param updateCacheMetrics Whether to 
    update cache metrics or not
    -487   *
    -488   * @return buffer of specified cache 
    key, or null if not in cache
    -489   */
    -490  @Override
    

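The caching path above rejects an insert (and may kick off eviction) once the cache has grown past hardCapacityLimitFactor times acceptableSize(). A small sketch of that guard with illustrative numbers:

class LruAdmissionSketch {
  public static void main(String[] args) {
    long currentSize = 9_500_000_000L;      // bytes currently cached
    long acceptableSize = 8_000_000_000L;   // stand-in for acceptableSize()
    float hardCapacityLimitFactor = 1.2f;   // illustrative value
    long hardLimitSize = (long) (hardCapacityLimitFactor * acceptableSize); // 9.6 GB
    // mirrors the guard above: fail the insert once past the hard limit
    System.out.println("reject insert = " + (currentSize >= hardLimitSize)); // false: 9.5 < 9.6 GB
  }
}
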
    [02/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
    index c10cfbf..a3e2f4a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
    @@ -3371,7 +3371,7 @@
     3363private V result = null;
     3364
     3365private final HBaseAdmin admin;
-3366    private final Long procId;
+3366    protected final Long procId;
     3367
     3368public ProcedureFuture(final 
    HBaseAdmin admin, final Long procId) {
     3369  this.admin = admin;
    @@ -3653,653 +3653,651 @@
     3645 * @return a description of the 
    operation
     3646 */
     3647protected String getDescription() 
    {
    -3648  return "Operation: " + 
    getOperationType() + ", "
    -3649  + "Table Name: " + 
    tableName.getNameWithNamespaceInclAsString();
    -3650
    -3651}
    -3652
    -3653protected abstract class 
    TableWaitForStateCallable implements WaitForStateCallable {
    -3654  @Override
    -3655  public void 
    throwInterruptedException() throws InterruptedIOException {
    -3656throw new 
    InterruptedIOException("Interrupted while waiting for operation: "
    -3657+ getOperationType() + " on 
    table: " + tableName.getNameWithNamespaceInclAsString());
    -3658  }
    -3659
    -3660  @Override
    -3661  public void 
    throwTimeoutException(long elapsedTime) throws TimeoutException {
    -3662throw new TimeoutException("The 
    operation: " + getOperationType() + " on table: " +
    -3663tableName.getNameAsString() 
    + " has not completed after " + elapsedTime + "ms");
    -3664  }
    -3665}
    -3666
    -3667@Override
    -3668protected V 
    postOperationResult(final V result, final long deadlineTs)
    -3669throws IOException, 
    TimeoutException {
    -3670  LOG.info(getDescription() + " 
    completed");
    -3671  return 
    super.postOperationResult(result, deadlineTs);
    -3672}
    -3673
    -3674@Override
    -3675protected V 
    postOperationFailure(final IOException exception, final long deadlineTs)
    -3676throws IOException, 
    TimeoutException {
    -3677  LOG.info(getDescription() + " 
    failed with " + exception.getMessage());
    -3678  return 
    super.postOperationFailure(exception, deadlineTs);
    -3679}
    -3680
    -3681protected void 
    waitForTableEnabled(final long deadlineTs)
    -3682throws IOException, 
    TimeoutException {
    -3683  waitForState(deadlineTs, new 
    TableWaitForStateCallable() {
    -3684@Override
    -3685public boolean checkState(int 
    tries) throws IOException {
    -3686  try {
    -3687if 
    (getAdmin().isTableAvailable(tableName)) {
    -3688  return true;
    -3689}
    -3690  } catch 
    (TableNotFoundException tnfe) {
    -3691LOG.debug("Table " + 
    tableName.getNameWithNamespaceInclAsString()
    -3692+ " was not enabled, 
    sleeping. tries=" + tries);
    -3693  }
    -3694  return false;
    -3695}
    -3696  });
    -3697}
    -3698
    -3699protected void 
    waitForTableDisabled(final long deadlineTs)
    -3700throws IOException, 
    TimeoutException {
    -3701  waitForState(deadlineTs, new 
    TableWaitForStateCallable() {
    -3702@Override
    -3703public boolean checkState(int 
    tries) throws IOException {
    -3704  return 
    getAdmin().isTableDisabled(tableName);
    -3705}
    -3706  });
    -3707}
    -3708
    -3709protected void 
    waitTableNotFound(final long deadlineTs)
    -3710throws IOException, 
    TimeoutException {
    -3711  waitForState(deadlineTs, new 
    TableWaitForStateCallable() {
    -3712@Override
    -3713public boolean checkState(int 
    tries) throws IOException {
    -3714  return 
    !getAdmin().tableExists(tableName);
    -3715}
    -3716  });
    -3717}
    -3718
    -3719protected void 
    waitForSchemaUpdate(final long deadlineTs)
    -3720throws IOException, 
    TimeoutException {
    -3721  waitForState(deadlineTs, new 
    TableWaitForStateCallable() {
    -3722@Override
    -3723public boolean checkState(int 
    tries) throws IOException {
    -3724  return 
    getAdmin().getAlterStatus(tableName).getFirst() == 0;
    -3725}
    -3726  });
    -3727}
    -3728
    -3729protected void 
    waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
    -3730throws IOException, 
    TimeoutException {
    -3731  final TableDescriptor desc = 
    getTableDescriptor();
    -3732  final AtomicInteger actualRegCount 
    = new AtomicInteger(0);
    -3733  final MetaTableAccessor.Visitor 
    visitor = new MetaTableAccessor.Visitor() {
    -3734@Override
    -3735public boolean visit(Result 
    rowResult) 

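The futures above all funnel through waitForState(...), polling a checkState(tries) callable until a deadline passes. A hedged sketch of that polling loop (the probe interface and fixed backoff are simplifications of the real retry logic):

import java.io.IOException;
import java.util.concurrent.TimeoutException;

class WaitForStateSketch {
  interface StateProbe {
    boolean checkState(int tries) throws IOException;
  }

  static void waitForState(long deadlineTs, StateProbe probe)
      throws IOException, TimeoutException, InterruptedException {
    int tries = 0;
    while (System.currentTimeMillis() < deadlineTs) {
      if (probe.checkState(tries++)) {
        return; // desired state reached
      }
      Thread.sleep(1000L); // simple fixed backoff for the sketch
    }
    throw new TimeoutException("state not reached before deadline");
  }
}
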
    [02/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALProcedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALProcedure.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALProcedure.html
    new file mode 100644
    index 000..a9584c4
    --- /dev/null
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALProcedure.html
    @@ -0,0 +1,641 @@
    +http://www.w3.org/TR/html4/loose.dtd;>
    +
    +
    +
    +
    +
    +SyncReplicationReplayWALProcedure (Apache HBase 3.0.0-SNAPSHOT 
    API)
    +
    +
    +
    +
    +
    +var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10};
    +var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],8:["t4","Concrete Methods"]};
    +var altColor = "altColor";
    +var rowColor = "rowColor";
    +var tableTab = "tableTab";
    +var activeTableTab = "activeTableTab";
    +
    +
    +JavaScript is disabled on your browser.
    +
    +
    +
    +
    +
    +Skip navigation links
    +
    +
    +
    +
    +Overview
    +Package
    +Class
    +Use
    +Tree
    +Deprecated
    +Index
    +Help
    +
    +
    +
    +
    +PrevClass
    +NextClass
    +
    +
    +Frames
    +NoFrames
    +
    +
    +AllClasses
    +
    +
    +
    +
    +
    +
    +
    +Summary:
    +Nested|
    +Field|
    +Constr|
    +Method
    +
    +
    +Detail:
    +Field|
    +Constr|
    +Method
    +
    +
    +
    +
    +
    +
    +
    +
    +org.apache.hadoop.hbase.master.replication
    +Class 
    SyncReplicationReplayWALProcedure
    +
    +
    +
+java.lang.Object
+  org.apache.hadoop.hbase.procedure2.Procedure<TEnvironment>
+    org.apache.hadoop.hbase.procedure2.StateMachineProcedure<MasterProcedureEnv,org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SyncReplicationReplayWALState>
+      org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALProcedure
    +
    +
    +org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALProcedure
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +All Implemented Interfaces:
+Comparable<Procedure<MasterProcedureEnv>>, PeerProcedureInterface
    +
    +
    +
    +@InterfaceAudience.Private
    +public class SyncReplicationReplayWALProcedure
+extends StateMachineProcedure<MasterProcedureEnv,org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SyncReplicationReplayWALState>
    +implements PeerProcedureInterface
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Nested Class Summary
    +
    +
    +
    +
    +Nested classes/interfaces inherited from 
    classorg.apache.hadoop.hbase.procedure2.StateMachineProcedure
    +StateMachineProcedure.Flow
    +
    +
    +
    +
    +
    +Nested classes/interfaces inherited from 
    classorg.apache.hadoop.hbase.procedure2.Procedure
    +Procedure.LockState
    +
    +
    +
    +
    +
    +Nested classes/interfaces inherited from 
    interfaceorg.apache.hadoop.hbase.master.procedure.PeerProcedureInterface
    +PeerProcedureInterface.PeerOperationType
    +
    +
    +
    +
    +
    +
    +
    +
    +Field Summary
    +
    +Fields
    +
    +Modifier and Type
    +Field and Description
    +
    +
    +private static org.slf4j.Logger
    +LOG
    +
    +
+private String
    +peerId
    +
    +
+private List<String>
    +wals
    +
    +
    +private ServerName
    +worker
    +
    +
    +
    +
    +
    +
    +Fields inherited from classorg.apache.hadoop.hbase.procedure2.Procedure
    +NO_PROC_ID,
     NO_TIMEOUT
    +
    +
    +
    +
    +
    +
    +
    +
    +Constructor Summary
    +
    +Constructors
    +
    +Constructor and Description
    +
    +
    +SyncReplicationReplayWALProcedure()
    +
    +
+SyncReplicationReplayWALProcedure(String peerId,
+                                  List<String> wals)
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Method Summary
    +
    +All MethodsInstance MethodsConcrete Methods
    +
    +Modifier and Type
    +Method and Description
    +
    +
    +protected void
    +deserializeStateData(ProcedureStateSerializerserializer)
    +Called on store load to allow the user to decode the 
    previously serialized
    + state.
    +
    +
    +
    +protected StateMachineProcedure.Flow
    

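The page above documents a two-argument constructor taking a peer id and the WALs to replay. A sketch of invoking it, purely to make the signature concrete; this is an @InterfaceAudience.Private master-side class, and the peer id and WAL names here are made up:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALProcedure;

class ReplayWalSketch {
  static SyncReplicationReplayWALProcedure build() {
    List<String> wals = Arrays.asList("peer1.wal.0", "peer1.wal.1");
    return new SyncReplicationReplayWALProcedure("peer1", wals);
  }
}
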
    [02/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.html
    index 49e37f9..3d2c9cb 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.html
    @@ -65,392 +65,410 @@
     057
    .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs())
     058.setMemStoreSize(new 
    Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE))
     059
    .setReadRequestCount(regionLoadPB.getReadRequestsCount())
    -060
    .setWriteRequestCount(regionLoadPB.getWriteRequestsCount())
    -061.setStoreFileIndexSize(new 
    Size(regionLoadPB.getStorefileIndexSizeKB(),
    -062  Size.Unit.KILOBYTE))
    -063
    .setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(),
    -064  Size.Unit.KILOBYTE))
    -065
    .setStoreCount(regionLoadPB.getStores())
    -066
    .setStoreFileCount(regionLoadPB.getStorefiles())
    -067.setStoreFileSize(new 
    Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE))
    -068
    .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream()
    -069  .collect(Collectors.toMap(
-070          (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(),
    -071  
    ClusterStatusProtos.StoreSequenceId::getSequenceId)))
    -072.setUncompressedStoreFileSize(
    -073  new 
    Size(regionLoadPB.getStoreUncompressedSizeMB(),Size.Unit.MEGABYTE))
    -074.build();
    -075  }
    -076
-077  private static List<ClusterStatusProtos.StoreSequenceId> toStoreSequenceId(
-078      Map<byte[], Long> ids) {
-079    return ids.entrySet().stream()
-080        .map(e -> ClusterStatusProtos.StoreSequenceId.newBuilder()
-081            .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey()))
-082            .setSequenceId(e.getValue())
-083            .build())
-084        .collect(Collectors.toList());
-085  }
    -086
    -087  public static 
    ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) {
    -088return 
    ClusterStatusProtos.RegionLoad.newBuilder()
    -089
    .setRegionSpecifier(HBaseProtos.RegionSpecifier
    -090  
    .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
    -091  
    .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName()))
    -092  .build())
    -093.setTotalStaticBloomSizeKB((int) 
    regionMetrics.getBloomFilterSize()
    -094  .get(Size.Unit.KILOBYTE))
    -095
    .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount())
    -096
    .setTotalCompactingKVs(regionMetrics.getCompactingCellCount())
    -097
    .setCompleteSequenceId(regionMetrics.getCompletedSequenceId())
    -098
    .setDataLocality(regionMetrics.getDataLocality())
    -099
    .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount())
    -100.setTotalStaticIndexSizeKB((int) 
    regionMetrics.getStoreFileUncompressedDataIndexSize()
    -101  .get(Size.Unit.KILOBYTE))
    -102
    .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp())
    -103.setMemStoreSizeMB((int) 
    regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE))
    -104
    .setReadRequestsCount(regionMetrics.getReadRequestCount())
    -105
    .setWriteRequestsCount(regionMetrics.getWriteRequestCount())
    -106.setStorefileIndexSizeKB((long) 
    regionMetrics.getStoreFileIndexSize()
    -107  .get(Size.Unit.KILOBYTE))
    -108.setRootIndexSizeKB((int) 
    regionMetrics.getStoreFileRootLevelIndexSize()
    +060
    .setCpRequestCount(regionLoadPB.getCpRequestsCount())
    +061
    .setWriteRequestCount(regionLoadPB.getWriteRequestsCount())
    +062.setStoreFileIndexSize(new 
    Size(regionLoadPB.getStorefileIndexSizeKB(),
    +063  Size.Unit.KILOBYTE))
    +064
    .setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(),
    +065  Size.Unit.KILOBYTE))
    +066
    .setStoreCount(regionLoadPB.getStores())
    +067
    .setStoreFileCount(regionLoadPB.getStorefiles())
    +068.setStoreFileSize(new 
    Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE))
    +069
    .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream()
    +070  .collect(Collectors.toMap(
+071          (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(),
    +072  
    ClusterStatusProtos.StoreSequenceId::getSequenceId)))
    +073.setUncompressedStoreFileSize(
    +074  new 
    Size(regionLoadPB.getStoreUncompressedSizeMB(),Size.Unit.MEGABYTE))
    +075.build();
    +076  }
    +077
+078  private static List<ClusterStatusProtos.StoreSequenceId> toStoreSequenceId(
+079      Map<byte[], 

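toStoreSequenceId above is a plain stream-and-collect conversion from a Map to a list of protobuf messages. A generic sketch of the same pattern, with strings standing in for the real ClusterStatusProtos builders:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

class StoreSequenceIdSketch {
  static List<String> toEntries(Map<byte[], Long> ids) {
    return ids.entrySet().stream()
        .map(e -> Arrays.toString(e.getKey()) + "=" + e.getValue())
        .collect(Collectors.toList());
  }
}
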
    [02/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
    index 541beed..1100e95 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
    @@ -42,1015 +42,1038 @@
     034import 
    java.util.concurrent.ConcurrentHashMap;
     035import 
    java.util.concurrent.ConcurrentSkipListMap;
     036import 
    java.util.concurrent.atomic.AtomicInteger;
    -037
    -038import 
    org.apache.hadoop.hbase.HConstants;
    -039import 
    org.apache.hadoop.hbase.ServerName;
    -040import 
    org.apache.hadoop.hbase.TableName;
    -041import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -042import 
    org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
    -043import 
    org.apache.hadoop.hbase.master.RegionState;
    -044import 
    org.apache.hadoop.hbase.master.RegionState.State;
    -045import 
    org.apache.hadoop.hbase.procedure2.ProcedureEvent;
    -046import 
    org.apache.hadoop.hbase.util.Bytes;
    -047import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -048import 
    org.apache.yetus.audience.InterfaceAudience;
    -049import org.slf4j.Logger;
    -050import org.slf4j.LoggerFactory;
    -051import 
    org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
    -052
    -053/**
    -054 * RegionStates contains a set of Maps 
    that describes the in-memory state of the AM, with
    -055 * the regions available in the system, 
    the region in transition, the offline regions and
    -056 * the servers holding regions.
    -057 */
    -058@InterfaceAudience.Private
    -059public class RegionStates {
    -060  private static final Logger LOG = 
    LoggerFactory.getLogger(RegionStates.class);
    -061
    -062  protected static final State[] 
    STATES_EXPECTED_ON_OPEN = new State[] {
    -063State.OPEN, // State may already be 
    OPEN if we died after receiving the OPEN from regionserver
    -064// but before complete 
    finish of AssignProcedure. HBASE-20100.
    -065State.OFFLINE, State.CLOSED,  // 
    disable/offline
    -066State.SPLITTING, State.SPLIT, // 
    ServerCrashProcedure
    -067State.OPENING, State.FAILED_OPEN, // 
    already in-progress (retrying)
    -068  };
    -069
    -070  protected static final State[] 
    STATES_EXPECTED_ON_CLOSE = new State[] {
    -071State.SPLITTING, State.SPLIT, 
    State.MERGING, // ServerCrashProcedure
    -072State.OPEN,   // 
    enabled/open
    -073State.CLOSING // 
    already in-progress (retrying)
    -074  };
    -075
-076  private static class AssignmentProcedureEvent extends ProcedureEvent<RegionInfo> {
    -077public AssignmentProcedureEvent(final 
    RegionInfo regionInfo) {
    -078  super(regionInfo);
    -079}
    -080  }
    -081
-082  private static class ServerReportEvent extends ProcedureEvent<ServerName> {
    -083public ServerReportEvent(final 
    ServerName serverName) {
    -084  super(serverName);
    -085}
    -086  }
    -087
    -088  /**
    -089   * Current Region State.
    -090   * In-memory only. Not persisted.
    -091   */
    -092  // Mutable/Immutable? Changes have to 
    be synchronized or not?
    -093  // Data members are volatile which 
    seems to say multi-threaded access is fine.
    -094  // In the below we do check and set but 
    the check state could change before
-095  // we do the set because no synchronization... which seems dodgy. Clear up
    -096  // understanding here... how many 
    threads accessing? Do locks make it so one
    -097  // thread at a time working on a single 
    Region's RegionStateNode? Lets presume
    -098  // so for now. Odd is that elsewhere in 
    this RegionStates, we synchronize on
    -099  // the RegionStateNode instance. 
    TODO.
-100  public static class RegionStateNode implements Comparable<RegionStateNode> {
-101    private final RegionInfo regionInfo;
-102    private final ProcedureEvent<?> event;
    -103
    -104private volatile 
    RegionTransitionProcedure procedure = null;
    -105private volatile ServerName 
    regionLocation = null;
    -106private volatile ServerName lastHost 
    = null;
    -107/**
    -108 * A Region-in-Transition (RIT) moves 
    through states.
    -109 * See {@link State} for complete 
    list. A Region that
-110 * is opened moves from OFFLINE => OPENING => OPENED.
    -111 */
    -112private volatile State state = 
    State.OFFLINE;
    -113
    -114/**
    -115 * Updated whenever a call to {@link 
    #setRegionLocation(ServerName)}
    -116 * or {@link #setState(State, 
    State...)}.
    -117 */
    -118private volatile long lastUpdate = 
    0;
    -119
    -120private volatile long openSeqNum = 
    HConstants.NO_SEQNUM;
    -121
    -122public RegionStateNode(final 
    RegionInfo regionInfo) {
    -123  

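The RegionStateNode comment above describes the region-in-transition flow OFFLINE => OPENING => OPENED. A toy sketch of that transition check, using a simplified state enum rather than the real RegionState.State:

class RitSketch {
  enum S { OFFLINE, OPENING, OPEN }

  static boolean isLegalOpenStep(S from, S to) {
    // a region being opened advances OFFLINE => OPENING => OPEN
    return (from == S.OFFLINE && to == S.OPENING)
        || (from == S.OPENING && to == S.OPEN);
  }
}
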
    [02/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
    index d523437..9050c69 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
    @@ -561,7 +561,7 @@
     553try {
     554  
    env.getMasterServices().getMasterQuotaManager().onRegionMerged(this.mergedRegion);
     555} catch (QuotaExceededException e) 
    {
    -556  
    env.getAssignmentManager().getRegionNormalizer().planSkipped(this.mergedRegion,
    +556  
    env.getMasterServices().getRegionNormalizer().planSkipped(this.mergedRegion,
     557  
    NormalizationPlan.PlanType.MERGE);
     558  throw e;
     559}
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
    index c1b4533..a0c568b 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
    @@ -136,220 +136,221 @@
     128  public void 
    updateRegionLocation(RegionStates.RegionStateNode regionStateNode)
     129  throws IOException {
     130if 
    (regionStateNode.getRegionInfo().isMetaRegion()) {
    -131  
    updateMetaLocation(regionStateNode.getRegionInfo(), 
    regionStateNode.getRegionLocation());
    -132} else {
    -133  long openSeqNum = 
    regionStateNode.getState() == State.OPEN ?
    -134  regionStateNode.getOpenSeqNum() 
    : HConstants.NO_SEQNUM;
    -135  
    updateUserRegionLocation(regionStateNode.getRegionInfo(), 
    regionStateNode.getState(),
    -136  
    regionStateNode.getRegionLocation(), regionStateNode.getLastHost(), 
    openSeqNum,
    -137  // The regionStateNode may have 
    no procedure in a test scenario; allow for this.
    -138  regionStateNode.getProcedure() 
    != null?
    -139  
    regionStateNode.getProcedure().getProcId(): Procedure.NO_PROC_ID);
    -140}
    -141  }
    -142
    -143  private void updateMetaLocation(final 
    RegionInfo regionInfo, final ServerName serverName)
    -144  throws IOException {
    -145try {
    -146  
    MetaTableLocator.setMetaLocation(master.getZooKeeper(), serverName,
    -147regionInfo.getReplicaId(), 
    State.OPEN);
    -148} catch (KeeperException e) {
    -149  throw new IOException(e);
    -150}
    -151  }
    -152
    -153  private void 
    updateUserRegionLocation(final RegionInfo regionInfo, final State state,
    -154  final ServerName regionLocation, 
    final ServerName lastHost, final long openSeqNum,
    -155  final long pid)
    -156  throws IOException {
    -157long time = 
    EnvironmentEdgeManager.currentTime();
    -158final int replicaId = 
    regionInfo.getReplicaId();
    -159final Put put = new 
    Put(MetaTableAccessor.getMetaKeyForRegion(regionInfo), time);
    -160MetaTableAccessor.addRegionInfo(put, 
    regionInfo);
    -161final StringBuilder info =
    -162  new 
    StringBuilder("pid=").append(pid).append(" updating hbase:meta row=")
    -163
    .append(regionInfo.getEncodedName()).append(", regionState=").append(state);
-164if (openSeqNum >= 0) {
-165  Preconditions.checkArgument(state == State.OPEN && regionLocation != null,
-166  "Open region should be on a server");
    -167  MetaTableAccessor.addLocation(put, 
    regionLocation, openSeqNum, replicaId);
    -168  // only update replication barrier 
    for default replica
-169  if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID &&
-170  hasGlobalReplicationScope(regionInfo.getTable())) {
    -171
    MetaTableAccessor.addReplicationBarrier(put, openSeqNum);
    -172info.append(", 
    repBarrier=").append(openSeqNum);
    -173  }
    -174  info.append(", 
    openSeqNum=").append(openSeqNum);
    -175  info.append(", 
    regionLocation=").append(regionLocation);
-176} else if (regionLocation != null && !regionLocation.equals(lastHost)) {
    -177  // Ideally, if no regionLocation, 
    write null to the hbase:meta but this will confuse clients
    -178  // currently; they want a server to 
    hit. TODO: Make clients wait if no location.
    -179  
    put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
    -180  .setRow(put.getRow())
    -181  
    .setFamily(HConstants.CATALOG_FAMILY)
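The CellBuilder chain above continues in the full source; the elided lines are not reproduced here. For reference, this is the general shape of a complete CellBuilderFactory chain used by code like this, with the qualifier and value below being illustrative assumptions, not the missing original:

    // Fragment; assumes the surrounding method's 'put', 'time' and 'lastHost'.
    put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(put.getRow())
        .setFamily(HConstants.CATALOG_FAMILY)
        .setQualifier(Bytes.toBytes("sn"))                 // assumed qualifier
        .setTimestamp(time)
        .setType(Cell.Type.Put)
        .setValue(Bytes.toBytes(lastHost.getServerName())) // assumed value
        .build());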
    

    [02/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALKeyImpl.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALKeyImpl.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALKeyImpl.html
    index a00f005..9bca473 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALKeyImpl.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALKeyImpl.html
    @@ -45,593 +45,571 @@
     037
     038import 
    org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
     039import 
    org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
    -040import 
    org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
    -041
    -042import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
    -043import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
    -044import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FamilyScope;
    -045import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.ScopeType;
    -046
    -047/**
    -048 * Default implementation of Key for an 
    Entry in the WAL.
    -049 * For internal use only though 
    Replication needs to have access.
    -050 *
    -051 * The log intermingles edits to many 
    tables and rows, so each log entry
    -052 * identifies the appropriate table and 
    row.  Within a table and row, they're
    -053 * also sorted.
    -054 *
-055 * <p>Some Transactional edits (START, COMMIT, ABORT) will not have an associated row.
    -056 *
    -057 */
    -058// TODO: Key and WALEdit are never used 
    separately, or in one-to-many relation, for practical
    -059//   purposes. They need to be merged 
    into WALEntry.
    -060@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.REPLICATION})
    -061public class WALKeyImpl implements WALKey 
    {
    -062  public static final WALKeyImpl 
    EMPTY_WALKEYIMPL = new WALKeyImpl();
    -063
    -064  public MultiVersionConcurrencyControl 
    getMvcc() {
    -065return mvcc;
    -066  }
    -067
    -068  /**
    -069   * Use it to complete mvcc transaction. 
    This WALKeyImpl was part of
    -070   * (the transaction is started when you 
    call append; see the comment on FSHLog#append). To
    -071   * complete call
-072   * {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)}
-073   * or {@link MultiVersionConcurrencyControl#completeAndWait(MultiVersionConcurrencyControl.WriteEntry)}
    -074   * @return A WriteEntry gotten from 
    local WAL subsystem.
    -075   * @see 
    #setWriteEntry(MultiVersionConcurrencyControl.WriteEntry)
    -076   */
    -077  public 
    MultiVersionConcurrencyControl.WriteEntry getWriteEntry() {
    -078return this.writeEntry;
    -079  }
    -080
    -081  public void 
    setWriteEntry(MultiVersionConcurrencyControl.WriteEntry writeEntry) {
    -082assert this.writeEntry == null;
    -083this.writeEntry = writeEntry;
    -084// Set our sequenceid now using 
    WriteEntry.
    -085this.sequenceId = 
    writeEntry.getWriteNumber();
    -086  }
    -087
    -088  private byte [] encodedRegionName;
    -089
    -090  private TableName tablename;
    -091
    -092  /**
    -093   * SequenceId for this edit. Set 
    post-construction at write-to-WAL time. Until then it is
    -094   * NO_SEQUENCE_ID. Change it so 
    multiple threads can read it -- e.g. access is synchronized.
    -095   */
    -096  private long sequenceId;
    -097
    -098  /**
    -099   * Used during WAL replay; the 
    sequenceId of the edit when it came into the system.
    -100   */
    -101  private long origLogSeqNum = 0;
    -102
    -103  /** Time at which this edit was 
    written. */
    -104  private long writeTime;
    -105
    -106  /** The first element in the list is 
    the cluster id on which the change has originated */
-107  private List<UUID> clusterIds;
-108
-109  private NavigableMap<byte[], Integer> replicationScope;
    -110
    -111  private long nonceGroup = 
    HConstants.NO_NONCE;
    -112  private long nonce = 
    HConstants.NO_NONCE;
    -113  private MultiVersionConcurrencyControl 
    mvcc;
    -114
    -115  /**
    -116   * Set in a way visible to multiple 
    threads; e.g. synchronized getter/setters.
    -117   */
    -118  private 
    MultiVersionConcurrencyControl.WriteEntry writeEntry;
    -119
    -120  private CompressionContext 
    compressionContext;
    -121
    -122  public WALKeyImpl() {
-123init(null, null, 0L, HConstants.LATEST_TIMESTAMP,
-124new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, null);
-125  }
-126
-127  public WALKeyImpl(final NavigableMap<byte[], Integer> replicationScope) {
-128init(null, null, 0L, HConstants.LATEST_TIMESTAMP,
-129new ArrayList<>(), HConstants.NO_NONCE, HConstants.NO_NONCE, null, replicationScope);
    -130  }
    -131
    -132  @VisibleForTesting
    -133  public WALKeyImpl(final byte[] 
    encodedRegionName, final TableName tablename,
    -134long logSeqNum,
    -135  final long now, UUID clusterId) {
-136List<UUID> clusterIds = new ArrayList<>(1);
    -137clusterIds.add(clusterId);
    -138init(encodedRegionName, tablename, 
    logSeqNum, now, clusterIds,
    -139HConstants.NO_NONCE, 
    HConstants.NO_NONCE, null, 
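The getWriteEntry/setWriteEntry javadoc above implies a strict lifecycle: the WriteEntry attached at append time must be completed exactly once, or readers stall behind the open transaction. A hedged sketch of a caller honoring that contract (the apply step is a stand-in):

    import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
    import org.apache.hadoop.hbase.wal.WALKeyImpl;

    final class MvccCompletionSketch {
      // Complete the mvcc transaction carried by the key whether or not the
      // edit application succeeds, so the read point can keep advancing.
      static void applyThenComplete(WALKeyImpl key, Runnable applyEdit) {
        MultiVersionConcurrencyControl.WriteEntry we = key.getWriteEntry();
        try {
          applyEdit.run(); // e.g. push the edit into the memstore
        } finally {
          key.getMvcc().complete(we); // completeAndWait(we) when ordering matters
        }
      }
    }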

    [02/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.html
    index 594ef24..17d5c40 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.html
    @@ -170,241 +170,242 @@
     162  }
     163
     164  /**
    -165   * Add a remote rpc. Be sure to check 
    result for successful add.
    +165   * Add a remote rpc.
     166   * @param key the node identifier
    -167   * @return True if we successfully 
    added the operation.
    -168   */
    -169  public boolean addOperationToNode(final 
    TRemote key, RemoteProcedure rp) {
    +167   */
    +168  public void addOperationToNode(final 
    TRemote key, RemoteProcedure rp)
    +169  throws 
    NullTargetServerDispatchException, NoServerDispatchException, 
    NoNodeDispatchException {
     170if (key == null) {
    -171  // Key is remote server name. Be 
    careful. It could have been nulled by a concurrent
    -172  // ServerCrashProcedure shutting 
    down outstanding RPC requests. See remoteCallFailed.
    -173  return false;
    -174}
    -175assert key != null : "found null key 
    for node";
    -176BufferNode node = nodeMap.get(key);
    -177if (node == null) {
    -178  return false;
    -179}
    -180node.add(rp);
    -181// Check our node still in the map; 
    could have been removed by #removeNode.
    -182return nodeMap.containsValue(node);
    -183  }
    -184
    -185  /**
    -186   * Remove a remote node
    -187   * @param key the node identifier
    -188   */
    -189  public boolean removeNode(final TRemote 
    key) {
    -190final BufferNode node = 
    nodeMap.remove(key);
    -191if (node == null) return false;
    -192node.abortOperationsInQueue();
    -193return true;
    -194  }
    -195
-196  // ============================================================================
-197  //  Task Helpers
-198  // ============================================================================
-199  protected Future<Void> submitTask(Callable<Void> task) {
-200return threadPool.submit(task);
-201  }
-202
-203  protected Future<Void> submitTask(Callable<Void> task, long delay, TimeUnit unit) {
-204final FutureTask<Void> futureTask = new FutureTask<>(task);
-205timeoutExecutor.add(new DelayedTask(futureTask, delay, unit));
-206return futureTask;
-207  }
    -208
-209  protected abstract void remoteDispatch(TRemote key, Set<RemoteProcedure> operations);
-210  protected abstract void abortPendingOperations(TRemote key, Set<RemoteProcedure> operations);
    -211
    -212  /**
    -213   * Data structure with reference to 
    remote operation.
    -214   */
    -215  public static abstract class 
    RemoteOperation {
    -216private final RemoteProcedure 
    remoteProcedure;
    -217
    -218protected RemoteOperation(final 
    RemoteProcedure remoteProcedure) {
    -219  this.remoteProcedure = 
    remoteProcedure;
    -220}
    -221
    -222public RemoteProcedure 
    getRemoteProcedure() {
    -223  return remoteProcedure;
    -224}
    -225  }
    -226
    -227  /**
    -228   * Remote procedure reference.
    -229   */
-230  public interface RemoteProcedure<TEnv, TRemote> {
    -231/**
    -232 * For building the remote 
    operation.
    -233 */
    -234RemoteOperation remoteCallBuild(TEnv 
    env, TRemote remote);
    -235
    -236/**
    -237 * Called when the executeProcedure 
    call is failed.
    -238 */
    -239void remoteCallFailed(TEnv env, 
    TRemote remote, IOException exception);
    -240
    -241/**
    -242 * Called when RS tells the remote 
    procedure is succeeded through the
    -243 * {@code reportProcedureDone} 
    method.
    -244 */
    -245void remoteOperationCompleted(TEnv 
    env);
    -246
    -247/**
    -248 * Called when RS tells the remote 
    procedure is failed through the {@code reportProcedureDone}
    -249 * method.
    -250 */
    -251void remoteOperationFailed(TEnv env, 
    RemoteProcedureException error);
    -252  }
    -253
    -254  /**
    -255   * Account of what procedures are 
    running on remote node.
-256   * @param <TEnv>
-257   * @param <TRemote>
-258   */
-259  public interface RemoteNode<TEnv, TRemote> {
-260TRemote getKey();
-261void add(RemoteProcedure<TEnv, TRemote> operation);
    -262void dispatch();
    -263  }
    -264
-265  protected ArrayListMultimap<Class<?>, RemoteOperation> buildAndGroupRequestByType(final TEnv env,
-266  final TRemote remote, final Set<RemoteProcedure> remoteProcedures) {
-267final ArrayListMultimap<Class<?>, RemoteOperation> requestByType = ArrayListMultimap.create();
    -268for (RemoteProcedure proc: 
    remoteProcedures) {
    -269  RemoteOperation operation = 
    proc.remoteCallBuild(env, remote);
    -270  
    requestByType.put(operation.getClass(), operation);
    -271}
    -272return requestByType;
    -273  }
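This hunk swaps addOperationToNode's boolean result for three typed dispatch exceptions, so a caller learns why a dispatch failed rather than just that it failed. A hedged sketch of the corresponding caller migration (handleDispatchFailure is a hypothetical recovery hook, not an API shown in the hunk):

    // Before: if (!dispatcher.addOperationToNode(key, rp)) { /* retry or fail */ }
    // After (sketch): the failure cause is explicit in the exception type.
    try {
      dispatcher.addOperationToNode(key, rp);
    } catch (NullTargetServerDispatchException | NoServerDispatchException
        | NoNodeDispatchException e) {
      // key may have been nulled by a concurrent ServerCrashProcedure, or the
      // BufferNode removed before the operation could be queued.
      handleDispatchFailure(key, rp, e); // hypothetical
    }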
    

    [02/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/src-html/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.html
    index e383c6b..6b66d6d 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.html
    @@ -611,12 +611,12 @@
     603
     604  /**
     605   * Make puts to put the input value 
    into each combination of row, family, and qualifier
    -606   * @param rows
    -607   * @param families
    -608   * @param qualifiers
    -609   * @param value
    -610   * @return
    -611   * @throws IOException
    +606   * @param rows the rows to use
    +607   * @param families the families to 
    use
    +608   * @param qualifiers the qualifiers to 
    use
    +609   * @param value the values to use
    +610   * @return the dot product of the given 
    rows, families, qualifiers, and values
    +611   * @throws IOException if there is a 
    problem creating one of the Put objects
     612   */
 613  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
     614  byte[] value) throws IOException 
    {
    @@ -640,11 +640,11 @@
     632  /**
     633   * Make key values to represent each 
    possible combination of family and qualifier in the specified
     634   * row.
    -635   * @param row
    -636   * @param families
    -637   * @param qualifiers
    -638   * @param value
    -639   * @return
    +635   * @param row the row to use
    +636   * @param families the families to 
    use
    +637   * @param qualifiers the qualifiers to 
    use
    +638   * @param value the values to use
    +639   * @return the dot product of the given 
    families, qualifiers, and values for a given row
     640   */
 641  static ArrayList<Cell> createKeyValuesForRow(byte[] row, byte[][] families, byte[][] qualifiers,
     642  byte[] value) {
    @@ -780,9 +780,9 @@
     772  /**
     773   * Exhausts the scanner by calling next 
    repetitively. Once completely exhausted, close scanner and
     774   * return total cell count
    -775   * @param scanner
    -776   * @return
    -777   * @throws Exception
    +775   * @param scanner the scanner to 
    exhaust
    +776   * @return the number of cells 
    counted
    +777   * @throws Exception if there is a 
    problem retrieving cells from the scanner
     778   */
     779  private int 
    countCellsFromScanner(ResultScanner scanner) throws Exception {
     780Result result = null;
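The filled-in javadoc above pins down countCellsFromScanner's contract: exhaust the scanner, close it, return the total cell count. A small sketch of that contract against the standard ResultScanner API:

    // Sketch: exhaust, count, and always close, per the javadoc above.
    private int countCellsFromScanner(ResultScanner scanner) throws Exception {
      int cellCount = 0;
      try {
        for (Result result; (result = scanner.next()) != null; ) {
          cellCount += result.rawCells().length; // every cell of every Result
        }
      } finally {
        scanner.close();
      }
      return cellCount;
    }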
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.ExplainingPredicate.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.ExplainingPredicate.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.ExplainingPredicate.html
    index 15f1e52..65b58eb 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.ExplainingPredicate.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.ExplainingPredicate.html
    @@ -91,7 +91,7 @@
     083  /**
     084   * A predicate 'closure' used by the 
    {@link Waiter#waitFor(Configuration, long, Predicate)} and
     085   * {@link Waiter#waitFor(Configuration, 
    long, Predicate)} and
    -086   * {@link Waiter#waitFor(Configuration, 
    long, long, boolean, Predicate) methods.
    +086   * {@link Waiter#waitFor(Configuration, 
    long, long, boolean, Predicate)} methods.
     087   */
     088  @InterfaceAudience.Private
     089  public interface PredicateE extends 
    Exception {
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.Predicate.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.Predicate.html 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.Predicate.html
    index 15f1e52..65b58eb 100644
    --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.Predicate.html
    +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.Predicate.html
    @@ -91,7 +91,7 @@
     083  /**
 084   * A predicate 'closure' used by the {@link Waiter#waitFor(Configuration, long, Predicate)} and
 085   * {@link Waiter#waitFor(Configuration, long, long, Predicate)} and
-086   * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate) methods.
+086   * {@link Waiter#waitFor(Configuration, long, long, boolean, Predicate)} methods.
 087   */
 088  @InterfaceAudience.Private
 089  public interface Predicate<E extends Exception> {
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.html
    --
    diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/Waiter.html 
    

    [02/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
    index a454926..91ad073 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
    @@ -133,7 +133,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -static class PerformanceEvaluation.CheckAndMutateTest
    +static class PerformanceEvaluation.CheckAndMutateTest
     extends PerformanceEvaluation.CASTableTest
     
     
    @@ -202,8 +202,10 @@ extends Method and Description
     
     
-(package private) void
-testRow(int i)
+(package private) boolean
+testRow(int i)
    +Test for individual row.
    +
     
     
     
    @@ -221,13 +223,6 @@ extends onStartup,
     onTakedown
     
     
    -
    -
    -
-Methods inherited from class org.apache.hadoop.hbase.PerformanceEvaluation.Test
    -closeConnection,
     createConnection
    -
    -
     
     
     
    @@ -261,7 +256,7 @@ extends 
     
     CheckAndMutateTest
-CheckAndMutateTest(org.apache.hadoop.hbase.client.Connection con,
+CheckAndMutateTest(org.apache.hadoop.hbase.client.Connection con,
PerformanceEvaluation.TestOptions options,
PerformanceEvaluation.Status status)
     
    @@ -280,11 +275,19 @@ extends 
     
     testRow
-void testRow(int i)
-  throws IOException
+boolean testRow(int i)
+ throws IOException
+Description copied from class: PerformanceEvaluation.TestBase
+Test for individual row.
     
     Specified by:
 testRow in class PerformanceEvaluation.TestBase
    +Parameters:
    +i - Row index.
    +Returns:
    +true if the row was sent to server and need to record metrics.
    + False if not, multiGet and multiPut e.g., the rows are sent
    + to server only if enough gets/puts are gathered.
     Throws:
 IOException
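The new @return text encodes a subtle contract: buffered modes (multiGet, multiPut) report true only for the call that actually flushes a batch to the server, so metrics count real round trips. A hedged sketch of a buffering testRow under that contract (field and batch-size names are illustrative):

    // Sketch only: return true only when rows were really sent to the server.
    private final List<Get> gets = new ArrayList<>();

    boolean testRow(final int i) throws IOException {
      gets.add(new Get(format(i)));   // format(int) per PerformanceEvaluation convention
      if (gets.size() < multiGetBatch) {
        return false;                 // buffered, not sent: skip metrics this call
      }
      table.get(gets);                // flush the gathered batch
      gets.clear();
      return true;                    // sent: record metrics
    }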
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    index dc45723..1ab651b 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    @@ -133,7 +133,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -static class PerformanceEvaluation.CheckAndPutTest
    +static class PerformanceEvaluation.CheckAndPutTest
     extends PerformanceEvaluation.CASTableTest
     
     
    @@ -202,8 +202,10 @@ extends Method and Description
     
     
-(package private) void
-testRow(int i)
+(package private) boolean
+testRow(int i)
    +Test for individual row.
    +
     
     
     
    @@ -221,13 +223,6 @@ extends onStartup,
     onTakedown
     
     
    -
    -
    -
-Methods inherited from class org.apache.hadoop.hbase.PerformanceEvaluation.Test
    -closeConnection,
     createConnection
    -
    -
     
     
     
    @@ -261,7 +256,7 @@ extends 
     
     CheckAndPutTest
-CheckAndPutTest(org.apache.hadoop.hbase.client.Connection con,
+CheckAndPutTest(org.apache.hadoop.hbase.client.Connection con,
 PerformanceEvaluation.TestOptions options,
 PerformanceEvaluation.Status status)
     
    @@ -280,11 +275,19 @@ extends 
     
     testRow
-void testRow(int i)
-  throws IOException
+boolean testRow(int i)
+ throws IOException
+Description copied from class: PerformanceEvaluation.TestBase
+Test for individual row.
     
     Specified by:
 testRow in class PerformanceEvaluation.TestBase
    +Parameters:
    +i - Row index.
    +Returns:
    +true if the row was sent to server and need to record metrics.
    + False if not, multiGet and multiPut e.g., the rows are sent
    + to server only if enough gets/puts are gathered.
     Throws:
 IOException
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
    

    [02/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    index 3f8844b..cdb9398 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
    @@ -140,2712 +140,2713 @@
     132public class PerformanceEvaluation 
    extends Configured implements Tool {
     133  static final String RANDOM_SEEK_SCAN = 
    "randomSeekScan";
     134  static final String RANDOM_READ = 
    "randomRead";
    -135  private static final Logger LOG = 
    LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
    -136  private static final ObjectMapper 
    MAPPER = new ObjectMapper();
    -137  static {
    -138
    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
    -139  }
    -140
    -141  public static final String TABLE_NAME = 
    "TestTable";
    -142  public static final String 
    FAMILY_NAME_BASE = "info";
    -143  public static final byte[] FAMILY_ZERO 
    = Bytes.toBytes("info0");
    -144  public static final byte[] COLUMN_ZERO 
    = Bytes.toBytes("" + 0);
    -145  public static final int 
    DEFAULT_VALUE_LENGTH = 1000;
    -146  public static final int ROW_LENGTH = 
    26;
    -147
    -148  private static final int ONE_GB = 1024 
    * 1024 * 1000;
    -149  private static final int 
    DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
    -150  // TODO : should we make this 
    configurable
    -151  private static final int TAG_LENGTH = 
    256;
    -152  private static final DecimalFormat FMT 
    = new DecimalFormat("0.##");
    -153  private static final MathContext CXT = 
    MathContext.DECIMAL64;
    -154  private static final BigDecimal 
    MS_PER_SEC = BigDecimal.valueOf(1000);
    -155  private static final BigDecimal 
    BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
    -156  private static final TestOptions 
    DEFAULT_OPTS = new TestOptions();
    -157
-158  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
    -159  private static final Path PERF_EVAL_DIR 
    = new Path("performance_evaluation");
    -160
    -161  static {
    -162
    addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
    -163"Run async random read test");
    -164
    addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
    -165"Run async random write test");
    -166
    addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
    -167"Run async sequential read 
    test");
    -168
    addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
    -169"Run async sequential write 
    test");
    -170
    addCommandDescriptor(AsyncScanTest.class, "asyncScan",
    -171"Run async scan test (read every 
    row)");
    -172
    addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
    -173  "Run random read test");
    -174
    addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
    -175  "Run random seek and scan 100 
    test");
    -176
    addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
    -177  "Run random seek scan with both 
    start and stop row (max 10 rows)");
    -178
    addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
    -179  "Run random seek scan with both 
    start and stop row (max 100 rows)");
    -180
    addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
    -181  "Run random seek scan with both 
    start and stop row (max 1000 rows)");
    -182
    addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
    -183  "Run random seek scan with both 
    start and stop row (max 1 rows)");
    -184
    addCommandDescriptor(RandomWriteTest.class, "randomWrite",
    -185  "Run random write test");
    -186
    addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
    -187  "Run sequential read test");
    -188
    addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
    -189  "Run sequential write test");
    -190addCommandDescriptor(ScanTest.class, 
    "scan",
    -191  "Run scan test (read every 
    row)");
    -192
    addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193  "Run scan test using a filter to find a specific row based on its value " +
    -194  "(make sure to use --rows=20)");
    -195
    addCommandDescriptor(IncrementTest.class, "increment",
    -196  "Increment on each row; clients 
    overlap on keyspace so some concurrent operations");
    -197
    addCommandDescriptor(AppendTest.class, "append",
    -198  "Append on each row; clients 
    overlap on keyspace so some concurrent operations");
    -199
    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
    -200  "CheckAndMutate on each row; 
    clients overlap on keyspace so some concurrent operations");
    -201
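Each addCommandDescriptor call above registers a test class under its CLI name in the COMMANDS TreeMap; that map is what resolves 'randomRead', 'scan', and friends at startup, and its sorted order drives the --help listing. A sketch of the registration helper these calls imply (CmdDescriptor's constructor shape is an assumption):

    private static void addCommandDescriptor(final Class<? extends TestBase> cmdClass,
        final String name, final String description) {
      COMMANDS.put(name, new CmdDescriptor(cmdClass, name, description));
    }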
    

    [02/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/pseudo-distributed.html
    --
    diff --git a/pseudo-distributed.html b/pseudo-distributed.html
    index 80ecf34..250c185 100644
    --- a/pseudo-distributed.html
    +++ b/pseudo-distributed.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase   
     Running Apache HBase (TM) in pseudo-distributed mode
    @@ -308,7 +308,7 @@ under the License. -->
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-05-11
    +  Last Published: 
    2018-05-12
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/replication.html
    --
    diff --git a/replication.html b/replication.html
    index 8c01fee..146dbaf 100644
    --- a/replication.html
    +++ b/replication.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase  
       Apache HBase (TM) Replication
    @@ -303,7 +303,7 @@ under the License. -->
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-05-11
    +  Last Published: 
    2018-05-12
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/resources.html
    --
    diff --git a/resources.html b/resources.html
    index 9a7530e..b733826 100644
    --- a/resources.html
    +++ b/resources.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase  Other Apache HBase (TM) Resources
     
    @@ -331,7 +331,7 @@ under the License. -->
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-05-11
    +  Last Published: 
    2018-05-12
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/source-repository.html
    --
    diff --git a/source-repository.html b/source-repository.html
    index da1670a..3a3f6b8 100644
    --- a/source-repository.html
    +++ b/source-repository.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase  Source Code Management
     
    @@ -299,7 +299,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-05-11
    +  Last Published: 
    2018-05-12
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/sponsors.html
    --
    diff --git a/sponsors.html b/sponsors.html
    index 0dc4bd9..94f7738 100644
    --- a/sponsors.html
    +++ b/sponsors.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase  Apache HBase™ Sponsors
     
    @@ -333,7 +333,7 @@ under the License. -->
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-05-11
    +  Last Published: 
    2018-05-12
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/supportingprojects.html
    --
    diff --git a/supportingprojects.html b/supportingprojects.html
    index 6363a22..6d7592d 100644
    --- a/supportingprojects.html
    +++ b/supportingprojects.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase  Supporting Projects
     
    @@ -520,7 +520,7 @@ under the License. -->
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-05-11
    +  Last Published: 
    2018-05-12
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/team-list.html
    --
    diff --git a/team-list.html b/team-list.html
    index 287e5a5..3352e47 100644
    --- a/team-list.html
    +++ b/team-list.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase  Project Team
     
    @@ -730,7 +730,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-05-11
    +  Last Published: 
    2018-05-12
     
     
     
    
    

    [02/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
    index 2510283..418c60c 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Status.html
    @@ -77,77 +77,77 @@
     069import 
    org.apache.hadoop.hbase.client.RowMutations;
     070import 
    org.apache.hadoop.hbase.client.Scan;
     071import 
    org.apache.hadoop.hbase.client.Table;
    -072import 
    org.apache.hadoop.hbase.filter.BinaryComparator;
    -073import 
    org.apache.hadoop.hbase.filter.Filter;
    -074import 
    org.apache.hadoop.hbase.filter.FilterAllFilter;
    -075import 
    org.apache.hadoop.hbase.filter.FilterList;
    -076import 
    org.apache.hadoop.hbase.filter.PageFilter;
    -077import 
    org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    -078import 
    org.apache.hadoop.hbase.filter.WhileMatchFilter;
    -079import 
    org.apache.hadoop.hbase.io.compress.Compression;
    -080import 
    org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    -081import 
    org.apache.hadoop.hbase.io.hfile.RandomDistribution;
    -082import 
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    -083import 
    org.apache.hadoop.hbase.regionserver.BloomType;
    -084import 
    org.apache.hadoop.hbase.regionserver.CompactingMemStore;
    -085import 
    org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
    -086import 
    org.apache.hadoop.hbase.trace.SpanReceiverHost;
    -087import 
    org.apache.hadoop.hbase.trace.TraceUtil;
    -088import 
    org.apache.hadoop.hbase.util.ByteArrayHashKey;
    -089import 
    org.apache.hadoop.hbase.util.Bytes;
    -090import 
    org.apache.hadoop.hbase.util.Hash;
    -091import 
    org.apache.hadoop.hbase.util.MurmurHash;
    -092import 
    org.apache.hadoop.hbase.util.Pair;
    -093import 
    org.apache.hadoop.hbase.util.YammerHistogramUtils;
    -094import 
    org.apache.hadoop.io.LongWritable;
    -095import org.apache.hadoop.io.Text;
    -096import org.apache.hadoop.mapreduce.Job;
    -097import 
    org.apache.hadoop.mapreduce.Mapper;
    -098import 
    org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
    -099import 
    org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
    -100import 
    org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
    -101import org.apache.hadoop.util.Tool;
    -102import 
    org.apache.hadoop.util.ToolRunner;
    -103import 
    org.apache.htrace.core.ProbabilitySampler;
    -104import org.apache.htrace.core.Sampler;
    -105import 
    org.apache.htrace.core.TraceScope;
    -106import 
    org.apache.yetus.audience.InterfaceAudience;
    -107import org.slf4j.Logger;
    -108import org.slf4j.LoggerFactory;
    -109import 
    org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
    -110import 
    org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
    -111
    -112/**
-113 * Script used evaluating HBase performance and scalability.  Runs a HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the <a
-121 * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
    -128 */
    -129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
    -130public class PerformanceEvaluation 
    extends Configured implements Tool {
    -131  static final String RANDOM_SEEK_SCAN = 
    "randomSeekScan";
    -132  static final String RANDOM_READ = 
    "randomRead";
    -133  private static final Logger LOG = 
    LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
    -134  private static final ObjectMapper 
    MAPPER = new ObjectMapper();
    -135  static {
    -136
    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
    -137  }
    -138
    -139  public static final String TABLE_NAME = 
    "TestTable";
    -140  public static final byte[] FAMILY_NAME 
    = Bytes.toBytes("info");
    -141  public static final byte [] COLUMN_ZERO 
    = Bytes.toBytes("" + 0);
    -142  public static final byte [] 
    QUALIFIER_NAME = COLUMN_ZERO;
    +072import 
    org.apache.hadoop.hbase.client.metrics.ScanMetrics;
    +073import 
    org.apache.hadoop.hbase.filter.BinaryComparator;
    +074import 
    org.apache.hadoop.hbase.filter.Filter;
    +075import 
    org.apache.hadoop.hbase.filter.FilterAllFilter;
    +076import 
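Because the class javadoc above describes PerformanceEvaluation as a Hadoop Tool with an in-process --nomapred mode, it can be driven programmatically as well as from the shell. A hedged sketch (arguments illustrative; assumes the hbase test jar on the classpath):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.PerformanceEvaluation;
    import org.apache.hadoop.util.ToolRunner;

    public final class RunPerfEval {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // --nomapred: run multithreaded in-process instead of as a MapReduce job.
        int exit = ToolRunner.run(conf, new PerformanceEvaluation(conf),
            new String[] { "--nomapred", "--rows=1000", "randomRead", "1" });
        System.exit(exit);
      }
    }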
    

    [02/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator.html
    deleted file mode 100644
    index 7a938de..000
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator.html
    +++ /dev/null
    @@ -1,632 +0,0 @@
    -http://www.w3.org/TR/html4/loose.dtd;>
    -
    -
    -Source code
    -
    -
    -
    -
    -001/**
    -002 * Licensed to the Apache Software 
    Foundation (ASF) under one
    -003 * or more contributor license 
    agreements.  See the NOTICE file
    -004 * distributed with this work for 
    additional information
    -005 * regarding copyright ownership.  The 
    ASF licenses this file
    -006 * to you under the Apache License, 
    Version 2.0 (the
    -007 * "License"); you may not use this file 
    except in compliance
    -008 * with the License.  You may obtain a 
    copy of the License at
    -009 *
    -010 * 
    http://www.apache.org/licenses/LICENSE-2.0
    -011 *
    -012 * Unless required by applicable law or 
    agreed to in writing, software
    -013 * distributed under the License is 
    distributed on an "AS IS" BASIS,
    -014 * WITHOUT WARRANTIES OR CONDITIONS OF 
    ANY KIND, either express or implied.
    -015 * See the License for the specific 
    language governing permissions and
    -016 * limitations under the License.
    -017 */
    -018package 
    org.apache.hadoop.hbase.replication;
    -019
    -020import static org.mockito.Mockito.mock;
    -021import static 
    org.mockito.Mockito.verify;
    -022import static org.mockito.Mockito.when;
    -023
    -024import java.io.IOException;
    -025import java.util.ArrayList;
    -026import java.util.List;
    -027import java.util.UUID;
    -028import 
    java.util.concurrent.atomic.AtomicBoolean;
    -029import 
    java.util.concurrent.atomic.AtomicInteger;
    -030import 
    java.util.concurrent.atomic.AtomicReference;
    -031import org.apache.hadoop.hbase.Cell;
    -032import 
    org.apache.hadoop.hbase.HBaseClassTestRule;
    -033import org.apache.hadoop.hbase.Waiter;
    -034import 
    org.apache.hadoop.hbase.client.Connection;
    -035import 
    org.apache.hadoop.hbase.client.ConnectionFactory;
    -036import 
    org.apache.hadoop.hbase.client.Put;
    -037import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -038import 
    org.apache.hadoop.hbase.client.Table;
    -039import 
    org.apache.hadoop.hbase.regionserver.HRegion;
    -040import 
    org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
    -041import 
    org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource;
    -042import 
    org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
    -043import 
    org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource;
    -044import 
    org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl;
    -045import 
    org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
    -046import 
    org.apache.hadoop.hbase.testclassification.MediumTests;
    -047import 
    org.apache.hadoop.hbase.testclassification.ReplicationTests;
    -048import 
    org.apache.hadoop.hbase.util.Bytes;
    -049import 
    org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
    -050import 
    org.apache.hadoop.hbase.util.Threads;
    -051import 
    org.apache.hadoop.hbase.wal.WAL.Entry;
    -052import 
    org.apache.hadoop.hbase.zookeeper.ZKConfig;
    -053import 
    org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
    -054import org.junit.AfterClass;
    -055import org.junit.Assert;
    -056import org.junit.Before;
    -057import org.junit.BeforeClass;
    -058import org.junit.ClassRule;
    -059import org.junit.Test;
    -060import 
    org.junit.experimental.categories.Category;
    -061import org.slf4j.Logger;
    -062import org.slf4j.LoggerFactory;
    -063
    -064/**
    -065 * Tests ReplicationSource and 
    ReplicationEndpoint interactions
    -066 */
    -067@Category({ ReplicationTests.class, 
    MediumTests.class })
    -068public class TestReplicationEndpoint 
    extends TestReplicationBase {
    -069
    -070  @ClassRule
    -071  public static final HBaseClassTestRule 
    CLASS_RULE =
    -072  
    HBaseClassTestRule.forClass(TestReplicationEndpoint.class);
    -073
    -074  private static final Logger LOG = 
    LoggerFactory.getLogger(TestReplicationEndpoint.class);
    -075
    -076  static int numRegionServers;
    -077
    -078  @BeforeClass
    -079  public static void setUpBeforeClass() 
    throws Exception {
    -080
    TestReplicationBase.setUpBeforeClass();
    -081numRegionServers = 
    utility1.getHBaseCluster().getRegionServerThreads().size();
    -082  }
    -083
    -084  @AfterClass
    -085  public static void tearDownAfterClass() 
    throws Exception {
    -086
    TestReplicationBase.tearDownAfterClass();
    -087// 
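The deleted page above still shows the standard HBase test skeleton: a @ClassRule HBaseClassTestRule paired with @BeforeClass/@AfterClass delegation to the shared base. A minimal sketch of that same shape (test class name is illustrative):

    @Category({ ReplicationTests.class, MediumTests.class })
    public class MyReplicationEndpointTest extends TestReplicationBase {

      @ClassRule // enforces the class-level timeout and naming checks
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(MyReplicationEndpointTest.class);

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        TestReplicationBase.setUpBeforeClass(); // brings up both mini clusters
      }
    }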

    [02/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
    index 8302e28..c370eb9 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
    @@ -2113,3031 +2113,3033 @@
     2105
    errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
     2106tableName + " unable to 
    delete dangling table state " + tableState);
     2107  }
    -2108} else {
    -2109  
    errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
    -2110  tableName + " has dangling 
    table state " + tableState);
    -2111}
    -2112  }
    -2113}
    -2114// check that all tables have 
    states
    -2115for (TableName tableName : 
    tablesInfo.keySet()) {
-2116  if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
    -2117if (fixMeta) {
    -2118  
    MetaTableAccessor.updateTableState(connection, tableName, 
    TableState.State.ENABLED);
    -2119  TableState newState = 
    MetaTableAccessor.getTableState(connection, tableName);
    -2120  if (newState == null) {
    -2121
    errors.reportError(ERROR_CODE.NO_TABLE_STATE,
    -2122"Unable to change state 
    for table " + tableName + " in meta ");
    -2123  }
    -2124} else {
    -2125  
    errors.reportError(ERROR_CODE.NO_TABLE_STATE,
    -2126  tableName + " has no state 
    in meta ");
    -2127}
    -2128  }
    -2129}
    -2130  }
    -2131
    -2132  private void preCheckPermission() 
    throws IOException, AccessDeniedException {
    -2133if 
    (shouldIgnorePreCheckPermission()) {
    -2134  return;
    -2135}
    -2136
    -2137Path hbaseDir = 
    FSUtils.getRootDir(getConf());
    -2138FileSystem fs = 
    hbaseDir.getFileSystem(getConf());
    -2139UserProvider userProvider = 
    UserProvider.instantiate(getConf());
    -2140UserGroupInformation ugi = 
    userProvider.getCurrent().getUGI();
    -2141FileStatus[] files = 
    fs.listStatus(hbaseDir);
    -2142for (FileStatus file : files) {
    -2143  try {
    -2144FSUtils.checkAccess(ugi, file, 
    FsAction.WRITE);
    -2145  } catch (AccessDeniedException 
    ace) {
    -2146LOG.warn("Got 
    AccessDeniedException when preCheckPermission ", ace);
    -2147
    errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + 
    ugi.getUserName()
    -2148  + " does not have write perms 
    to " + file.getPath()
    -2149  + ". Please rerun hbck as hdfs 
    user " + file.getOwner());
    -2150throw ace;
    -2151  }
    -2152}
    -2153  }
    -2154
    -2155  /**
    -2156   * Deletes region from meta table
    -2157   */
    -2158  private void deleteMetaRegion(HbckInfo 
    hi) throws IOException {
    -2159
    deleteMetaRegion(hi.metaEntry.getRegionName());
    -2160  }
    -2161
    -2162  /**
    -2163   * Deletes region from meta table
    -2164   */
    -2165  private void deleteMetaRegion(byte[] 
    metaKey) throws IOException {
    -2166Delete d = new Delete(metaKey);
    -2167meta.delete(d);
    -2168LOG.info("Deleted " + 
    Bytes.toString(metaKey) + " from META" );
    -2169  }
    -2170
    -2171  /**
    -2172   * Reset the split parent region info 
    in meta table
    -2173   */
    -2174  private void resetSplitParent(HbckInfo 
    hi) throws IOException {
    -2175RowMutations mutations = new 
    RowMutations(hi.metaEntry.getRegionName());
    -2176Delete d = new 
    Delete(hi.metaEntry.getRegionName());
    -2177
    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
    -2178
    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
    -2179mutations.add(d);
    -2180
    -2181RegionInfo hri = 
    RegionInfoBuilder.newBuilder(hi.metaEntry)
    -2182.setOffline(false)
    -2183.setSplit(false)
    -2184.build();
    -2185Put p = 
    MetaTableAccessor.makePutFromRegionInfo(hri, 
    EnvironmentEdgeManager.currentTime());
    -2186mutations.add(p);
    -2187
    -2188meta.mutateRow(mutations);
    -2189LOG.info("Reset split parent " + 
    hi.metaEntry.getRegionNameAsString() + " in META" );
    -2190  }
    -2191
    -2192  /**
    -2193   * This backwards-compatibility 
    wrapper for permanently offlining a region
    -2194   * that should not be alive.  If the 
    region server does not support the
    -2195   * "offline" method, it will use the 
    closest unassign method instead.  This
    -2196   * will basically work until one 
    attempts to disable or delete the affected
    -2197   * table.  The problem has to do with 
    in-memory only master state, so
    -2198   * restarting the HMaster or failing 
    over to another should fix this.
    -2199   */
    -2200  private void offline(byte[] 
    regionName) throws IOException {
    -2201String regionString = 
    Bytes.toStringBinary(regionName);
    -2202if (!rsSupportsOffline) 
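resetSplitParent above is a compact example of an atomic meta repair: the dangling SPLITA/SPLITB pointers are deleted and the corrected RegionInfo re-put inside one RowMutations, so hbase:meta never exposes a half-reset parent row. Condensed sketch using only calls visible in the hunk:

    // One atomic mutateRow: drop the split pointers, rewrite the region info.
    RowMutations mutations = new RowMutations(regionName);
    Delete d = new Delete(regionName);
    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
    mutations.add(d);
    mutations.add(MetaTableAccessor.makePutFromRegionInfo(hri,
        EnvironmentEdgeManager.currentTime()));
    meta.mutateRow(mutations);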

    [02/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.html
    index bcb65f1..a9d5986 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.html
@@ -39,1086 +39,1087 @@
 031import java.util.Scanner;
 032import java.util.Set;
 033import java.util.TreeMap;
-034import org.apache.commons.cli.CommandLine;
-035import org.apache.commons.cli.GnuParser;
-036import org.apache.commons.cli.HelpFormatter;
-037import org.apache.commons.cli.Options;
-038import org.apache.commons.cli.ParseException;
-039import org.apache.commons.lang3.StringUtils;
-040import org.apache.hadoop.conf.Configuration;
-041import org.apache.hadoop.fs.FileSystem;
-042import org.apache.hadoop.hbase.ClusterMetrics.Option;
-043import org.apache.hadoop.hbase.HBaseConfiguration;
-044import org.apache.hadoop.hbase.HConstants;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.TableName;
-047import org.apache.hadoop.hbase.client.Admin;
-048import org.apache.hadoop.hbase.client.ClusterConnection;
-049import org.apache.hadoop.hbase.client.Connection;
-050import org.apache.hadoop.hbase.client.ConnectionFactory;
-051import org.apache.hadoop.hbase.client.RegionInfo;
-052import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
-053import org.apache.hadoop.hbase.favored.FavoredNodesPlan;
-054import org.apache.hadoop.hbase.util.FSUtils;
-055import org.apache.hadoop.hbase.util.MunkresAssignment;
-056import org.apache.hadoop.hbase.util.Pair;
-057import org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060
-061import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-062import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-063import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-064import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
-066
-067/**
-068 * A tool that is used for manipulating and viewing favored nodes information
-069 * for regions. Run with -h to get a list of the options.
-070 */
-071@InterfaceAudience.Private
-072// TODO: Remove? Unused. Partially implemented only.
-073public class RegionPlacementMaintainer {
-074  private static final Logger LOG = LoggerFactory.getLogger(RegionPlacementMaintainer.class
-075      .getName());
-076  //The cost of a placement that should never be assigned.
-077  private static final float MAX_COST = Float.POSITIVE_INFINITY;
-078
-079  // The cost of a placement that is undesirable but acceptable.
-080  private static final float AVOID_COST = 10f;
-081
-082  // The amount by which the cost of a placement is increased if it is the
-083  // last slot of the server. This is done to more evenly distribute the slop
-084  // amongst servers.
-085  private static final float LAST_SLOT_COST_PENALTY = 0.5f;
-086
-087  // The amount by which the cost of a primary placement is penalized if it is
-088  // not the host currently serving the region. This is done to minimize moves.
-089  private static final float NOT_CURRENT_HOST_PENALTY = 0.1f;
-090
-091  private static boolean USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = false;
-092
-093  private Configuration conf;
-094  private final boolean enforceLocality;
-095  private final boolean enforceMinAssignmentMove;
-096  private RackManager rackManager;
-097  private Set<TableName> targetTableSet;
-098  private final Connection connection;
-099
-100  public RegionPlacementMaintainer(Configuration conf) {
-101    this(conf, true, true);
-102  }
-103
-104  public RegionPlacementMaintainer(Configuration conf, boolean enforceLocality,
-105      boolean enforceMinAssignmentMove) {
-106    this.conf = conf;
-107    this.enforceLocality = enforceLocality;
-108    this.enforceMinAssignmentMove = enforceMinAssignmentMove;
-109    this.targetTableSet = new HashSet<>();
-110    this.rackManager = new RackManager(conf);
-111    try {
-112      this.connection = ConnectionFactory.createConnection(this.conf);
-113    } catch (IOException e) {
-114      throw new RuntimeException(e);
-115    }
-116  }
-117
-118  private static void printHelp(Options opt) {
-119    new HelpFormatter().printHelp(
-120        "RegionPlacement < -w | -u | -n | -v | -t | -h | -overwrite -r regionName -f favoredNodes " +
-121        "-diff" +
-122        " [-l false] [-m false] [-d] [-tables
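The constructor in the listing above is all that is needed to stand the tool up programmatically. A minimal hedged sketch, assuming only HBaseConfiguration.create() and the single-argument defaulting constructor shown in the listing:

  // Build a client configuration from hbase-site.xml on the classpath.
  org.apache.hadoop.conf.Configuration conf = org.apache.hadoop.hbase.HBaseConfiguration.create();
  // Defaults to enforceLocality=true and enforceMinAssignmentMove=true, and opens a
  // Connection internally; note it wraps the connection's IOException in a RuntimeException.
  RegionPlacementMaintainer maintainer = new RegionPlacementMaintainer(conf);

Alternatively, per the class javadoc, the tool can be run from the command line with -h to print the options assembled in printHelp.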

    [02/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html b/devapidocs/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
index c06af6d..2add328 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Table.CheckAndMutateBuilder.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":18,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6};
+var methods = {"i0":18,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";

-public static interface Table.CheckAndMutateBuilder
+public static interface Table.CheckAndMutateBuilder
 A helper class for sending checkAndMutate request.

@@ -162,6 +162,10 @@ var activeTableTab = "activeTableTab";
 boolean
 thenPut(Put put)

+Table.CheckAndMutateBuilder
+timeRange(TimeRange timeRange)
+

@@ -183,20 +187,33 @@ var activeTableTab = "activeTableTab";

 qualifier
-Table.CheckAndMutateBuilder qualifier(byte[] qualifier)
+Table.CheckAndMutateBuilder qualifier(byte[] qualifier)

 Parameters:
 qualifier - column qualifier to check.

+timeRange
+Table.CheckAndMutateBuilder timeRange(TimeRange timeRange)
+
+Parameters:
+timeRange - timeRange to check
+

 ifNotExists
-Table.CheckAndMutateBuilder ifNotExists()
+Table.CheckAndMutateBuilder ifNotExists()
 Check for lack of column.

@@ -206,7 +223,7 @@ var activeTableTab = "activeTableTab";

 ifEquals
-default Table.CheckAndMutateBuilder ifEquals(byte[] value)
+default Table.CheckAndMutateBuilder ifEquals(byte[] value)
 Check for equality.

 Parameters:
@@ -220,7 +237,7 @@ var activeTableTab = "activeTableTab";

 ifMatches
-Table.CheckAndMutateBuilder ifMatches(CompareOperator compareOp,
+Table.CheckAndMutateBuilder ifMatches(CompareOperator compareOp,
   byte[] value)

 Parameters:
@@ -235,7 +252,7 @@ var activeTableTab = "activeTableTab";

 thenPut
-boolean thenPut(Put put)
+boolean thenPut(Put put)
  throws IOException

 Parameters:
@@ -253,7 +270,7 @@ var activeTableTab = "activeTableTab";

 thenDelete
-boolean thenDelete(Delete delete)
+boolean thenDelete(Delete delete)
 throws IOException

 Parameters:
@@ -271,7 +288,7 @@ var activeTableTab = "activeTableTab";

 thenMutate
-boolean thenMutate(RowMutations mutation)
+boolean thenMutate(RowMutations mutation)
 throws IOException

 Parameters:
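Taken together, the builder methods above chain into a single conditional mutation. A hedged sketch of where the new timeRange step fits in client code, assuming an open Table named table and that TimeRange.between(min, max) from org.apache.hadoop.hbase.io.TimeRange is the factory in your release:

  import org.apache.hadoop.hbase.io.TimeRange;
  import org.apache.hadoop.hbase.util.Bytes;

  Put put = new Put(Bytes.toBytes("row1"));
  put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("new-value"));
  // Apply the Put only if f:q currently equals "old-value" within the given time range.
  boolean applied = table.checkAndMutate(Bytes.toBytes("row1"), Bytes.toBytes("f"))
      .qualifier(Bytes.toBytes("q"))
      .timeRange(TimeRange.between(0L, System.currentTimeMillis()))
      .ifEquals(Bytes.toBytes("old-value"))
      .thenPut(put);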
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/client/Table.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Table.html b/devapidocs/org/apache/hadoop/hbase/client/Table.html
index 08ae259..8d85670 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Table.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Table.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";

 @InterfaceAudience.Public
-public interface Table
+public interface Table
 extends Closeable
 Used to communicate with a single HBase table.
 Obtain an instance from a Connection and call close() afterwards.
@@ -605,7 +605,7 @@ extends Closeable

 getName
-TableName getName()
+TableName getName()
 Gets the fully qualified table name instance of this table.

@@ -615,7 +615,7 @@ extends Closeable

 getConfiguration
-org.apache.hadoop.conf.Configuration getConfiguration()
+org.apache.hadoop.conf.Configuration getConfiguration()
 Returns the Configuration object used by this instance.

 The reference returned is not a copy, so any change made to it will
@@ -629,7 +629,7 @@ extends Closeable

 getTableDescriptor
 @Deprecated
-HTableDescriptor getTableDescriptor()
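A short sketch of the usage pattern the interface javadoc prescribes (obtain from a Connection, close afterwards); the table name is a placeholder:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Table;

  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf("mytable"))) {
    // getName() returns the fully qualified table name of this instance.
    System.out.println(table.getName());
  }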
    

    [02/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index 314d278..7fc2f35 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -133,11 +133,11 @@

 ProcedureExecutor<MasterProcedureEnv>
-MasterServices.getMasterProcedureExecutor()
+HMaster.getMasterProcedureExecutor()

 ProcedureExecutor<MasterProcedureEnv>
-HMaster.getMasterProcedureExecutor()
+MasterServices.getMasterProcedureExecutor()

 private RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv, ?>
@@ -190,27 +190,27 @@

 protected boolean
-SplitTableRegionProcedure.abort(MasterProcedureEnv env)
+RegionTransitionProcedure.abort(MasterProcedureEnv env)

 protected boolean
-MergeTableRegionsProcedure.abort(MasterProcedureEnv env)
+SplitTableRegionProcedure.abort(MasterProcedureEnv env)

 protected boolean
-RegionTransitionProcedure.abort(MasterProcedureEnv env)
+MergeTableRegionsProcedure.abort(MasterProcedureEnv env)

 protected Procedure.LockState
-GCRegionProcedure.acquireLock(MasterProcedureEnv env)
+RegionTransitionProcedure.acquireLock(MasterProcedureEnv env)

 protected Procedure.LockState
-MergeTableRegionsProcedure.acquireLock(MasterProcedureEnv env)
+GCRegionProcedure.acquireLock(MasterProcedureEnv env)

 protected Procedure.LockState
-RegionTransitionProcedure.acquireLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.acquireLock(MasterProcedureEnv env)

 protected boolean
@@ -318,7 +318,7 @@

 protected void
-AssignProcedure.finishTransition(MasterProcedureEnv env,
+UnassignProcedure.finishTransition(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode)

@@ -328,7 +328,7 @@

 protected void
-UnassignProcedure.finishTransition(MasterProcedureEnv env,
+AssignProcedure.finishTransition(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode)

@@ -337,7 +337,7 @@

 protected ProcedureMetrics
-AssignProcedure.getProcedureMetrics(MasterProcedureEnv env)
+UnassignProcedure.getProcedureMetrics(MasterProcedureEnv env)

 protected ProcedureMetrics
@@ -349,7 +349,7 @@

 protected ProcedureMetrics
-UnassignProcedure.getProcedureMetrics(MasterProcedureEnv env)
+AssignProcedure.getProcedureMetrics(MasterProcedureEnv env)

 (package private) static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
@@ -380,7 +380,7 @@

 ServerName
-AssignProcedure.getServer(MasterProcedureEnv env)
+UnassignProcedure.getServer(MasterProcedureEnv env)

 abstract ServerName
@@ -390,7 +390,7 @@

 ServerName
-UnassignProcedure.getServer(MasterProcedureEnv env)
+AssignProcedure.getServer(MasterProcedureEnv env)

 private ServerName
@@ -407,19 +407,19 @@

 protected boolean
-MergeTableRegionsProcedure.hasLock(MasterProcedureEnv env)
+RegionTransitionProcedure.hasLock(MasterProcedureEnv env)

 protected boolean
-RegionTransitionProcedure.hasLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.hasLock(MasterProcedureEnv env)

 protected boolean
-MergeTableRegionsProcedure.holdLock(MasterProcedureEnv env)
+RegionTransitionProcedure.holdLock(MasterProcedureEnv env)

 protected boolean
-RegionTransitionProcedure.holdLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.holdLock(MasterProcedureEnv env)

 private boolean
@@ -533,15 +533,15 @@

 protected void
-MergeTableRegionsProcedure.releaseLock(MasterProcedureEnv env)
+RegionTransitionProcedure.releaseLock(MasterProcedureEnv env)

 protected void
-RegionTransitionProcedure.releaseLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.releaseLock(MasterProcedureEnv env)

 RemoteProcedureDispatcher.RemoteOperation
-AssignProcedure.remoteCallBuild(MasterProcedureEnv env,
+UnassignProcedure.remoteCallBuild(MasterProcedureEnv env,
 ServerName serverName)

@@ -551,12 +551,12 @@

 RemoteProcedureDispatcher.RemoteOperation
-UnassignProcedure.remoteCallBuild(MasterProcedureEnv env,
+AssignProcedure.remoteCallBuild(MasterProcedureEnv env,
 ServerName serverName)

 protected boolean
-AssignProcedure.remoteCallFailed(MasterProcedureEnv env,
+UnassignProcedure.remoteCallFailed(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode,
 IOException exception)

@@ -568,7 +568,7 @@

 protected boolean
-UnassignProcedure.remoteCallFailed(MasterProcedureEnv env,
    

    [02/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.html
index 3272079..8fb8812 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/PrefetchExecutor.html
@@ -109,7 +109,8 @@ var activeTableTab = "activeTableTab";

-public class PrefetchExecutor
+@InterfaceAudience.Private
+public final class PrefetchExecutor
 extends Object

@@ -177,10 +178,12 @@ extends Object

 Constructors

-Constructor and Description
+Modifier
+Constructor and Description

-PrefetchExecutor()
+private
+PrefetchExecutor()

@@ -242,7 +245,7 @@ extends Object

 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG

@@ -251,7 +254,7 @@ extends Object

 prefetchFutures
-private static final Map<org.apache.hadoop.fs.Path, Future<?>> prefetchFutures
+private static final Map<org.apache.hadoop.fs.Path, Future<?>> prefetchFutures
 Futures for tracking block prefetch activity

@@ -261,7 +264,7 @@ extends Object

 prefetchExecutorPool
-private static final ScheduledExecutorService prefetchExecutorPool
+private static final ScheduledExecutorService prefetchExecutorPool
 Executor pool shared among all HFiles for block prefetch

@@ -271,7 +274,7 @@ extends Object

 prefetchDelayMillis
-private static final int prefetchDelayMillis
+private static final int prefetchDelayMillis
 Delay before beginning prefetch

@@ -281,7 +284,7 @@ extends Object

 prefetchDelayVariation
-private static final float prefetchDelayVariation
+private static final float prefetchDelayVariation
 Variation in prefetch delay times, to mitigate stampedes

@@ -291,7 +294,7 @@ extends Object

 RNG
-private static final Random RNG
+private static final Random RNG

@@ -300,7 +303,7 @@ extends Object

 prefetchPathExclude
-private static final Pattern prefetchPathExclude
+private static final Pattern prefetchPathExclude

@@ -317,7 +320,7 @@ extends Object

 PrefetchExecutor
-public PrefetchExecutor()
+private PrefetchExecutor()

@@ -334,7 +337,7 @@ extends Object

 request
-public static void request(org.apache.hadoop.fs.Path path,
+public static void request(org.apache.hadoop.fs.Path path,
 Runnable runnable)

@@ -344,7 +347,7 @@ extends Object

 complete
-public static void complete(org.apache.hadoop.fs.Path path)
+public static void complete(org.apache.hadoop.fs.Path path)

@@ -353,7 +356,7 @@
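The request/complete pair above is a register/deregister protocol keyed by file path. Since PrefetchExecutor is @InterfaceAudience.Private this is internal API; the following is an illustrative sketch only, with a hypothetical path:

  import org.apache.hadoop.fs.Path;

  Path hfilePath = new Path("/hbase/data/default/t1/r1/cf/hfile1");  // hypothetical path
  // Schedule an asynchronous prefetch after the configured (randomized) delay.
  PrefetchExecutor.request(hfilePath, () -> {
    // the Runnable reads the file's blocks into the block cache
  });
  // On reader close (or when the prefetch finishes), drop/cancel the tracking Future.
  PrefetchExecutor.complete(hfilePath);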

    [02/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
    --
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
index 5559cca..9d74b78 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
@@ -165,7 +165,7 @@ extends org.apache.hadoop.hbase.regionserver.HRegion

 Fields inherited from class org.apache.hadoop.hbase.regionserver.HRegion
-busyWaitDuration, checkAndMutateChecksFailed, checkAndMutateChecksPassed, closed, closing, compactionNumBytesCompacted, compactionNumFilesCompacted, compactionsFailed, compactionsFinished, compactionsQueued, conf, dataInMemoryWithoutWAL, DEEP_OVERHEAD, DEFAULT_BUSY_WAIT_DURATION, DEFAULT_CACHE_FLUSH_INTERVAL, DEFAULT_FLUSH_PER_CHANGES, DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE, DEFAULT_MAX_CELL_SIZE, DEFAULT_ROW_PROCESSOR_TIMEOUT, DEFAULT_ROWLOCK_WAIT_DURATION, filteredReadRequestsCount, FIXED_OVERHEAD, flushesQueued, HBASE_MAX_CELL_SIZE_KEY, HBASE_REGIONSERVER_MINIBATCH_SIZE, lastReplayedCompactionSeqId, lastReplayedOpenRegionSeqId, LOAD_CFS_ON_DEMAND_CONFIG_KEY, lock, MAX_FLUSH_PER_CHANGES, maxBusyWaitDuration, maxBusyWaitMultiplier, maxCellSize, maxSeqIdInStores, MEMSTORE_FLUSH_PER_CHANGES, MEMSTORE_PERIODIC_FLUSH_INTERVAL, memstoreFlushSize, numMutationsWithoutWAL, readRequestsCount, rowProcessorExecutor, rowProcessorTimeout, rsServices, stores, SYSTEM_CACHE_FLUSH_INTERVAL, timestampSlop, writeRequestsCount, writestate
+busyWaitDuration, checkAndMutateChecksFailed, checkAndMutateChecksPassed, closed, closing, compactionNumBytesCompacted, compactionNumFilesCompacted, compactionsFailed, compactionsFinished, compactionsQueued, conf, dataInMemoryWithoutWAL, DEEP_OVERHEAD, DEFAULT_BUSY_WAIT_DURATION, DEFAULT_CACHE_FLUSH_INTERVAL, DEFAULT_FLUSH_PER_CHANGES, DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE, DEFAULT_MAX_CELL_SIZE, DEFAULT_ROW_PROCESSOR_TIMEOUT, DEFAULT_ROWLOCK_WAIT_DURATION, DEFAULT_WAL_HSYNC, filteredReadRequestsCount, FIXED_OVERHEAD, flushesQueued, HBASE_MAX_CELL_SIZE_KEY, HBASE_REGIONSERVER_MINIBATCH_SIZE, lastReplayedCompactionSeqId, lastReplayedOpenRegionSeqId, LOAD_CFS_ON_DEMAND_CONFIG_KEY, lock, MAX_FLUSH_PER_CHANGES, maxBusyWaitDuration, maxBusyWaitMultiplier, maxCellSize, maxSeqIdInStores, MEMSTORE_FLUSH_PER_CHANGES, MEMSTORE_PERIODIC_FLUSH_INTERVAL, memstoreFlushSize, numMutationsWithoutWAL, readRequestsCount, rowProcessorExecutor, rowProcessorTimeout, rsServices, stores, SYSTEM_CACHE_FLUSH_INTERVAL, timestampSlop, WAL_HSYNC_CONF_KEY, writeRequestsCount, writestate

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionWithSeqId.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionWithSeqId.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionWithSeqId.html
index 3cff0f1..09715bb 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionWithSeqId.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionWithSeqId.html
@@ -163,7 +163,7 @@ extends org.apache.hadoop.hbase.regionserver.HRegion

 Fields inherited from class org.apache.hadoop.hbase.regionserver.HRegion
-busyWaitDuration, checkAndMutateChecksFailed, checkAndMutateChecksPassed, closed, closing, compactionNumBytesCompacted, compactionNumFilesCompacted, compactionsFailed, compactionsFinished, compactionsQueued, conf, dataInMemoryWithoutWAL, DEEP_OVERHEAD, DEFAULT_BUSY_WAIT_DURATION, DEFAULT_CACHE_FLUSH_INTERVAL, DEFAULT_FLUSH_PER_CHANGES, DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE, DEFAULT_MAX_CELL_SIZE, DEFAULT_ROW_PROCESSOR_TIMEOUT, DEFAULT_ROWLOCK_WAIT_DURATION, filteredReadRequestsCount, FIXED_OVERHEAD, flushesQueued, HBASE_MAX_CELL_SIZE_KEY, HBASE_REGIONSERVER_MINIBATCH_SIZE, lastReplayedCompactionSeqId, lastReplayedOpenRegionSeqId, LOAD_CFS_ON_DEMAND_CONFIG_KEY, lock, MAX_FLUSH_PER_CHANGES, maxBusyWaitDuration, maxBusyWaitMultiplier, maxCellSize, maxSeqIdInStores, MEMSTORE_FLUSH_PER_CHANGES, MEMSTORE_PERIODIC_FLUSH_INTERVAL, memstoreFlushSize, numMutationsWithoutWAL, readRequestsCount, rowProcessorExecutor, rowProcessorTimeout, rsServices, stores, SYSTEM_CACHE_FLUSH_INTERVAL, timestampSlop, writeRequestsCount, writestate
+busyWaitDuration, checkAndMutateChecksFailed, checkAndMutateChecksPassed, closed, closing, compactionNumBytesCompacted, compactionNumFilesCompacted, compactionsFailed, compactionsFinished, compactionsQueued, conf,
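The substantive change in these inherited-field lists is the new DEFAULT_WAL_HSYNC / WAL_HSYNC_CONF_KEY pair. The constant's value is not shown in this diff; assuming the key is "hbase.wal.hsync", toggling the behavior would look like:

  org.apache.hadoop.conf.Configuration conf = org.apache.hadoop.hbase.HBaseConfiguration.create();
  // Prefer hsync() (durable to disk) over hflush() on each WAL sync; the key name
  // is an assumption about the value of HRegion.WAL_HSYNC_CONF_KEY.
  conf.setBoolean("hbase.wal.hsync", true);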

    [02/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index 3bc66bb..97aa79c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -1435,459 +1435,460 @@
 1427   */
 1428  private void execProcedure(final RootProcedureState procStack,
 1429      final Procedure<TEnvironment> procedure) {
-1430    Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432    // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException.
-1433    // The exception is caught below and then we hurry to the exit without disturbing state. The
-1434    // idea is that the processing of this procedure will be unsuspended later by an external event
-1435    // such the report of a region open. TODO: Currently, its possible for two worker threads
-1436    // to be working on the same procedure concurrently (locking in procedures is NOT about
-1437    // concurrency but about tying an entity to a procedure; i.e. a region to a particular
-1438    // procedure instance). This can make for issues if both threads are changing state.
-1439    // See env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440    // in RegionTransitionProcedure#reportTransition for example of Procedure putting
-1441    // itself back on the scheduler making it possible for two threads running against
-1442    // the one Procedure. Might be ok if they are both doing different, idempotent sections.
-1443    boolean suspended = false;
-1444
-1445    // Whether to 're-' -execute; run through the loop again.
-1446    boolean reExecute = false;
-1447
-1448    Procedure<TEnvironment>[] subprocs = null;
-1449    do {
-1450      reExecute = false;
-1451      try {
-1452        subprocs = procedure.doExecute(getEnvironment());
-1453        if (subprocs != null && subprocs.length == 0) {
-1454          subprocs = null;
-1455        }
-1456      } catch (ProcedureSuspendedException e) {
-1457        if (LOG.isTraceEnabled()) {
-1458          LOG.trace("Suspend " + procedure);
-1459        }
-1460        suspended = true;
-1461      } catch (ProcedureYieldException e) {
-1462        if (LOG.isTraceEnabled()) {
-1463          LOG.trace("Yield " + procedure + ": " + e.getMessage(), e);
-1464        }
-1465        scheduler.yield(procedure);
-1466        return;
-1467      } catch (InterruptedException e) {
-1468        if (LOG.isTraceEnabled()) {
-1469          LOG.trace("Yield interrupt " + procedure + ": " + e.getMessage(), e);
-1470        }
-1471        handleInterruptedException(procedure, e);
-1472        scheduler.yield(procedure);
-1473        return;
-1474      } catch (Throwable e) {
-1475        // Catch NullPointerExceptions or similar errors...
-1476        String msg = "CODE-BUG: Uncaught runtime exception: " + procedure;
-1477        LOG.error(msg, e);
-1478        procedure.setFailure(new RemoteProcedureException(msg, e));
-1479      }
-1480
-1481      if (!procedure.isFailed()) {
-1482        if (subprocs != null) {
-1483          if (subprocs.length == 1 && subprocs[0] == procedure) {
-1484            // Procedure returned itself. Quick-shortcut for a state machine-like procedure;
-1485            // i.e. we go around this loop again rather than go back out on the scheduler queue.
-1486            subprocs = null;
-1487            reExecute = true;
-1488            if (LOG.isTraceEnabled()) {
-1489              LOG.trace("Short-circuit to next step on pid=" + procedure.getProcId());
-1490            }
-1491          } else {
-1492            // Yield the current procedure, and make the subprocedure runnable
-1493            // subprocs may come back 'null'.
-1494            subprocs = initializeChildren(procStack, procedure, subprocs);
-1495            LOG.info("Initialized subprocedures=" +
-1496              (subprocs == null? null:
-1497              Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-1498              collect(Collectors.toList()).toString()));
-1499          }
-1500        } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
-1501          if (LOG.isTraceEnabled()) {
-1502            LOG.trace("Added to timeoutExecutor " + procedure);
-1503          }
-1504          timeoutExecutor.add(procedure);
-1505        } else if (!suspended) {
-1506          // No subtask, so we are done
-1507          procedure.setState(ProcedureState.SUCCESS);
-1508        }
-1509      }
-1510
-1511      // Add the procedure to the stack
-1512
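The short-circuit branch above ("procedure returned itself") is worth isolating. A hypothetical, self-contained sketch of just that control flow (not HBase API; Step is an invented type):

  interface Step {
    Step[] execute();  // returns sub-steps, or {this} to advance in place, or null when done
  }

  static void runToCompletion(Step step) {
    boolean reExecute;
    do {
      reExecute = false;
      Step[] subprocs = step.execute();
      // Returning exactly itself means: run my next state on this same worker
      // thread, skipping a round-trip through the scheduler queue.
      if (subprocs != null && subprocs.length == 1 && subprocs[0] == step) {
        reExecute = true;
      }
    } while (reExecute);
  }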
    

    [02/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.html
    --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.html
index 939c65a..3261918 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.html
@@ -78,7 +78,7 @@
 070  public void testAddColumnFamily() throws Exception {
 071    // Create a table with two families
 072    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
-073    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0));
+073    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0));
 074    admin.createTable(builder.build()).join();
 075    admin.disableTable(tableName).join();
 076    // Verify the table descriptor
@@ -93,7 +93,7 @@
 085  public void testAddSameColumnFamilyTwice() throws Exception {
 086    // Create a table with one families
 087    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
-088    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0));
+088    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0));
 089    admin.createTable(builder.build()).join();
 090    admin.disableTable(tableName).join();
 091    // Verify the table descriptor
@@ -117,7 +117,7 @@
 109    TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName);
 110    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(FAMILY_0);
 111    int blockSize = cfd.getBlocksize();
-112    admin.createTable(tdBuilder.addColumnFamily(cfd).build()).join();
+112    admin.createTable(tdBuilder.setColumnFamily(cfd).build()).join();
 113    admin.disableTable(tableName).join();
 114    // Verify the table descriptor
 115    verifyTableDescriptor(tableName, FAMILY_0);
@@ -137,7 +137,7 @@
 129    TableDescriptorBuilder tdBuilder = TableDescriptorBuilder.newBuilder(tableName);
 130    ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(FAMILY_0);
 131    int blockSize = cfd.getBlocksize();
-132    admin.createTable(tdBuilder.addColumnFamily(cfd).build()).join();
+132    admin.createTable(tdBuilder.setColumnFamily(cfd).build()).join();
 133    admin.disableTable(tableName).join();
 134    // Verify the table descriptor
 135    verifyTableDescriptor(tableName, FAMILY_0);
@@ -158,8 +158,8 @@
 150  public void testDeleteColumnFamily() throws Exception {
 151    // Create a table with two families
 152    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
-153    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
-154        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1));
+153    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
+154        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1));
 155    admin.createTable(builder.build()).join();
 156    admin.disableTable(tableName).join();
 157    // Verify the table descriptor
@@ -174,8 +174,8 @@
 166  public void testDeleteSameColumnFamilyTwice() throws Exception {
 167    // Create a table with two families
 168    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
-169    builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
-170        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1));
+169    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
+170        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_1));
 171    admin.createTable(builder.build()).join();
 172    admin.disableTable(tableName).join();
 173    // Verify the table descriptor

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.html
index 04d5471..dfa687f 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.html
@@ -128,7 +128,7 @@
 120    byte[][] families = { FAMILY, FAMILY_0, FAMILY_1 };
 121    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
 122    for (byte[] family : families) {
-123      builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
+123      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
 124
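The rename these tests exercise (addColumnFamily becomes setColumnFamily on TableDescriptorBuilder) reads as follows in standalone client code; a minimal sketch with placeholder table and family names:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

  TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
      .build();
  admin.createTable(desc);  // synchronous Admin; the tests above use AsyncAdmin plus join()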

    [02/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.html b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.html
index fd3f026..6bae4eb 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/WALCoprocessorHost.html
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";

 @InterfaceAudience.Private
-public class WALCoprocessorHost
+public class WALCoprocessorHost
 extends CoprocessorHost<WALCoprocessor, WALCoprocessorEnvironment>
 Implements the coprocessor environment and runtime support for coprocessors
 loaded within a WAL.
@@ -299,7 +299,7 @@ extends

 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG

@@ -308,7 +308,7 @@ extends

 wal
-private final WAL wal
+private final WAL wal

@@ -317,7 +317,7 @@ extends

 walObserverGetter
-private CoprocessorHost.ObserverGetter<WALCoprocessor, WALObserver> walObserverGetter
+private CoprocessorHost.ObserverGetter<WALCoprocessor, WALObserver> walObserverGetter

@@ -334,7 +334,7 @@ extends

 WALCoprocessorHost
-public WALCoprocessorHost(WAL log,
+public WALCoprocessorHost(WAL log,
   org.apache.hadoop.conf.Configuration conf)
 Constructor

@@ -358,7 +358,7 @@ extends

 createEnvironment
-public WALCoprocessorHost.WALEnvironment createEnvironment(WALCoprocessor instance,
+public WALCoprocessorHost.WALEnvironment createEnvironment(WALCoprocessor instance,
   int priority,
   int seq,
   org.apache.hadoop.conf.Configuration conf)
@@ -376,9 +376,9 @@ extends

 checkAndGetInstance
-public WALCoprocessor checkAndGetInstance(Class<?> implClass)
-   throws InstantiationException,
-  IllegalAccessException
+public WALCoprocessor checkAndGetInstance(Class<?> implClass)
+   throws IllegalAccessException,
+  InstantiationException
 Description copied from class: CoprocessorHost
 Called when a new Coprocessor class needs to be loaded. Checks if type of the given class
 is what the corresponding host implementation expects. If it is of correct type, returns an
@@ -389,8 +389,8 @@ extends
 Specified by:
 checkAndGetInstance in class CoprocessorHost<WALCoprocessor, WALCoprocessorEnvironment>
 Throws:
-InstantiationException
 IllegalAccessException
+InstantiationException

@@ -400,7 +400,7 @@ extends

 preWALWrite
-public void preWALWrite(RegionInfo info,
+public void preWALWrite(RegionInfo info,
 WALKey logKey,
 WALEdit logEdit)
  throws IOException
@@ -416,7 +416,7 @@ extends

 postWALWrite
-public void postWALWrite(RegionInfo info,
+public void postWALWrite(RegionInfo info,
  WALKey logKey,
  WALEdit logEdit)
   throws IOException
@@ -432,7 +432,7 @@ extends

 preWALRoll
-public void preWALRoll(org.apache.hadoop.fs.Path oldPath,
+public void preWALRoll(org.apache.hadoop.fs.Path oldPath,
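The pre/postWALWrite hooks above are the points a WALObserver coprocessor plugs into. A hedged sketch of a minimal observer, assuming the 2.x WALCoprocessor/WALObserver signatures (verify the ObserverContext generics against your release):

  import java.util.Optional;
  import org.apache.hadoop.hbase.client.RegionInfo;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.WALCoprocessor;
  import org.apache.hadoop.hbase.coprocessor.WALCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.WALObserver;
  import org.apache.hadoop.hbase.wal.WALEdit;
  import org.apache.hadoop.hbase.wal.WALKey;

  public class LoggingWALObserver implements WALCoprocessor, WALObserver {
    @Override
    public Optional<WALObserver> getWALObserver() {
      return Optional.of(this);  // hand the host this observer instance
    }

    @Override
    public void preWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
        RegionInfo info, WALKey logKey, WALEdit logEdit) {
      // Inspect each WAL append before it is written; throw IOException to veto.
    }
  }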
      

    [02/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/net/class-use/Address.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/net/class-use/Address.html b/devapidocs/org/apache/hadoop/hbase/net/class-use/Address.html
index 633dc85..eb6eb6d 100644
--- a/devapidocs/org/apache/hadoop/hbase/net/class-use/Address.html
+++ b/devapidocs/org/apache/hadoop/hbase/net/class-use/Address.html
@@ -338,17 +338,17 @@

 Set<Address>
-RSGroupInfoManager.moveServers(Set<Address> servers,
+RSGroupInfoManagerImpl.moveServers(Set<Address> servers,
    String srcGroup,
-   String dstGroup)
-Move servers to a new group.
-
+   String dstGroup)

 Set<Address>
-RSGroupInfoManagerImpl.moveServers(Set<Address> servers,
+RSGroupInfoManager.moveServers(Set<Address> servers,
    String srcGroup,
-   String dstGroup)
+   String dstGroup)
+Move servers to a new group.
+

@@ -375,9 +375,7 @@

 RSGroupInfo
-RSGroupInfoManager.getRSGroupOfServer(Address serverHostPort)
-Gets the group info of server.
-
+RSGroupInfoManagerImpl.getRSGroupOfServer(Address serverHostPort)

 RSGroupInfo
@@ -385,13 +383,15 @@

 RSGroupInfo
-RSGroupAdmin.getRSGroupOfServer(Address hostPort)
-Retrieve the RSGroupInfo a server is affiliated to
+RSGroupInfoManager.getRSGroupOfServer(Address serverHostPort)
+Gets the group info of server.

 RSGroupInfo
-RSGroupInfoManagerImpl.getRSGroupOfServer(Address serverHostPort)
+RSGroupAdmin.getRSGroupOfServer(Address hostPort)
+Retrieve the RSGroupInfo a server is affiliated to
+

 RSGroupInfo
@@ -480,17 +480,17 @@

 Set<Address>
-RSGroupInfoManager.moveServers(Set<Address> servers,
+RSGroupInfoManagerImpl.moveServers(Set<Address> servers,
    String srcGroup,
-   String dstGroup)
-Move servers to a new group.
-
+   String dstGroup)

 Set<Address>
-RSGroupInfoManagerImpl.moveServers(Set<Address> servers,
+RSGroupInfoManager.moveServers(Set<Address> servers,
    String srcGroup,
-   String dstGroup)
+   String dstGroup)

    [02/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
index 88c932b..5f0d8e8 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";

 @InterfaceAudience.Private
-public class MasterRpcServices
+public class MasterRpcServices
 extends RSRpcServices
 implements org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService.BlockingInterface, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService.BlockingInterface
 Implements the master RPC services.
@@ -629,38 +629,43 @@ implements org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.Master
  org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request)

+org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationResponse
+reportFileArchival(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+  org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.FileArchiveNotificationRequest request)
+
 org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse
 reportProcedureDone(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
 org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest request)

 org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse
 reportRegionSpaceUse(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
 org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request)

 org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse
 reportRegionStateTransition(org.apache.hbase.thirdparty.com.google.protobuf.RpcController c,
 org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest req)

 org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse
 reportRSFatalError(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
       
    

    [02/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
index b0c3f69..72ebff5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
@@ -255,7 +255,7 @@
 247          setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_UPDATE_META);
 248          break;
 249        case SPLIT_TABLE_REGION_UPDATE_META:
-250          updateMetaForDaughterRegions(env);
+250          updateMeta(env);
 251          setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META);
 252          break;
 253        case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
@@ -764,7 +764,7 @@
 756   * Add daughter regions to META
 757   * @param env MasterProcedureEnv
 758   */
-759  private void updateMetaForDaughterRegions(final MasterProcedureEnv env) throws IOException {
+759  private void updateMeta(final MasterProcedureEnv env) throws IOException {
 760    env.getAssignmentManager().markRegionAsSplit(getParentRegion(), getParentRegionServerName(env),
 761        daughter_1_RI, daughter_2_RI);
 762  }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
index b0c3f69..72ebff5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
@@ -255,7 +255,7 @@
 247          setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_UPDATE_META);
 248          break;
 249        case SPLIT_TABLE_REGION_UPDATE_META:
-250          updateMetaForDaughterRegions(env);
+250          updateMeta(env);
 251          setNextState(SplitTableRegionState.SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META);
 252          break;
 253        case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
@@ -764,7 +764,7 @@
 756   * Add daughter regions to META
 757   * @param env MasterProcedureEnv
 758   */
-759  private void updateMetaForDaughterRegions(final MasterProcedureEnv env) throws IOException {
+759  private void updateMeta(final MasterProcedureEnv env) throws IOException {
 760    env.getAssignmentManager().markRegionAsSplit(getParentRegion(), getParentRegionServerName(env),
 761        daughter_1_RI, daughter_2_RI);
 762  }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
index 76ab2b9..61ac160 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
@@ -32,107 +32,102 @@
 024import org.apache.hadoop.hbase.TableName;
 025import org.apache.hadoop.hbase.TableNotFoundException;
 026import org.apache.hadoop.hbase.client.RegionInfo;
-027import org.apache.hadoop.hbase.master.MasterFileSystem;
-028import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
-029import org.apache.hadoop.hbase.security.User;
-030import org.apache.hadoop.hbase.util.FSUtils;
-031import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-032import org.apache.yetus.audience.InterfaceAudience;
-033
-034/**
-035 * Base class for all the Table procedures that want to use a StateMachineProcedure.
-036 * It provides helpers like basic locking, sync latch, and toStringClassDetails().
-037 */
-038@InterfaceAudience.Private
-039public abstract class AbstractStateMachineTableProcedure<TState>
-040    extends StateMachineProcedure<MasterProcedureEnv, TState>
-041    implements TableProcedureInterface {
+027import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+028import org.apache.hadoop.hbase.security.User;
+029import
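The switch in the SplitTableRegionProcedure listing is the standard StateMachineProcedure shape: do the current state's work, then setNextState. A hypothetical sketch of that pattern (invented enum and helper names, not the real HBase class):

  enum SplitState { PRE_OP, UPDATE_META, POST_OP }

  Flow executeFromState(Env env, SplitState state) throws IOException {
    switch (state) {
      case PRE_OP:
        preSplit(env);
        setNextState(SplitState.UPDATE_META);
        return Flow.HAS_MORE_STATE;
      case UPDATE_META:
        updateMeta(env);  // the renamed step from the diff above
        setNextState(SplitState.POST_OP);
        return Flow.HAS_MORE_STATE;
      case POST_OP:
        postSplit(env);
        return Flow.NO_MORE_STATE;  // machine is done
      default:
        throw new UnsupportedOperationException("unhandled state " + state);
    }
  }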
    

    [02/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/class-use/Append.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/Append.html 
    b/apidocs/org/apache/hadoop/hbase/client/class-use/Append.html
    index 774a99d..d575d36 100644
    --- a/apidocs/org/apache/hadoop/hbase/client/class-use/Append.html
    +++ b/apidocs/org/apache/hadoop/hbase/client/class-use/Append.html
@@ -135,16 +135,16 @@
[This page's Javadoc links to docs.oracle.com change from http:// to https://; the method signatures themselves are unchanged.]

 Append
 Append.setACL(Map<String, Permission> perms)

 Append
 Append.setACL(String user, Permission perms)

 Append
 Append.setAttribute(String name, byte[] value)

@@ -153,7 +153,7 @@

 Append
 Append.setClusterIds(List<UUID> clusterIds)

@@ -161,7 +161,7 @@

 Append
 Append.setFamilyCellMap(NavigableMap<byte[], List<Cell>> map)
 Deprecated.
 As of release 2.0.0, this will be removed in HBase 3.0.0.
 Use Append(byte[], long, NavigableMap) instead.

@@ -170,7 +170,7 @@

 Append
 Append.setId(String id)

@@ -205,7 +205,7 @@

 CompletableFuture<Result>
 AsyncTable.append(Append append)
 Appends values to one or more columns within a single row.
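Following the deprecation note above, new code passes the family/cell map through the Append constructor rather than calling setFamilyCellMap. A minimal sketch (assumes the hbase-client artifact on the classpath; the helper name is invented):

import java.util.List;
import java.util.NavigableMap;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Append;

public class AppendBuilderExample {
  // Uses the Append(byte[], long, NavigableMap) constructor named in the
  // deprecation note instead of new Append(row).setFamilyCellMap(...).
  static Append buildAppend(byte[] row, NavigableMap<byte[], List<Cell>> familyCellMap) {
    return new Append(row, HConstants.LATEST_TIMESTAMP, familyCellMap);
  }
}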
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html 
    b/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
    index 7807024..247cc4a 100644
    --- a/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
    +++ b/apidocs/org/apache/hadoop/hbase/client/class-use/AsyncAdmin.html
@@ -118,7 +118,7 @@

 default AsyncAdmin
-AsyncConnection.getAdmin(ExecutorService pool)
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    index 7edb3ff..665071c 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    @@ -1221,2378 +1221,2377 @@
 1213    configurationManager.registerObserver(procEnv);
 1214
 1215    int cpus = Runtime.getRuntime().availableProcessors();
-1216    final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
-1217        Math.max((cpus > 0 ? cpus / 4 : 0),
-1218        MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
-1219    final boolean abortOnCorruption = conf.getBoolean(
-1220        MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
-1221        MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
-1222    procedureStore.start(numThreads);
-1223    procedureExecutor.start(numThreads, abortOnCorruption);
-1224    procEnv.getRemoteDispatcher().start();
-1225  }
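The deleted startup block above sizes the procedure executor from the host's CPU count with a configured floor. The sizing rule in isolation, as a stand-alone sketch (the constant value here is invented for the example, not the HBase default):

public class ProcedureThreadSizing {
  // Hypothetical stand-in for the MasterProcedureConstants minimum.
  static final int DEFAULT_MIN_PROCEDURE_THREADS = 16;

  public static void main(String[] args) {
    int cpus = Runtime.getRuntime().availableProcessors();
    // A quarter of the cores, but never below the configured minimum.
    int numThreads = Math.max(cpus > 0 ? cpus / 4 : 0, DEFAULT_MIN_PROCEDURE_THREADS);
    System.out.println("procedure executor threads: " + numThreads);
  }
}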
    -1226
-1227  private void stopProcedureExecutor() {
-1228    if (procedureExecutor != null) {
-1229      configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
-1230      procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
-1231      procedureExecutor.stop();
-1232      procedureExecutor.join();
-1233      procedureExecutor = null;
-1234    }
-1235
-1236    if (procedureStore != null) {
-1237      procedureStore.stop(isAborted());
-1238      procedureStore = null;
-1239    }
-1240  }
    -1241
-1242  private void stopChores() {
-1243    if (this.expiredMobFileCleanerChore != null) {
-1244      this.expiredMobFileCleanerChore.cancel(true);
-1245    }
-1246    if (this.mobCompactChore != null) {
-1247      this.mobCompactChore.cancel(true);
-1248    }
-1249    if (this.balancerChore != null) {
-1250      this.balancerChore.cancel(true);
-1251    }
-1252    if (this.normalizerChore != null) {
-1253      this.normalizerChore.cancel(true);
-1254    }
-1255    if (this.clusterStatusChore != null) {
-1256      this.clusterStatusChore.cancel(true);
-1257    }
-1258    if (this.catalogJanitorChore != null) {
-1259      this.catalogJanitorChore.cancel(true);
-1260    }
-1261    if (this.clusterStatusPublisherChore != null) {
-1262      clusterStatusPublisherChore.cancel(true);
-1263    }
-1264    if (this.mobCompactThread != null) {
-1265      this.mobCompactThread.close();
-1266    }
-1267
-1268    if (this.quotaObserverChore != null) {
-1269      quotaObserverChore.cancel();
-1270    }
-1271    if (this.snapshotQuotaChore != null) {
-1272      snapshotQuotaChore.cancel();
-1273    }
-1274  }
    -1275
-1276  /**
-1277   * @return Get remote side's InetAddress
-1278   */
-1279  InetAddress getRemoteInetAddress(final int port,
-1280      final long serverStartCode) throws UnknownHostException {
-1281    // Do it out here in its own little method so can fake an address when
-1282    // mocking up in tests.
-1283    InetAddress ia = RpcServer.getRemoteIp();
-1284
-1285    // The call could be from the local regionserver,
-1286    // in which case, there is no remote address.
-1287    if (ia == null && serverStartCode == startcode) {
-1288      InetSocketAddress isa = rpcServices.getSocketAddress();
-1289      if (isa != null && isa.getPort() == port) {
-1290        ia = isa.getAddress();
-1291      }
-1292    }
-1293    return ia;
-1294  }
    -1295
-1296  /**
-1297   * @return Maximum time we should run balancer for
-1298   */
-1299  private int getMaxBalancingTime() {
-1300    int maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, -1);
-1301    if (maxBalancingTime == -1) {
-1302      // if max balancing time isn't set, defaulting it to period time
-1303      maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_PERIOD,
-1304          HConstants.DEFAULT_HBASE_BALANCER_PERIOD);
-1305    }
-1306    return maxBalancingTime;
-1307  }
    -1308
-1309  /**
-1310   * @return Maximum number of regions in transition
-1311   */
-1312  private int getMaxRegionsInTransition() {
-1313    int numRegions = this.assignmentManager.getRegionStates().getRegionAssignments().size();
-1314    return Math.max((int) Math.floor(numRegions * this.maxRitPercent), 1);
-1315  }
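A worked instance of that formula, with example values (both numbers invented for illustration):

int numRegions = 200;         // example: regions currently assigned
double maxRitPercent = 0.05;  // example: configured max-RIT ratio
int maxRit = Math.max((int) Math.floor(numRegions * maxRitPercent), 1); // -> 10

The Math.max guard keeps the cap at 1 even for clusters so small that the percentage would round down to zero.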
    -1316
-1317  /**
-1318   * It first sleeps until the next balance plan start time. Meanwhile, it throttles by the max
-1319   * number of regions in transition to protect availability.
-1320   * @param nextBalanceStartTime The next balance plan start time
-1321   * @param maxRegionsInTransition max number of regions in transition
-1322   * @param cutoffTime when to exit balancer
-1323   */
-1324  private void 
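A rough sketch of the throttling idea that javadoc describes, with placeholder names (this is not the HMaster code; regionsInTransition() stands in for a lookup against the assignment manager):

public class BalanceThrottleSketch {
  void throttle(long nextBalanceStartTime, int maxRegionsInTransition, long cutoffTime)
      throws InterruptedException {
    long now = System.currentTimeMillis();
    // First, wait for the balance plan's scheduled start time.
    if (now < nextBalanceStartTime) {
      Thread.sleep(nextBalanceStartTime - now);
    }
    // Then hold off while the cluster is at its regions-in-transition cap,
    // giving up once the cutoff time passes.
    while (regionsInTransition() >= maxRegionsInTransition
        && System.currentTimeMillis() < cutoffTime) {
      Thread.sleep(100);
    }
  }

  int regionsInTransition() {
    return 0; // placeholder; the real count comes from the AssignmentManager
  }
}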

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
    index 802b925..a3e80ab 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
    @@ -73,229 +73,229 @@
 065import java.util.concurrent.TimeoutException;
 066import java.util.concurrent.atomic.AtomicBoolean;
 067import java.util.concurrent.atomic.AtomicInteger;
-068import java.util.concurrent.atomic.AtomicLong;
-069import java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import java.util.concurrent.locks.ReadWriteLock;
-072import java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import org.apache.hadoop.hbase.CellBuilderType;
-081import org.apache.hadoop.hbase.CellComparator;
-082import org.apache.hadoop.hbase.CellComparatorImpl;
-083import org.apache.hadoop.hbase.CellScanner;
-084import org.apache.hadoop.hbase.CellUtil;
-085import org.apache.hadoop.hbase.CompareOperator;
-086import org.apache.hadoop.hbase.CompoundConfiguration;
-087import org.apache.hadoop.hbase.DoNotRetryIOException;
-088import org.apache.hadoop.hbase.DroppedSnapshotException;
-089import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import org.apache.hadoop.hbase.HConstants;
-091import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import org.apache.hadoop.hbase.HRegionInfo;
-094import org.apache.hadoop.hbase.KeyValue;
-095import org.apache.hadoop.hbase.KeyValueUtil;
-096import org.apache.hadoop.hbase.NamespaceDescriptor;
-097import org.apache.hadoop.hbase.NotServingRegionException;
-098import org.apache.hadoop.hbase.PrivateCellUtil;
-099import org.apache.hadoop.hbase.RegionTooBusyException;
-100import org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import org.apache.hadoop.hbase.UnknownScannerException;
-104import org.apache.hadoop.hbase.client.Append;
-105import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import org.apache.hadoop.hbase.client.CompactionState;
-107import org.apache.hadoop.hbase.client.Delete;
-108import org.apache.hadoop.hbase.client.Durability;
-109import org.apache.hadoop.hbase.client.Get;
-110import org.apache.hadoop.hbase.client.Increment;
-111import org.apache.hadoop.hbase.client.IsolationLevel;
-112import org.apache.hadoop.hbase.client.Mutation;
-113import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import org.apache.hadoop.hbase.client.Put;
-115import org.apache.hadoop.hbase.client.RegionInfo;
-116import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import org.apache.hadoop.hbase.client.Result;
-118import org.apache.hadoop.hbase.client.RowMutations;
-119import org.apache.hadoop.hbase.client.Scan;
-120import org.apache.hadoop.hbase.client.TableDescriptor;
-121import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import org.apache.hadoop.hbase.filter.FilterWrapper;
-131import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import org.apache.hadoop.hbase.io.HFileLink;
-133import org.apache.hadoop.hbase.io.HeapSize;
-134import org.apache.hadoop.hbase.io.TimeRange;
-135import org.apache.hadoop.hbase.io.hfile.HFile;
-136import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import org.apache.hadoop.hbase.ipc.RpcCall;
-139import org.apache.hadoop.hbase.ipc.RpcServer;
-140import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-143import 
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
    index 091d731..3f2fd56 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
@@ -117,7 +117,7 @@
     
     
     
 static class TestAsyncProcess.MyAsyncProcess
 extends Object
     
     
@@ -154,78 +154,74 @@
 DEFAULT_START_LOG_ERRORS_AFTER_COUNT

-(package private) org.apache.hadoop.hbase.client.BatchErrors
-globalErrors
-
 (package private) long
 id

 static String
 LOG_DETAILS_FOR_BATCH_ERROR

 static String
 LOG_DETAILS_PERIOD

 (package private) boolean
 logBatchErrorDetails

 (package private) AtomicInteger
 nbActions

 (package private) AtomicInteger
 nbMultiResponse

 (package private) int
 numTries

 (package private) long
 pause

 (package private) long
 pauseForCQTBE

 private long
 previousTimeout

 static String
 PRIMARY_CALL_TIMEOUT_KEY

 (package private) long
 primaryCallTimeoutMicroseconds

 (package private) org.apache.hadoop.hbase.client.RequestController
 requestController

 (package private) org.apache.hadoop.hbase.ipc.RpcControllerFactory
 rpcFactory

 (package private) long
 serverTrackerTimeout

 (package private) ExecutorService
 service

 static String
 START_LOG_ERRORS_AFTER_COUNT_KEY

 (package private) int
 startLogErrorsCnt

@@ -252,11 +248,6 @@
     org.apache.hadoop.conf.Configuration conf,
     AtomicInteger nbThreads)

-
-MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
-    org.apache.hadoop.conf.Configuration conf,
-    boolean useGlobalErrors)
-
     
     
     
@@ -301,23 +292,19 @@
     org.apache.hadoop.hbase.ServerName arg1)

-boolean
-hasError()
-
 (package private) void
 incTaskCounters(Collection<byte[]> arg0, org.apache.hadoop.hbase.ServerName arg1)

 (package private) static boolean
 isReplicaGet(org.apache.hadoop.hbase.client.Row arg0)

 <Res> org.apache.hadoop.hbase.client.AsyncRequestFuture
 submit(org.apache.hadoop.hbase.client.AsyncProcessTask<Res> task)

 <CResult> org.apache.hadoop.hbase.client.AsyncRequestFuture
 submit(ExecutorService pool,
     org.apache.hadoop.hbase.TableName tableName,
@@ -326,7 

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/InternalScanner.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/InternalScanner.html
     
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/InternalScanner.html
    index 32b9ea1..ec16364 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/InternalScanner.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/InternalScanner.html
    @@ -708,7 +708,7 @@
     
     
 InternalScanner
-StripeCompactor.StripeInternalScannerFactory.createScanner(ScanInfo scanInfo,
+Compactor.InternalScannerFactory.createScanner(ScanInfo scanInfo,
     List<StoreFileScanner> scanners,
     ScanType scanType,
     Compactor.FileDetails fd,
@@ -716,7 +716,7 @@

 InternalScanner
-Compactor.InternalScannerFactory.createScanner(ScanInfo scanInfo,
+StripeCompactor.StripeInternalScannerFactory.createScanner(ScanInfo scanInfo,
     List<StoreFileScanner> scanners,
     ScanType scanType,
     Compactor.FileDetails fd,
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
     
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
    index 4d209ec..1ec8a93 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/KeyValueScanner.html
    @@ -222,13 +222,13 @@
     MemStoreMergerSegmentsIterator.scanners
     
     
-protected List<KeyValueScanner>
-KeyValueHeap.scannersForDelayedClose
-
-
 private List<KeyValueScanner>
 StoreScanner.scannersForDelayedClose

+
+protected List<KeyValueScanner>
+KeyValueHeap.scannersForDelayedClose
+
     
     
     
    @@ -388,15 +388,15 @@
     
     
 List<KeyValueScanner>
-DefaultMemStore.getScanners(long readPt)
+MemStore.getScanners(long readPt)

 List<KeyValueScanner>
-MemStore.getScanners(long readPt)
+CompactingMemStore.getScanners(long readPt)

 List<KeyValueScanner>
-CompactingMemStore.getScanners(long readPt)
+DefaultMemStore.getScanners(long readPt)

 List<KeyValueScanner>
    @@ -442,12 +442,12 @@
     
     
 int
-ReversedKeyValueHeap.ReversedKVScannerComparator.compare(KeyValueScanner left,
+KeyValueHeap.KVScannerComparator.compare(KeyValueScanner left,
     KeyValueScanner right)

 int
-KeyValueHeap.KVScannerComparator.compare(KeyValueScanner left,
+ReversedKeyValueHeap.ReversedKVScannerComparator.compare(KeyValueScanner left,
     KeyValueScanner right)
     
     
    @@ -458,14 +458,14 @@
     
     
 private void
-CellChunkImmutableSegment.reinitializeCellSet(int numOfCells,
+CellArrayImmutableSegment.reinitializeCellSet(int numOfCells,
     KeyValueScanner segmentScanner,
     CellSet oldCellSet,
     MemStoreCompactionStrategy.Action action)

 private void
-CellArrayImmutableSegment.reinitializeCellSet(int numOfCells,
+CellChunkImmutableSegment.reinitializeCellSet(int numOfCells,
     KeyValueScanner segmentScanner,
     CellSet oldCellSet,
     MemStoreCompactionStrategy.Action action)
    @@ -543,25 +543,25 @@
     
     
 protected void
-ReversedRegionScannerImpl.initializeKVHeap(List<KeyValueScanner> scanners,
+HRegion.RegionScannerImpl.initializeKVHeap(List<KeyValueScanner> scanners,
     

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.FairQueue.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.FairQueue.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.FairQueue.html
    deleted file mode 100644
    index 28dc97a..000
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.FairQueue.html
    +++ /dev/null
    @@ -1,454 +0,0 @@
-MasterProcedureScheduler.FairQueue (Apache HBase 3.0.0-SNAPSHOT API)
-
-org.apache.hadoop.hbase.master.procedure
-Class MasterProcedureScheduler.FairQueue<T extends Comparable<T>>
-
-java.lang.Object
-  org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler.FairQueue<T>
-
-Enclosing class:
-MasterProcedureScheduler
-
-private static class MasterProcedureScheduler.FairQueue<T extends Comparable<T>>
-extends Object
-
-Field Summary
-
-Modifier and Type                          Field and Description
-private int                                currentQuantum
-private MasterProcedureScheduler.Queue<T>  currentQueue
-private int                                quantum
-private MasterProcedureScheduler.Queue<T>  queueHead
-private int                                size
-
-Constructor Summary
-
-FairQueue()
-FairQueue(int quantum)
-
-Method Summary
-
-All Methods  Instance Methods  Concrete Methods
-
-Modifier and Type                   Method and Description
-void                                add(MasterProcedureScheduler.Queue<T> queue)
-private int                         calculateQuantum(MasterProcedureScheduler.Queue queue)
-boolean                             hasRunnables()
-private boolean                     nextQueue()
-MasterProcedureScheduler.Queue<T>   poll()
-void                                remove(MasterProcedureScheduler.Queue<T> queue)
-private void                        setNextQueue(MasterProcedureScheduler.Queue<T> queue)
-
-Methods inherited from class java.lang.Object:
-clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, 
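From the summary tables above one can reconstruct the intent of the deleted class: a round-robin "fair" queue that grants each sub-queue a quantum of polls before rotating to the next. A self-contained sketch of that idea follows; it uses plain java.util types, the names only loosely mirror the deleted page, and the size/calculateQuantum bookkeeping is omitted, so this is not the HBase implementation.

import java.util.ArrayDeque;
import java.util.Deque;

final class FairQueueSketch<E> {
  private final Deque<Deque<E>> queues = new ArrayDeque<>(); // ring of sub-queues
  private final int quantum;   // polls granted to a sub-queue before rotating
  private int currentQuantum;  // polls left for the sub-queue at the head

  FairQueueSketch(int quantum) {
    this.quantum = quantum;
    this.currentQuantum = quantum;
  }

  void add(Deque<E> queue) {
    queues.addLast(queue);
  }

  boolean hasRunnables() {
    return queues.stream().anyMatch(q -> !q.isEmpty());
  }

  // Poll from the current sub-queue; rotate once its quantum is spent or it is empty.
  E poll() {
    int attempts = queues.size() + 1;
    while (attempts-- > 0) {
      Deque<E> head = queues.peekFirst();
      if (head == null) {
        return null; // no sub-queues registered
      }
      if (!head.isEmpty() && currentQuantum > 0) {
        currentQuantum--;
        return head.pollFirst();
      }
      // Quantum spent or sub-queue empty: rotate and grant a fresh quantum.
      queues.addLast(queues.pollFirst());
      currentQuantum = quantum;
    }
    return null; // every sub-queue was empty
  }
}

The quantum is what makes the rotation "fair": a busy sub-queue cannot starve the others because it is forced to the back after a bounded number of polls.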

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
    index 9856943..36977cd 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.html
    @@ -257,31 +257,23 @@
     
     
     private HRegion
    -MemStoreFlusher.FlushRegionEntry.region
    +MetricsRegionWrapperImpl.region
     
     
     private HRegion
    -RegionServerServices.PostOpenDeployContext.region
    +RegionServicesForStores.region
     
     
     private HRegion
     CompactSplit.CompactionRunner.region
     
     
    -protected HRegion
    -FlushPolicy.region
    -The region configured for this flush policy.
    +(package private) HRegion
    +RegionCoprocessorHost.region
    +The region
     
     
     
    -private HRegion
    -RegionServicesForStores.region
    -
    -
    -private HRegion
    -MetricsRegionWrapperImpl.region
    -
    -
     protected HRegion
     RegionSplitPolicy.region
     The region configured for this split policy.
    @@ -296,19 +288,27 @@
     HRegion.RegionScannerImpl.region
     
     
    -(package private) HRegion
    -RegionCoprocessorHost.region
    -The region
    -
    +private HRegion
    +RegionServerServices.PostOpenDeployContext.region
     
     
    -protected HRegion
    -HStore.region
    +private HRegion
    +MemStoreFlusher.FlushRegionEntry.region
     
     
    +protected HRegion
    +FlushPolicy.region
    +The region configured for this flush policy.
    +
    +
    +
     private HRegion
     BusyRegionSplitPolicy.region
     
    +
    +protected HRegion
    +HStore.region
    +
     
     
     
    @@ -563,14 +563,14 @@
     
     
 void
-HRegionServer.addRegion(HRegion region)
-
-
-void
 MutableOnlineRegions.addRegion(HRegion r)
 Add to online regions.


+
+void
+HRegionServer.addRegion(HRegion region)
+
     
 private RSRpcServices.RegionScannerHolder
 RSRpcServices.addScanner(String scannerName,
    @@ -624,57 +624,57 @@
     
     
 protected void
-FlushPolicy.configureForRegion(HRegion region)
-Upon construction, this method will be called with the region to be governed.
-
+KeyPrefixRegionSplitPolicy.configureForRegion(HRegion region)

 protected void
-FlushAllLargeStoresPolicy.configureForRegion(HRegion region)
+RegionSplitPolicy.configureForRegion(HRegion region)
+Upon construction, this method will be called with the region
+ to be governed.
+

 protected void
-ConstantSizeRegionSplitPolicy.configureForRegion(HRegion region)
+DelimitedKeyPrefixRegionSplitPolicy.configureForRegion(HRegion region)

 protected void
-RegionSplitPolicy.configureForRegion(HRegion region)
-Upon construction, this method will be called with the region
- to be governed.
-
+IncreasingToUpperBoundRegionSplitPolicy.configureForRegion(HRegion region)

 protected void
-FlushNonSloppyStoresFirstPolicy.configureForRegion(HRegion region)
+FlushAllLargeStoresPolicy.configureForRegion(HRegion region)

 protected void
-DelimitedKeyPrefixRegionSplitPolicy.configureForRegion(HRegion region)
+FlushPolicy.configureForRegion(HRegion region)
+Upon construction, this method will be called with the region to be governed.
+

 protected void
-KeyPrefixRegionSplitPolicy.configureForRegion(HRegion region)
+ConstantSizeRegionSplitPolicy.configureForRegion(HRegion region)

 protected void
-IncreasingToUpperBoundRegionSplitPolicy.configureForRegion(HRegion region)
+FlushNonSloppyStoresFirstPolicy.configureForRegion(HRegion region)

 protected void
 BusyRegionSplitPolicy.configureForRegion(HRegion region)

-static RegionSplitPolicy
-RegionSplitPolicy.create(HRegion region,
+static FlushPolicy
+FlushPolicyFactory.create(HRegion region,
     org.apache.hadoop.conf.Configuration conf)
-Create the RegionSplitPolicy configured for the given table.
+Create the FlushPolicy configured for the given table.

-static FlushPolicy
-FlushPolicyFactory.create(HRegion region,
+static RegionSplitPolicy
+RegionSplitPolicy.create(HRegion region,
     org.apache.hadoop.conf.Configuration conf)
-Create the FlushPolicy configured for the given table.
+Create the RegionSplitPolicy configured for the given table.
     
     
     
    @@ -766,13 +766,13 @@
     
     
 protected void
-ReversedRegionScannerImpl.initializeKVHeap(List<KeyValueScanner> scanners,
+HRegion.RegionScannerImpl.initializeKVHeap(List<KeyValueScanner> scanners,
     List<KeyValueScanner> joinedScanners,
     HRegion region)
     
     
     protected void
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html 
    b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
    index 1bacac1..93ad2d1 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -1297,7 +1297,7 @@

 pendingAssignQueue
 private final ArrayList<RegionStates.RegionStateNode> pendingAssignQueue

@@ -1306,7 +1306,7 @@
 assignQueueLock
 private final ReentrantLock assignQueueLock

@@ -1315,7 +1315,7 @@
 assignQueueFullCond
 private final Condition assignQueueFullCond

@@ -2187,7 +2187,7 @@
 loadMeta
 private void loadMeta() throws IOException

 Throws:
 IOException

@@ -2201,7 +2201,7 @@
 processofflineServersWithOnlineRegions
 private boolean processofflineServersWithOnlineRegions()
 Look at what is in meta and the list of servers that have checked in and make reconciliation.
 We cannot tell definitively the difference between a clean shutdown and a cluster that has
 been crashed down. At this stage of a Master startup, they look the same: they have the
@@ -2231,7 +2231,7 @@
 isFailoverCleanupDone
 public boolean isFailoverCleanupDone()
 Used by ServerCrashProcedure to make sure AssignmentManager has completed
 the failover cleanup before re-assigning regions of dead servers. So that
 when re-assignment happens, AssignmentManager has proper region states.
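The waiting relationship described here can be pictured with a simple latch. This is only a stand-in for the real ProcedureEvent mechanics, with an invented class name:

import java.util.concurrent.CountDownLatch;

// Sketch: re-assignment work blocks until failover cleanup has completed.
public class FailoverCleanupGate {
  private final CountDownLatch failoverCleanupDone = new CountDownLatch(1);

  boolean isFailoverCleanupDone() {
    return failoverCleanupDone.getCount() == 0;
  }

  void setFailoverCleanupDone() {
    failoverCleanupDone.countDown(); // wakes anything waiting to re-assign regions
  }

  void reassignRegionsOfDeadServer() throws InterruptedException {
    failoverCleanupDone.await(); // region states are only trustworthy after cleanup
    // ... proceed with re-assignment ...
  }
}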
@@ -2243,7 +2243,7 @@
 setFailoverCleanupDone
 public void setFailoverCleanupDone(boolean b)
 Used by ServerCrashProcedure tests to verify the ability to suspend the
 execution of the ServerCrashProcedure.

@@ -2254,7 +2254,7 @@
 getFailoverCleanupEvent
 public ProcedureEvent getFailoverCleanupEvent()

@@ -2263,7 +2263,7 @@
 checkFailoverCleanupCompleted
 private void checkFailoverCleanupCompleted(RegionInfo hri)
     throws PleaseHoldException
 Used to check if the failover cleanup is done.
 If not, we throw PleaseHoldException since we are rebuilding the RegionStates.

@@ -2281,7 +2281,7 @@
 getNumRegionsOpened
 public int getNumRegionsOpened()

@@ -2290,7 +2290,7 @@
 submitServerCrash
 public void submitServerCrash(ServerName serverName,
     boolean shouldSplitWal)

@@ -2300,7 +2300,7 @@
 offlineRegion
 public void offlineRegion(RegionInfo regionInfo)

@@ -2309,7 +2309,7 @@
 onlineRegion
 public void onlineRegion(RegionInfo regionInfo,
     ServerName serverName)

@@ -2319,7 +2319,7 @@
 getSnapShotOfAssignment
 public Map<ServerName, List<RegionInfo>> getSnapShotOfAssignment(Collection
     


    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/CellSet.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/CellSet.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/CellSet.html
    index 8b4fac0..e8eadec 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/CellSet.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/CellSet.html
@@ -134,14 +134,14 @@

 private void
-CellArrayImmutableSegment.reinitializeCellSet(int numOfCells,
+CellChunkImmutableSegment.reinitializeCellSet(int numOfCells,
     KeyValueScanner segmentScanner,
     CellSet oldCellSet,
     MemStoreCompactionStrategy.Action action)

 private void
-CellChunkImmutableSegment.reinitializeCellSet(int numOfCells,
+CellArrayImmutableSegment.reinitializeCellSet(int numOfCells,
     KeyValueScanner segmentScanner,
     CellSet oldCellSet,
     MemStoreCompactionStrategy.Action action)
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Chunk.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Chunk.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Chunk.html
    index 2e18485..dbcd3ed 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Chunk.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/Chunk.html
    @@ -212,11 +212,11 @@
     
     
     Chunk
    -ImmutableMemStoreLAB.getNewExternalChunk()
    +MemStoreLAB.getNewExternalChunk()
     
     
     Chunk
    -MemStoreLAB.getNewExternalChunk()
    +ImmutableMemStoreLAB.getNewExternalChunk()
     
     
     Chunk
    @@ -224,11 +224,11 @@
     
     
     Chunk
-ImmutableMemStoreLAB.getNewExternalJumboChunk(int size)
+MemStoreLAB.getNewExternalJumboChunk(int size)
     
     
     Chunk
-MemStoreLAB.getNewExternalJumboChunk(int size)
+ImmutableMemStoreLAB.getNewExternalJumboChunk(int size)
     
     
     Chunk
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/FlushLifeCycleTracker.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/FlushLifeCycleTracker.html
     
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/FlushLifeCycleTracker.html
    index 03deb52..1f1255c 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/FlushLifeCycleTracker.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/FlushLifeCycleTracker.html
    @@ -422,17 +422,17 @@
     
     
 void
-FlushRequester.requestFlush(HRegion region,
+MemStoreFlusher.requestFlush(HRegion r,
     boolean forceFlushAllStores,
-    FlushLifeCycleTracker tracker)
-Tell the listener the cache needs to be flushed.
-
+    FlushLifeCycleTracker tracker)

 void
-MemStoreFlusher.requestFlush(HRegion r,
+FlushRequester.requestFlush(HRegion region,
     boolean forceFlushAllStores,
-    FlushLifeCycleTracker tracker)
+    FlushLifeCycleTracker tracker)
+Tell the listener the cache needs to be flushed.
+
     
     
     private void
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/FlushRequestListener.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/FlushRequestListener.html
     
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/FlushRequestListener.html
    index d4b102b..29f89fb 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/FlushRequestListener.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/FlushRequestListener.html
    @@ -130,26 +130,26 @@
     
     
 void
-FlushRequester.registerFlushRequestListener(FlushRequestListener listener)
-Register a FlushRequestListener
+MemStoreFlusher.registerFlushRequestListener(FlushRequestListener listener)
+Register a MemstoreFlushListener


 void
-MemStoreFlusher.registerFlushRequestListener(FlushRequestListener listener)
-Register a MemstoreFlushListener
+FlushRequester.registerFlushRequestListener(FlushRequestListener listener)
+Register a FlushRequestListener


 boolean
-FlushRequester.unregisterFlushRequestListener(FlushRequestListener listener)
-Unregister the given FlushRequestListener
+MemStoreFlusher.unregisterFlushRequestListener(FlushRequestListener listener)
+Unregister the listener from MemstoreFlushListeners


 boolean
-MemStoreFlusher.unregisterFlushRequestListener(FlushRequestListener listener)
-Unregister the listener from MemstoreFlushListeners
+FlushRequester.unregisterFlushRequestListener(FlushRequestListener listener)
+Unregister the given 
    +Unregister the given 

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
    index 2939a56..681e263 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
    @@ -61,602 +61,608 @@
     053import 
    org.apache.hadoop.hbase.monitoring.TaskMonitor;
     054import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
     055import 
    org.apache.hadoop.hbase.util.FSUtils;
    -056import 
    org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
    -057import 
    org.apache.yetus.audience.InterfaceAudience;
    -058import org.slf4j.Logger;
    -059import org.slf4j.LoggerFactory;
    -060import 
    org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
    -061
    -062/**
    -063 * Distributes the task of log splitting 
    to the available region servers.
    -064 * Coordination happens via coordination 
    engine. For every log file that has to be split a
    -065 * task is created. SplitLogWorkers race 
    to grab a task.
    -066 *
    -067 * pSplitLogManager monitors the 
    tasks that it creates using the
    -068 * timeoutMonitor thread. If a task's 
    progress is slow then
    -069 * {@link 
    SplitLogManagerCoordination#checkTasks} will take away the
    -070 * task from the owner {@link 
    org.apache.hadoop.hbase.regionserver.SplitLogWorker}
    -071 * and the task will be up for grabs 
    again. When the task is done then it is
    -072 * deleted by SplitLogManager.
    -073 *
    -074 * pClients call {@link 
    #splitLogDistributed(Path)} to split a region server's
    -075 * log files. The caller thread waits in 
    this method until all the log files
    -076 * have been split.
    -077 *
    -078 * pAll the coordination calls 
    made by this class are asynchronous. This is mainly
    -079 * to help reduce response time seen by 
    the callers.
    -080 *
    -081 * pThere is race in this design 
    between the SplitLogManager and the
    -082 * SplitLogWorker. SplitLogManager might 
    re-queue a task that has in reality
    -083 * already been completed by a 
    SplitLogWorker. We rely on the idempotency of
    -084 * the log splitting task for 
    correctness.
    -085 *
    -086 * pIt is also assumed that every 
    log splitting task is unique and once
    -087 * completed (either with success or with 
    error) it will be not be submitted
    -088 * again. If a task is resubmitted then 
    there is a risk that old "delete task"
    -089 * can delete the re-submission.
    -090 */
-091@InterfaceAudience.Private
-092public class SplitLogManager {
-093  private static final Logger LOG = LoggerFactory.getLogger(SplitLogManager.class);
-094
-095  private final MasterServices server;
-096
-097  private final Configuration conf;
-098  private final ChoreService choreService;
-099
-100  public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min
-101
-102  private long unassignedTimeout;
-103  private long lastTaskCreateTime = Long.MAX_VALUE;
-104
-105  @VisibleForTesting
-106  final ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<>();
-107  private TimeoutMonitor timeoutMonitor;
-108
-109  private volatile Set<ServerName> deadWorkers = null;
-110  private final Object deadWorkersLock = new Object();
-111
-112  /**
-113   * Its OK to construct this object even when region-servers are not online. It does lookup the
-114   * orphan tasks in coordination engine but it doesn't block waiting for them to be done.
-115   * @param master the master services
-116   * @param conf the HBase configuration
-117   * @throws IOException
-118   */
-119  public SplitLogManager(MasterServices master, Configuration conf)
-120      throws IOException {
-121    this.server = master;
-122    this.conf = conf;
-123    this.choreService = new ChoreService(master.getServerName() + "_splitLogManager_");
-124    if (server.getCoordinatedStateManager() != null) {
-125      SplitLogManagerCoordination coordination = getSplitLogManagerCoordination();
-126      Set<String> failedDeletions = Collections.synchronizedSet(new HashSet<String>());
-127      SplitLogManagerDetails details = new SplitLogManagerDetails(tasks, master, failedDeletions);
-128      coordination.setDetails(details);
-129      coordination.init();
-130    }
-131    this.unassignedTimeout =
-132        conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT);
-133    this.timeoutMonitor =
-134        new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000),
-135            master);
-136    choreService.scheduleChore(timeoutMonitor);
-137  }
-138
-139  private SplitLogManagerCoordination getSplitLogManagerCoordination() {
-140    return 
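The race the class comment describes is easiest to see in miniature. The sketch below is illustrative only (SplitRaceSketch, claim, requeue and the State enum are invented names, not HBase API): workers claim a task with an atomic compare-and-set, the timeout monitor may put a slow task back up for grabs, and the resulting double execution is tolerated because splitting the same log twice is a no-op. The two configuration keys visible in the constructor above, hbase.splitlog.manager.unassigned.timeout and hbase.splitlog.manager.timeoutmonitor.period, bound how long that monitor waits before intervening.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class SplitRaceSketch {
  enum State { UNASSIGNED, OWNED, DONE }

  // One entry per WAL file to split, mirroring the manager's task map.
  static final ConcurrentMap<String, State> tasks = new ConcurrentHashMap<>();

  // A worker claims a task only if it is still UNASSIGNED (atomic CAS).
  static boolean claim(String logFile) {
    return tasks.replace(logFile, State.UNASSIGNED, State.OWNED);
  }

  // The timeout monitor may re-queue a slow task; the CAS above keeps the
  // ensuing double execution safe because splitting is idempotent.
  static void requeue(String logFile) {
    tasks.replace(logFile, State.OWNED, State.UNASSIGNED);
  }

  public static void main(String[] args) {
    tasks.put("wal-000001", State.UNASSIGNED);
    if (claim("wal-000001")) {
      // ... split the log file; a second split of the same file must be a no-op ...
      tasks.put("wal-000001", State.DONE);
    }
  }
}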
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
    index b8e6dfa..7b512ba 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
    @@ -28,8473 +28,8472 @@
 020import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL;
 021import static org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_KEY;
 022import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-023import java.io.EOFException;
-024import java.io.FileNotFoundException;
-025import java.io.IOException;
-026import java.io.InterruptedIOException;
-027import java.lang.reflect.Constructor;
-028import java.nio.ByteBuffer;
-029import java.nio.charset.StandardCharsets;
-030import java.text.ParseException;
-031import java.util.AbstractList;
-032import java.util.ArrayList;
-033import java.util.Arrays;
-034import java.util.Collection;
-035import java.util.Collections;
-036import java.util.HashMap;
-037import java.util.HashSet;
-038import java.util.Iterator;
-039import java.util.List;
-040import java.util.Map;
-041import java.util.Map.Entry;
-042import java.util.NavigableMap;
-043import java.util.NavigableSet;
-044import java.util.Optional;
-045import java.util.RandomAccess;
-046import java.util.Set;
-047import java.util.TreeMap;
-048import java.util.UUID;
-049import java.util.concurrent.Callable;
-050import java.util.concurrent.CompletionService;
-051import java.util.concurrent.ConcurrentHashMap;
-052import java.util.concurrent.ConcurrentMap;
-053import java.util.concurrent.ConcurrentSkipListMap;
-054import java.util.concurrent.ExecutionException;
-055import java.util.concurrent.ExecutorCompletionService;
-056import java.util.concurrent.ExecutorService;
-057import java.util.concurrent.Executors;
-058import java.util.concurrent.Future;
-059import java.util.concurrent.FutureTask;
-060import java.util.concurrent.ThreadFactory;
-061import java.util.concurrent.ThreadPoolExecutor;
-062import java.util.concurrent.TimeUnit;
-063import java.util.concurrent.TimeoutException;
-064import java.util.concurrent.atomic.AtomicBoolean;
-065import java.util.concurrent.atomic.AtomicInteger;
-066import java.util.concurrent.atomic.AtomicLong;
-067import java.util.concurrent.atomic.LongAdder;
-068import java.util.concurrent.locks.Lock;
-069import java.util.concurrent.locks.ReadWriteLock;
-070import java.util.concurrent.locks.ReentrantReadWriteLock;
-071import java.util.function.Function;
-072
-073import org.apache.hadoop.conf.Configuration;
-074import org.apache.hadoop.fs.FileStatus;
-075import org.apache.hadoop.fs.FileSystem;
-076import org.apache.hadoop.fs.LocatedFileStatus;
-077import org.apache.hadoop.fs.Path;
-078import org.apache.hadoop.hbase.Cell;
-079import org.apache.hadoop.hbase.CellBuilderType;
-080import org.apache.hadoop.hbase.CellComparator;
-081import org.apache.hadoop.hbase.CellComparatorImpl;
-082import org.apache.hadoop.hbase.CellScanner;
-083import org.apache.hadoop.hbase.CellUtil;
-084import org.apache.hadoop.hbase.CompareOperator;
-085import org.apache.hadoop.hbase.CompoundConfiguration;
-086import org.apache.hadoop.hbase.DoNotRetryIOException;
-087import org.apache.hadoop.hbase.DroppedSnapshotException;
-088import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-089import org.apache.hadoop.hbase.HConstants;
-090import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-091import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-092import org.apache.hadoop.hbase.HRegionInfo;
-093import org.apache.hadoop.hbase.KeyValue;
-094import org.apache.hadoop.hbase.KeyValueUtil;
-095import org.apache.hadoop.hbase.NamespaceDescriptor;
-096import org.apache.hadoop.hbase.NotServingRegionException;
-097import org.apache.hadoop.hbase.PrivateCellUtil;
-098import org.apache.hadoop.hbase.RegionTooBusyException;
-099import org.apache.hadoop.hbase.TableName;
-100import org.apache.hadoop.hbase.Tag;
-101import org.apache.hadoop.hbase.TagUtil;
-102import org.apache.hadoop.hbase.UnknownScannerException;
-103import org.apache.hadoop.hbase.client.Append;
-104import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-105import org.apache.hadoop.hbase.client.CompactionState;
-106import org.apache.hadoop.hbase.client.Delete;
-107import org.apache.hadoop.hbase.client.Durability;
-108import org.apache.hadoop.hbase.client.Get;
-109import org.apache.hadoop.hbase.client.Increment;
-110import org.apache.hadoop.hbase.client.IsolationLevel;
-111import org.apache.hadoop.hbase.client.Mutation;
-112import 
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WaitingForMultiMutationsObserver.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WaitingForMultiMutationsObserver.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WaitingForMultiMutationsObserver.html
    index a68c29b..e15e11b 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WaitingForMultiMutationsObserver.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WaitingForMultiMutationsObserver.html
    @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public static class TestFromClientSide3.WaitingForMultiMutationsObserver
    +public static class TestFromClientSide3.WaitingForMultiMutationsObserver
     extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
    org.apache.hadoop.hbase.coprocessor.RegionObserver
     
    @@ -261,7 +261,7 @@ implements 
    org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.had
     
     
     latch
    -finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CountDownLatch.html?is-external=true;
     title="class or interface in java.util.concurrent">CountDownLatch latch
    +finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CountDownLatch.html?is-external=true;
     title="class or interface in java.util.concurrent">CountDownLatch latch
     
     
     
    @@ -278,7 +278,7 @@ implements 
    org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.had
     
     
     WaitingForMultiMutationsObserver
    -publicWaitingForMultiMutationsObserver()
    +publicWaitingForMultiMutationsObserver()
     
     
     
    @@ -295,7 +295,7 @@ implements 
    org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.had
     
     
     getRegionObserver
    -publichttp://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
     title="class or interface in 
    java.util">Optionalorg.apache.hadoop.hbase.coprocessor.RegionObservergetRegionObserver()
    +publichttp://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
     title="class or interface in 
    java.util">Optionalorg.apache.hadoop.hbase.coprocessor.RegionObservergetRegionObserver()
     
     Specified by:
     getRegionObserverin 
    interfaceorg.apache.hadoop.hbase.coprocessor.RegionCoprocessor
    @@ -308,7 +308,7 @@ implements 
    org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.had
     
     
     postBatchMutate
    -publicvoidpostBatchMutate(org.apache.hadoop.hbase.coprocessor.ObserverContextorg.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironmentc,
    +publicvoidpostBatchMutate(org.apache.hadoop.hbase.coprocessor.ObserverContextorg.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironmentc,
     
    org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgressorg.apache.hadoop.hbase.client.MutationminiBatchOp)
      throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WaitingForScanObserver.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WaitingForScanObserver.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WaitingForScanObserver.html
    index 30d758c..675f3a3 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WaitingForScanObserver.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WaitingForScanObserver.html
    @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public static class TestFromClientSide3.WaitingForScanObserver
    +public static class TestFromClientSide3.WaitingForScanObserver
     extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
    org.apache.hadoop.hbase.coprocessor.RegionObserver
     
    @@ -267,7 +267,7 @@ implements 
    org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.had
     
     
     latch
    -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CountDownLatch.html?is-external=true;
     title="class or interface in java.util.concurrent">CountDownLatch latch
    +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CountDownLatch.html?is-external=true;
     title="class or interface in java.util.concurrent">CountDownLatch latch
     
     
     
    @@ -284,7 +284,7 @@ implements 
    org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, org.apache.had
     
     
     

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/export_control.html
    --
    diff --git a/export_control.html b/export_control.html
    index 5f38c0a..a457e51 100644
    --- a/export_control.html
    +++ b/export_control.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase  
       Export Control
    @@ -336,7 +336,7 @@ for more details.
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-27
    +  Last Published: 
    2018-01-28
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/hbase-annotations/checkstyle.html
    --
    diff --git a/hbase-annotations/checkstyle.html 
    b/hbase-annotations/checkstyle.html
    index e3b79d3..4602966 100644
    --- a/hbase-annotations/checkstyle.html
    +++ b/hbase-annotations/checkstyle.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Annotations  Checkstyle Results
     
    @@ -150,7 +150,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-27
    +  Last Published: 
    2018-01-28
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/hbase-annotations/dependencies.html
    --
    diff --git a/hbase-annotations/dependencies.html 
    b/hbase-annotations/dependencies.html
    index 5c0b7eb..4ff9ca6 100644
    --- a/hbase-annotations/dependencies.html
    +++ b/hbase-annotations/dependencies.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Annotations  Project Dependencies
     
    @@ -272,7 +272,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-27
    +  Last Published: 
    2018-01-28
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/hbase-annotations/dependency-convergence.html
    --
    diff --git a/hbase-annotations/dependency-convergence.html 
    b/hbase-annotations/dependency-convergence.html
    index fce9a76..5f180fd 100644
    --- a/hbase-annotations/dependency-convergence.html
    +++ b/hbase-annotations/dependency-convergence.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Annotations  Reactor Dependency 
    Convergence
     
    @@ -865,7 +865,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-27
    +  Last Published: 
    2018-01-28
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/hbase-annotations/dependency-info.html
    --
    diff --git a/hbase-annotations/dependency-info.html 
    b/hbase-annotations/dependency-info.html
    index bac658b..542ca0c 100644
    --- a/hbase-annotations/dependency-info.html
    +++ b/hbase-annotations/dependency-info.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Annotations  Dependency Information
     
    @@ -147,7 +147,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-27
    +  Last Published: 
    2018-01-28
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/hbase-annotations/dependency-management.html
    --
    diff --git a/hbase-annotations/dependency-management.html 
    b/hbase-annotations/dependency-management.html
    index a13777d..3db18fb 100644
    --- a/hbase-annotations/dependency-management.html
    +++ b/hbase-annotations/dependency-management.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Annotations  Project Dependency 
    Management
     
    @@ -810,7 +810,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-01-27
    +  Last Published: 
    2018-01-28
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/hbase-annotations/index.html
    --
    diff --git a/hbase-annotations/index.html b/hbase-annotations/index.html
    index f816f0f..c88bab2 100644
    --- 

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatRPCServices.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatRPCServices.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatRPCServices.html
    index 281c243..1a84ee1 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatRPCServices.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatRPCServices.html
    @@ -152,433 +152,461 @@
 144
 145  /**
 146   * Make puts to put the input value into each combination of row, family, and qualifier
-147   * @param rows
-148   * @param families
-149   * @param qualifiers
-150   * @param value
-151   * @return
-152   * @throws IOException
-153   */
-154  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
-155      byte[] value) throws IOException {
-156    Put put;
-157    ArrayList<Put> puts = new ArrayList<>();
-158
-159    for (int row = 0; row < rows.length; row++) {
-160      put = new Put(rows[row]);
-161      for (int fam = 0; fam < families.length; fam++) {
-162        for (int qual = 0; qual < qualifiers.length; qual++) {
-163          KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
-164          put.add(kv);
-165        }
-166      }
-167      puts.add(put);
-168    }
-169
-170    return puts;
-171  }
-172
-173  @AfterClass
-174  public static void tearDownAfterClass() throws Exception {
-175    TEST_UTIL.shutdownMiniCluster();
-176  }
-177
-178  @Before
-179  public void setupBeforeTest() throws Exception {
-180    disableSleeping();
-181  }
-182
-183  @After
-184  public void teardownAfterTest() throws Exception {
-185    disableSleeping();
-186  }
-187
-188  /**
-189   * Run the test callable when heartbeats are enabled/disabled. We expect all tests to only pass
-190   * when heartbeat messages are enabled (otherwise the test is pointless). When heartbeats are
-191   * disabled, the test should throw an exception.
-192   * @param testCallable
-193   * @throws InterruptedException
-194   */
-195  private void testImportanceOfHeartbeats(Callable<Void> testCallable) throws InterruptedException {
-196    HeartbeatRPCServices.heartbeatsEnabled = true;
-197
+147   */
+148  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
+149      byte[] value) throws IOException {
+150    Put put;
+151    ArrayList<Put> puts = new ArrayList<>();
+152
+153    for (int row = 0; row < rows.length; row++) {
+154      put = new Put(rows[row]);
+155      for (int fam = 0; fam < families.length; fam++) {
+156        for (int qual = 0; qual < qualifiers.length; qual++) {
+157          KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
+158          put.add(kv);
+159        }
+160      }
+161      puts.add(put);
+162    }
+163
+164    return puts;
+165  }
+166
+167  @AfterClass
+168  public static void tearDownAfterClass() throws Exception {
+169    TEST_UTIL.shutdownMiniCluster();
+170  }
+171
+172  @Before
+173  public void setupBeforeTest() throws Exception {
+174    disableSleeping();
+175  }
+176
+177  @After
+178  public void teardownAfterTest() throws Exception {
+179    disableSleeping();
+180  }
+181
+182  /**
+183   * Run the test callable when heartbeats are enabled/disabled. We expect all tests to only pass
+184   * when heartbeat messages are enabled (otherwise the test is pointless). When heartbeats are
+185   * disabled, the test should throw an exception.
+186   */
+187  private void testImportanceOfHeartbeats(Callable<Void> testCallable) throws InterruptedException {
+188    HeartbeatRPCServices.heartbeatsEnabled = true;
+189
+190    try {
+191      testCallable.call();
+192    } catch (Exception e) {
+193      fail("Heartbeat messages are enabled, exceptions should NOT be thrown. Exception trace:"
+194          + ExceptionUtils.getStackTrace(e));
+195    }
+196
+197    HeartbeatRPCServices.heartbeatsEnabled = false;
 198    try {
 199      testCallable.call();
 200    } catch (Exception e) {
-201      fail("Heartbeat messages are enabled, exceptions should NOT be thrown. Exception trace:"
-202          + ExceptionUtils.getStackTrace(e));
-203    }
-204
-205    HeartbeatRPCServices.heartbeatsEnabled = false;
-206    try {
-207      testCallable.call();
-208    } catch (Exception e) {
-209      return;
-210    } finally {
-211      HeartbeatRPCServices.heartbeatsEnabled = true;
-212    }
-213    fail("Heartbeats messages are disabled, an exception should be thrown. If an exception "
-214        + " is not thrown, the test case is not testing the 
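The structure above, assert success with the feature on, then assert failure with it off and restore state in a finally block, is a reusable test pattern. A self-contained sketch follows (FeatureFlag and assertNeededForSuccess are invented names standing in for HeartbeatRPCServices and the helper above):

import static org.junit.Assert.fail;
import java.util.concurrent.Callable;

public class TogglePatternSketch {
  static class FeatureFlag { static volatile boolean enabled = true; }

  static void assertNeededForSuccess(Callable<Void> body) throws InterruptedException {
    FeatureFlag.enabled = true;
    try {
      body.call();                  // must pass with the feature on
    } catch (Exception e) {
      fail("should not throw while enabled: " + e);
    }
    FeatureFlag.enabled = false;
    try {
      body.call();                  // must fail with the feature off
    } catch (Exception expected) {
      return;                       // the feature really was load-bearing
    } finally {
      FeatureFlag.enabled = true;   // always restore for later tests
    }
    fail("passed with the feature disabled; the test exercises nothing");
  }
}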

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
    index a132f94..26c39bc 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
    @@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
     
     
     @InterfaceAudience.Private
    -class MemStoreFlusher
    +class MemStoreFlusher
     extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     implements FlushRequester
     Thread that flushes cache on request
    @@ -438,7 +438,7 @@ implements 
     
     LOG
    -private static finalorg.slf4j.Logger LOG
    +private static finalorg.slf4j.Logger LOG
     
     
     
    @@ -447,7 +447,7 @@ implements 
     
     conf
    -privateorg.apache.hadoop.conf.Configuration conf
    +privateorg.apache.hadoop.conf.Configuration conf
     
     
     
    @@ -456,7 +456,7 @@ implements 
     
     flushQueue
    -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/BlockingQueue.html?is-external=true;
     title="class or interface in java.util.concurrent">BlockingQueueMemStoreFlusher.FlushQueueEntry 
    flushQueue
    +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/BlockingQueue.html?is-external=true;
     title="class or interface in java.util.concurrent">BlockingQueueMemStoreFlusher.FlushQueueEntry 
    flushQueue
     
     
     
    @@ -465,7 +465,7 @@ implements 
     
     regionsInQueue
    -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in java.util">MapRegion,MemStoreFlusher.FlushRegionEntry 
    regionsInQueue
    +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in java.util">MapRegion,MemStoreFlusher.FlushRegionEntry 
    regionsInQueue
     
     
     
    @@ -474,7 +474,7 @@ implements 
     
     wakeupPending
    -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true;
     title="class or interface in java.util.concurrent.atomic">AtomicBoolean wakeupPending
    +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true;
     title="class or interface in java.util.concurrent.atomic">AtomicBoolean wakeupPending
     
     
     
    @@ -483,7 +483,7 @@ implements 
     
     threadWakeFrequency
    -private finallong threadWakeFrequency
    +private finallong threadWakeFrequency
     
     
     
    @@ -492,7 +492,7 @@ implements 
     
     server
    -private finalHRegionServer server
    +private finalHRegionServer server
     
     
     
    @@ -501,7 +501,7 @@ implements 
     
     lock
    -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html?is-external=true;
     title="class or interface in 
    java.util.concurrent.locks">ReentrantReadWriteLock lock
    +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html?is-external=true;
     title="class or interface in 
    java.util.concurrent.locks">ReentrantReadWriteLock lock
     
     
     
    @@ -510,7 +510,7 @@ implements 
     
     blockSignal
    -private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object blockSignal
    +private finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object blockSignal
     
     
     
    @@ -519,7 +519,7 @@ implements 
     
     blockingWaitTime
    -privatelong blockingWaitTime
    +privatelong blockingWaitTime
     
     
     
    @@ -528,7 +528,7 @@ implements 
     
     updatesBlockedMsHighWater
    -private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
     title="class or interface in java.util.concurrent.atomic">LongAdder updatesBlockedMsHighWater
    +private finalhttp://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true;
     title="class or interface in java.util.concurrent.atomic">LongAdder updatesBlockedMsHighWater
     
     
     
    @@ -537,7 +537,7 @@ implements 
     
     flushHandlers
    -private finalMemStoreFlusher.FlushHandler[] flushHandlers
    +private finalMemStoreFlusher.FlushHandler[] flushHandlers
     
     
     
    @@ -546,7 +546,7 @@ implements 
     
     flushRequestListeners
    -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListFlushRequestListener flushRequestListeners
    +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListFlushRequestListener flushRequestListeners
     
     
     
    @@ -563,7 +563,7 @@ implements 
     
     MemStoreFlusher
    -publicMemStoreFlusher(org.apache.hadoop.conf.Configurationconf,
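The field list above (flushQueue, regionsInQueue, wakeupPending, flushHandlers) outlines the design: a blocking queue feeds the flush handler threads while a map keeps each region from being enqueued twice. A toy sketch of that pairing, with invented names rather than the HBase API:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;

public class FlushQueueSketch {
  private final LinkedBlockingQueue<String> flushQueue = new LinkedBlockingQueue<>();
  private final Map<String, Boolean> regionsInQueue = new ConcurrentHashMap<>();

  // Request a flush; duplicate requests for an already-queued region are dropped.
  public void requestFlush(String region) {
    if (regionsInQueue.putIfAbsent(region, Boolean.TRUE) == null) {
      flushQueue.add(region);
    }
  }

  // Worker loop, one per handler thread: take the next region and flush it.
  public void run() throws InterruptedException {
    while (true) {
      String region = flushQueue.take();
      regionsInQueue.remove(region);
      // ... flush the region's memstore here ...
    }
  }
}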
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.ServerNotYetRunningRsExecutor.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.ServerNotYetRunningRsExecutor.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.ServerNotYetRunningRsExecutor.html
    index f1db5ca..d8515d7 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.ServerNotYetRunningRsExecutor.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.ServerNotYetRunningRsExecutor.html
    @@ -32,813 +32,820 @@
     024import static org.junit.Assert.fail;
     025
     026import java.io.IOException;
    -027import java.net.SocketTimeoutException;
    -028import java.util.NavigableMap;
    -029import java.util.Random;
    -030import java.util.Set;
    -031import java.util.SortedSet;
-032import java.util.concurrent.ConcurrentSkipListMap;
-033import java.util.concurrent.ConcurrentSkipListSet;
-034import java.util.concurrent.ExecutionException;
-035import java.util.concurrent.Executors;
-036import java.util.concurrent.Future;
-037import java.util.concurrent.ScheduledExecutorService;
-038import java.util.concurrent.TimeUnit;
-039
-040import org.apache.hadoop.conf.Configuration;
-041import org.apache.hadoop.hbase.CategoryBasedTimeout;
-042import org.apache.hadoop.hbase.DoNotRetryIOException;
-043import org.apache.hadoop.hbase.HBaseTestingUtility;
-044import org.apache.hadoop.hbase.NotServingRegionException;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.TableName;
-047import org.apache.hadoop.hbase.client.RegionInfo;
-048import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-049import org.apache.hadoop.hbase.client.RetriesExhaustedException;
-050import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-051import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-052import org.apache.hadoop.hbase.master.MasterServices;
-053import org.apache.hadoop.hbase.master.RegionState.State;
-054import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-055import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-056import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
-057import org.apache.hadoop.hbase.procedure2.Procedure;
-058import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-059import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-060import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-061import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-062import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-063import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-064import org.apache.hadoop.hbase.testclassification.MasterTests;
-065import org.apache.hadoop.hbase.testclassification.MediumTests;
-066import org.apache.hadoop.hbase.util.Bytes;
-067import org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.ipc.RemoteException;
-069import org.junit.After;
-070import org.junit.Before;
-071import org.junit.Ignore;
-072import org.junit.Rule;
-073import org.junit.Test;
-074import org.junit.experimental.categories.Category;
-075import org.junit.rules.ExpectedException;
-076import org.junit.rules.TestName;
-077import org.junit.rules.TestRule;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-092
-093@Category({MasterTests.class, MediumTests.class})
-094public class TestAssignmentManager {
-095  private static 

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.html
    index 3068847..a1ce1bf 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.html
    @@ -23,83 +23,1033 @@
 015 * See the License for the specific language governing permissions and
 016 * limitations under the License.
 017 */
-018
-019package org.apache.hadoop.hbase.client;
-020
-021import static org.junit.Assert.fail;
-022
-023import org.apache.hadoop.hbase.HBaseTestingUtility;
-024import org.apache.hadoop.hbase.HConstants;
-025import org.apache.hadoop.hbase.ServerName;
-026import org.apache.hadoop.hbase.testclassification.ClientTests;
-027import org.apache.hadoop.hbase.testclassification.MediumTests;
-028import org.junit.AfterClass;
-029import org.junit.BeforeClass;
-030import org.junit.Test;
-031import org.junit.experimental.categories.Category;
-032
-033import java.net.UnknownHostException;
-034
-035/**
-036 * Tests that we fail fast when hostname resolution is not working and do not cache
-037 * unresolved InetSocketAddresses.
-038 */
-039@Category({MediumTests.class, ClientTests.class})
-040public class TestConnectionImplementation {
-041  private static HBaseTestingUtility testUtil;
-042  private static ConnectionImplementation conn;
-043
-044  @BeforeClass
-045  public static void setupBeforeClass() throws Exception {
-046    testUtil = HBaseTestingUtility.createLocalHTU();
-047    testUtil.startMiniCluster();
-048    conn = (ConnectionImplementation) testUtil.getConnection();
-049  }
-050
-051  @AfterClass
-052  public static void teardownAfterClass() throws Exception {
-053    conn.close();
-054    testUtil.shutdownMiniCluster();
-055  }
-056
-057  @Test(expected = UnknownHostException.class)
-058  public void testGetAdminBadHostname() throws Exception {
-059    // verify that we can get an instance with the cluster hostname
-060    ServerName master = testUtil.getHBaseCluster().getMaster().getServerName();
-061    try {
-062      conn.getAdmin(master);
-063    } catch (UnknownHostException uhe) {
-064      fail("Obtaining admin to the cluster master should have succeeded");
-065    }
-066
-067    // test that we fail to get a client to an unresolvable hostname, which
-068    // means it won't be cached
-069    ServerName badHost =
-070        ServerName.valueOf("unknownhost.invalid:" + HConstants.DEFAULT_MASTER_PORT,
-071        System.currentTimeMillis());
-072    conn.getAdmin(badHost);
-073    fail("Obtaining admin to unresolvable hostname should have failed");
-074  }
-075
-076  @Test(expected = UnknownHostException.class)
-077  public void testGetClientBadHostname() throws Exception {
-078    // verify that we can get an instance with the cluster hostname
-079    ServerName rs = testUtil.getHBaseCluster().getRegionServer(0).getServerName();
-080    try {
-081      conn.getClient(rs);
-082    } catch (UnknownHostException uhe) {
-083      fail("Obtaining client to the cluster regionserver should have succeeded");
-084    }
-085
-086    // test that we fail to get a client to an unresolvable hostname, which
-087    // means it won't be cached
-088    ServerName badHost =
-089        ServerName.valueOf("unknownhost.invalid:" + HConstants.DEFAULT_REGIONSERVER_PORT,
-090        System.currentTimeMillis());
-091    conn.getAdmin(badHost);
-092    fail("Obtaining client to unresolvable hostname should have failed");
-093  }
-094}
    +018package org.apache.hadoop.hbase.client;
    +019
+020import static org.junit.Assert.assertEquals;
+021import static org.junit.Assert.assertFalse;
+022import static org.junit.Assert.assertNotNull;
+023import static org.junit.Assert.assertNull;
+024import static org.junit.Assert.assertTrue;
+025
+026import java.io.IOException;
+027import java.lang.reflect.Field;
+028import java.lang.reflect.Modifier;
+029import java.net.SocketTimeoutException;
+030import java.util.ArrayList;
+031import java.util.List;
+032import java.util.concurrent.ExecutorService;
+033import java.util.concurrent.SynchronousQueue;
+034import java.util.concurrent.ThreadLocalRandom;
+035import java.util.concurrent.ThreadPoolExecutor;
+036import java.util.concurrent.TimeUnit;
+037import java.util.concurrent.atomic.AtomicBoolean;
+038import java.util.concurrent.atomic.AtomicInteger;
+039import java.util.concurrent.atomic.AtomicReference;
+040import org.apache.hadoop.conf.Configuration;
+041import org.apache.hadoop.hbase.CategoryBasedTimeout;
+042import org.apache.hadoop.hbase.Cell;
+043import 
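The javadoc of the removed version read "Tests that we fail fast when hostname resolution is not working and do not cache unresolved InetSocketAddresses." The underlying idea can be shown with plain JDK calls; the sketch below is illustrative only (ResolveCheckSketch and connectTo are invented names):

import java.net.InetSocketAddress;

public class ResolveCheckSketch {
  static InetSocketAddress connectTo(String host, int port) {
    InetSocketAddress addr = new InetSocketAddress(host, port); // resolves eagerly
    if (addr.isUnresolved()) {
      // Throwing here (the real client throws UnknownHostException) keeps a
      // bad address from being cached and retried forever.
      throw new IllegalArgumentException("unresolvable host: " + host);
    }
    return addr;
  }

  public static void main(String[] args) {
    System.out.println(connectTo("localhost", 16000));
    // connectTo("unknownhost.invalid", 16000) would throw instead of caching
  }
}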
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
    --
    diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html 
    b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
    index 75f9a0f..e18b27b 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
    @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public class TestWALSplit
    +public class TestWALSplit
     extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     Testing WAL splitting code.
     
    @@ -293,8 +293,8 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     private static void
    -appendCompactionEvent(org.apache.hadoop.hbase.wal.WALProvider.Writerw,
    - org.apache.hadoop.hbase.HRegionInfohri,
    +appendCompactionEvent(org.apache.hadoop.hbase.wal.WALProvider.Writerw,
    + org.apache.hadoop.hbase.client.RegionInfohri,
      http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String[]inputs,
      http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in 
    java.lang">Stringoutput)
     
    @@ -601,7 +601,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     LOG
    -private static finalorg.slf4j.Logger LOG
    +private static finalorg.slf4j.Logger LOG
     
     
     
    @@ -610,7 +610,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     conf
    -private staticorg.apache.hadoop.conf.Configuration conf
    +private staticorg.apache.hadoop.conf.Configuration conf
     
     
     
    @@ -619,7 +619,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     fs
    -privateorg.apache.hadoop.fs.FileSystem fs
    +privateorg.apache.hadoop.fs.FileSystem fs
     
     
     
    @@ -628,7 +628,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     TEST_UTIL
    -protected static finalHBaseTestingUtility TEST_UTIL
    +protected static finalHBaseTestingUtility TEST_UTIL
     
     
     
    @@ -637,7 +637,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     HBASEDIR
    -privateorg.apache.hadoop.fs.Path HBASEDIR
    +privateorg.apache.hadoop.fs.Path HBASEDIR
     
     
     
    @@ -646,7 +646,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     HBASELOGDIR
    -privateorg.apache.hadoop.fs.Path HBASELOGDIR
    +privateorg.apache.hadoop.fs.Path HBASELOGDIR
     
     
     
    @@ -655,7 +655,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     WALDIR
    -privateorg.apache.hadoop.fs.Path WALDIR
    +privateorg.apache.hadoop.fs.Path WALDIR
     
     
     
    @@ -664,7 +664,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     OLDLOGDIR
    -privateorg.apache.hadoop.fs.Path OLDLOGDIR
    +privateorg.apache.hadoop.fs.Path OLDLOGDIR
     
     
     
    @@ -673,7 +673,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     CORRUPTDIR
    -privateorg.apache.hadoop.fs.Path CORRUPTDIR
    +privateorg.apache.hadoop.fs.Path CORRUPTDIR
     
     
     
    @@ -682,7 +682,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     TABLEDIR
    -privateorg.apache.hadoop.fs.Path TABLEDIR
    +privateorg.apache.hadoop.fs.Path TABLEDIR
     
     
     
    @@ -691,7 +691,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     NUM_WRITERS
    -private static finalint NUM_WRITERS
    +private static finalint NUM_WRITERS
     
     See Also:
     Constant
     Field Values
    @@ -704,7 +704,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     ENTRIES
    -private static finalint ENTRIES
    +private static finalint ENTRIES
     
     See Also:
     Constant
     Field Values
    @@ -717,7 +717,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     FILENAME_BEING_SPLIT
    -private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String FILENAME_BEING_SPLIT
    +private static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String FILENAME_BEING_SPLIT
     
     See Also:
     Constant
     Field Values
    @@ -730,7 +730,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     TABLE_NAME
    -private static finalorg.apache.hadoop.hbase.TableName TABLE_NAME
    +private static finalorg.apache.hadoop.hbase.TableName TABLE_NAME
     
     
     
    @@ -739,7 +739,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     FAMILY
    -private static finalbyte[] FAMILY
    +private static finalbyte[] FAMILY
     
     
     
    @@ -748,7 +748,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     QUALIFIER
    -private static 

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
    index 113c94a..808fdf0 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.html
    @@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public class RSProcedureDispatcher
    +public class RSProcedureDispatcher
     extends RemoteProcedureDispatcherMasterProcedureEnv,ServerName
     implements ServerListener
A remote procedure dispatcher for regionservers.
    @@ -187,6 +187,10 @@ implements private static interface
     RSProcedureDispatcher.RemoteProcedureResolver
     
    +
    +static class
    +RSProcedureDispatcher.ServerOperation
    +
     
     
     
    @@ -307,7 +311,7 @@ implements 
     void
     splitAndResolveOperation(ServerNameserverName,
    -http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
     title="class or interface in java.util">SetRemoteProcedureDispatcher.RemoteProcedureremoteProcedures,
    +http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
     title="class or interface in java.util">SetRemoteProcedureDispatcher.RemoteProcedureoperations,
     RSProcedureDispatcher.RemoteProcedureResolverresolver)
     Fetches RemoteProcedureDispatcher.RemoteOperations
      from the given remoteProcedures and groups them by class of the 
    returned operation.
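That grouping step is ordinary collection work and is easy to show in isolation; the sketch below uses a plain Object stand-in for RemoteProcedureDispatcher.RemoteOperation rather than the HBase types:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupByClassSketch {
  // Group heterogeneous operations by their concrete class, as
  // splitAndResolveOperation does for remote operations.
  static Map<Class<?>, List<Object>> groupByClass(List<Object> operations) {
    Map<Class<?>, List<Object>> grouped = new HashMap<>();
    for (Object op : operations) {
      grouped.computeIfAbsent(op.getClass(), k -> new ArrayList<>()).add(op);
    }
    return grouped;
  }
}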
    @@ -363,7 +367,7 @@ implements 
     
     LOG
    -private static finalorg.slf4j.Logger LOG
    +private static finalorg.slf4j.Logger LOG
     
     
     
    @@ -372,7 +376,7 @@ implements 
     
     RS_RPC_STARTUP_WAIT_TIME_CONF_KEY
    -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY
    +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String RS_RPC_STARTUP_WAIT_TIME_CONF_KEY
     
     See Also:
     Constant
     Field Values
    @@ -385,7 +389,7 @@ implements 
     
     DEFAULT_RS_RPC_STARTUP_WAIT_TIME
    -private static finalint DEFAULT_RS_RPC_STARTUP_WAIT_TIME
    +private static finalint DEFAULT_RS_RPC_STARTUP_WAIT_TIME
     
     See Also:
     Constant
     Field Values
    @@ -398,7 +402,7 @@ implements 
     
     RS_VERSION_WITH_EXEC_PROCS
    -private static finalint RS_VERSION_WITH_EXEC_PROCS
    +private static finalint RS_VERSION_WITH_EXEC_PROCS
     
     See Also:
     Constant
     Field Values
    @@ -411,7 +415,7 @@ implements 
     
     master
    -protected finalMasterServices master
    +protected finalMasterServices master
     
     
     
    @@ -420,7 +424,7 @@ implements 
     
     rsStartupWaitTime
    -private finallong rsStartupWaitTime
    +private finallong rsStartupWaitTime
     
     
     
    @@ -429,7 +433,7 @@ implements 
     
     procedureEnv
    -privateMasterProcedureEnv procedureEnv
    +privateMasterProcedureEnv procedureEnv
     
     
     
    @@ -446,7 +450,7 @@ implements 
     
     RSProcedureDispatcher
    -publicRSProcedureDispatcher(MasterServicesmaster)
    +publicRSProcedureDispatcher(MasterServicesmaster)
     
     
     
    @@ -463,7 +467,7 @@ implements 
     
     start
    -publicbooleanstart()
    +publicbooleanstart()
     
     Overrides:
     startin
     classRemoteProcedureDispatcherMasterProcedureEnv,ServerName
    @@ -476,7 +480,7 @@ implements 
     
     stop
    -publicbooleanstop()
    +publicbooleanstop()
     
     Overrides:
     stopin
     classRemoteProcedureDispatcherMasterProcedureEnv,ServerName
    @@ -489,7 +493,7 @@ implements 
     
     remoteDispatch
    -protectedvoidremoteDispatch(ServerNameserverName,
    +protectedvoidremoteDispatch(ServerNameserverName,
       http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
     title="class or interface in java.util">SetRemoteProcedureDispatcher.RemoteProcedureremoteProcedures)
     
     Specified by:
    @@ -503,7 +507,7 @@ implements 
     
     abortPendingOperations
    -protectedvoidabortPendingOperations(ServerNameserverName,
    +protectedvoidabortPendingOperations(ServerNameserverName,
       http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
     title="class or interface in java.util">SetRemoteProcedureDispatcher.RemoteProcedureoperations)
     
     Specified by:
    @@ -517,7 +521,7 @@ implements 
     
     serverAdded
    -publicvoidserverAdded(ServerNameserverName)
    +publicvoidserverAdded(ServerNameserverName)
     Description copied from 
    interface:ServerListener
     The server has joined the cluster.
     
    @@ -534,7 +538,7 @@ implements 
     
     serverRemoved
    -publicvoidserverRemoved(ServerNameserverName)
    +publicvoidserverRemoved(ServerNameserverName)
     Description copied from 
    interface:ServerListener
     The server was removed from the cluster.
     
    @@ -551,8 +555,8 @@ implements 
     
     splitAndResolveOperation
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html 
    b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
    index 6cdb45b..d7f94ed 100644
    --- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
    +++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
    @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -static class HBaseFsck.WorkItemRegion
    +static class HBaseFsck.WorkItemRegion
     extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     implements http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Callable.html?is-external=true;
     title="class or interface in java.util.concurrent">Callablehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Void
     Contact a region server and get all information from 
    it
    @@ -226,7 +226,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
     
     
     hbck
    -private finalHBaseFsck hbck
    +private finalHBaseFsck hbck
     
     
     
    @@ -235,7 +235,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
     
     
     rsinfo
    -private finalServerName rsinfo
    +private finalServerName rsinfo
     
     
     
    @@ -244,7 +244,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
     
     
     errors
    -private finalHBaseFsck.ErrorReporter 
    errors
    +private finalHBaseFsck.ErrorReporter 
    errors
     
     
     
    @@ -253,7 +253,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
     
     
     connection
    -private finalClusterConnection connection
    +private finalClusterConnection connection
     
     
     
    @@ -270,7 +270,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
     
     
     WorkItemRegion
    -WorkItemRegion(HBaseFsckhbck,
    +WorkItemRegion(HBaseFsckhbck,
    ServerNameinfo,
    HBaseFsck.ErrorReportererrors,
    ClusterConnectionconnection)
    @@ -290,7 +290,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
     
     
     call
    -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Voidcall()
    +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true;
     title="class or interface in java.lang">Voidcall()
       throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     
     Specified by:
    @@ -306,7 +306,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/concurren
     
     
     filterRegions
    -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfofilterRegions(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInforegions)
    +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInfofilterRegions(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionInforegions)
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html 
    b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
    index 554de30..c535257 100644
    --- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
    +++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.html
    @@ -545,7 +545,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     startMillis
     
     
    -private ClusterStatus
    +private ClusterMetrics
     status
     
     
    @@ -1577,7 +1577,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     status
    -privateClusterStatus status
    +privateClusterMetrics status
     
     
     
    @@ -2068,7 +2068,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     cmp
    -static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
     title="class or interface in java.util">ComparatorHBaseFsck.HbckInfo cmp
    +static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
     title="class or interface in java.util">ComparatorHBaseFsck.HbckInfo cmp
     
     
     
    @@ -3157,7 +3157,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.h
     
     
     checkIntegrity
    -http://docs.oracle.com/javase/8/docs/api/java/util/SortedMap.html?is-external=true;
     title="class or interface in 

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
    index 1318b95..841130a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
    @@ -55,1647 +55,1615 @@
     047import 
    org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
     048import 
    org.apache.hadoop.hbase.coprocessor.MasterObserver;
     049import 
    org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
    -050import 
    org.apache.hadoop.hbase.coprocessor.ObserverContext;
    -051import 
    org.apache.hadoop.hbase.master.locking.LockProcedure;
    -052import 
    org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
    -053import 
    org.apache.hadoop.hbase.metrics.MetricRegistry;
    -054import 
    org.apache.hadoop.hbase.net.Address;
    -055import 
    org.apache.hadoop.hbase.procedure2.LockType;
    -056import 
    org.apache.hadoop.hbase.procedure2.LockedResource;
    -057import 
    org.apache.hadoop.hbase.procedure2.Procedure;
    -058import 
    org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
    -059import 
    org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
    -060import 
    org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    -061import 
    org.apache.hadoop.hbase.security.User;
    -062import 
    org.apache.yetus.audience.InterfaceAudience;
    -063import org.slf4j.Logger;
    -064import org.slf4j.LoggerFactory;
    -065
    -066/**
    -067 * Provides the coprocessor framework and 
    environment for master oriented
    -068 * operations.  {@link HMaster} interacts 
    with the loaded coprocessors
    -069 * through this class.
    -070 */
    -071@InterfaceAudience.Private
    -072public class MasterCoprocessorHost
    -073extends 
    CoprocessorHostMasterCoprocessor, MasterCoprocessorEnvironment {
    -074
    -075  private static final Logger LOG = 
    LoggerFactory.getLogger(MasterCoprocessorHost.class);
    -076
    -077  /**
    -078   * Coprocessor environment extension 
    providing access to master related
    -079   * services.
    -080   */
-081  private static class MasterEnvironment extends BaseEnvironment<MasterCoprocessor>
    -082  implements 
    MasterCoprocessorEnvironment {
    -083private final boolean 
    supportGroupCPs;
    -084private final MetricRegistry 
    metricRegistry;
    -085private final MasterServices 
    services;
    -086
    -087public MasterEnvironment(final 
    MasterCoprocessor impl, final int priority, final int seq,
    -088final Configuration conf, final 
    MasterServices services) {
    -089  super(impl, priority, seq, conf);
    -090  this.services = services;
    -091  supportGroupCPs = 
    !useLegacyMethod(impl.getClass(),
    -092  "preBalanceRSGroup", 
    ObserverContext.class, String.class);
    -093  this.metricRegistry =
    -094  
    MetricsCoprocessor.createRegistryForMasterCoprocessor(impl.getClass().getName());
    -095}
    -096
    -097@Override
    -098public ServerName getServerName() {
    -099  return 
    this.services.getServerName();
    -100}
    -101
    -102@Override
    -103public Connection getConnection() {
    -104  return new 
    SharedConnection(this.services.getConnection());
    -105}
    -106
    -107@Override
    -108public Connection 
    createConnection(Configuration conf) throws IOException {
    -109  return 
    this.services.createConnection(conf);
    -110}
    -111
    -112@Override
    -113public MetricRegistry 
    getMetricRegistryForMaster() {
    -114  return metricRegistry;
    -115}
    -116
    -117@Override
    -118public void shutdown() {
    -119  super.shutdown();
    -120  
    MetricsCoprocessor.removeRegistry(this.metricRegistry);
    -121}
    -122  }
    -123
    -124  /**
    -125   * Special version of MasterEnvironment 
    that exposes MasterServices for Core Coprocessors only.
    -126   * Temporary hack until Core 
    Coprocessors are integrated into Core.
    -127   */
    -128  private static class 
    MasterEnvironmentForCoreCoprocessors extends MasterEnvironment
    -129  implements HasMasterServices {
    -130private final MasterServices 
    masterServices;
    -131
    -132public 
    MasterEnvironmentForCoreCoprocessors(final MasterCoprocessor impl, final int 
    priority,
    -133final int seq, final 
    Configuration conf, final MasterServices services) {
    -134  super(impl, priority, seq, conf, 
    services);
    -135  this.masterServices = services;
    -136}
    -137
    -138/**
    -139 * @return An instance of 
    MasterServices, an object NOT for general user-space Coprocessor
    -140 * consumption.
    -141 */
    -142public MasterServices 
    getMasterServices() {
    -143  return this.masterServices;
    -144}
    -145  }
    -146
    -147  private MasterServices 
    masterServices;
    -148
    -149  public MasterCoprocessorHost(final 
    MasterServices services, final Configuration conf) {
    -150super(services);
    -151this.conf = conf;
    -152
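The MasterCoprocessorHost source above follows a host/environment pattern: every loaded coprocessor gets its own MasterEnvironment, which bundles the shared MasterServices with per-coprocessor state (a MetricRegistry) that is released again on shutdown. A stripped-down, hypothetical sketch of that pattern (none of these names are real HBase types):

import java.util.ArrayList;
import java.util.List;

interface Coprocessor {
  void start(Environment env);
}

final class Environment {
  // Stands in for the per-coprocessor MetricRegistry created in the real code.
  private final String metricsScope;

  Environment(Class<?> impl) {
    this.metricsScope = impl.getName();
  }

  void shutdown() {
    // Release per-coprocessor resources (metrics registry, connections, ...)
  }
}

final class Host {
  private final List<Environment> environments = new ArrayList<>();

  void load(Coprocessor cp) {
    Environment env = new Environment(cp.getClass());
    environments.add(env);
    cp.start(env);
  }

  void shutdown() {
    environments.forEach(Environment::shutdown);
  }
}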

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.html
    index 34052c0..22fc187 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.html
@@ -291,6 +291,6 @@ extends
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.html
    index 5df4ddf..0d02b33 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.html
@@ -512,6 +512,6 @@ implements
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.html 
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.html
    index d8bd757..fbfb495 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionInfoComparator.html
    @@ -289,6 +289,6 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html 
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
    index b75289b..1dcd59c 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
    @@ -669,6 +669,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.html 
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.html
    index db19c36..e26d48f 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/ServerAndLoad.html
    @@ -406,6 +406,6 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.BalanceInfo.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.BalanceInfo.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.BalanceInfo.html
    index f745355..1526e98 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.BalanceInfo.html
    +++ 
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
    index bbd91b8..4f76302 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
    @@ -56,1641 +56,1753 @@
     048import 
    java.util.concurrent.atomic.AtomicBoolean;
     049import 
    java.util.concurrent.atomic.AtomicInteger;
     050import 
    java.util.concurrent.atomic.AtomicLong;
    -051
    -052import 
    org.apache.hadoop.conf.Configuration;
    -053import 
    org.apache.hadoop.hbase.CallQueueTooBigException;
    -054import 
    org.apache.hadoop.hbase.CategoryBasedTimeout;
    -055import org.apache.hadoop.hbase.Cell;
    -056import 
    org.apache.hadoop.hbase.HConstants;
    -057import 
    org.apache.hadoop.hbase.HRegionInfo;
    -058import 
    org.apache.hadoop.hbase.HRegionLocation;
    -059import 
    org.apache.hadoop.hbase.RegionLocations;
    -060import 
    org.apache.hadoop.hbase.ServerName;
    -061import 
    org.apache.hadoop.hbase.TableName;
    -062import 
    org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
    -063import 
    org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
    -064import 
    org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
    -065import 
    org.apache.hadoop.hbase.client.backoff.ServerStatistics;
    -066import 
    org.apache.hadoop.hbase.client.coprocessor.Batch;
    -067import 
    org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    -068import 
    org.apache.hadoop.hbase.testclassification.ClientTests;
    -069import 
    org.apache.hadoop.hbase.testclassification.MediumTests;
    -070import 
    org.apache.hadoop.hbase.util.Bytes;
    -071import 
    org.apache.hadoop.hbase.util.Threads;
    -072import org.junit.Assert;
    -073import org.junit.BeforeClass;
    -074import org.junit.Ignore;
    -075import org.junit.Rule;
    -076import org.junit.Test;
    -077import 
    org.junit.experimental.categories.Category;
    -078import org.junit.rules.TestRule;
    -079import org.mockito.Mockito;
    -080import org.slf4j.Logger;
    -081import org.slf4j.LoggerFactory;
    -082
    -083@Category({ClientTests.class, 
    MediumTests.class})
    -084public class TestAsyncProcess {
    -085  @Rule public final TestRule timeout = 
    CategoryBasedTimeout.builder().withTimeout(this.getClass()).
    -086  
    withLookingForStuckThread(true).build();
    -087  private static final Logger LOG = 
    LoggerFactory.getLogger(TestAsyncProcess.class);
    -088  private static final TableName 
    DUMMY_TABLE =
    -089  TableName.valueOf("DUMMY_TABLE");
    -090  private static final byte[] 
    DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
    -091  private static final byte[] 
    DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
    -092  private static final byte[] 
    DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
    -093  private static final byte[] FAILS = 
    Bytes.toBytes("FAILS");
    -094  private static final Configuration CONF 
    = new Configuration();
    -095  private static final 
    ConnectionConfiguration CONNECTION_CONFIG =
    -096  new 
    ConnectionConfiguration(CONF);
    -097  private static final ServerName sn = 
    ServerName.valueOf("s1,1,1");
    -098  private static final ServerName sn2 = 
    ServerName.valueOf("s2,2,2");
    -099  private static final ServerName sn3 = 
    ServerName.valueOf("s3,3,3");
    -100  private static final HRegionInfo hri1 
    =
    -101  new HRegionInfo(DUMMY_TABLE, 
    DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
    -102  private static final HRegionInfo hri2 
    =
    -103  new HRegionInfo(DUMMY_TABLE, 
    DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
    -104  private static final HRegionInfo hri3 
    =
    -105  new HRegionInfo(DUMMY_TABLE, 
    DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
    -106  private static final HRegionLocation 
    loc1 = new HRegionLocation(hri1, sn);
    -107  private static final HRegionLocation 
    loc2 = new HRegionLocation(hri2, sn);
    -108  private static final HRegionLocation 
    loc3 = new HRegionLocation(hri3, sn2);
    -109
    -110  // Replica stuff
    -111  private static final RegionInfo hri1r1 
    = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
    -112  private static final RegionInfo hri1r2 
    = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
    -113  private static final RegionInfo hri2r1 
    = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
    -114  private static final RegionLocations 
    hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
    -115  new HRegionLocation(hri1r1, sn2), 
    new HRegionLocation(hri1r2, sn3));
    -116  private static final RegionLocations 
    hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
    -117  new HRegionLocation(hri2r1, 
    sn3));
    -118  private static final RegionLocations 
    hrls3 =
    -119  new RegionLocations(new 
    HRegionLocation(hri3, sn3), null);
    -120
    -121  private 
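The TestAsyncProcess fixtures above pair a primary region with replicas derived via RegionReplicaUtil, then group their locations into a RegionLocations. A condensed sketch of the same construction, assuming the HBase 2.x client classes (RegionInfoBuilder replaces the HRegionInfo constructors used in the old test; names are illustrative):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;

// Sketch: derive replica ids 1 and 2 from a primary region and group the
// three locations, mirroring the hri1/hri1r1/hri1r2 fixtures above.
final class ReplicaFixtures {
  static RegionLocations locationsFor(ServerName s1, ServerName s2, ServerName s3) {
    RegionInfo primary =
        RegionInfoBuilder.newBuilder(TableName.valueOf("DUMMY_TABLE")).build();
    RegionInfo replica1 = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
    RegionInfo replica2 = RegionReplicaUtil.getRegionInfoForReplica(primary, 2);
    return new RegionLocations(
        new HRegionLocation(primary, s1),
        new HRegionLocation(replica1, s2),
        new HRegionLocation(replica2, s3));
  }
}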

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
    index 02ff7cb..fa07e37 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
    @@ -400,17 +400,17 @@
     
     
     
    -private 
    org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableListHStoreFile
    +private 
    org.apache.hbase.thirdparty.com.google.common.collect.ImmutableListHStoreFile
     StripeStoreFileManager.State.allCompactedFilesCached
     
     
    -org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableListHStoreFile
    +org.apache.hbase.thirdparty.com.google.common.collect.ImmutableListHStoreFile
     StripeStoreFileManager.State.allFilesCached
     Cached list of all files in the structure, to return from 
    some calls
     
     
     
-private org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<HStoreFile>
+private org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList<HStoreFile>
 DefaultStoreFileManager.compactedfiles
 List of compacted files inside this store that need to be excluded in reads
  because further new reads will be using only the newly created files out of compaction.
    @@ -439,7 +439,7 @@
     StripeStoreFileManager.CompactionOrFlushMergeCopy.l0Results
     
     
-org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<HStoreFile>
+org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList<HStoreFile>
 StripeStoreFileManager.State.level0Files
     Level 0.
     
    @@ -469,13 +469,13 @@
     DefaultStoreFileManager.storeFileComparator
     
     
-private org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<HStoreFile>
+private org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList<HStoreFile>
 DefaultStoreFileManager.storefiles
     List of store files inside this store.
     
     
     
-ArrayList<org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<HStoreFile>>
+ArrayList<org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList<HStoreFile>>
 StripeStoreFileManager.State.stripeFiles
     Files by stripe.
     
    @@ -531,21 +531,21 @@
     
     
     
-org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableCollection<HStoreFile>
+org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection<HStoreFile>
 StripeStoreFileManager.clearCompactedFiles()
 
 
-org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableCollection<HStoreFile>
+org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection<HStoreFile>
 DefaultStoreFileManager.clearFiles()
 
 
-org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableCollection<HStoreFile>
+org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection<HStoreFile>
 StoreFileManager.clearFiles()
 Clears all the files currently in use and returns them.
     Clears all the files currently in use and returns 
    them.
     
     
     
-org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableCollection<HStoreFile>
+org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection<HStoreFile>
 StripeStoreFileManager.clearFiles()
     
     
    @@ -555,7 +555,7 @@
     
     
     
-org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableCollection<HStoreFile>
+org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollection<HStoreFile>
 HStore.close()
 Close all the readers. We don't need to worry about subsequent requests because the Region holds
  a write lock that will prevent any more reads or writes.
    @@ -582,7 +582,7 @@
     
     
 private Collection<HStoreFile>
-StripeStoreFileManager.findExpiredFiles(org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList<HStoreFile> stripe,
+StripeStoreFileManager.findExpiredFiles(org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList<HStoreFile> stripe,
 long maxTs,
 List<HStoreFile> filesCompacting,
 Collection<HStoreFile> expiredStoreFiles)
    @@ -699,7 +699,7 @@
 StripeStoreFileManager.CompactionOrFlushMergeCopy.getStripeCopy(int index)
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
     title="class or interface in 
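The whole hunk above is a mechanical package relocation: the Guava collection types move from HBase's old in-tree shade prefix, org.apache.hadoop.hbase.shaded.*, to the prefix used by the separate hbase-thirdparty artifact, org.apache.hbase.thirdparty.*. A minimal sketch of what changes at a call site (the class name RelocationExample is illustrative):

// Before (Guava shaded inside hbase itself):
// import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
// After (relocated Guava from the hbase-thirdparty artifact):
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;

final class RelocationExample {
  // Same Guava API, different package prefix after relocation.
  static ImmutableList<String> names() {
    return ImmutableList.of("a", "b");
  }
}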
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/package-frame.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/package-frame.html 
    b/devapidocs/org/apache/hadoop/hbase/package-frame.html
    index 7120e99..066606f 100644
    --- a/devapidocs/org/apache/hadoop/hbase/package-frame.html
    +++ b/devapidocs/org/apache/hadoop/hbase/package-frame.html
    @@ -48,7 +48,7 @@
     AsyncMetaTableAccessor.MetaTableScanResultConsumer
     AuthUtil
     BaseConfigurable
    -ByteBufferCell
    +ByteBufferExtendedCell
     ByteBufferKeyOnlyKeyValue
     ByteBufferKeyValue
     ByteBufferTag
    @@ -106,22 +106,22 @@
     NoTagsByteBufferKeyValue
     NoTagsKeyValue
     PrivateCellUtil
    -PrivateCellUtil.EmptyByteBufferCell
    +PrivateCellUtil.EmptyByteBufferExtendedCell
     PrivateCellUtil.EmptyCell
    -PrivateCellUtil.FirstOnRowByteBufferCell
    +PrivateCellUtil.FirstOnRowByteBufferExtendedCell
     PrivateCellUtil.FirstOnRowCell
    -PrivateCellUtil.FirstOnRowColByteBufferCell
    +PrivateCellUtil.FirstOnRowColByteBufferExtendedCell
     PrivateCellUtil.FirstOnRowColCell
    -PrivateCellUtil.FirstOnRowColTSByteBufferCell
    +PrivateCellUtil.FirstOnRowColTSByteBufferExtendedCell
     PrivateCellUtil.FirstOnRowColTSCell
     PrivateCellUtil.FirstOnRowDeleteFamilyCell
    -PrivateCellUtil.LastOnRowByteBufferCell
    +PrivateCellUtil.LastOnRowByteBufferExtendedCell
     PrivateCellUtil.LastOnRowCell
    -PrivateCellUtil.LastOnRowColByteBufferCell
    +PrivateCellUtil.LastOnRowColByteBufferExtendedCell
     PrivateCellUtil.LastOnRowColCell
    -PrivateCellUtil.TagRewriteByteBufferCell
    +PrivateCellUtil.TagRewriteByteBufferExtendedCell
     PrivateCellUtil.TagRewriteCell
    -PrivateCellUtil.ValueAndTagRewriteByteBufferCell
    +PrivateCellUtil.ValueAndTagRewriteByteBufferExtendedCell
     PrivateCellUtil.ValueAndTagRewriteCell
     RawCellBuilderFactory
     RegionLoad
    @@ -155,7 +155,7 @@
     
     Enums
     
    -Cell.DataType
    +Cell.Type
     CellBuilderType
     ClusterMetrics.Option
     CompareOperator
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/package-summary.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/package-summary.html 
    b/devapidocs/org/apache/hadoop/hbase/package-summary.html
    index 2ab894e..24faa7c 100644
    --- a/devapidocs/org/apache/hadoop/hbase/package-summary.html
    +++ b/devapidocs/org/apache/hadoop/hbase/package-summary.html
    @@ -289,7 +289,7 @@
     
     
     
    -ByteBufferCell
    +ByteBufferExtendedCell
     
     This class is a server side extension to the Cell interface.
     
    @@ -305,7 +305,7 @@
     
     ByteBufferKeyValue
     
    -This Cell is an implementation of ByteBufferCell where the data resides 
    in
    +This Cell is an implementation of ByteBufferExtendedCell where the data 
    resides in
      off heap/ on heap ByteBuffer
     
     
    @@ -627,7 +627,7 @@
     
     
     
    -PrivateCellUtil.EmptyByteBufferCell
    +PrivateCellUtil.EmptyByteBufferExtendedCell
     
     These cells are used in reseeks/seeks to improve the read 
    performance.
     
    @@ -639,7 +639,7 @@
     
     
     
    -PrivateCellUtil.FirstOnRowByteBufferCell
    +PrivateCellUtil.FirstOnRowByteBufferExtendedCell
     
     
     
    @@ -647,7 +647,7 @@
     
     
     
    -PrivateCellUtil.FirstOnRowColByteBufferCell
    +PrivateCellUtil.FirstOnRowColByteBufferExtendedCell
     
     
     
    @@ -655,7 +655,7 @@
     
     
     
    -PrivateCellUtil.FirstOnRowColTSByteBufferCell
    +PrivateCellUtil.FirstOnRowColTSByteBufferExtendedCell
     
     
     
    @@ -667,7 +667,7 @@
     
     
     
    -PrivateCellUtil.LastOnRowByteBufferCell
    +PrivateCellUtil.LastOnRowByteBufferExtendedCell
     
     
     
    @@ -675,7 +675,7 @@
     
     
     
    -PrivateCellUtil.LastOnRowColByteBufferCell
    +PrivateCellUtil.LastOnRowColByteBufferExtendedCell
     
     
     
    @@ -683,7 +683,7 @@
     
     
     
    -PrivateCellUtil.TagRewriteByteBufferCell
    +PrivateCellUtil.TagRewriteByteBufferExtendedCell
     
     
     
    @@ -693,7 +693,7 @@
     
     
     
    -PrivateCellUtil.ValueAndTagRewriteByteBufferCell
    +PrivateCellUtil.ValueAndTagRewriteByteBufferExtendedCell
     
     
     
    @@ -868,7 +868,7 @@
     
     
     
    -Cell.DataType
    +Cell.Type
     
     The valid types for user to build the cell.
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/package-tree.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/package-tree.html
    index 370110e..2dfb84f 100644
    --- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
    @@ -108,35 +108,35 @@
     org.apache.hadoop.hbase.AsyncMetaTableAccessor.MetaTableScanResultConsumer
     (implements org.apache.hadoop.hbase.client.AdvancedScanResultConsumer)
     org.apache.hadoop.hbase.AuthUtil
     org.apache.hadoop.hbase.BaseConfigurable (implements 
    org.apache.hadoop.conf.Configurable)
    -org.apache.hadoop.hbase.ByteBufferCell 
    (implements org.apache.hadoop.hbase.Cell)
    +org.apache.hadoop.hbase.ByteBufferExtendedCell (implements 
    org.apache.hadoop.hbase.ExtendedCell)
     
     org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue
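Alongside the ByteBufferCell to ByteBufferExtendedCell rename, this hunk records the rename of Cell.DataType to Cell.Type, the enum users pass when building cells. A small usage sketch of the builder API with the renamed enum, assuming the HBase 2.x CellBuilder classes:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a user-built Put cell names its type via Cell.Type
// (formerly Cell.DataType).
final class CellTypeExample {
  static Cell put(byte[] row) {
    return CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(row)
        .setFamily(Bytes.toBytes("f"))
        .setQualifier(Bytes.toBytes("q"))
        .setType(Cell.Type.Put)
        .setValue(Bytes.toBytes("v"))
        .build();
  }
}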
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
    index 3400507..2baa140 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowCell.html
    @@ -28,3034 +28,2926 @@
     020import static 
    org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
     021import static 
    org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
     022
    -023import 
    com.google.common.annotations.VisibleForTesting;
    -024
    -025import java.io.DataOutput;
    -026import java.io.DataOutputStream;
    -027import java.io.IOException;
    -028import java.io.OutputStream;
    -029import java.math.BigDecimal;
    -030import java.nio.ByteBuffer;
    -031import java.util.ArrayList;
    -032import java.util.Iterator;
    -033import java.util.List;
    -034import java.util.Optional;
    -035
    -036import 
    org.apache.hadoop.hbase.KeyValue.Type;
    -037import 
    org.apache.hadoop.hbase.filter.ByteArrayComparable;
    -038import 
    org.apache.hadoop.hbase.io.HeapSize;
    -039import 
    org.apache.hadoop.hbase.io.TagCompressionContext;
    -040import 
    org.apache.hadoop.hbase.io.util.Dictionary;
    -041import 
    org.apache.hadoop.hbase.io.util.StreamUtils;
    -042import 
    org.apache.hadoop.hbase.util.ByteBufferUtils;
    -043import 
    org.apache.hadoop.hbase.util.ByteRange;
    -044import 
    org.apache.hadoop.hbase.util.Bytes;
    -045import 
    org.apache.hadoop.hbase.util.ClassSize;
    -046import 
    org.apache.yetus.audience.InterfaceAudience;
    -047
    -048
    -049/**
    -050 * Utility methods helpful slinging 
    {@link Cell} instances. It has more powerful and
    -051 * rich set of APIs than those in {@link 
    CellUtil} for internal usage.
    -052 */
    -053@InterfaceAudience.Private
    -054public final class PrivateCellUtil {
    -055
    -056  /**
    -057   * Private constructor to keep this 
    class from being instantiated.
    -058   */
    -059  private PrivateCellUtil() {
    -060  }
    +023import java.io.DataOutput;
    +024import java.io.DataOutputStream;
    +025import java.io.IOException;
    +026import java.io.OutputStream;
    +027import java.math.BigDecimal;
    +028import java.nio.ByteBuffer;
    +029import java.util.ArrayList;
    +030import java.util.Iterator;
    +031import java.util.List;
    +032import java.util.Optional;
    +033import 
    org.apache.hadoop.hbase.KeyValue.Type;
    +034import 
    org.apache.hadoop.hbase.filter.ByteArrayComparable;
    +035import 
    org.apache.hadoop.hbase.io.HeapSize;
    +036import 
    org.apache.hadoop.hbase.io.TagCompressionContext;
    +037import 
    org.apache.hadoop.hbase.io.util.Dictionary;
    +038import 
    org.apache.hadoop.hbase.io.util.StreamUtils;
    +039import 
    org.apache.hadoop.hbase.util.ByteBufferUtils;
    +040import 
    org.apache.hadoop.hbase.util.ByteRange;
    +041import 
    org.apache.hadoop.hbase.util.Bytes;
    +042import 
    org.apache.hadoop.hbase.util.ClassSize;
    +043import 
    org.apache.yetus.audience.InterfaceAudience;
    +044
    +045import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    +046
    +047/**
    +048 * Utility methods helpful slinging 
    {@link Cell} instances. It has more powerful and
    +049 * rich set of APIs than those in {@link 
    CellUtil} for internal usage.
    +050 */
    +051@InterfaceAudience.Private
    +052public final class PrivateCellUtil {
    +053
    +054  /**
    +055   * Private constructor to keep this 
    class from being instantiated.
    +056   */
    +057  private PrivateCellUtil() {
    +058  }
    +059
    +060  /*** ByteRange 
    ***/
     061
    -062  /*** ByteRange 
    ***/
    -063
    -064  public static ByteRange 
    fillRowRange(Cell cell, ByteRange range) {
    -065return range.set(cell.getRowArray(), 
    cell.getRowOffset(), cell.getRowLength());
    -066  }
    -067
    -068  public static ByteRange 
    fillFamilyRange(Cell cell, ByteRange range) {
    -069return 
    range.set(cell.getFamilyArray(), cell.getFamilyOffset(), 
    cell.getFamilyLength());
    -070  }
    -071
    -072  public static ByteRange 
    fillQualifierRange(Cell cell, ByteRange range) {
    -073return 
    range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
    -074  cell.getQualifierLength());
    -075  }
    -076
    -077  public static ByteRange 
    fillValueRange(Cell cell, ByteRange range) {
    -078return 
    range.set(cell.getValueArray(), cell.getValueOffset(), 
    cell.getValueLength());
    -079  }
    -080
    -081  public static ByteRange 
    fillTagRange(Cell cell, ByteRange range) {
    -082return range.set(cell.getTagsArray(), 
    cell.getTagsOffset(), cell.getTagsLength());
    -083  }
    +062  public static ByteRange 
    fillRowRange(Cell cell, ByteRange range) {
    +063return range.set(cell.getRowArray(), 
    cell.getRowOffset(), cell.getRowLength());
    +064  }
    +065
    +066  public static ByteRange 
    fillFamilyRange(Cell cell, ByteRange range) {
    +067return 
    range.set(cell.getFamilyArray(), cell.getFamilyOffset(), 
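The fillRowRange/fillFamilyRange/fillQualifierRange helpers above point a caller-supplied ByteRange at a cell's backing bytes without copying. A minimal usage sketch; note PrivateCellUtil is @InterfaceAudience.Private, so this pattern is for internal code only, and SimpleMutableByteRange is one concrete ByteRange implementation:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

// Sketch: reuse one ByteRange to view a cell's row bytes, zero-copy.
final class ByteRangeExample {
  static ByteRange rowOf(Cell cell) {
    ByteRange range = new SimpleMutableByteRange();
    return PrivateCellUtil.fillRowRange(cell, range);
  }
}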
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
    deleted file mode 100644
    index 2b0ff55..000
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesClientImpl.html
    +++ /dev/null
    @@ -1,185 +0,0 @@
    -http://www.w3.org/TR/html4/loose.dtd;>
    -
    -
    -Source code
    -
    -
    -
    -
    -001/*
    -002 *
    -003 * Licensed to the Apache Software 
    Foundation (ASF) under one
    -004 * or more contributor license 
    agreements.  See the NOTICE file
    -005 * distributed with this work for 
    additional information
    -006 * regarding copyright ownership.  The 
    ASF licenses this file
    -007 * to you under the Apache License, 
    Version 2.0 (the
    -008 * "License"); you may not use this file 
    except in compliance
    -009 * with the License.  You may obtain a 
    copy of the License at
    -010 *
    -011 * 
    http://www.apache.org/licenses/LICENSE-2.0
    -012 *
    -013 * Unless required by applicable law or 
    agreed to in writing, software
    -014 * distributed under the License is 
    distributed on an "AS IS" BASIS,
    -015 * WITHOUT WARRANTIES OR CONDITIONS OF 
    ANY KIND, either express or implied.
    -016 * See the License for the specific 
    language governing permissions and
    -017 * limitations under the License.
    -018 */
    -019package 
    org.apache.hadoop.hbase.replication;
    -020
    -021import 
    org.apache.commons.lang3.NotImplementedException;
    -022import 
    org.apache.hadoop.conf.Configuration;
    -023import 
    org.apache.hadoop.hbase.Abortable;
    -024import 
    org.apache.hadoop.hbase.HConstants;
    -025import 
    org.apache.yetus.audience.InterfaceAudience;
    -026import 
    org.apache.hadoop.hbase.client.Result;
    -027import 
    org.apache.hadoop.hbase.client.ResultScanner;
    -028import 
    org.apache.hadoop.hbase.client.Scan;
    -029import 
    org.apache.hadoop.hbase.client.Table;
    -030import 
    org.apache.zookeeper.KeeperException;
    -031
    -032import java.io.IOException;
    -033import java.util.HashSet;
    -034import java.util.List;
    -035import java.util.Set;
    -036
    -037/**
    -038 * Implements the ReplicationQueuesClient 
    interface on top of the Replication Table. It utilizes
    -039 * the ReplicationTableBase to access the 
    Replication Table.
    -040 */
    -041@InterfaceAudience.Private
    -042public class 
    TableBasedReplicationQueuesClientImpl extends ReplicationTableBase
    -043  implements ReplicationQueuesClient {
    -044
    -045  public 
    TableBasedReplicationQueuesClientImpl(ReplicationQueuesClientArguments args)
    -046throws IOException {
    -047super(args.getConf(), 
    args.getAbortable());
    -048  }
    -049  public 
    TableBasedReplicationQueuesClientImpl(Configuration conf,
    -050  
     Abortable abortable) throws IOException {
    -051super(conf, abortable);
    -052  }
    -053
    -054  @Override
    -055  public void init() throws 
    ReplicationException{
    -056// no-op
    -057  }
    -058
    -059  @Override
-060  public List<String> getListOfReplicators() {
    -061return 
    super.getListOfReplicators();
    -062  }
    -063
    -064  @Override
-065  public List<String> getLogsInQueue(String serverName, String queueId) {
    -066return 
    super.getLogsInQueue(serverName, queueId);
    -067  }
    -068
    -069  @Override
-070  public List<String> getAllQueues(String serverName) {
    -071return 
    super.getAllQueues(serverName);
    -072  }
    -073
    -074  @Override
-075  public Set<String> getAllWALs() {
-076Set<String> allWals = new HashSet<>();
    -077ResultScanner allQueues = null;
    -078try (Table replicationTable = 
    getOrBlockOnReplicationTable()) {
    -079  allQueues = 
    replicationTable.getScanner(new Scan());
    -080  for (Result queue : allQueues) {
    -081for (String wal : 
    readWALsFromResult(queue)) {
    -082  allWals.add(wal);
    -083}
    -084  }
    -085} catch (IOException e) {
    -086  String errMsg = "Failed getting all 
    WAL's in Replication Table";
    -087  abortable.abort(errMsg, e);
    -088} finally {
    -089  if (allQueues != null) {
    -090allQueues.close();
    -091  }
    -092}
    -093return allWals;
    -094  }
    -095
    -096  @Override
    -097  public int 
    getHFileRefsNodeChangeVersion() throws KeeperException {
    -098// TODO
    -099throw new 
    NotImplementedException(HConstants.NOT_IMPLEMENTED);
    -100  }
    -101
    -102  @Override
-103  public List<String> getAllPeersFromHFileRefsQueue() throws KeeperException {
    -104// TODO
    -105throw new 
    NotImplementedException(HConstants.NOT_IMPLEMENTED);
    -106  }
    -107
    -108  @Override
-109  public List<String> getReplicableHFiles(String peerId) throws KeeperException {
    -110// TODO
    -111throw new 
    NotImplementedException(HConstants.NOT_IMPLEMENTED);
    -112  }
    -113}
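The deleted getAllWALs() above scans the entire replication table and unions the WAL names found in each row. A condensed sketch of that pattern, with the scanner closed via try-with-resources; readWALs is a hypothetical stand-in for the class's readWALsFromResult helper:

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

// Sketch: one logical queue per row; accumulate every WAL name into a set.
final class ScanAllWals {
  static Set<String> allWals(Table replicationTable) throws IOException {
    Set<String> wals = new HashSet<>();
    try (ResultScanner queues = replicationTable.getScanner(new Scan())) {
      for (Result queue : queues) {
        wals.addAll(readWALs(queue));
      }
    }
    return wals;
  }

  // Stub standing in for readWALsFromResult(Result).
  private static Set<String> readWALs(Result row) {
    return new HashSet<>();
  }
}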
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    index f7fbfbf..88ebcbc 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    @@ -34,1583 +34,1583 @@
     026import java.io.IOException;
     027import java.util.ArrayList;
     028import java.util.Arrays;
    -029import java.util.Collection;
    -030import java.util.Collections;
    -031import java.util.EnumSet;
    -032import java.util.HashMap;
    -033import java.util.List;
    -034import java.util.Map;
    -035import java.util.Optional;
    -036import java.util.Set;
    -037import 
    java.util.concurrent.CompletableFuture;
    -038import java.util.concurrent.TimeUnit;
    -039import 
    java.util.concurrent.atomic.AtomicReference;
    -040import java.util.function.BiConsumer;
    -041import java.util.function.Function;
    -042import java.util.regex.Pattern;
    -043import java.util.stream.Collectors;
    -044import java.util.stream.Stream;
    -045import org.apache.commons.io.IOUtils;
    -046import 
    org.apache.hadoop.conf.Configuration;
    -047import 
    org.apache.hadoop.hbase.AsyncMetaTableAccessor;
    -048import 
    org.apache.hadoop.hbase.ClusterMetrics.Option;
    -049import 
    org.apache.hadoop.hbase.ClusterStatus;
    -050import 
    org.apache.hadoop.hbase.HConstants;
    -051import 
    org.apache.hadoop.hbase.HRegionLocation;
    -052import 
    org.apache.hadoop.hbase.MetaTableAccessor;
    -053import 
    org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
    -054import 
    org.apache.hadoop.hbase.NamespaceDescriptor;
    -055import 
    org.apache.hadoop.hbase.RegionLoad;
    -056import 
    org.apache.hadoop.hbase.RegionLocations;
    -057import 
    org.apache.hadoop.hbase.ServerName;
    -058import 
    org.apache.hadoop.hbase.TableExistsException;
    -059import 
    org.apache.hadoop.hbase.TableName;
    -060import 
    org.apache.hadoop.hbase.TableNotDisabledException;
    -061import 
    org.apache.hadoop.hbase.TableNotEnabledException;
    -062import 
    org.apache.hadoop.hbase.TableNotFoundException;
    -063import 
    org.apache.hadoop.hbase.UnknownRegionException;
    -064import 
    org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
    -065import 
    org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
    -066import 
    org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
    -067import 
    org.apache.hadoop.hbase.client.Scan.ReadType;
    -068import 
    org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
    -069import 
    org.apache.hadoop.hbase.client.replication.TableCFs;
    -070import 
    org.apache.hadoop.hbase.client.security.SecurityCapability;
    -071import 
    org.apache.hadoop.hbase.exceptions.DeserializationException;
    -072import 
    org.apache.hadoop.hbase.ipc.HBaseRpcController;
    -073import 
    org.apache.hadoop.hbase.quotas.QuotaFilter;
    -074import 
    org.apache.hadoop.hbase.quotas.QuotaSettings;
    -075import 
    org.apache.hadoop.hbase.quotas.QuotaTableUtil;
    -076import 
    org.apache.hadoop.hbase.replication.ReplicationException;
    -077import 
    org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    -078import 
    org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
    -079import 
    org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
    -080import 
    org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
    -081import 
    org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
    -082import 
    org.apache.hadoop.hbase.util.Bytes;
    -083import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -084import 
    org.apache.hadoop.hbase.util.ForeignExceptionUtil;
    -085import 
    org.apache.yetus.audience.InterfaceAudience;
    -086import org.slf4j.Logger;
    -087import org.slf4j.LoggerFactory;
    -088
    -089import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
    -090import 
    org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer;
    -091import 
    org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
    -092import 
    org.apache.hadoop.hbase.shaded.io.netty.util.TimerTask;
    -093import 
    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -094import 
    org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    -095import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
    -096import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
    -097import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
    -098import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
    -099import 
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/org/apache/hadoop/hbase/RegionMetricsBuilder.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/RegionMetricsBuilder.html 
    b/devapidocs/org/apache/hadoop/hbase/RegionMetricsBuilder.html
    new file mode 100644
    index 000..0fc40b0
    --- /dev/null
    +++ b/devapidocs/org/apache/hadoop/hbase/RegionMetricsBuilder.html
    @@ -0,0 +1,850 @@
    +http://www.w3.org/TR/html4/loose.dtd;>
    +
    +
    +
    +
    +
    +RegionMetricsBuilder (Apache HBase 3.0.0-SNAPSHOT API)
    +
    +
    +
    +
    +
    +var methods = 
    {"i0":10,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":9,"i21":9,"i22":9};
    +var tabs = {65535:["t0","All Methods"],1:["t1","Static 
    Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
    +var altColor = "altColor";
    +var rowColor = "rowColor";
    +var tableTab = "tableTab";
    +var activeTableTab = "activeTableTab";
    +
    +
    +JavaScript is disabled on your browser.
    +
    +
    +
    +
    +
    +Skip navigation links
    +
    +
    +
    +
    +Overview
    +Package
    +Class
    +Use
    +Tree
    +Deprecated
    +Index
    +Help
    +
    +
    +
    +
    +PrevClass
    +NextClass
    +
    +
    +Frames
    +NoFrames
    +
    +
    +AllClasses
    +
    +
    +
    +
    +
    +
    +
    +Summary:
    +Nested|
    +Field|
    +Constr|
    +Method
    +
    +
    +Detail:
    +Field|
    +Constr|
    +Method
    +
    +
    +
    +
    +
    +
    +
    +
    +org.apache.hadoop.hbase
    +Class 
    RegionMetricsBuilder
    +
    +
    +
    +http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">java.lang.Object
    +
    +
    +org.apache.hadoop.hbase.RegionMetricsBuilder
    +
    +
    +
    +
    +
    +
    +
    +
    +@InterfaceAudience.Private
    +public final class RegionMetricsBuilder
+extends java.lang.Object
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Nested Class Summary
    +
    +Nested Classes
    +
    +Modifier and Type
    +Class and Description
    +
    +
    +private static class
    +RegionMetricsBuilder.RegionMetricsImpl
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Field Summary
    +
    +Fields
    +
    +Modifier and Type
    +Field and Description
    +
    +
    +private Size
    +bloomFilterSize
    +
    +
    +private long
    +compactedCellCount
    +
    +
    +private long
    +compactingCellCount
    +
    +
    +private long
    +completedSequenceId
    +
    +
    +private float
    +dataLocality
    +
    +
    +private long
    +filteredReadRequestCount
    +
    +
    +private Size
    +indexSize
    +
    +
    +private long
    +lastMajorCompactionTimestamp
    +
    +
    +private Size
    +memStoreSize
    +
    +
    +private byte[]
    +name
    +
    +
    +private long
    +readRequestCount
    +
    +
    +private Size
    +rootLevelIndexSize
    +
    +
    +private int
    +storeCount
    +
    +
    +private int
    +storeFileCount
    +
    +
    +private Size
    +storeFileSize
    +
    +
+private Map<byte[], Long>
+storeSequenceIds
    +
    +
    +private Size
    +uncompressedDataIndexSize
    +
    +
    +private Size
    +uncompressedStoreFileSize
    +
    +
    +private long
    +writeRequestCount
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Constructor Summary
    +
    +Constructors
    +
    +Modifier
    +Constructor and Description
    +
    +
    +private 
+RegionMetricsBuilder(byte[] name)
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Method Summary
    +
    +All MethodsStatic MethodsInstance MethodsConcrete Methods
    +
    +Modifier and Type
    +Method and Description
    +
    +
+RegionMetrics
+build()
+
+
+static RegionMetricsBuilder
+newBuilder(byte[] name)
+
+
+RegionMetricsBuilder
+setBloomFilterSize(Size value)
+
+
+RegionMetricsBuilder
+setCompactedCellCount(long value)
+
+
+RegionMetricsBuilder
+setCompactingCellCount(long value)
+
+
+RegionMetricsBuilder
+setCompletedSequenceId(long value)
+
+
+RegionMetricsBuilder
+setDataLocality(float value)
+
+
+RegionMetricsBuilder
+setFilteredReadRequestCount(long value)
+
+
+RegionMetricsBuilder
+setLastMajorCompactionTimestamp(long value)
+
+
+RegionMetricsBuilder
+setMemStoreSize(Size value)
+
+
+RegionMetricsBuilder
+setReadRequestCount(long value)
+
+
+RegionMetricsBuilder
+setStoreCount(int value)
+
+
+RegionMetricsBuilder
+setStoreFileCount(int value)
+
+
+RegionMetricsBuilder
+setStoreFileIndexSize(Size value)
+
+
+RegionMetricsBuilder
+setStoreFileRootLevelIndexSize(Size value)
+
+
+RegionMetricsBuilder
+setStoreFileSize(Size value)
+
+
+RegionMetricsBuilder
+setStoreFileUncompressedDataIndexSize(Size value)
    +
    +
    +RegionMetricsBuilder
    +setStoreSequenceIds(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in 
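RegionMetricsBuilder, added in the hunk above, is a plain fluent builder. A usage sketch restricted to setters visible in the method summary; the region name and numbers are made up:

import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.RegionMetricsBuilder;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: assemble an immutable RegionMetrics from a few fields.
final class RegionMetricsSample {
  static RegionMetrics sample() {
    return RegionMetricsBuilder.newBuilder(Bytes.toBytes("region-a"))
        .setStoreCount(3)
        .setStoreFileCount(7)
        .setStoreFileSize(new Size(128, Size.Unit.MEGABYTE))
        .setReadRequestCount(1024L)
        .setDataLocality(0.95f)
        .build();
  }
}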

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html 
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
    index 2a67eed..317a230 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
    @@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
     
     
     http://docs.oracle.com/javase/8/docs/api/java/lang/FunctionalInterface.html?is-external=true;
     title="class or interface in java.lang">@FunctionalInterface
-private static interface RawAsyncHBaseAdmin.Converter<D,S>
+private static interface RawAsyncHBaseAdmin.Converter<D,S>
     
     
     
    @@ -156,7 +156,7 @@ private static interface 
     
     convert
-D convert(S src)
+D convert(S src)
    throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     
     Throws:
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    index 523e977..4278e36 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
    @@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -private class RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
    +private class RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
     extends RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer
     
     
    @@ -232,7 +232,7 @@ extends 
     
     CreateNamespaceProcedureBiConsumer
    -CreateNamespaceProcedureBiConsumer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringnamespaceName)
    +CreateNamespaceProcedureBiConsumer(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringnamespaceName)
     
     
     
    @@ -249,7 +249,7 @@ extends 
     
     getOperationType
    -http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringgetOperationType()
    +http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringgetOperationType()
     
     Specified by:
     getOperationTypein
     classRawAsyncHBaseAdmin.NamespaceProcedureBiConsumer
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
    index 2b9d7d2..fc4ec22 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
    @@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -private class RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer
    +private class RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer
     extends RawAsyncHBaseAdmin.TableProcedureBiConsumer
     
     
    @@ -232,7 +232,7 @@ extends 
     
     CreateTableProcedureBiConsumer
    -CreateTableProcedureBiConsumer(TableNametableName)
    +CreateTableProcedureBiConsumer(TableNametableName)
     
     
     
    @@ -249,7 +249,7 @@ extends 
     
     getOperationType
    -http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringgetOperationType()
    +http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringgetOperationType()
     
     Specified by:
     getOperationTypein
     classRawAsyncHBaseAdmin.TableProcedureBiConsumer
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
     
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/book.html
    --
    diff --git a/book.html b/book.html
    index f9dcb46..c624cdf 100644
    --- a/book.html
    +++ b/book.html
    @@ -37002,7 +37002,7 @@ The server will return cellblocks compressed using this 
    same compressor as long
     
     
     Version 3.0.0-SNAPSHOT
    -Last updated 2017-12-15 14:29:43 UTC
    +Last updated 2017-12-16 14:29:41 UTC
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/bulk-loads.html
    --
    diff --git a/bulk-loads.html b/bulk-loads.html
    index 5cc232e..075a9c0 100644
    --- a/bulk-loads.html
    +++ b/bulk-loads.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase   
       Bulk Loads in Apache HBase (TM)
    @@ -311,7 +311,7 @@ under the License. -->
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.
     
    -  Last Published: 
    2017-12-15
    +  Last Published: 
    2017-12-16
     
     
     
    
    
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Listener.Reader.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Listener.Reader.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Listener.Reader.html
    index 67e6eae..a83310a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Listener.Reader.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.Listener.Reader.html
    @@ -51,10 +51,10 @@
     043
     044import org.apache.hadoop.conf.Configuration;
     045import org.apache.hadoop.hbase.CellScanner;
    -046import org.apache.hadoop.hbase.HConstants;
    -047import org.apache.hadoop.hbase.Server;
    -048import org.apache.yetus.audience.InterfaceAudience;
    -049import org.apache.yetus.audience.InterfaceStability;
    +046import org.apache.hadoop.hbase.HBaseInterfaceAudience;
    +047import org.apache.hadoop.hbase.HConstants;
    +048import org.apache.hadoop.hbase.Server;
    +049import org.apache.yetus.audience.InterfaceAudience;
     050import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
     051import org.apache.hadoop.hbase.security.HBasePolicyProvider;
     052import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
    @@ -89,624 +89,623 @@
     081 *
     082 * @see BlockingRpcClient
     083 */
    -084@InterfaceAudience.Private
    -085@InterfaceStability.Evolving
    -086public class SimpleRpcServer extends RpcServer {
    -087
    -088  protected int port;                             // port we listen on
    -089  protected InetSocketAddress address;            // inet address we listen on
    -090  private int readThreads;                        // number of read threads
    -091
    -092  protected int socketSendBufferSize;
    -093  protected final long purgeTimeout;              // in milliseconds
    -094
    -095  // maintains the set of client connections and handles idle timeouts
    -096  private ConnectionManager connectionManager;
    -097  private Listener listener = null;
    -098  protected SimpleRpcServerResponder responder = null;
    -099
    -100  /** Listens on the socket. Creates jobs for the handler threads*/
    -101  private class Listener extends Thread {
    -102
    -103    private ServerSocketChannel acceptChannel = null; //the accept channel
    -104    private Selector selector = null; //the selector that we use for the server
    -105    private Reader[] readers = null;
    -106    private int currentReader = 0;
    -107    private final int readerPendingConnectionQueueLength;
    -108
    -109    private ExecutorService readPool;
    -110
    -111    public Listener(final String name) throws IOException {
    -112      super(name);
    -113      // The backlog of requests that we will have the serversocket carry.
    -114      int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
    -115      readerPendingConnectionQueueLength =
    -116          conf.getInt("hbase.ipc.server.read.connection-queue.size", 100);
    -117      // Create a new server socket and set to non blocking mode
    -118      acceptChannel = ServerSocketChannel.open();
    -119      acceptChannel.configureBlocking(false);
    -120
    -121      // Bind the server socket to the binding address (can be different from the default interface)
    -122      bind(acceptChannel.socket(), bindAddress, backlogLength);
    -123      port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port
    -124      address = (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
    -125      // create a selector;
    -126      selector = Selector.open();
    -127
    -128      readers = new Reader[readThreads];
    -129      // Why this executor thing? Why not like hadoop just start up all the threads? I suppose it
    -130      // has an advantage in that it is easy to shutdown the pool.
    -131      readPool = Executors.newFixedThreadPool(readThreads,
    -132        new ThreadFactoryBuilder().setNameFormat(
    -133          "Reader=%d,bindAddress=" + bindAddress.getHostName() +
    -134          ",port=" + port).setDaemon(true)
    -135        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
    -136      for (int i = 0; i < readThreads; ++i) {
    -137        Reader reader = new Reader();
    -138        readers[i] = reader;
    -139        readPool.execute(reader);
    -140      }
    -141      LOG.info(getName() + ": started " + readThreads + " reader(s) listening on port=" + port);
    -142
    -143      // Register accepts on the server socket with the selector.
    -144      acceptChannel.register(selector, SelectionKey.OP_ACCEPT);
    -145      this.setName("Listener,port=" + port);
    -146      this.setDaemon(true);
    -147    }
    +084@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.CONFIG})
    +085public class SimpleRpcServer extends RpcServer {
    +086
    +087  protected int port;                             // port we listen on
    +088  protected InetSocketAddress address;            // inet address we listen on
    +089  private int

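    The Listener above hands accepted connections to a small fixed pool of daemon reader threads, each owning its own Selector. Below is a minimal, plain-JDK sketch of that reader-pool pattern; READ_THREADS and the thread names are hypothetical stand-ins for values the real class takes from its Configuration and builds with Guava's ThreadFactoryBuilder. It is an illustration, not HBase's actual Reader class.

    import java.io.IOException;
    import java.nio.channels.Selector;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.atomic.AtomicInteger;

    public class ReaderPoolSketch {
        static final int READ_THREADS = 3; // hypothetical; HBase reads this from conf

        public static void main(String[] args) throws IOException {
            // One daemon thread per reader, mirroring the Listener's fixed pool.
            AtomicInteger id = new AtomicInteger();
            ExecutorService readPool = Executors.newFixedThreadPool(READ_THREADS, r -> {
                Thread t = new Thread(r, "Reader=" + id.getAndIncrement());
                t.setDaemon(true);
                return t;
            });
            for (int i = 0; i < READ_THREADS; i++) {
                Selector selector = Selector.open(); // each reader polls its own selector
                readPool.execute(() -> {
                    try {
                        while (!Thread.currentThread().isInterrupted()) {
                            selector.select(1000); // wait for readable channels
                            // real code would iterate selector.selectedKeys() here
                        }
                    } catch (IOException e) {
                        Thread.currentThread().interrupt();
                    }
                });
            }
            // The stated advantage of the pool: shutdown is one call.
            readPool.shutdownNow();
        }
    }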
    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
    index 219283e..2b5d70b 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
    @@ -435,1198 +435,1203 @@
     427
     428    if (backingMap.containsKey(cacheKey)) {
     429      Cacheable existingBlock = getBlock(cacheKey, false, false, false);
    -430      if (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
    -431        throw new RuntimeException("Cached block contents differ, which should not have happened."
    -432            + "cacheKey:" + cacheKey);
    -433      }
    -434      String msg = "Caching an already cached block: " + cacheKey;
    -435      msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
    -436      LOG.warn(msg);
    -437      return;
    -438    }
    -439
    -440    /*
    -441     * Stuff the entry into the RAM cache so it can get drained to the persistent store
    -442     */
    -443    RAMQueueEntry re =
    -444        new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory);
    -445    if (ramCache.putIfAbsent(cacheKey, re) != null) {
    -446      return;
    -447    }
    -448    int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
    -449    BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
    -450    boolean successfulAddition = false;
    -451    if (wait) {
    -452      try {
    -453        successfulAddition = bq.offer(re, DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
    -454      } catch (InterruptedException e) {
    -455        Thread.currentThread().interrupt();
    -456      }
    -457    } else {
    -458      successfulAddition = bq.offer(re);
    -459    }
    -460    if (!successfulAddition) {
    -461      ramCache.remove(cacheKey);
    -462      cacheStats.failInsert();
    -463    } else {
    -464      this.blockNumber.increment();
    -465      this.heapSize.add(cachedItem.heapSize());
    -466      blocksByHFile.add(cacheKey);
    -467    }
    -468  }
    -469
    -470  /**
    -471   * Get the buffer of the block with the specified key.
    -472   * @param key block's cache key
    -473   * @param caching true if the caller caches blocks on cache misses
    -474   * @param repeat Whether this is a repeat lookup for the same block
    -475   * @param updateCacheMetrics Whether we should update cache metrics or not
    -476   * @return buffer of specified cache key, or null if not in cache
    -477   */
    -478  @Override
    -479  public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
    -480      boolean updateCacheMetrics) {
    -481    if (!cacheEnabled) {
    -482      return null;
    -483    }
    -484    RAMQueueEntry re = ramCache.get(key);
    -485    if (re != null) {
    -486      if (updateCacheMetrics) {
    -487        cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
    -488      }
    -489      re.access(accessCount.incrementAndGet());
    -490      return re.getData();
    -491    }
    -492    BucketEntry bucketEntry = backingMap.get(key);
    -493    if (bucketEntry != null) {
    -494      long start = System.nanoTime();
    -495      ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
    -496      try {
    -497        lock.readLock().lock();
    -498        // We can not read here even if backingMap does contain the given key because its offset
    -499        // maybe changed. If we lock BlockCacheKey instead of offset, then we can only check
    -500        // existence here.
    -501        if (bucketEntry.equals(backingMap.get(key))) {
    -502          // TODO : change this area - should be removed after server cells and
    -503          // 12295 are available
    -504          int len = bucketEntry.getLength();
    -505          if (LOG.isTraceEnabled()) {
    -506            LOG.trace("Read offset=" + bucketEntry.offset() + ", len=" + len);
    -507          }
    -508          Cacheable cachedBlock = ioEngine.read(bucketEntry.offset(), len,
    -509              bucketEntry.deserializerReference(this.deserialiserMap));
    -510          long timeTaken = System.nanoTime() - start;
    -511          if (updateCacheMetrics) {
    -512            cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
    -513            cacheStats.ioHit(timeTaken);
    -514          }
    -515          if (cachedBlock.getMemoryType() == MemoryType.SHARED) {
    -516            bucketEntry.refCount.incrementAndGet();
    -517          }
    -518          bucketEntry.access(accessCount.incrementAndGet());
    -519          if (this.ioErrorStartTime > 0) {
    -520            ioErrorStartTime = -1;
    -521          }
    -522          return cachedBlock;
    -523        }
    -524      } catch (IOException ioex) {
    -525        LOG.error("Failed

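    The write path above picks a writer queue by masking the key hash to a non-negative value and taking it modulo the queue count, then offers with a bounded wait and rolls back on failure. A runnable sketch of just that dispatch step, with a hypothetical queue count and wait time, and a String standing in for RAMQueueEntry:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class WriterQueueSketch {
        public static void main(String[] args) throws InterruptedException {
            final long cacheWaitMs = 50; // stand-in for DEFAULT_CACHE_WAIT_TIME
            List<BlockingQueue<String>> writerQueues = new ArrayList<>();
            for (int i = 0; i < 4; i++) {
                writerQueues.add(new ArrayBlockingQueue<>(64));
            }
            String cacheKey = "hfile-1:offset-12345"; // hypothetical block key
            // Mask off the sign bit so the modulo always yields a valid index.
            int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
            BlockingQueue<String> bq = writerQueues.get(queueNum);
            boolean added = bq.offer(cacheKey, cacheWaitMs, TimeUnit.MILLISECONDS);
            if (!added) {
                // the real code removes the RAM-cache entry and counts a failed insert
                System.out.println("queue full, insert failed");
            }
        }
    }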
    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.html
    index 7cece5c..6361a24 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.html
    @@ -248,379 +248,383 @@
     240     */
     241    CheckAndMutateBuilder ifNotExists();
     242
    -243    default CheckAndMutateBuilder ifEquals(byte[] value) {
    -244      return ifMatches(CompareOperator.EQUAL, value);
    -245    }
    -246
    -247    /**
    -248     * @param compareOp comparison operator to use
    -249     * @param value the expected value
    -250     */
    -251    CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value);
    -252
    -253    /**
    -254     * @param put data to put if check succeeds
    -255     * @return {@code true} if the new put was executed, {@code false} otherwise. The return value
    -256     *         will be wrapped by a {@link CompletableFuture}.
    -257     */
    -258    CompletableFuture<Boolean> thenPut(Put put);
    -259
    -260    /**
    -261     * @param delete data to delete if check succeeds
    -262     * @return {@code true} if the new delete was executed, {@code false} otherwise. The return
    -263     *         value will be wrapped by a {@link CompletableFuture}.
    -264     */
    -265    CompletableFuture<Boolean> thenDelete(Delete delete);
    -266
    -267    /**
    -268     * @param mutation mutations to perform if check succeeds
    -269     * @return true if the new mutation was executed, false otherwise. The return value will be
    -270     *         wrapped by a {@link CompletableFuture}.
    -271     */
    -272    CompletableFuture<Boolean> thenMutate(RowMutations mutation);
    -273  }
    -274
    -275  /**
    -276   * Performs multiple mutations atomically on a single row. Currently {@link Put} and
    -277   * {@link Delete} are supported.
    -278   * @param mutation object that specifies the set of mutations to perform atomically
    -279   * @return A {@link CompletableFuture} that always returns null when complete normally.
    -280   */
    -281  CompletableFuture<Void> mutateRow(RowMutations mutation);
    -282
    -283  /**
    -284   * The scan API uses the observer pattern.
    -285   * @param scan A configured {@link Scan} object.
    -286   * @param consumer the consumer used to receive results.
    -287   * @see ScanResultConsumer
    -288   * @see AdvancedScanResultConsumer
    -289   */
    -290  void scan(Scan scan, C consumer);
    -291
    -292  /**
    -293   * Gets a scanner on the current table for the given family.
    -294   * @param family The column family to scan.
    -295   * @return A scanner.
    -296   */
    -297  default ResultScanner getScanner(byte[] family) {
    -298    return getScanner(new Scan().addFamily(family));
    -299  }
    -300
    -301  /**
    -302   * Gets a scanner on the current table for the given family and qualifier.
    -303   * @param family The column family to scan.
    -304   * @param qualifier The column qualifier to scan.
    -305   * @return A scanner.
    -306   */
    -307  default ResultScanner getScanner(byte[] family, byte[] qualifier) {
    -308    return getScanner(new Scan().addColumn(family, qualifier));
    -309  }
    -310
    -311  /**
    -312   * Returns a scanner on the current table as specified by the {@link Scan} object.
    -313   * @param scan A configured {@link Scan} object.
    -314   * @return A scanner.
    -315   */
    -316  ResultScanner getScanner(Scan scan);
    -317
    -318  /**
    -319   * Return all the results that match the given scan object.
    -320   * <p>
    -321   * Notice that usually you should use this method with a {@link Scan} object that has limit set.
    -322   * For example, if you want to get the closest row after a given row, you could do this:
    -323   * <p>
    -324   *
    -325   * <pre>
    -326   * <code>
    -327   * table.scanAll(new Scan().withStartRow(row, false).setLimit(1)).thenAccept(results -> {
    -328   *   if (results.isEmpty()) {
    -329   *     System.out.println("No row after " + Bytes.toStringBinary(row));
    -330   *   } else {
    -331   *     System.out.println("The closest row after " + Bytes.toStringBinary(row) + " is "
    -332   *         + Bytes.toStringBinary(results.stream().findFirst().get().getRow()));
    -333   *   }
    -334   * });
    -335   * </code>
    -336   * </pre>
    -337   * <p>
    -338   * If your result set is very large, you should use other scan method to get a scanner or use
    -339   * callback to process the results. They will do chunking to prevent OOM. The scanAll method will
    -340   * fetch all the results and store them in a List and then return the list to you.
    +243    /**
    +244     * Check for equality.
    +245     * @param value the expected value
    +246     */
    +247    default CheckAndMutateBuilder ifEquals(byte[] value) {
    +248      return ifMatches(CompareOperator.EQUAL, value);
    +249    }
    +250
    +251    /**
    +252     * @param compareOp comparison operator to use
    +253

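    The CheckAndMutateBuilder chain documented above is how the async client expresses an atomic compare-and-set. A sketch of typical usage of that 2.x-era API, assuming an HBase 2.x cluster reachable with default client configuration; the table name "demo", family "f", qualifier "q", and values are hypothetical:

    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.AsyncTable;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndMutateSketch {
      public static void main(String[] args) throws Exception {
        AsyncConnection conn = ConnectionFactory.createAsyncConnection().get();
        AsyncTable<?> table = conn.getTable(TableName.valueOf("demo")); // hypothetical table
        byte[] row = Bytes.toBytes("row1");
        byte[] cf = Bytes.toBytes("f");
        Put put = new Put(row).addColumn(cf, Bytes.toBytes("q"), Bytes.toBytes("v2"));
        // Apply the Put only if the current value equals "v1"; the boolean arrives asynchronously.
        CompletableFuture<Boolean> done = table
            .checkAndMutate(row, cf)
            .qualifier(Bytes.toBytes("q"))
            .ifEquals(Bytes.toBytes("v1"))
            .thenPut(put);
        System.out.println("mutation applied: " + done.get());
        conn.close();
      }
    }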
    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.OffheapDecodedCell.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.OffheapDecodedCell.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.OffheapDecodedCell.html
    index f09b3c1..292b87e 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.OffheapDecodedCell.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.OffheapDecodedCell.html
    @@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
     
     All Implemented Interfaces:
    -Cloneable, Cell, ExtendedCell, HeapSize, RawCell, SettableSequenceId, SettableTimestamp
    +Cloneable, Cell, ExtendedCell, HeapSize, RawCell
     
     Enclosing class:
    @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
     
    -protected static class BufferedDataBlockEncoder.OffheapDecodedCell
    +protected static class BufferedDataBlockEncoder.OffheapDecodedCell
     extends ByteBufferCell
     implements ExtendedCell
    @@ -418,8 +418,7 @@ implements 
     
     void
    -setTimestamp(byte[] ts,
    -             int tsOffset)
    +setTimestamp(byte[] ts)
     Sets with the given timestamp.
    @@ -485,7 +484,7 @@ implements 
     FIXED_OVERHEAD
    -private static final long FIXED_OVERHEAD
    +private static final long FIXED_OVERHEAD
    @@ -494,7 +493,7 @@ implements 
     keyBuffer
    -private ByteBuffer keyBuffer
    +private ByteBuffer keyBuffer
    @@ -503,7 +502,7 @@ implements 
     rowLength
    -private short rowLength
    +private short rowLength
    @@ -512,7 +511,7 @@ implements 
     familyOffset
    -private int familyOffset
    +private int familyOffset
    @@ -521,7 +520,7 @@ implements 
     familyLength
    -private byte familyLength
    +private byte familyLength
    @@ -530,7 +529,7 @@ implements 
     qualifierOffset
    -private int qualifierOffset
    +private int qualifierOffset
    @@ -539,7 +538,7 @@ implements 
     qualifierLength
    -private int qualifierLength
    +private int qualifierLength
    @@ -548,7 +547,7 @@ implements 
     timestamp
    -private long timestamp
    +private long timestamp
    @@ -557,7 +556,7 @@ implements 
     typeByte
    -private byte typeByte
    +private byte typeByte
    @@ -566,7 +565,7 @@ implements 
     valueBuffer
    -private ByteBuffer valueBuffer
    +private ByteBuffer valueBuffer
    @@ -575,7 +574,7 @@ implements 
     valueOffset
    -private int valueOffset
    +private int valueOffset
    @@ -584,7 +583,7 @@ implements 
     valueLength
    -private int valueLength
    +private int valueLength
    @@ -593,7 +592,7 @@ implements 
     tagsBuffer
    -private ByteBuffer tagsBuffer
    +private ByteBuffer tagsBuffer
    @@ -602,7 +601,7 @@ implements 
     tagsOffset
    -private int tagsOffset
    +private int tagsOffset
    @@ -611,7 +610,7 @@ implements 
     tagsLength
    -private int tagsLength
    +private int tagsLength
    @@ -620,7 +619,7 @@ implements 
     seqId
    -private long seqId
    +private long seqId
    @@ -637,7 +636,7 @@ implements 
     OffheapDecodedCell
    -protected OffheapDecodedCell(ByteBuffer keyBuffer,
    +protected OffheapDecodedCell(ByteBuffer keyBuffer,
                                  short rowLength,
                                  int familyOffset,
                                  byte familyLength,
    @@ -668,7 +667,7 @@ implements 
     getRowArray
    -public byte[] getRowArray()
    +public byte[] getRowArray()
     Description copied from interface: Cell
     Contiguous raw bytes that may start at any index in the containing array. Max length is
     Short.MAX_VALUE which is 32,767 bytes.
    @@ -686,7 +685,7 @@ implements 
     getRowOffset
    -public int getRowOffset()
    +public int getRowOffset()
     
     Specified by:
     getRowOffset in interface Cell
    @@ -701,7 +700,7 @@

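    The setTimestamp(byte[] ts) change above belongs to a cell whose serialized key lives in an off-heap ByteBuffer, so a timestamp update is an absolute 8-byte write at a fixed position inside that buffer. A plain-JDK sketch of the idea; the 32-byte buffer and offset 16 are hypothetical, and the real code goes through HBase's ByteBufferUtils rather than raw ByteBuffer calls:

    import java.nio.ByteBuffer;

    public class OffheapTimestampSketch {
        public static void main(String[] args) {
            // A cell's key is serialized into an off-heap buffer; the 8-byte
            // timestamp sits at a fixed position near the end of the key.
            ByteBuffer keyBuffer = ByteBuffer.allocateDirect(32);
            int timestampPosition = 16; // hypothetical offset within the key
            long newTs = System.currentTimeMillis();
            // Absolute put: writes without disturbing the buffer's position/limit.
            keyBuffer.putLong(timestampPosition, newTs);
            System.out.println("ts=" + keyBuffer.getLong(timestampPosition));
        }
    }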
    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-spark/dependency-convergence.html
    --
    diff --git a/hbase-build-configuration/hbase-spark/dependency-convergence.html b/hbase-build-configuration/hbase-spark/dependency-convergence.html
    index 935303d..3818567 100644
    --- a/hbase-build-configuration/hbase-spark/dependency-convergence.html
    +++ b/hbase-build-configuration/hbase-spark/dependency-convergence.html
    @@ -7,7 +7,7 @@
     
    -
    +
     
     Apache HBase - Spark – Reactor Dependency Convergence
     
    @@ -488,22 +488,22 @@
     3.4.10
     
    -org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT
     +- org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile
     |  +- org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile
     |  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     |  +- org.apache.zookeeper:zookeeper:jar:3.4.10:compile
     |  +- org.apache.hadoop:hadoop-common:jar:2.7.4:compile
     |  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     |  +- org.apache.hadoop:hadoop-auth:jar:2.7.4:compile
     |  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     |  \- org.apache.hadoop:hadoop-client:jar:2.7.4:compile
     |     \- org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.4:compile
     |        \- org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile
     |           \- org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile
     |              \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     +- org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT:compile
     |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     +- org.apache.hbase:hbase-mapreduce:test-jar:tests:3.0.0-SNAPSHOT:test
     |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
     +- org.apache.hbase:hbase-testing-util:jar:3.0.0-SNAPSHOT:test
     |  +- org.apache.hbase:hbase-server:test-jar:tests:3.0.0-SNAPSHOT:test
     |  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
     |  \- org.apache.hadoop:hadoop-minicluster:jar:2.7.4:test
     |     +- org.apache.hadoop:hadoop-common:test-jar:tests:2.7.4:test
     |     |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
     |     \- org.apache.hadoop:hadoop-yarn-server-tests:test-jar:tests:2.7.4:test
     |        \- org.apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.4:test
     |           \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
     +- org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile
     |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     +- org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile
     |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     +- org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT:compile
     |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     +- org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile
     |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     \- org.apache.hbase:hbase-rsgroup:jar:3.0.0-SNAPSHOT:compile
        \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
    -org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT
     +- org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile
     |  +- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     |  \- org.apache.hadoop:hadoop-auth:jar:2.7.4:compile
     |     \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     +- org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile
     |  +- org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile
     |  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     |  +- org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile
     |  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     |  +- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     |  \- org.apache.hadoop:hadoop-client:jar:2.7.4:compile
     |     \- org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.4:compile
     |        \- org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile
     |           \- org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile
     |              \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
     +- org.apache.hbase:hbase-server:test-jar:tests:3.0.0-SNAPSHOT:test
     |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/testdevapidocs/org/apache/hadoop/hbase/testclassification/ZKTests.html
    --
    diff --git a/testdevapidocs/org/apache/hadoop/hbase/testclassification/ZKTests.html b/testdevapidocs/org/apache/hadoop/hbase/testclassification/ZKTests.html
    new file mode 100644
    index 000..14a67e0
    --- /dev/null
    +++ b/testdevapidocs/org/apache/hadoop/hbase/testclassification/ZKTests.html
    @@ -0,0 +1,168 @@
    +[generated javadoc page scaffolding (doctype, scripts, navigation bars) trimmed]
    +ZKTests (Apache HBase 3.0.0-SNAPSHOT Test API)
    +org.apache.hadoop.hbase.testclassification
    +Interface ZKTests
    +
    +public interface ZKTests
    +
    +Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/testdevapidocs/org/apache/hadoop/hbase/testclassification/class-use/ZKTests.html
    --
    diff --git a/testdevapidocs/org/apache/hadoop/hbase/testclassification/class-use/ZKTests.html b/testdevapidocs/org/apache/hadoop/hbase/testclassification/class-use/ZKTests.html
    new file mode 100644
    index 000..effe813
    --- /dev/null
    +++ b/testdevapidocs/org/apache/hadoop/hbase/testclassification/class-use/ZKTests.html
    @@ -0,0 +1,125 @@
    +[generated javadoc page scaffolding (doctype, scripts, navigation bars) trimmed]
    +Uses of Interface org.apache.hadoop.hbase.testclassification.ZKTests (Apache HBase 3.0.0-SNAPSHOT Test API)
    +
    +No usage of org.apache.hadoop.hbase.testclassification.ZKTests
    +
    +Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/testdevapidocs/org/apache/hadoop/hbase/testclassification/package-frame.html
    --
    diff --git a/testdevapidocs/org/apache/hadoop/hbase/testclassification/package-frame.html b/testdevapidocs/org/apache/hadoop/hbase/testclassification/package-frame.html
    index 563afb4..b67b7c7 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/testclassification/package-frame.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/testclassification/package-frame.html
    @@ -33,6 +33,7 @@
     SmallTests
     VerySlowMapReduceTests
     VerySlowRegionServerTests
    +ZKTests
     
     
     
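    The ZKTests entry added above is a JUnit category marker interface like the others in this package. A sketch of how such a marker is applied, assuming JUnit 4 and the hbase test-classification classes on the classpath; TestZKSomething is a hypothetical test class:

    import org.apache.hadoop.hbase.testclassification.MediumTests;
    import org.apache.hadoop.hbase.testclassification.ZKTests;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;

    // Tagging the class lets the build run or exclude ZooKeeper-related tests as a group.
    @Category({ ZKTests.class, MediumTests.class })
    public class TestZKSomething {
      @Test
      public void testNothing() {
        // ZooKeeper-related assertions would go here.
      }
    }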
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/testdevapidocs/org/apache/hadoop/hbase/testclassification/package-summary.html
    

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
    index 2daacb5..fb5cc60 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
    @@ -25,413 +25,414 @@
     017 */
     018package org.apache.hadoop.hbase.master;
     019
    -020import java.util.Date;
    -021
    -022import org.apache.hadoop.hbase.ServerName;
    -023import org.apache.hadoop.hbase.client.RegionInfo;
    -024import org.apache.yetus.audience.InterfaceAudience;
    -025import org.apache.yetus.audience.InterfaceStability;
    -026
    -027import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -028import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
    -029
    -030/**
    -031 * State of a Region while undergoing transitions.
    -032 * This class is immutable.
    -033 */
    -034@InterfaceAudience.Private
    -035public class RegionState {
    -036
    -037  @InterfaceAudience.Private
    -038  @InterfaceStability.Evolving
    -039  public enum State {
    -040    OFFLINE,        // region is in an offline state
    -041    OPENING,        // server has begun to open but not yet done
    -042    OPEN,           // server opened region and updated meta
    -043    CLOSING,        // server has begun to close but not yet done
    -044    CLOSED,         // server closed region and updated meta
    -045    SPLITTING,      // server started split of a region
    -046    SPLIT,          // server completed split of a region
    -047    FAILED_OPEN,    // failed to open, and won't retry any more
    -048    FAILED_CLOSE,   // failed to close, and won't retry any more
    -049    MERGING,        // server started merge a region
    -050    MERGED,         // server completed merge a region
    -051    SPLITTING_NEW,  // new region to be created when RS splits a parent
    -052                    // region but hasn't been created yet, or master doesn't
    -053                    // know it's already created
    -054    MERGING_NEW;    // new region to be created when RS merges two
    -055                    // daughter regions but hasn't been created yet, or
    -056                    // master doesn't know it's already created
    -057
    -058    /**
    -059     * Convert to protobuf ClusterStatusProtos.RegionState.State
    -060     */
    -061    public ClusterStatusProtos.RegionState.State convert() {
    -062      ClusterStatusProtos.RegionState.State rs;
    -063      switch (this) {
    -064      case OFFLINE:
    -065        rs = ClusterStatusProtos.RegionState.State.OFFLINE;
    -066        break;
    -067      case OPENING:
    -068        rs = ClusterStatusProtos.RegionState.State.OPENING;
    -069        break;
    -070      case OPEN:
    -071        rs = ClusterStatusProtos.RegionState.State.OPEN;
    -072        break;
    -073      case CLOSING:
    -074        rs = ClusterStatusProtos.RegionState.State.CLOSING;
    -075        break;
    -076      case CLOSED:
    -077        rs = ClusterStatusProtos.RegionState.State.CLOSED;
    -078        break;
    -079      case SPLITTING:
    -080        rs = ClusterStatusProtos.RegionState.State.SPLITTING;
    -081        break;
    -082      case SPLIT:
    -083        rs = ClusterStatusProtos.RegionState.State.SPLIT;
    -084        break;
    -085      case FAILED_OPEN:
    -086        rs = ClusterStatusProtos.RegionState.State.FAILED_OPEN;
    -087        break;
    -088      case FAILED_CLOSE:
    -089        rs = ClusterStatusProtos.RegionState.State.FAILED_CLOSE;
    -090        break;
    -091      case MERGING:
    -092        rs = ClusterStatusProtos.RegionState.State.MERGING;
    -093        break;
    -094      case MERGED:
    -095        rs = ClusterStatusProtos.RegionState.State.MERGED;
    -096        break;
    -097      case SPLITTING_NEW:
    -098        rs = ClusterStatusProtos.RegionState.State.SPLITTING_NEW;
    -099        break;
    -100      case MERGING_NEW:
    -101        rs = ClusterStatusProtos.RegionState.State.MERGING_NEW;
    -102        break;
    -103      default:
    -104        throw new IllegalStateException("");
    -105      }
    -106      return rs;
    -107    }
    -108
    -109    /**
    -110     * Convert a protobuf HBaseProtos.RegionState.State to a RegionState.State
    -111     *
    -112     * @return the RegionState.State
    -113     */
    -114    public static State convert(ClusterStatusProtos.RegionState.State protoState) {
    -115      State state;
    -116      switch (protoState) {
    -117      case OFFLINE:
    -118        state = OFFLINE;
    -119        break;
    -120      case PENDING_OPEN:
    -121      case OPENING:
    -122        state = OPENING;
    -123        break;
    -124      case OPEN:
    -125        state = OPEN;
    -126        break;
    -127      case PENDING_CLOSE:
    -128      case CLOSING:
    -129        state = CLOSING;
    -130        break;
    -131      case CLOSED:
    -132        state = CLOSED;
    -133        break;
    -134      case SPLITTING:
    -135

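    RegionState.State.convert above is a plain switch-based mapping between the Java enum and its protobuf counterpart, with legacy proto values (PENDING_OPEN, PENDING_CLOSE) folded into their current equivalents. A self-contained sketch of the same pattern using stand-in enums, not the HBase classes themselves:

    public class EnumConvertSketch {
        // Stand-ins for RegionState.State and its protobuf twin.
        enum State { OFFLINE, OPENING, OPEN }
        enum ProtoState { OFFLINE, OPENING, OPEN, PENDING_OPEN }

        // Every proto value maps onto a local value; the legacy
        // PENDING_OPEN collapses into OPENING via case fall-through.
        static State convert(ProtoState protoState) {
            switch (protoState) {
                case OFFLINE:      return State.OFFLINE;
                case PENDING_OPEN: // legacy value folded into OPENING
                case OPENING:      return State.OPENING;
                case OPEN:         return State.OPEN;
                default: throw new IllegalStateException("unhandled: " + protoState);
            }
        }

        public static void main(String[] args) {
            System.out.println(convert(ProtoState.PENDING_OPEN)); // prints OPENING
        }
    }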
    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
    index d438f22..7c59e27 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
    @@ -1290,8 +1290,8 @@
     1282      CompactType compactType) throws IOException {
     1283    switch (compactType) {
     1284      case MOB:
    -1285        compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), major,
    -1286          columnFamily);
    +1285        compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName),
    +1286          major, columnFamily);
     1287        break;
     1288      case NORMAL:
     1289        checkTableExists(tableName);
    @@ -3248,7 +3248,7 @@
     3240      new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
     3241        @Override
     3242        public AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
    -3243          RegionInfo info = getMobRegionInfo(tableName);
    +3243          RegionInfo info = RegionInfo.createMobRegionInfo(tableName);
     3244          GetRegionInfoRequest request =
     3245            RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
     3246          GetRegionInfoResponse response = masterAdmin.getRegionInfo(rpcController, request);
    @@ -3312,7 +3312,7 @@
     3304        }
     3305        break;
     3306      default:
    -3307        throw new IllegalArgumentException("Unknowne compactType: " + compactType);
    +3307        throw new IllegalArgumentException("Unknown compactType: " + compactType);
     3308    }
     3309    if (state != null) {
     3310      return ProtobufUtil.createCompactionState(state);
    @@ -3847,325 +3847,320 @@
     3839    });
     3840  }
     3841
    -3842  private RegionInfo getMobRegionInfo(TableName tableName) {
    -3843    return RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
    -3844        .build();
    -3845  }
    -3846
    -3847  private RpcControllerFactory getRpcControllerFactory() {
    -3848    return this.rpcControllerFactory;
    -3849  }
    -3850
    -3851  @Override
    -3852  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
    -3853      throws IOException {
    -3854    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
    -3855      @Override
    -3856      protected Void rpcCall() throws Exception {
    -3857        master.addReplicationPeer(getRpcController(),
    -3858          RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled));
    -3859        return null;
    -3860      }
    -3861    });
    -3862  }
    -3863
    -3864  @Override
    -3865  public void removeReplicationPeer(String peerId) throws IOException {
    -3866    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
    -3867      @Override
    -3868      protected Void rpcCall() throws Exception {
    -3869        master.removeReplicationPeer(getRpcController(),
    -3870          RequestConverter.buildRemoveReplicationPeerRequest(peerId));
    -3871        return null;
    -3872      }
    -3873    });
    -3874  }
    -3875
    -3876  @Override
    -3877  public void enableReplicationPeer(final String peerId) throws IOException {
    -3878    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
    -3879      @Override
    -3880      protected Void rpcCall() throws Exception {
    -3881        master.enableReplicationPeer(getRpcController(),
    -3882          RequestConverter.buildEnableReplicationPeerRequest(peerId));
    -3883        return null;
    -3884      }
    -3885    });
    -3886  }
    -3887
    -3888  @Override
    -3889  public void disableReplicationPeer(final String peerId) throws IOException {
    -3890    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
    -3891      @Override
    -3892      protected Void rpcCall() throws Exception {
    -3893        master.disableReplicationPeer(getRpcController(),
    -3894          RequestConverter.buildDisableReplicationPeerRequest(peerId));
    -3895        return null;
    -3896      }
    -3897    });
    -3898  }
    -3899
    -3900  @Override
    -3901  public ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException {
    -3902    return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(),
    -3903        getRpcControllerFactory()) {
    -3904      @Override
    -3905      protected ReplicationPeerConfig rpcCall() throws Exception {
    -3906        GetReplicationPeerConfigResponse response = master.getReplicationPeerConfig(
    -3907          getRpcController(),
    
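    The MasterCallable blocks above back the public Admin replication-peer API. A sketch of what calling that API looks like from client code, assuming an HBase 2.x cluster with default configuration; the peer id "peer1" and the ZooKeeper cluster key are hypothetical, and later versions favor a builder over the ReplicationPeerConfig setters shown here:

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class ReplicationPeerSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Point the peer at the remote cluster's ZooKeeper ensemble.
          ReplicationPeerConfig peerConfig = new ReplicationPeerConfig()
              .setClusterKey("zk1,zk2,zk3:2181:/hbase"); // hypothetical cluster key
          admin.addReplicationPeer("peer1", peerConfig, true); // enabled on creation
          admin.disableReplicationPeer("peer1"); // pause shipping edits
          admin.removeReplicationPeer("peer1");  // drop the peer entirely
        }
      }
    }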

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
    index c552d8a..a792ab2 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.AdoptAbandonedQueuesWorker.html
    @@ -48,922 +48,928 @@
     040import java.util.concurrent.ThreadPoolExecutor;
     041import java.util.concurrent.TimeUnit;
     042import java.util.concurrent.atomic.AtomicLong;
    -043
    -044import org.apache.commons.logging.Log;
    -045import org.apache.commons.logging.LogFactory;
    -046import org.apache.hadoop.conf.Configuration;
    -047import org.apache.hadoop.fs.FileSystem;
    -048import org.apache.hadoop.fs.Path;
    -049import org.apache.hadoop.hbase.HConstants;
    -050import org.apache.hadoop.hbase.MetaTableAccessor;
    -051import org.apache.hadoop.hbase.Server;
    -052import org.apache.hadoop.hbase.TableDescriptors;
    -053import org.apache.hadoop.hbase.TableName;
    -054import org.apache.yetus.audience.InterfaceAudience;
    -055import org.apache.hadoop.hbase.client.Connection;
    -056import org.apache.hadoop.hbase.client.ConnectionFactory;
    -057import org.apache.hadoop.hbase.regionserver.HRegionServer;
    -058import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
    -059import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
    -060import org.apache.hadoop.hbase.replication.ReplicationException;
    -061import org.apache.hadoop.hbase.replication.ReplicationListener;
    -062import org.apache.hadoop.hbase.replication.ReplicationPeer;
    -063import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    -064import org.apache.hadoop.hbase.replication.ReplicationPeers;
    -065import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
    -066import org.apache.hadoop.hbase.replication.ReplicationQueues;
    -067import org.apache.hadoop.hbase.replication.ReplicationTracker;
    -068import org.apache.hadoop.hbase.util.Bytes;
    -069import org.apache.hadoop.hbase.util.Pair;
    -070import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
    -071
    -072import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    -073import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
    -074
    -075/**
    -076 * This class is responsible for managing all the replication
    -077 * sources. There are two classes of sources:
    -078 * <ul>
    -079 * <li> Normal sources are persistent and one per peer cluster</li>
    -080 * <li> Old sources are recovered from a failed region server and our
    -081 * only goal is to finish replicating the WAL queue it had up in ZK</li>
    -082 * </ul>
    -083 *
    -084 * When a region server dies, this class uses a watcher to get notified and it
    -085 * tries to grab a lock in order to transfer all the queues in a local
    -086 * old source.
    -087 *
    -088 * This class implements the ReplicationListener interface so that it can track changes in
    -089 * replication state.
    -090 */
    -091@InterfaceAudience.Private
    -092public class ReplicationSourceManager implements ReplicationListener {
    -093  private static final Log LOG =
    -094      LogFactory.getLog(ReplicationSourceManager.class);
    -095  // List of all the sources that read this RS's logs
    -096  private final List<ReplicationSourceInterface> sources;
    -097  // List of all the sources we got from died RSs
    -098  private final List<ReplicationSourceInterface> oldsources;
    -099  private final ReplicationQueues replicationQueues;
    -100  private final ReplicationTracker replicationTracker;
    -101  private final ReplicationPeers replicationPeers;
    -102  // UUID for this cluster
    -103  private final UUID clusterId;
    -104  // All about stopping
    -105  private final Server server;
    -106  // All logs we are currently tracking
    -107  // Index structure of the map is: peer_id->logPrefix/logGroup->logs
    -108  private final Map<String, Map<String, SortedSet<String>>> walsById;
    -109  // Logs for recovered sources we are currently tracking
    -110  private final Map<String, Map<String, SortedSet<String>>> walsByIdRecoveredQueues;
    -111  private final Configuration conf;
    -112  private final FileSystem fs;
    -113  // The paths to the latest log of each wal group, for new coming peers
    -114  private Set<Path> latestPaths;
    -115  // Path to the wals directories
    -116  private final Path logDir;
    -117  // Path to the wal archive
    -118  private final Path oldLogDir;
    -119  private final WALFileLengthProvider walFileLengthProvider;
    -120  // The number of ms that we wait before moving znodes, HBASE-3596
    
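    walsById above is a two-level index: peer id to WAL group to a sorted set of WAL file names. A small sketch of building such an index with plain collections; the peer id, group name, and WAL name are made up, and the real class additionally synchronizes access to the inner sorted sets:

    import java.util.Map;
    import java.util.SortedSet;
    import java.util.TreeSet;
    import java.util.concurrent.ConcurrentHashMap;

    public class WalsByIdSketch {
        public static void main(String[] args) {
            // peer_id -> logPrefix/logGroup -> sorted WAL names, mirroring walsById.
            Map<String, Map<String, SortedSet<String>>> walsById = new ConcurrentHashMap<>();
            String peerId = "peer1";              // hypothetical peer
            String logGroup = "rs1%2C16020%2C1";  // hypothetical WAL group prefix
            walsById
                .computeIfAbsent(peerId, p -> new ConcurrentHashMap<>())
                .computeIfAbsent(logGroup, g -> new TreeSet<>()) // TreeSet keeps names ordered
                .add(logGroup + ".1513382400000"); // WAL name sorts by its timestamp suffix
            System.out.println(walsById);
        }
    }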

    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColByteBufferCell.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColByteBufferCell.html b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColByteBufferCell.html
    index 9098105..b05691f 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColByteBufferCell.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColByteBufferCell.html
    @@ -37,1514 +37,1514 @@
     029import java.util.ArrayList;
     030import java.util.Iterator;
     031import java.util.List;
    -032
    -033import org.apache.hadoop.hbase.KeyValue.Type;
    -034import org.apache.hadoop.hbase.filter.ByteArrayComparable;
    -035import org.apache.hadoop.hbase.io.HeapSize;
    -036import org.apache.hadoop.hbase.io.TagCompressionContext;
    -037import org.apache.hadoop.hbase.io.util.Dictionary;
    -038import org.apache.hadoop.hbase.io.util.StreamUtils;
    -039import org.apache.hadoop.hbase.util.ByteBufferUtils;
    -040import org.apache.hadoop.hbase.util.ByteRange;
    -041import org.apache.hadoop.hbase.util.Bytes;
    -042import org.apache.hadoop.hbase.util.ClassSize;
    -043import org.apache.yetus.audience.InterfaceAudience;
    -044
    -045import com.google.common.annotations.VisibleForTesting;
    -046
    -047/**
    -048 * Utility methods helpful slinging {@link Cell} instances. It has more powerful and
    -049 * rich set of APIs than those in {@link CellUtil} for internal usage.
    -050 */
    -051@InterfaceAudience.Private
    -052// TODO : Make Tag IA.LimitedPrivate and move some of the Util methods to CP exposed Util class
    -053public class PrivateCellUtil {
    +032import java.util.Optional;
    +033
    +034import org.apache.hadoop.hbase.KeyValue.Type;
    +035import org.apache.hadoop.hbase.filter.ByteArrayComparable;
    +036import org.apache.hadoop.hbase.io.HeapSize;
    +037import org.apache.hadoop.hbase.io.TagCompressionContext;
    +038import org.apache.hadoop.hbase.io.util.Dictionary;
    +039import org.apache.hadoop.hbase.io.util.StreamUtils;
    +040import org.apache.hadoop.hbase.util.ByteBufferUtils;
    +041import org.apache.hadoop.hbase.util.ByteRange;
    +042import org.apache.hadoop.hbase.util.Bytes;
    +043import org.apache.hadoop.hbase.util.ClassSize;
    +044import org.apache.yetus.audience.InterfaceAudience;
    +045
    +046import com.google.common.annotations.VisibleForTesting;
    +047
    +048/**
    +049 * Utility methods helpful slinging {@link Cell} instances. It has more powerful and
    +050 * rich set of APIs than those in {@link CellUtil} for internal usage.
    +051 */
    +052@InterfaceAudience.Private
    +053public final class PrivateCellUtil {
     054
     055  /**
     056   * Private constructor to keep this class from being instantiated.
     057   */
     058  private PrivateCellUtil() {
    -059
    -060  }
    -061
    -062  /*** ByteRange ***/
    -063
    -064  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
    -065    return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
    -066  }
    -067
    -068  public static ByteRange fillFamilyRange(Cell cell, ByteRange range) {
    -069    return range.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
    -070  }
    -071
    -072  public static ByteRange fillQualifierRange(Cell cell, ByteRange range) {
    -073    return range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
    -074      cell.getQualifierLength());
    -075  }
    -076
    -077  public static ByteRange fillValueRange(Cell cell, ByteRange range) {
    -078    return range.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
    -079  }
    -080
    -081  public static ByteRange fillTagRange(Cell cell, ByteRange range) {
    -082    return range.set(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
    -083  }
    -084
    -085  /**
    -086   * Returns tag value in a new byte array. If server-side, use {@link Tag#getValueArray()} with
    -087   * appropriate {@link Tag#getValueOffset()} and {@link Tag#getValueLength()} instead to save on
    -088   * allocations.
    -089   * @param cell
    -090   * @return tag value in a new byte array.
    -091   */
    -092  public static byte[] getTagsArray(Cell cell) {
    -093    byte[] output = new byte[cell.getTagsLength()];
    -094    copyTagsTo(cell, output, 0);
    -095    return output;
    -096  }
    -097
    -098  public static byte[] cloneTags(Cell cell) {
    -099    byte[] output = new byte[cell.getTagsLength()];
    -100    copyTagsTo(cell, output, 0);
    -101    return output;
    -102  }
    -103
    -104  /**
    -105   * Copies the tags info into the tag portion of the cell
    -106   * @param cell
    -107   * @param destination
    -108   * @param destinationOffset
    -109   * @return position after tags
    +059  }
    +060
    +061  /*** ByteRange ***/
    +062
    +063  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
    +064    return

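    The fillRowRange family of helpers above points a reusable range at a slice of a cell's backing array instead of copying the bytes. A sketch of the idea with a minimal stand-in Range type (not HBase's ByteRange); the backing bytes and the 4-byte row length are hypothetical:

    import java.util.Arrays;

    public class ByteRangeSketch {
        // A minimal stand-in for org.apache.hadoop.hbase.util.ByteRange.
        static final class Range {
            byte[] bytes; int offset; int length;
            Range set(byte[] bytes, int offset, int length) {
                this.bytes = bytes; this.offset = offset; this.length = length;
                return this;
            }
        }

        public static void main(String[] args) {
            // fillRowRange-style call: point the range at the row bytes without copying.
            byte[] backing = "row1/f:q/value".getBytes();
            Range range = new Range().set(backing, 0, 4); // row occupies the first 4 bytes
            System.out.println(new String(Arrays.copyOfRange(
                range.bytes, range.offset, range.offset + range.length))); // prints row1
        }
    }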
    [02/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9118853f/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
    --
    diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
    index 63075ca..d979a19 100644
    --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
    +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
    @@ -2560,1729 +2560,1733 @@
     2552   * @throws IOException When starting the cluster fails.
     2553   */
     2554  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
    -2555    startMiniMapReduceCluster(2);
    -2556    return mrCluster;
    -2557  }
    -2558
    -2559  /**
    -2560   * Tasktracker has a bug where changing the hadoop.log.dir system property
    -2561   * will not change its internal static LOG_DIR variable.
    -2562   */
    -2563  private void forceChangeTaskLogDir() {
    -2564    Field logDirField;
    -2565    try {
    -2566      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
    -2567      logDirField.setAccessible(true);
    -2568
    -2569      Field modifiersField = Field.class.getDeclaredField("modifiers");
    -2570      modifiersField.setAccessible(true);
    -2571      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
    +2555    // Set a very high max-disk-utilization percentage to keep the NodeManagers from failing.
    +2556    conf.setIfUnset(
    +2557        "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
    +2558        "99.0");
    +2559    startMiniMapReduceCluster(2);
    +2560    return mrCluster;
    +2561  }
    +2562
    +2563  /**
    +2564   * Tasktracker has a bug where changing the hadoop.log.dir system property
    +2565   * will not change its internal static LOG_DIR variable.
    +2566   */
    +2567  private void forceChangeTaskLogDir() {
    +2568    Field logDirField;
    +2569    try {
    +2570      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
    +2571      logDirField.setAccessible(true);
     2572
    -2573      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    -2574    } catch (SecurityException e) {
    -2575      throw new RuntimeException(e);
    -2576    } catch (NoSuchFieldException e) {
    -2577      // TODO Auto-generated catch block
    -2578      throw new RuntimeException(e);
    -2579    } catch (IllegalArgumentException e) {
    -2580      throw new RuntimeException(e);
    -2581    } catch (IllegalAccessException e) {
    +2573      Field modifiersField = Field.class.getDeclaredField("modifiers");
    +2574      modifiersField.setAccessible(true);
    +2575      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
    +2576
    +2577      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
    +2578    } catch (SecurityException e) {
    +2579      throw new RuntimeException(e);
    +2580    } catch (NoSuchFieldException e) {
    +2581      // TODO Auto-generated catch block
     2582      throw new RuntimeException(e);
    -2583    }
    -2584  }
    -2585
    -2586  /**
    -2587   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
    -2588   * filesystem.
    -2589   * @param servers  The number of <code>TaskTracker</code>'s to start.
    -2590   * @throws IOException When starting the cluster fails.
    -2591   */
    -2592  private void startMiniMapReduceCluster(final int servers) throws IOException {
    -2593    if (mrCluster != null) {
    -2594      throw new IllegalStateException("MiniMRCluster is already running");
    -2595    }
    -2596    LOG.info("Starting mini mapreduce cluster...");
    -2597    setupClusterTestDir();
    -2598    createDirsAndSetProperties();
    -2599
    -2600    forceChangeTaskLogDir();
    -2601
    -2602    // hadoop2 specific settings
    -2603    // Tests were failing because this process used 6GB of virtual memory and was getting killed.
    -2604    // we up the VM usable so that processes don't get killed.
    -2605    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
    -2606
    -2607    // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
    -2608    // this avoids the problem by disabling speculative task execution in tests.
    -2609    conf.setBoolean("mapreduce.map.speculative", false);
    -2610    conf.setBoolean("mapreduce.reduce.speculative", false);
    -2611
    -2612
    -2613    // Allow the user to override FS URI for this map-reduce cluster to use.
    -2614    mrCluster = new MiniMRCluster(servers,
    -2615      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
    -2616      null, null, new JobConf(this.conf));
    -2617    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    -2618    if (jobConf == null) {
    -2619      jobConf = mrCluster.createJobConf();
    -2620    }
    -2621
    -2622    jobConf.set("mapreduce.cluster.local.dir",
    -2623      conf.get("mapreduce.cluster.local.dir"));

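    The new lines 2555-2558 above pre-set a YARN disk-health threshold before starting the mini MR cluster, using setIfUnset so a caller who has already set the key keeps their value. A minimal sketch of that Configuration idiom, assuming hadoop-common on the classpath:

    import org.apache.hadoop.conf.Configuration;

    public class DiskUtilizationSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // setIfUnset only writes when the key is absent, so explicit overrides win.
        conf.setIfUnset(
            "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
            "99.0");
        System.out.println(conf.get(
            "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage"));
      }
    }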