[09/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
index 723049d..30262e8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftUtilities.html
@@ -33,1020 +33,1457 @@
 025import java.util.ArrayList;
 026import java.util.List;
 027import java.util.Map;
-028
-029import org.apache.hadoop.hbase.Cell;
-030import org.apache.hadoop.hbase.CellBuilderFactory;
-031import org.apache.hadoop.hbase.CellBuilderType;
-032import org.apache.hadoop.hbase.CellUtil;
-033import org.apache.hadoop.hbase.CompareOperator;
-034import org.apache.hadoop.hbase.HConstants;
-035import org.apache.hadoop.hbase.HRegionInfo;
-036import org.apache.hadoop.hbase.HRegionLocation;
-037import org.apache.hadoop.hbase.KeepDeletedCells;
-038import org.apache.hadoop.hbase.NamespaceDescriptor;
-039import org.apache.hadoop.hbase.PrivateCellUtil;
-040import org.apache.hadoop.hbase.ServerName;
-041import org.apache.hadoop.hbase.TableName;
-042import org.apache.hadoop.hbase.client.Append;
-043import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-044import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-045import org.apache.hadoop.hbase.client.Consistency;
-046import org.apache.hadoop.hbase.client.Delete;
-047import org.apache.hadoop.hbase.client.Durability;
-048import org.apache.hadoop.hbase.client.Get;
-049import org.apache.hadoop.hbase.client.Increment;
-050import org.apache.hadoop.hbase.client.OperationWithAttributes;
-051import org.apache.hadoop.hbase.client.Put;
-052import org.apache.hadoop.hbase.client.Result;
-053import org.apache.hadoop.hbase.client.RowMutations;
-054import org.apache.hadoop.hbase.client.Scan;
-055import org.apache.hadoop.hbase.client.Scan.ReadType;
-056import org.apache.hadoop.hbase.client.TableDescriptor;
-057import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-058import org.apache.hadoop.hbase.filter.ParseFilter;
-059import org.apache.hadoop.hbase.io.compress.Compression;
-060import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-061import org.apache.hadoop.hbase.regionserver.BloomType;
-062import org.apache.hadoop.hbase.security.visibility.Authorizations;
-063import org.apache.hadoop.hbase.security.visibility.CellVisibility;
-064import org.apache.hadoop.hbase.thrift2.generated.TAppend;
-065import org.apache.hadoop.hbase.thrift2.generated.TBloomFilterType;
-066import org.apache.hadoop.hbase.thrift2.generated.TColumn;
-067import org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor;
-068import org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement;
-069import org.apache.hadoop.hbase.thrift2.generated.TColumnValue;
-070import org.apache.hadoop.hbase.thrift2.generated.TCompareOp;
-071import org.apache.hadoop.hbase.thrift2.generated.TCompressionAlgorithm;
-072import org.apache.hadoop.hbase.thrift2.generated.TConsistency;
-073import org.apache.hadoop.hbase.thrift2.generated.TDataBlockEncoding;
-074import org.apache.hadoop.hbase.thrift2.generated.TDelete;
-075import org.apache.hadoop.hbase.thrift2.generated.TDeleteType;
-076import org.apache.hadoop.hbase.thrift2.generated.TDurability;
-077import org.apache.hadoop.hbase.thrift2.generated.TGet;
-078import org.apache.hadoop.hbase.thrift2.generated.THRegionInfo;
-079import org.apache.hadoop.hbase.thrift2.generated.THRegionLocation;
-080import org.apache.hadoop.hbase.thrift2.generated.TIncrement;
-081import org.apache.hadoop.hbase.thrift2.generated.TKeepDeletedCells;
-082import org.apache.hadoop.hbase.thrift2.generated.TMutation;
-083import org.apache.hadoop.hbase.thrift2.generated.TNamespaceDescriptor;
-084import org.apache.hadoop.hbase.thrift2.generated.TPut;
-085import org.apache.hadoop.hbase.thrift2.generated.TReadType;
-086import org.apache.hadoop.hbase.thrift2.generated.TResult;
-087import org.apache.hadoop.hbase.thrift2.generated.TRowMutations;
-088import org.apache.hadoop.hbase.thrift2.generated.TScan;
-089import org.apache.hadoop.hbase.thrift2.generated.TServerName;
-090import org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor;
-091import org.apache.hadoop.hbase.thrift2.generated.TTableName;
-092import org.apache.hadoop.hbase.thrift2.generated.TTimeRange;
-093import org.apache.hadoop.hbase.util.Bytes;
-094import org.apache.yetus.audience.InterfaceAudience;
-095
-096import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils;
-097
-098@InterfaceAudience.Private
-099public final class ThriftUtilities {
-100
-101  private final static Cell[] EMPTY_CELL_ARRAY = new Cell[]{};
-102  private final static Result EMPTY_RESULT = 

[09/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/quotas/MasterQuotasObserver.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/MasterQuotasObserver.html b/devapidocs/org/apache/hadoop/hbase/quotas/MasterQuotasObserver.html
index 5308bec..214647b 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/MasterQuotasObserver.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/MasterQuotasObserver.html
@@ -256,7 +256,7 @@ implements MasterObserver
-postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, postBalance,
 postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, postCloneSnapshot,
 postCompletedCreateTableAction, postCompletedDeleteTableAction, postCompletedDisableTableAction,
 postCompletedEnableTableAction, postCompletedMergeRegionsAction, postCompletedModifyTableAction,
 postCompletedModifyTableAction, postCompletedSplitRegionAction, postCompletedTruncateTableAction,
 postCreateNamespace, postCreateTable, postDecommissionRegionServers, postDeleteSnapshot,
 postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, postEnableTable,
 postGetClusterMetrics, postGetLocks, postGetNamespaceDescriptor, postGetProcedures,
 postGetReplicationPeerConfig, postGetRSGroupInfo, postGetRSGroupInfoOfServer,
 postGetRSGroupInfoOfTable, postGetTableDescriptors, postGetTableNames,
 postListDecommissionedRegionServers, postListNamespaceDescriptors, postListReplicationPeers,
 postListRSGroups, postListSnapshot, postLockHeartbeat, postMergeRegions,
 postMergeRegionsCommitAction, postModifyNamespace, postModifyNamespace, postModifyTable,
 postModifyTable, postMove, postMoveServers, postMoveServersAndTables, postMoveTables,
 postRecommissionRegionServer, postRegionOffline, postRemoveReplicationPeer, postRemoveRSGroup,
 postRemoveServers, postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction,
 postRollBackSplitRegionAction, postSetNamespaceQuota, postSetSplitOrMergeEnabled,
 postSetTableQuota, postSetUserQuota, postSetUserQuota, postSetUserQuota, postSnapshot,
 postStartMaster, postTableFlush, postTransitReplicationPeerSyncReplicationState,
 postTruncateTable, postUnassign, postUpdateReplicationPeerConfig, preAbortProcedure,
 preAddReplicationPeer, preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch,
 preClearDeadServers, preCloneSnapshot, preCreateNamespace, preCreateTable, preCreateTableAction,
 preCreateTableRegionsInfos, preDecommissionRegionServers, preDeleteNamespace, preDeleteSnapshot,
 preDeleteTable, preDeleteTableAction, preDisableReplicationPeer, preDisableTable,
 preDisableTableAction, preEnableReplicationPeer, preEnableTable, preEnableTableAction,
 preGetClusterMetrics, preGetLocks, preGetNamespaceDescriptor, preGetProcedures,
 preGetReplicationPeerConfig, preGetRSGroupInfo, preGetRSGroupInfoOfServer,
 preGetRSGroupInfoOfTable, preGetTableDescriptors, preGetTableNames,
 preListDecommissionedRegionServers, preListNamespaceDescriptors, preListReplicationPeers,
 preListRSGroups, preListSnapshot, preLockHeartbeat, preMasterInitialization, preMergeRegions,
 preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, preModifyNamespace,
 preModifyTable, preModifyTable, preModifyTableAction, preModifyTableAction, preMove,
 preMoveServers, preMoveServersAndTables, preMoveTables, preRecommissionRegionServer,
 preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, preRequestLock,
 preRestoreSnapshot, preSetNamespaceQuota, preSetSplitOrMergeEnabled, preSetTableQuota,
 preSetUserQuota, preSetUserQuota, preSetUserQuota, preShutdown, preSnapshot, preSplitRegion,
 preSplitRegionAction, preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction,
 preStopMaster, preTableFlush, preTransitReplicationPeerSyncReplicationState, preTruncateTable,
 
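MasterQuotasObserver implements MasterObserver, picking up the default no-op hooks listed above and overriding only the post-deletion ones it needs to drop quota settings. A minimal sketch of that coprocessor pattern, assuming the HBase 2.x coprocessor API (the class and its cleanup logic here are hypothetical):

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical observer: reacts after a table is deleted, the same hook
// MasterQuotasObserver uses to remove quota settings for dropped tables.
public class ExampleMasterObserver implements MasterCoprocessor, MasterObserver {

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName) throws IOException {
    // Table-scoped cleanup would go here.
    System.out.println("Table deleted: " + tableName.getNameAsString());
  }
}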

[09/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
index 736388b..197b99d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
@@ -26,3624 +26,3599 @@
 018package org.apache.hadoop.hbase.client;
 019
 020import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-021
-022import com.google.protobuf.Message;
-023import com.google.protobuf.RpcChannel;
-024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.Arrays;
-027import java.util.Collections;
-028import java.util.EnumSet;
-029import java.util.HashMap;
-030import java.util.List;
-031import java.util.Map;
-032import java.util.Optional;
-033import java.util.Set;
-034import java.util.concurrent.CompletableFuture;
-035import java.util.concurrent.ConcurrentHashMap;
-036import java.util.concurrent.TimeUnit;
-037import java.util.concurrent.atomic.AtomicReference;
-038import java.util.function.BiConsumer;
-039import java.util.function.Function;
-040import java.util.function.Supplier;
-041import java.util.regex.Pattern;
-042import java.util.stream.Collectors;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import org.apache.hadoop.conf.Configuration;
-046import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-047import org.apache.hadoop.hbase.CacheEvictionStats;
-048import org.apache.hadoop.hbase.CacheEvictionStatsAggregator;
-049import org.apache.hadoop.hbase.ClusterMetrics;
-050import org.apache.hadoop.hbase.ClusterMetrics.Option;
-051import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-052import org.apache.hadoop.hbase.HConstants;
-053import org.apache.hadoop.hbase.HRegionLocation;
-054import org.apache.hadoop.hbase.MetaTableAccessor;
-055import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-056import org.apache.hadoop.hbase.NamespaceDescriptor;
-057import org.apache.hadoop.hbase.RegionLocations;
-058import org.apache.hadoop.hbase.RegionMetrics;
-059import org.apache.hadoop.hbase.RegionMetricsBuilder;
-060import org.apache.hadoop.hbase.ServerName;
-061import org.apache.hadoop.hbase.TableExistsException;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.TableNotDisabledException;
-064import org.apache.hadoop.hbase.TableNotEnabledException;
-065import org.apache.hadoop.hbase.TableNotFoundException;
-066import org.apache.hadoop.hbase.UnknownRegionException;
-067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-070import org.apache.hadoop.hbase.client.Scan.ReadType;
-071import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-072import org.apache.hadoop.hbase.client.replication.TableCFs;
-073import org.apache.hadoop.hbase.client.security.SecurityCapability;
-074import org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import org.apache.hadoop.hbase.replication.ReplicationException;
-080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import org.apache.hadoop.hbase.replication.SyncReplicationState;
-083import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-084import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-085import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-086import org.apache.hadoop.hbase.util.Bytes;
-087import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-088import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-089import org.apache.yetus.audience.InterfaceAudience;
-090import org.slf4j.Logger;
-091import org.slf4j.LoggerFactory;
-092
-093import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-094import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-095import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
-096import 
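RawAsyncHBaseAdmin is the CompletableFuture-based implementation behind AsyncAdmin, and its namespace calls drive master-side procedures such as the CreateNamespaceProcedure named in this page's title. A minimal usage sketch, assuming the HBase 2.x async client API (the namespace name is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncAdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection returns a CompletableFuture<AsyncConnection>.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      // createNamespace also returns a CompletableFuture; join() waits for
      // the master-side procedure to complete.
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build()).join();
      admin.listNamespaceDescriptors()
          .thenAccept(list -> list.forEach(ns -> System.out.println(ns.getName())))
          .join();
    }
  }
}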

[09/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html
index 1a0f64e..2290ca8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.html
@@ -26,849 +26,796 @@
 018 */
 019package org.apache.hadoop.hbase.thrift2;
 020
-021import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.appendFromThrift;
-022import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.columnFamilyDescriptorFromThrift;
-023import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.compareOpFromThrift;
-024import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.deleteFromThrift;
-025import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.deletesFromThrift;
-026import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.getFromThrift;
-027import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.getsFromThrift;
-028import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.incrementFromThrift;
-029import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.namespaceDescriptorFromHBase;
-030import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.namespaceDescriptorFromThrift;
-031import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.namespaceDescriptorsFromHBase;
-032import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.putFromThrift;
-033import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.putsFromThrift;
-034import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.resultFromHBase;
-035import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.resultsFromHBase;
-036import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.rowMutationsFromThrift;
-037import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.scanFromThrift;
-038import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.splitKeyFromThrift;
-039import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.tableDescriptorFromHBase;
-040import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.tableDescriptorFromThrift;
-041import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.tableDescriptorsFromHBase;
-042import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.tableNameFromThrift;
-043import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.tableNamesFromHBase;
-044import static org.apache.thrift.TBaseHelper.byteBufferToByteArray;
-045
-046import java.io.IOException;
-047import java.lang.reflect.InvocationHandler;
-048import java.lang.reflect.InvocationTargetException;
-049import java.lang.reflect.Method;
-050import java.lang.reflect.Proxy;
-051import java.nio.ByteBuffer;
-052import java.util.ArrayList;
-053import java.util.Collections;
-054import java.util.List;
-055import java.util.Map;
-056import java.util.concurrent.ConcurrentHashMap;
-057import java.util.concurrent.atomic.AtomicInteger;
-058import java.util.regex.Pattern;
-059
-060import org.apache.hadoop.conf.Configuration;
-061import org.apache.hadoop.hbase.DoNotRetryIOException;
-062import org.apache.hadoop.hbase.HRegionLocation;
-063import org.apache.hadoop.hbase.NamespaceDescriptor;
-064import org.apache.hadoop.hbase.TableName;
-065import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-066import org.apache.hadoop.hbase.client.RegionLocator;
-067import org.apache.hadoop.hbase.client.ResultScanner;
-068import org.apache.hadoop.hbase.client.Table;
-069import org.apache.hadoop.hbase.client.TableDescriptor;
-070import org.apache.hadoop.hbase.security.UserProvider;
-071import org.apache.hadoop.hbase.thrift.ThriftMetrics;
-072import org.apache.hadoop.hbase.thrift2.generated.TAppend;
-073import org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor;
-074import org.apache.hadoop.hbase.thrift2.generated.TCompareOp;
-075import org.apache.hadoop.hbase.thrift2.generated.TDelete;
-076import org.apache.hadoop.hbase.thrift2.generated.TGet;
-077import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
-078import org.apache.hadoop.hbase.thrift2.generated.THRegionLocation;
-079import org.apache.hadoop.hbase.thrift2.generated.TIOError;
-080import org.apache.hadoop.hbase.thrift2.generated.TIllegalArgument;
-081import org.apache.hadoop.hbase.thrift2.generated.TIncrement;
-082import org.apache.hadoop.hbase.thrift2.generated.TNamespaceDescriptor;
-083import org.apache.hadoop.hbase.thrift2.generated.TPut;
-084import org.apache.hadoop.hbase.thrift2.generated.TResult;
-085import org.apache.hadoop.hbase.thrift2.generated.TRowMutations;
-086import 
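The static imports above are the whole story of the thrift2 handler: convert the Thrift argument with a ThriftUtilities helper, run the ordinary client call, convert the result back. A minimal sketch of that round trip for a single get, assuming the generated thrift2 types and public ThriftUtilities helpers (error wrapping into TIOError is omitted):

import java.io.IOException;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
import org.apache.hadoop.hbase.thrift2.generated.TGet;
import org.apache.hadoop.hbase.thrift2.generated.TResult;

final class ThriftGetRoundTrip {
  // Sketch of the handler's conversion round trip:
  // TGet -> client Get, plain client call, Result -> TResult.
  static TResult get(Table table, TGet tget) throws IOException {
    Get get = ThriftUtilities.getFromThrift(tget);
    Result result = table.get(get);
    return ThriftUtilities.resultFromHBase(result);
  }
}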

[09/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.RandomQueueBalancer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.RandomQueueBalancer.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.RandomQueueBalancer.html
index 22b8b47..2eb21b3 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.RandomQueueBalancer.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.RandomQueueBalancer.html
@@ -120,6 +120,6 @@
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.html
index 1d90566..358639d 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.html
@@ -198,6 +198,6 @@
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcResponse.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcResponse.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcResponse.html
index fa23279..c7a246f 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcResponse.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcResponse.html
@@ -239,6 +239,6 @@
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.Context.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.Context.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.Context.html
index 28ca3e2..7e84557 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.Context.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.Context.html
@@ -185,6 +185,6 @@
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.html
index 7538b9e..cb92deb 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcScheduler.html
@@ -364,6 +364,6 @@
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcSchedulerContext.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcSchedulerContext.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcSchedulerContext.html
index fd3f6a9..8865fe8 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcSchedulerContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcSchedulerContext.html
@@ -120,6 +120,6 @@
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcServer.BlockingServiceAndInterface.html
--
diff --git 

[09/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
index 0f5a095..50bf692 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
@@ -78,8712 +78,8714 @@
 070import java.util.concurrent.locks.ReadWriteLock;
 071import java.util.concurrent.locks.ReentrantReadWriteLock;
 072import java.util.function.Function;
-073import org.apache.hadoop.conf.Configuration;
-074import org.apache.hadoop.fs.FileStatus;
-075import org.apache.hadoop.fs.FileSystem;
-076import org.apache.hadoop.fs.LocatedFileStatus;
-077import org.apache.hadoop.fs.Path;
-078import org.apache.hadoop.hbase.Cell;
-079import org.apache.hadoop.hbase.CellBuilderType;
-080import org.apache.hadoop.hbase.CellComparator;
-081import org.apache.hadoop.hbase.CellComparatorImpl;
-082import org.apache.hadoop.hbase.CellScanner;
-083import org.apache.hadoop.hbase.CellUtil;
-084import org.apache.hadoop.hbase.CompareOperator;
-085import org.apache.hadoop.hbase.CompoundConfiguration;
-086import org.apache.hadoop.hbase.DoNotRetryIOException;
-087import org.apache.hadoop.hbase.DroppedSnapshotException;
-088import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-089import org.apache.hadoop.hbase.HConstants;
-090import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-091import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-092import org.apache.hadoop.hbase.KeyValue;
-093import org.apache.hadoop.hbase.KeyValueUtil;
-094import org.apache.hadoop.hbase.NamespaceDescriptor;
-095import org.apache.hadoop.hbase.NotServingRegionException;
-096import org.apache.hadoop.hbase.PrivateCellUtil;
-097import org.apache.hadoop.hbase.RegionTooBusyException;
-098import org.apache.hadoop.hbase.Tag;
-099import org.apache.hadoop.hbase.TagUtil;
-100import org.apache.hadoop.hbase.UnknownScannerException;
-101import org.apache.hadoop.hbase.client.Append;
-102import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-103import org.apache.hadoop.hbase.client.CompactionState;
-104import org.apache.hadoop.hbase.client.Delete;
-105import org.apache.hadoop.hbase.client.Durability;
-106import org.apache.hadoop.hbase.client.Get;
-107import org.apache.hadoop.hbase.client.Increment;
-108import org.apache.hadoop.hbase.client.IsolationLevel;
-109import org.apache.hadoop.hbase.client.Mutation;
-110import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-111import org.apache.hadoop.hbase.client.Put;
-112import org.apache.hadoop.hbase.client.RegionInfo;
-113import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-114import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-115import org.apache.hadoop.hbase.client.Result;
-116import org.apache.hadoop.hbase.client.RowMutations;
-117import org.apache.hadoop.hbase.client.Scan;
-118import org.apache.hadoop.hbase.client.TableDescriptor;
-119import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-120import org.apache.hadoop.hbase.conf.ConfigurationManager;
-121import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-122import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-123import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-124import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-125import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-126import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-127import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-128import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-129import org.apache.hadoop.hbase.filter.FilterWrapper;
-130import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-131import org.apache.hadoop.hbase.io.HFileLink;
-132import org.apache.hadoop.hbase.io.HeapSize;
-133import org.apache.hadoop.hbase.io.TimeRange;
-134import org.apache.hadoop.hbase.io.hfile.BlockCache;
-135import org.apache.hadoop.hbase.io.hfile.HFile;
-136import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import org.apache.hadoop.hbase.ipc.RpcCall;
-139import org.apache.hadoop.hbase.ipc.RpcServer;
-140import org.apache.hadoop.hbase.mob.MobFileCache;
-141import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-142import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-143import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-144import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-145import 

[09/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.html
index 1124f8b..0c29054 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.html
@@ -1111,587 +1111,592 @@
 1103      blockStream.write(midKeyMetadata);
 1104      blockWriter.writeHeaderAndData(out);
 1105      if (cacheConf != null) {
-1106        HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
-1107        cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(nameForCaching,
-1108          rootLevelIndexPos, true, blockForCaching.getBlockType()), blockForCaching);
-1109      }
-1110    }
-1111
-1112    // Add root index block size
-1113    totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader();
-1114    totalBlockUncompressedSize +=
-1115        blockWriter.getUncompressedSizeWithoutHeader();
-1116
-1117      if (LOG.isTraceEnabled()) {
-1118        LOG.trace("Wrote a " + numLevels + "-level index with root level at pos "
-1119          + rootLevelIndexPos + ", " + rootChunk.getNumEntries()
-1120          + " root-level entries, " + totalNumEntries + " total entries, "
-1121          + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) +
-1122          " on-disk size, "
-1123          + StringUtils.humanReadableInt(totalBlockUncompressedSize) +
-1124          " total uncompressed size.");
-1125      }
-1126      return rootLevelIndexPos;
-1127    }
-1128
-1129    /**
-1130     * Writes the block index data as a single level only. Does not do any
-1131     * block framing.
-1132     *
-1133     * @param out the buffered output stream to write the index to. Typically a
-1134     *  stream writing into an {@link HFile} block.
-1135     * @param description a short description of the index being written. Used
-1136     *  in a log message.
-1137     * @throws IOException
-1138     */
-1139    public void writeSingleLevelIndex(DataOutput out, String description)
-1140        throws IOException {
-1141      expectNumLevels(1);
-1142
-1143      if (!singleLevelOnly)
-1144        throw new IOException("Single-level mode is turned off");
-1145
-1146      if (rootChunk.getNumEntries() > 0)
-1147        throw new IOException("Root-level entries already added in " +
-1148            "single-level mode");
-1149
-1150      rootChunk = curInlineChunk;
-1151      curInlineChunk = new BlockIndexChunk();
-1152
-1153      if (LOG.isTraceEnabled()) {
-1154        LOG.trace("Wrote a single-level " + description + " index with "
-1155          + rootChunk.getNumEntries() + " entries, " + rootChunk.getRootSize()
-1156          + " bytes");
-1157      }
-1158      rootChunk.writeRoot(out);
-1159    }
-1160
-1161    /**
-1162     * Split the current level of the block index into intermediate index
-1163     * blocks of permitted size and write those blocks to disk. Return the next
-1164     * level of the block index referencing those intermediate-level blocks.
-1165     *
-1166     * @param out
-1167     * @param currentLevel the current level of the block index, such as the a
-1168     *  chunk referencing all leaf-level index blocks
-1169     * @return the parent level block index, which becomes the root index after
-1170     *  a few (usually zero) iterations
-1171     * @throws IOException
-1172     */
-1173    private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out,
-1174        BlockIndexChunk currentLevel) throws IOException {
-1175      // Entries referencing intermediate-level blocks we are about to create.
-1176      BlockIndexChunk parent = new BlockIndexChunk();
-1177
-1178      // The current intermediate-level block index chunk.
-1179      BlockIndexChunk curChunk = new BlockIndexChunk();
-1180
-1181      for (int i = 0; i < currentLevel.getNumEntries(); ++i) {
-1182        curChunk.add(currentLevel.getBlockKey(i),
-1183          currentLevel.getBlockOffset(i), currentLevel.getOnDiskDataSize(i));
-1184
-1185        // HBASE-16288: We have to have at least minIndexNumEntries(16) items in the index so that
-1186        // we won't end up with too-many levels for a index with very large rowKeys. Also, if the
-1187        // first key is larger than maxChunkSize this will cause infinite recursion.
-1188        if (i >= minIndexNumEntries && curChunk.getRootSize() >= maxChunkSize) {
-1189          writeIntermediateBlock(out, parent, curChunk);
-1190        }
-1191      }
-1192
-1193      if (curChunk.getNumEntries() > 0) {
-1194        writeIntermediateBlock(out, parent, curChunk);
-1195 
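writeIntermediateLevel packs index entries into intermediate chunks, flushing a chunk only when both guards hold: at least minIndexNumEntries entries have been seen and the chunk has reached maxChunkSize. A toy model of that rule, not the HBase classes (sizes and thresholds here are made up):

import java.util.ArrayList;
import java.util.List;

public class IndexChunking {
  // Pack entry indexes into chunks; a chunk is flushed only once enough
  // entries have passed AND it is large enough in bytes.
  static List<List<Integer>> chunk(int[] entrySizes, int minEntries, int maxChunkBytes) {
    List<List<Integer>> chunks = new ArrayList<>();
    List<Integer> cur = new ArrayList<>();
    int curBytes = 0;
    for (int i = 0; i < entrySizes.length; i++) {
      cur.add(i);
      curBytes += entrySizes[i];
      // Mirrors the HBASE-16288 guard: both conditions must hold to flush.
      if (i >= minEntries && curBytes >= maxChunkBytes) {
        chunks.add(cur);
        cur = new ArrayList<>();
        curBytes = 0;
      }
    }
    if (!cur.isEmpty()) {
      chunks.add(cur); // trailing partial chunk, like the final writeIntermediateBlock
    }
    return chunks;
  }

  public static void main(String[] args) {
    int[] sizes = {40, 40, 40, 40, 40, 40};
    System.out.println(chunk(sizes, 2, 100)); // [[0, 1, 2], [3, 4, 5]]
  }
}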

[09/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import org.apache.hadoop.hbase.wal.WALFactory;
 136import org.apache.hadoop.hbase.wal.WALSplitter;
-137import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import org.apache.hadoop.ipc.RemoteException;
-143import org.apache.hadoop.security.UserGroupInformation;
-144import org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import org.apache.hadoop.util.ToolRunner;
-147import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import org.apache.yetus.audience.InterfaceAudience;
-150import org.apache.yetus.audience.InterfaceStability;
-151import org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
-168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
-171 *
-172 * <p>
-173 * Region consistency checks verify that hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS (.regioninfo files) all are in
-175 * accordance.
-176 * <p>
-177 * Table integrity checks verify that all possible row keys resolve to exactly
-178 * one region of a table.  This means there are no individual degenerate
-179 * or backwards regions; no holes between regions; and that there are no
-180 * overlapping regions.
-181 * <p>
-182 * The general repair strategy works in two phases:
-183 * <ol>
-184 * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
-185 * <li> Repair Region Consistency with hbase:meta and assignments
-186 * </ol>
-187 * <p>
-188 * For table integrity repairs, the tables' region directories are scanned
-189 * for .regioninfo files.  Each table's integrity is then verified.  If there
-190 * are any orphan regions (regions with no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
-193 * a new region is created and all data is merged into the new region.
-194 * <p>
-195 * Table integrity repairs deal solely with HDFS and could potentially be done
-196 * offline -- the hbase region servers or master do not need to be running.
-197 * This phase can eventually be used to completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * <p>
-200 * Region consistency requires three conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at the regionserver that was assigned to
-203 * with proper state in the master.
-204 * <p>
-205 * Region consistency repairs require hbase to be online so that hbck can
-206 * contact the HBase master and region servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of the region consistency information
-208 * is transient and less risky to repair.
-209 * <p>
-210 * If hbck is run from the 
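The class comment describes hbck as a command-line tool; a minimal sketch of kicking it off from Java, assuming its standard CLI entry point (the '-details' flag is one of its report options; in practice the tool is usually run as `hbase hbck`):

import org.apache.hadoop.hbase.util.HBaseFsck;

public class RunHbck {
  public static void main(String[] args) throws Exception {
    // Delegates to hbck's own main(); roughly equivalent to `hbase hbck -details`.
    HBaseFsck.main(new String[] {"-details"});
  }
}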

[09/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
index 333b785..da8def9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.OperationStatusCode.html
@@ -34,1359 +34,1365 @@
 026import java.util.List;
 027import java.util.UUID;
 028import java.util.regex.Pattern;
-029
-030import org.apache.commons.lang3.ArrayUtils;
-031import org.apache.hadoop.hbase.util.Bytes;
-032import org.apache.yetus.audience.InterfaceAudience;
-033
-034/**
-035 * HConstants holds a bunch of HBase-related constants
-036 */
-037@InterfaceAudience.Public
-038public final class HConstants {
-039  // NOTICE Please do not add a constants here, unless they are referenced by a lot of classes.
-040
-041  //Bytes.UTF8_ENCODING should be updated if this changed
-042  /** When we encode strings, we always specify UTF8 encoding */
-043  public static final String UTF8_ENCODING = "UTF-8";
-044
-045  //Bytes.UTF8_CHARSET should be updated if this changed
-046  /** When we encode strings, we always specify UTF8 encoding */
-047  public static final Charset UTF8_CHARSET = Charset.forName(UTF8_ENCODING);
-048  /**
-049   * Default block size for an HFile.
-050   */
-051  public final static int DEFAULT_BLOCKSIZE = 64 * 1024;
-052
-053  /** Used as a magic return value while optimized index key feature enabled(HBASE-7845) */
-054  public final static int INDEX_KEY_MAGIC = -2;
-055  /*
-056   * Name of directory that holds recovered edits written by the wal log
-057   * splitting code, one per region
-058   */
-059  public static final String RECOVERED_EDITS_DIR = "recovered.edits";
-060  /**
-061   * The first four bytes of Hadoop RPC connections
-062   */
-063  public static final byte[] RPC_HEADER = new byte[] { 'H', 'B', 'a', 's' };
-064  public static final byte RPC_CURRENT_VERSION = 0;
-065
-066  // HFileBlock constants. TODO THESE DEFINES BELONG IN HFILEBLOCK, NOT UP HERE.
-067  // Needed down in hbase-common though by encoders but these encoders should not be dealing
-068  // in the internals of hfileblocks. Fix encapsulation.
-069
-070  /** The size data structures with minor version is 0 */
-071  public static final int HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM = MAGIC_LENGTH + 2 * Bytes.SIZEOF_INT
-072      + Bytes.SIZEOF_LONG;
-073  /** The size of a version 2 HFile block header, minor version 1.
-074   * There is a 1 byte checksum type, followed by a 4 byte bytesPerChecksum
-075   * followed by another 4 byte value to store sizeofDataOnDisk.
-076   */
-077  public static final int HFILEBLOCK_HEADER_SIZE = HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM +
-078      Bytes.SIZEOF_BYTE + 2 * Bytes.SIZEOF_INT;
-079  /** Just an array of bytes of the right size. */
-080  public static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HFILEBLOCK_HEADER_SIZE];
-081
-082  //End HFileBlockConstants.
-083
-084  /**
-085   * Status codes used for return values of bulk operations.
-086   */
-087  @InterfaceAudience.Private
-088  public enum OperationStatusCode {
-089    NOT_RUN,
-090    SUCCESS,
-091    BAD_FAMILY,
-092    STORE_TOO_BUSY,
-093    SANITY_CHECK_FAILURE,
-094    FAILURE
-095  }
-096
-097  /** long constant for zero */
-098  public static final Long ZERO_L = Long.valueOf(0L);
-099  public static final String NINES = "99";
-100  public static final String ZEROES = "00";
-101
-102  // For migration
-103
-104  /** name of version file */
-105  public static final String VERSION_FILE_NAME = "hbase.version";
-106
-107  /**
-108   * Current version of file system.
-109   * Version 4 supports only one kind of bloom filter.
-110   * Version 5 changes versions in catalog table regions.
-111   * Version 6 enables blockcaching on catalog tables.
-112   * Version 7 introduces hfile -- hbase 0.19 to 0.20..
-113   * Version 8 introduces namespace
-114   */
-115  // public static final String FILE_SYSTEM_VERSION = "6";
-116  public static final String FILE_SYSTEM_VERSION = "8";
-117
-118  // Configuration parameters
-119
-120  //TODO: Is having HBase homed on port 60k OK?
-121
-122  /** Cluster is in distributed mode or not */
-123  public static final String CLUSTER_DISTRIBUTED = "hbase.cluster.distributed";
-124
-125  /** Config for pluggable load balancers */
-126  public static final String HBASE_MASTER_LOADBALANCER_CLASS = "hbase.master.loadbalancer.class";
-127
-128  /** Config for balancing the cluster by table */
-129  public static final String HBASE_MASTER_LOADBALANCE_BYTABLE = "hbase.master.loadbalance.bytable";
-130
-131  /** Config for the 
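The two block-header constants above reduce to simple arithmetic; a quick check, assuming MAGIC_LENGTH is the 8-byte block-type magic of the HFile format:

public class HeaderSizeCheck {
  public static void main(String[] args) {
    int MAGIC_LENGTH = 8;  // block-type magic (assumed, per the HFile v2 format)
    int SIZEOF_INT = 4, SIZEOF_LONG = 8, SIZEOF_BYTE = 1;

    // magic + two 4-byte size fields + an 8-byte offset field
    int noChecksum = MAGIC_LENGTH + 2 * SIZEOF_INT + SIZEOF_LONG;   // 24
    // + checksum type (1 byte) + bytesPerChecksum (4) + sizeofDataOnDisk (4)
    int withChecksum = noChecksum + SIZEOF_BYTE + 2 * SIZEOF_INT;   // 33

    System.out.println(noChecksum + " / " + withChecksum);          // 24 / 33
  }
}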

[09/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.ServerManagerForTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.ServerManagerForTest.html b/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.ServerManagerForTest.html
new file mode 100644
index 000..b308bce
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.ServerManagerForTest.html
@@ -0,0 +1,307 @@
[New Javadoc page: "TestRegionAssignedToMultipleRegionServers.ServerManagerForTest (Apache HBase 3.0.0-SNAPSHOT Test API)". Navigation chrome and scripts omitted; the recoverable content of the added page follows.]
+org.apache.hadoop.hbase.master.assignment
+Class TestRegionAssignedToMultipleRegionServers.ServerManagerForTest
+
+java.lang.Object
+  org.apache.hadoop.hbase.master.ServerManager
+    org.apache.hadoop.hbase.master.assignment.TestRegionAssignedToMultipleRegionServers.ServerManagerForTest
+
+Enclosing class:
+TestRegionAssignedToMultipleRegionServers
+
+private static final class TestRegionAssignedToMultipleRegionServers.ServerManagerForTest
+extends org.apache.hadoop.hbase.master.ServerManager
+
+Fields inherited from class org.apache.hadoop.hbase.master.ServerManager:
+FLUSHEDSEQUENCEID_FLUSHER_INTERVAL, FLUSHEDSEQUENCEID_FLUSHER_INTERVAL_DEFAULT, PERSIST_FLUSHEDSEQUENCEID, PERSIST_FLUSHEDSEQUENCEID_DEFAULT, WAIT_ON_REGIONSERVERS_INTERVAL, WAIT_ON_REGIONSERVERS_MAXTOSTART, WAIT_ON_REGIONSERVERS_MINTOSTART, WAIT_ON_REGIONSERVERS_TIMEOUT
+
+Constructor Summary:
+ServerManagerForTest(org.apache.hadoop.hbase.master.MasterServices master)
+
+Method Summary (instance, concrete):
+java.util.List<org.apache.hadoop.hbase.ServerName> createDestinationServersList()
+
+Methods inherited from class org.apache.hadoop.hbase.master.ServerManager:
+addServerToDrainList, areDeadServersInProgress, closeRegionSilentlyAndWait, countOfRegionServers, createDestinationServersList, expireServer, getAverageLoad, getDeadServers, getDrainingServersList, getFlushedSequenceIdByRegion, getInfoPort, getLastFlushedSequenceId, getLoad, getOnlineServers, getOnlineServersList, getOnlineServersListWithPredicator, getRsAdmin, getVersion, getVersionNumber, isClusterShutdown, isRegionInServerManagerStates, isServerDead, isServerOnline, loadLastFlushedSequenceIds, moveFromOnlineToDeadServers, regionServerReport, registerListener, removeDeletedRegionFromLoadedFlushedSequenceIds, removeRegion, removeRegions, removeServerFromDrainList, sendRegionWarmup, shutdownCluster, startChore, stop, unregisterListener, waitForRegionServers
+
+Methods inherited from class java.lang.Object:
+clone, equals, finalize, 
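ServerManagerForTest exists to pin down createDestinationServersList() so the assignment test is deterministic. A toy illustration of that subclass-and-override pattern (these are hypothetical stand-in classes, not the HBase ones):

import java.util.Arrays;
import java.util.List;

class ServerManagerBase {
  List<String> createDestinationServersList() {
    // Normally computed from live cluster state.
    return Arrays.asList("rs1", "rs2", "rs3");
  }
}

class ServerManagerForTest extends ServerManagerBase {
  @Override
  List<String> createDestinationServersList() {
    // Pin the candidate servers so an assignment test is reproducible.
    return Arrays.asList("rs1");
  }
}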

[09/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TestSchedulerQueueDeadLock.TableSharedProcedure.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TestSchedulerQueueDeadLock.TableSharedProcedure.html b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TestSchedulerQueueDeadLock.TableSharedProcedure.html
new file mode 100644
index 000..babdba0
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TestSchedulerQueueDeadLock.TableSharedProcedure.html
@@ -0,0 +1,165 @@
[New Javadoc page: "Uses of Class org.apache.hadoop.hbase.master.procedure.TestSchedulerQueueDeadLock.TableSharedProcedure (Apache HBase 3.0.0-SNAPSHOT Test API)". Navigation chrome omitted.]
+Packages that use TestSchedulerQueueDeadLock.TableSharedProcedure:
+org.apache.hadoop.hbase.master.procedure
+
+Subclasses of TestSchedulerQueueDeadLock.TableSharedProcedure in org.apache.hadoop.hbase.master.procedure:
+static class TestSchedulerQueueDeadLock.TableSharedProcedureWithId

diff --git a/testdevapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TestSchedulerQueueDeadLock.TableSharedProcedureWithId.html b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TestSchedulerQueueDeadLock.TableSharedProcedureWithId.html
new file mode 100644
index 000..e28149c
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TestSchedulerQueueDeadLock.TableSharedProcedureWithId.html
@@ -0,0 +1,125 @@
[New Javadoc page: "Uses of Class org.apache.hadoop.hbase.master.procedure.TestSchedulerQueueDeadLock.TableSharedProcedureWithId". Navigation chrome omitted.]
+No usage of org.apache.hadoop.hbase.master.procedure.TestSchedulerQueueDeadLock.TableSharedProcedureWithId

[The excerpt then jumps mid-stream into another Javadoc page from the same partial commit: org.apache.hadoop.hbase.util.ByteBufferUtils.Comparer. Navigation chrome omitted; the recoverable content follows.]
+org.apache.hadoop.hbase.util
+Class ByteBufferUtils.Comparer
+
+java.lang.Object
+  org.apache.hadoop.hbase.util.ByteBufferUtils.Comparer
+
+Direct Known Subclasses:
+ByteBufferUtils.ComparerHolder.PureJavaComparer, ByteBufferUtils.ComparerHolder.UnsafeComparer
+
+Enclosing class:
+ByteBufferUtils
+
+abstract static class ByteBufferUtils.Comparer
+extends java.lang.Object
+
+Constructor Summary:
+Comparer()
+
+Method Summary (instance, abstract, package private):
+abstract int compareTo(byte[] buf1, int o1, int l1, java.nio.ByteBuffer buf2, int o2, int l2)
+abstract int compareTo(java.nio.ByteBuffer buf1, int o1, int l1, java.nio.ByteBuffer buf2, int o2, int l2)
+
+Methods inherited from class java.lang.Object:
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
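Both compareTo overloads implement the same contract: unsigned, lexicographic comparison of two byte ranges, with the shorter range winning ties (PureJavaComparer is the fallback used when Unsafe is unavailable). A minimal pure-Java sketch of that contract:

import java.nio.ByteBuffer;

public class LexCompare {
  // Unsigned, lexicographic comparison of two byte ranges; shorter prefix wins ties.
  static int compareTo(byte[] buf1, int o1, int l1, ByteBuffer buf2, int o2, int l2) {
    int end1 = o1 + l1, end2 = o2 + l2;
    for (int i = o1, j = o2; i < end1 && j < end2; i++, j++) {
      int a = buf1[i] & 0xFF;       // treat bytes as unsigned
      int b = buf2.get(j) & 0xFF;   // absolute get: does not move the buffer position
      if (a != b) {
        return a - b;
      }
    }
    return l1 - l2;
  }

  public static void main(String[] args) {
    byte[] left = {1, 2, 3};
    ByteBuffer right = ByteBuffer.wrap(new byte[] {1, 2, 4});
    System.out.println(compareTo(left, 0, 3, right, 0, 3)); // negative: left < right
  }
}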

[09/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.html
deleted file mode 100644
index 6d373f8..000
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.html
+++ /dev/null
@@ -1,468 +0,0 @@
[Deleted page wrapper (doctype, "Source code" title, pre framing) omitted; the removed source follows.]
-001/**
-002 * Licensed to the Apache Software Foundation (ASF) under one
-003 * or more contributor license agreements.  See the NOTICE file
-004 * distributed with this work for additional information
-005 * regarding copyright ownership.  The ASF licenses this file
-006 * to you under the Apache License, Version 2.0 (the
-007 * "License"); you may not use this file except in compliance
-008 * with the License.  You may obtain a copy of the License at
-009 *
-010 * http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or agreed to in writing, software
-013 * distributed under the License is distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-015 * See the License for the specific language governing permissions and
-016 * limitations under the License.
-017 */
-018package org.apache.hadoop.hbase.client;
-019
-020import static org.junit.Assert.assertEquals;
-021import static org.junit.Assert.assertFalse;
-022import static org.junit.Assert.fail;
-023
-024import java.io.IOException;
-025import java.util.HashSet;
-026import java.util.List;
-027import java.util.Set;
-028import org.apache.hadoop.conf.Configuration;
-029import org.apache.hadoop.fs.Path;
-030import org.apache.hadoop.hbase.HBaseClassTestRule;
-031import org.apache.hadoop.hbase.HBaseTestingUtility;
-032import org.apache.hadoop.hbase.HColumnDescriptor;
-033import org.apache.hadoop.hbase.HConstants;
-034import org.apache.hadoop.hbase.HTableDescriptor;
-035import org.apache.hadoop.hbase.TableName;
-036import org.apache.hadoop.hbase.master.MasterFileSystem;
-037import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-038import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
-039import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
-040import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
-041import org.apache.hadoop.hbase.testclassification.ClientTests;
-042import org.apache.hadoop.hbase.testclassification.LargeTests;
-043import org.apache.hadoop.hbase.util.Bytes;
-044import org.apache.hadoop.hbase.util.FSUtils;
-045import org.junit.After;
-046import org.junit.AfterClass;
-047import org.junit.Before;
-048import org.junit.BeforeClass;
-049import org.junit.ClassRule;
-050import org.junit.Rule;
-051import org.junit.Test;
-052import org.junit.experimental.categories.Category;
-053import org.junit.rules.TestName;
-054
-055/**
-056 * Test restore snapshots from the client
-057 */
-058@Category({LargeTests.class, ClientTests.class})
-059public class TestRestoreSnapshotFromClient {
-060
-061  @ClassRule
-062  public static final HBaseClassTestRule CLASS_RULE =
-063      HBaseClassTestRule.forClass(TestRestoreSnapshotFromClient.class);
-064
-065  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-066
-067  protected final byte[] FAMILY = Bytes.toBytes("cf");
-068  protected final byte[] TEST_FAMILY2 = Bytes.toBytes("cf2");
-069
-070  protected TableName tableName;
-071  protected byte[] emptySnapshot;
-072  protected byte[] snapshotName0;
-073  protected byte[] snapshotName1;
-074  protected byte[] snapshotName2;
-075  protected int snapshot0Rows;
-076  protected int snapshot1Rows;
-077  protected Admin admin;
-078
-079  @Rule
-080  public TestName name = new 
TestName();
-081
-082  @BeforeClass
-083  public static void setupCluster() 
throws Exception {
-084
setupConf(TEST_UTIL.getConfiguration());
-085TEST_UTIL.startMiniCluster(3);
-086  }
-087
-088  protected static void 
setupConf(Configuration conf) {
-089
TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, 
true);
-090
TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
-091
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
-092
TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
-093
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 
6);
-094
TEST_UTIL.getConfiguration().setBoolean(
-095
"hbase.master.enabletable.roundrobin", true);
-096  }
-097
-098  @AfterClass
-099  public static void tearDownAfterClass() 
throws Exception {
-100TEST_UTIL.shutdownMiniCluster();
-101  }
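
Distilled, the deleted test above drives the client-side snapshot/restore cycle through roughly the following Admin calls (a sketch only, assuming an open Connection conn; table and snapshot names are illustrative):

  // Sketch of the snapshot/restore cycle the deleted test exercised.
  try (Admin admin = conn.getAdmin()) {
    TableName table = TableName.valueOf("testtb");   // illustrative name
    admin.snapshot("snaptb0", table);                // snapshot the enabled table
    admin.disableTable(table);                       // restore needs the table offline
    admin.restoreSnapshot("snaptb0");                // roll back to snapshot contents
    admin.enableTable(table);
  }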

[09/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.html
index f9d05cb..67f0fc6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/JVMClusterUtil.html
@@ -31,316 +31,325 @@
 023import java.lang.reflect.Constructor;
 024import 
java.lang.reflect.InvocationTargetException;
 025import java.util.List;
-026
-027import 
org.apache.yetus.audience.InterfaceAudience;
-028import org.slf4j.Logger;
-029import org.slf4j.LoggerFactory;
-030import 
org.apache.hadoop.conf.Configuration;
-031import 
org.apache.hadoop.hbase.CoordinatedStateManager;
-032import 
org.apache.hadoop.hbase.master.HMaster;
-033import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
-034
-035/**
-036 * Utility used running a cluster all in 
the one JVM.
-037 */
-038@InterfaceAudience.Private
-039public class JVMClusterUtil {
-040  private static final Logger LOG = 
LoggerFactory.getLogger(JVMClusterUtil.class);
-041
-042  /**
-043   * Datastructure to hold RegionServer 
Thread and RegionServer instance
-044   */
-045  public static class RegionServerThread 
extends Thread {
-046private final HRegionServer 
regionServer;
-047
-048public RegionServerThread(final 
HRegionServer r, final int index) {
-049  super(r, "RS:" + index + ";" + 
r.getServerName().toShortString());
-050  this.regionServer = r;
-051}
-052
-053/** @return the region server */
-054public HRegionServer 
getRegionServer() {
-055  return this.regionServer;
-056}
-057
-058/**
-059 * Block until the region server has 
come online, indicating it is ready
-060 * to be used.
-061 */
-062public void waitForServerOnline() {
-063  // The server is marked online 
after the init method completes inside of
-064  // the HRS#run method.  HRS#init 
can fail for whatever region.  In those
-065  // cases, we'll jump out of the run 
without setting online flag.  Check
-066  // stopRequested so we don't wait 
here a flag that will never be flipped.
-067  
regionServer.waitForServerOnline();
-068}
-069  }
-070
-071  /**
-072   * Creates a {@link 
RegionServerThread}.
-073   * Call 'start' on the returned thread 
to make it run.
-074   * @param c Configuration to use.
-075   * @param hrsc Class to create.
-076   * @param index Used distinguishing the 
object returned.
-077   * @throws IOException
-078   * @return Region server added.
-079   */
-080  public static 
JVMClusterUtil.RegionServerThread createRegionServerThread(final Configuration 
c,
-081      final Class<? extends HRegionServer> hrsc, final int index) throws IOException {
-082HRegionServer server;
-083try {
-084      Constructor<? extends HRegionServer> ctor = hrsc.getConstructor(Configuration.class);
-085  ctor.setAccessible(true);
-086  server = ctor.newInstance(c);
-087} catch (InvocationTargetException 
ite) {
-088  Throwable target = 
ite.getTargetException();
-089  throw new RuntimeException("Failed 
construction of RegionServer: " +
-090hrsc.toString() + 
((target.getCause() != null)?
-091  target.getCause().getMessage(): 
""), target);
-092} catch (Exception e) {
-093  IOException ioe = new 
IOException();
-094  ioe.initCause(e);
-095  throw ioe;
-096}
-097return new 
JVMClusterUtil.RegionServerThread(server, index);
-098  }
-099
+026import java.util.concurrent.TimeUnit;
+027import java.util.function.Supplier;
+028
+029import 
org.apache.yetus.audience.InterfaceAudience;
+030import org.slf4j.Logger;
+031import org.slf4j.LoggerFactory;
+032import 
org.apache.hadoop.conf.Configuration;
+033import 
org.apache.hadoop.hbase.master.HMaster;
+034import 
org.apache.hadoop.hbase.regionserver.HRegionServer;
+035
+036/**
+037 * Utility used running a cluster all in 
the one JVM.
+038 */
+039@InterfaceAudience.Private
+040public class JVMClusterUtil {
+041  private static final Logger LOG = 
LoggerFactory.getLogger(JVMClusterUtil.class);
+042
+043  /**
+044   * Datastructure to hold RegionServer 
Thread and RegionServer instance
+045   */
+046  public static class RegionServerThread 
extends Thread {
+047private final HRegionServer 
regionServer;
+048
+049public RegionServerThread(final 
HRegionServer r, final int index) {
+050  super(r, "RS:" + index + ";" + 
r.getServerName().toShortString());
+051  this.regionServer = r;
+052}
+053
+054/** @return the region server */
+055public HRegionServer 
getRegionServer() {
+056  return this.regionServer;
+057}
+058
+059/**
+060 * Block until the region server has 
come online, indicating it is ready
+061 * to be used.
+062
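
For context, the factory shown on the removed side above is driven like this (a hedged usage sketch; the Configuration setup is assumed):

  // Sketch: spin up one in-JVM region server via JVMClusterUtil.
  Configuration conf = HBaseConfiguration.create();
  JVMClusterUtil.RegionServerThread rst =
      JVMClusterUtil.createRegionServerThread(conf, HRegionServer.class, 0);
  rst.start();                 // executes HRegionServer#run on the new thread
  rst.waitForServerOnline();   // block until the server flags itself online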

[09/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.CreateTableProcedureOnHDFSFailure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.CreateTableProcedureOnHDFSFailure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.CreateTableProcedureOnHDFSFailure.html
index 98a803b..cea7107 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.CreateTableProcedureOnHDFSFailure.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.CreateTableProcedureOnHDFSFailure.html
@@ -280,7 +280,7 @@ extends org.apache.hadoop.hbase.master.procedure.CreateTableProcedure
 
 
 Methods inherited from class org.apache.hadoop.hbase.procedure2.Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, 
doExecute, doRollback, elapsedTime, getChildrenLatch, getException, 
getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, 
getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, 
getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, 
getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, 
hasTimeout, haveSameParent, holdLock, incChildrenLatch, isBypass, isFailed, 
isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, 
setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, 
setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, 
setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, 
setTimeoutFailure, toString, toStringClass, toStringDetails, toStringSimpleSB, 
updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, 
wasExecuted
+addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, 
doExecute, doRollback, elapsedTime, getChildrenLatch, getException, 
getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, 
getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, 
getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, 
getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, 
hasTimeout, haveSameParent, holdLock, incChildrenLatch, isBypass, isFailed, 
isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, 
setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, 
setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, 
setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, 
setTimeoutFailure, skipPersistence, toString, toStringClass, toStringDetails, 
toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, 
updateTimestamp, wasExecuted
 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestNamespaceProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestNamespaceProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestNamespaceProcedure.html
index 0f85c5c..a26a3be 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestNamespaceProcedure.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestNamespaceProcedure.html
@@ -259,7 +259,7 @@ implements org.apache.hadoop.hbase.master.procedure.TableProcedureInterface
 
 Methods inherited from class org.apache.hadoop.hbase.procedure2.Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, 
doExecute, doRollback, elapsedTime, getChildrenLatch, getException, 
getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, 
getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, 
getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, 
getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, 
hasTimeout, haveSameParent, holdLock, incChildrenLatch, isBypass, isFailed, 
isFinished, isInitializing, isRunnable, isSuccess, isWaiting, 
isYieldAfterExecutionStep, removeStackIndex, setAbortFailure, setChildrenLatch, 
setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, 
setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, 
setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, 
toString, toStringClass, toStringDetails, toStringSimpleSB, toStringState, 
updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, waitInitialized, wasExecuted
+addStackIndex, afterReplay, 

[09/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
index 566f410..da040ad 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
@@ -341,8361 +341,8425 @@
 333  private final int 
rowLockWaitDuration;
 334  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 3;
 335
-336  // The internal wait duration to 
acquire a lock before read/update
-337  // from the region. It is not per row. 
The purpose of this wait time
-338  // is to avoid waiting a long time 
while the region is busy, so that
-339  // we can release the IPC handler soon 
enough to improve the
-340  // availability of the region server. 
It can be adjusted by
-341  // tuning configuration 
"hbase.busy.wait.duration".
-342  final long busyWaitDuration;
-343  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-344
-345  // If updating multiple rows in one 
call, wait longer,
-346  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-347  // we can limit the max multiplier.
-348  final int maxBusyWaitMultiplier;
-349
-350  // Max busy wait duration. There is no 
point to wait longer than the RPC
-351  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-352  final long maxBusyWaitDuration;
-353
-354  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-355  // in bytes
-356  final long maxCellSize;
-357
-358  // Number of mutations for minibatch 
processing.
-359  private final int miniBatchSize;
+336  private Path regionDir;
+337  private FileSystem walFS;
+338
+339  // The internal wait duration to 
acquire a lock before read/update
+340  // from the region. It is not per row. 
The purpose of this wait time
+341  // is to avoid waiting a long time 
while the region is busy, so that
+342  // we can release the IPC handler soon 
enough to improve the
+343  // availability of the region server. 
It can be adjusted by
+344  // tuning configuration 
"hbase.busy.wait.duration".
+345  final long busyWaitDuration;
+346  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+347
+348  // If updating multiple rows in one 
call, wait longer,
+349  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+350  // we can limit the max multiplier.
+351  final int maxBusyWaitMultiplier;
+352
+353  // Max busy wait duration. There is no 
point to wait longer than the RPC
+354  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+355  final long maxBusyWaitDuration;
+356
+357  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+358  // in bytes
+359  final long maxCellSize;
 360
-361  // negative number indicates infinite 
timeout
-362  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-363  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-364
-365  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
-366
-367  /**
-368   * The sequence ID that was 
enLongAddered when this region was opened.
-369   */
-370  private long openSeqNum = 
HConstants.NO_SEQNUM;
-371
-372  /**
-373   * The default setting for whether to 
enable on-demand CF loading for
-374   * scan requests to this region. 
Requests can override it.
-375   */
-376  private boolean 
isLoadingCfsOnDemandDefault = false;
-377
-378  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-379  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
+361  // Number of mutations for minibatch 
processing.
+362  private final int miniBatchSize;
+363
+364  // negative number indicates infinite 
timeout
+365  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+366  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
+367
+368  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+369
+370  /**
+371   * The sequence ID that was 
enLongAddered when this region was opened.
+372   */
+373  private long openSeqNum = 
HConstants.NO_SEQNUM;
+374
+375  /**
+376   * The default setting for whether to 
enable on-demand CF loading for
+377   * scan requests to this region. 
Requests can override it.
+378   */
+379  private boolean 
isLoadingCfsOnDemandDefault = false;
 380
-381  //
-382  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-383  // have to be conservative in how we 
replay wals. For each store, we calculate
-384  // the maxSeqId up to which the store 
was flushed. 
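
The busy-wait fields in the hunk above are configuration-driven; a sketch of how such knobs are read follows (only "hbase.busy.wait.duration" is named in the source comments, so treat the other key names here as assumptions for illustration):

  // Sketch: reading the busy-wait tuning described in the comments above.
  Configuration conf = HBaseConfiguration.create();
  long busyWaitDuration = conf.getLong("hbase.busy.wait.duration",
      HConstants.DEFAULT_HBASE_RPC_TIMEOUT);             // key named in the comment
  int maxBusyWaitMultiplier =
      conf.getInt("hbase.busy.wait.multiplier.max", 2);  // assumed key name
  // Per the comment, there is no point waiting past the RPC purge timeout.
  long maxBusyWaitDuration =
      conf.getLong("hbase.ipc.client.call.purge.timeout", // assumed key name
          2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT);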

[09/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
index 25f458d..20e3eaa 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
@@ -28,3711 +28,3756 @@
 020import java.io.FileNotFoundException;
 021import java.io.IOException;
 022import java.io.InterruptedIOException;
-023import 
java.lang.reflect.InvocationTargetException;
-024import java.net.BindException;
-025import java.net.InetSocketAddress;
-026import java.net.UnknownHostException;
-027import java.nio.ByteBuffer;
-028import java.util.ArrayList;
-029import java.util.Arrays;
-030import java.util.Collections;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.Map.Entry;
-036import java.util.NavigableMap;
-037import java.util.Set;
-038import java.util.TreeSet;
-039import 
java.util.concurrent.ConcurrentHashMap;
-040import 
java.util.concurrent.ConcurrentMap;
-041import java.util.concurrent.TimeUnit;
-042import 
java.util.concurrent.atomic.AtomicBoolean;
-043import 
java.util.concurrent.atomic.AtomicLong;
-044import 
java.util.concurrent.atomic.LongAdder;
-045import 
org.apache.commons.lang3.mutable.MutableObject;
-046import 
org.apache.hadoop.conf.Configuration;
-047import org.apache.hadoop.fs.Path;
-048import 
org.apache.hadoop.hbase.ByteBufferExtendedCell;
-049import 
org.apache.hadoop.hbase.CacheEvictionStats;
-050import 
org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import org.apache.hadoop.hbase.Cell;
-052import 
org.apache.hadoop.hbase.CellScannable;
-053import 
org.apache.hadoop.hbase.CellScanner;
-054import 
org.apache.hadoop.hbase.CellUtil;
-055import 
org.apache.hadoop.hbase.CompareOperator;
-056import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-057import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-058import 
org.apache.hadoop.hbase.HBaseIOException;
-059import 
org.apache.hadoop.hbase.HConstants;
-060import 
org.apache.hadoop.hbase.MultiActionResultTooLarge;
-061import 
org.apache.hadoop.hbase.NotServingRegionException;
-062import 
org.apache.hadoop.hbase.PrivateCellUtil;
-063import 
org.apache.hadoop.hbase.RegionTooBusyException;
-064import org.apache.hadoop.hbase.Server;
-065import 
org.apache.hadoop.hbase.ServerName;
-066import 
org.apache.hadoop.hbase.TableName;
-067import 
org.apache.hadoop.hbase.UnknownScannerException;
-068import 
org.apache.hadoop.hbase.client.Append;
-069import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-070import 
org.apache.hadoop.hbase.client.Delete;
-071import 
org.apache.hadoop.hbase.client.Durability;
-072import 
org.apache.hadoop.hbase.client.Get;
-073import 
org.apache.hadoop.hbase.client.Increment;
-074import 
org.apache.hadoop.hbase.client.Mutation;
-075import 
org.apache.hadoop.hbase.client.Put;
-076import 
org.apache.hadoop.hbase.client.RegionInfo;
-077import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-078import 
org.apache.hadoop.hbase.client.Result;
-079import 
org.apache.hadoop.hbase.client.Row;
-080import 
org.apache.hadoop.hbase.client.RowMutations;
-081import 
org.apache.hadoop.hbase.client.Scan;
-082import 
org.apache.hadoop.hbase.client.TableDescriptor;
-083import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-084import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
-085import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-086import 
org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-087import 
org.apache.hadoop.hbase.exceptions.ScannerResetException;
-088import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-089import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-090import 
org.apache.hadoop.hbase.io.TimeRange;
-091import 
org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
-092import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-093import 
org.apache.hadoop.hbase.ipc.PriorityFunction;
-094import 
org.apache.hadoop.hbase.ipc.QosPriority;
-095import 
org.apache.hadoop.hbase.ipc.RpcCallContext;
-096import 
org.apache.hadoop.hbase.ipc.RpcCallback;
-097import 
org.apache.hadoop.hbase.ipc.RpcScheduler;
-098import 
org.apache.hadoop.hbase.ipc.RpcServer;
-099import 
org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
-100import 
org.apache.hadoop.hbase.ipc.RpcServerFactory;
-101import 
org.apache.hadoop.hbase.ipc.RpcServerInterface;
-102import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-103import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
-104import 

[09/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
index 2c14c50..43c66a8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.KeepAliveWorkerThread.html
@@ -46,2104 +46,2113 @@
 038import 
java.util.concurrent.atomic.AtomicLong;
 039import java.util.stream.Collectors;
 040import java.util.stream.Stream;
-041import 
org.apache.hadoop.conf.Configuration;
-042import 
org.apache.hadoop.hbase.HConstants;
-043import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-044import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-045import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-046import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-047import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-048import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-049import 
org.apache.hadoop.hbase.security.User;
-050import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-051import 
org.apache.hadoop.hbase.util.IdLock;
-052import 
org.apache.hadoop.hbase.util.NonceKey;
-053import 
org.apache.hadoop.hbase.util.Threads;
-054import 
org.apache.yetus.audience.InterfaceAudience;
-055import org.slf4j.Logger;
-056import org.slf4j.LoggerFactory;
-057
-058import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-059import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-060
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-062
-063/**
-064 * Thread Pool that executes the 
submitted procedures.
-065 * The executor has a ProcedureStore 
associated.
-066 * Each operation is logged and on 
restart the pending procedures are resumed.
-067 *
-068 * Unless the Procedure code throws an 
error (e.g. invalid user input)
-069 * the procedure will complete (at some 
point in time), On restart the pending
-070 * procedures are resumed and the once 
failed will be rolledback.
-071 *
-072 * The user can add procedures to the 
executor via submitProcedure(proc)
-073 * check for the finished state via 
isFinished(procId)
-074 * and get the result via 
getResult(procId)
-075 */
-076@InterfaceAudience.Private
-077public class ProcedureExecutor<TEnvironment> {
-078  private static final Logger LOG = 
LoggerFactory.getLogger(ProcedureExecutor.class);
-079
-080  public static final String 
CHECK_OWNER_SET_CONF_KEY = "hbase.procedure.check.owner.set";
-081  private static final boolean 
DEFAULT_CHECK_OWNER_SET = false;
-082
-083  public static final String 
WORKER_KEEP_ALIVE_TIME_CONF_KEY =
-084  
"hbase.procedure.worker.keep.alive.time.msec";
-085  private static final long 
DEFAULT_WORKER_KEEP_ALIVE_TIME = TimeUnit.MINUTES.toMillis(1);
-086
-087  /**
-088   * {@link #testing} is non-null when 
ProcedureExecutor is being tested. Tests will try to
-089   * break PE having it fail at various 
junctures. When non-null, testing is set to an instance of
-090   * the below internal {@link Testing} 
class with flags set for the particular test.
-091   */
-092  Testing testing = null;
-093
-094  /**
-095   * Class with parameters describing how 
to fail/die when in testing-context.
-096   */
-097  public static class Testing {
-098protected boolean killIfHasParent = 
true;
-099protected boolean killIfSuspended = 
false;
-100
-101/**
-102 * Kill the PE BEFORE we store state 
to the WAL. Good for figuring out if a Procedure is
-103 * persisting all the state it needs 
to recover after a crash.
-104 */
-105protected boolean 
killBeforeStoreUpdate = false;
-106protected boolean 
toggleKillBeforeStoreUpdate = false;
-107
-108/**
-109 * Set when we want to fail AFTER 
state has been stored into the WAL. Rarely used. HBASE-20978
-110 * is about a case where memory-state 
was being set after store to WAL where a crash could
-111 * cause us to get stuck. This flag 
allows killing at what was a vulnerable time.
-112 */
-113protected boolean 
killAfterStoreUpdate = false;
-114protected boolean 
toggleKillAfterStoreUpdate = false;
-115
-116protected boolean 
shouldKillBeforeStoreUpdate() {
-117  final boolean kill = 
this.killBeforeStoreUpdate;
-118  if 
(this.toggleKillBeforeStoreUpdate) {
-119this.killBeforeStoreUpdate = 
!kill;
-120LOG.warn("Toggle KILL before 
store update to: " + this.killBeforeStoreUpdate);
-121  }
-122  return kill;
-123}
-124
-125protected boolean 
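
The class javadoc above spells out the client pattern: submitProcedure(proc), then isFinished(procId), then getResult(procId). Roughly, in use (a sketch, assuming a running ProcedureExecutor executor and some Procedure proc):

  // Sketch of the submit / poll / fetch-result pattern from the javadoc above.
  long procId = executor.submitProcedure(proc);   // enqueue a root procedure
  while (!executor.isFinished(procId)) {
    Thread.sleep(100);                            // polling for brevity only
  }
  Procedure<?> result = executor.getResult(procId);
  if (result != null && result.isFailed()) {
    LOG.warn("Procedure {} failed: {}", procId, result.getException());
  }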

[09/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
index c372545..af3b364 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.MoveRegionAction.html
@@ -1279,322 +1279,339 @@
 1271    List<RegionInfo> lastFewRegions = new ArrayList<>();
 1272    // assign the remaining by going through the list and try to assign to servers one-by-one
 1273    int serverIdx = RANDOM.nextInt(numServers);
-1274    for (RegionInfo region : unassignedRegions) {
+1274    OUTER : for (RegionInfo region : unassignedRegions) {
 1275      boolean assigned = false;
-1276      for (int j = 0; j < numServers; j++) { // try all servers one by one
+1276      INNER : for (int j = 0; j < numServers; j++) { // try all servers one by one
 1277        ServerName serverName = servers.get((j + serverIdx) % numServers);
 1278        if (!cluster.wouldLowerAvailability(region, serverName)) {
 1279          List<RegionInfo> serverRegions =
 1280              assignments.computeIfAbsent(serverName, k -> new ArrayList<>());
-1281          serverRegions.add(region);
-1282          cluster.doAssignRegion(region, serverName);
-1283          serverIdx = (j + serverIdx + 1) % numServers; //remain from next server
-1284          assigned = true;
-1285          break;
-1286        }
-1287      }
-1288      if (!assigned) {
-1289        lastFewRegions.add(region);
-1290      }
-1291    }
-1292    // just sprinkle the rest of the regions on random regionservers. The balanceCluster will
-1293    // make it optimal later. we can end up with this if numReplicas > numServers.
-1294    for (RegionInfo region : lastFewRegions) {
-1295      int i = RANDOM.nextInt(numServers);
-1296      ServerName server = servers.get(i);
-1297      List<RegionInfo> serverRegions = assignments.computeIfAbsent(server, k -> new ArrayList<>());
-1298      serverRegions.add(region);
-1299      cluster.doAssignRegion(region, server);
-1300    }
-1301    return assignments;
-1302  }
-1303
-1304  protected Cluster createCluster(List<ServerName> servers, Collection<RegionInfo> regions) {
-1305    // Get the snapshot of the current assignments for the regions in question, and then create
-1306    // a cluster out of it. Note that we might have replicas already assigned to some servers
-1307    // earlier. So we want to get the snapshot to see those assignments, but this will only contain
-1308    // replicas of the regions that are passed (for performance).
-1309    Map<ServerName, List<RegionInfo>> clusterState = getRegionAssignmentsByServer(regions);
-1310
-1311    for (ServerName server : servers) {
-1312      if (!clusterState.containsKey(server)) {
-1313        clusterState.put(server, EMPTY_REGION_LIST);
-1314      }
-1315    }
-1316    return new Cluster(regions, clusterState, null, this.regionFinder,
-1317        rackManager);
-1318  }
-1319
-1320  private List<ServerName> findIdleServers(List<ServerName> servers) {
-1321    return this.services.getServerManager()
-1322        .getOnlineServersListWithPredicator(servers, IDLE_SERVER_PREDICATOR);
-1323  }
-1324
-1325  /**
-1326   * Used to assign a single region to a random server.
-1327   */
-1328  @Override
-1329  public ServerName randomAssignment(RegionInfo regionInfo, List<ServerName> servers)
-1330      throws HBaseIOException {
-1331    metricsBalancer.incrMiscInvocations();
-1332    if (servers != null && servers.contains(masterServerName)) {
-1333      if (shouldBeOnMaster(regionInfo)) {
-1334        return masterServerName;
-1335      }
-1336      if (!LoadBalancer.isTablesOnMaster(getConf())) {
-1337        // Guarantee we do not put any regions on master
-1338        servers = new ArrayList<>(servers);
-1339        servers.remove(masterServerName);
-1340      }
-1341    }
-1342
-1343    int numServers = servers == null ? 0 : servers.size();
-1344    if (numServers == 0) {
-1345      LOG.warn("Wanted to retain assignment but no servers to assign to");
-1346      return null;
-1347    }
-1348    if (numServers == 1) { // Only one server, nothing fancy we can do here
-1349      return servers.get(0);
-1350    }
-1351    List<ServerName> idleServers = findIdleServers(servers);
-1352    if (idleServers.size() == 1) {
-1353      return idleServers.get(0);
-1354    }
-1355    final List<ServerName> finalServers = idleServers.isEmpty() ?
-1356        servers : idleServers;
-1357    List<RegionInfo> regions = Lists.newArrayList(regionInfo);
[09/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
index d11176a..2c14c50 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
@@ -982,1050 +982,1168 @@
 974  }
 975
 976  /**
-977   * Add a new root-procedure to the 
executor.
-978   * @param proc the new procedure to 
execute.
-979   * @param nonceKey the registered 
unique identifier for this operation from the client or process.
-980   * @return the procedure id, that can 
be used to monitor the operation
-981   */
-982  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
-983  justification = "FindBugs is blind 
to the check-for-null")
-984  public long submitProcedure(Procedure<TEnvironment> proc, NonceKey nonceKey) {
-985    Preconditions.checkArgument(lastProcId.get() >= 0);
-986
-987prepareProcedure(proc);
-988
-989final Long currentProcId;
-990if (nonceKey != null) {
-991  currentProcId = 
nonceKeysToProcIdsMap.get(nonceKey);
-992  
Preconditions.checkArgument(currentProcId != null,
-993"Expected nonceKey=" + nonceKey + 
" to be reserved, use registerNonce(); proc=" + proc);
-994} else {
-995  currentProcId = nextProcId();
-996}
-997
-998// Initialize the procedure
-999proc.setNonceKey(nonceKey);
-1000
proc.setProcId(currentProcId.longValue());
-1001
-1002// Commit the transaction
-1003store.insert(proc, null);
-1004LOG.debug("Stored {}", proc);
-1005
-1006// Add the procedure to the 
executor
-1007return pushProcedure(proc);
-1008  }
-1009
-1010  /**
-1011   * Add a set of new root-procedure to 
the executor.
-1012   * @param procs the new procedures to 
execute.
-1013   */
-1014  // TODO: Do we need to take nonces 
here?
-1015  public void submitProcedures(Procedure<TEnvironment>[] procs) {
-1016    Preconditions.checkArgument(lastProcId.get() >= 0);
-1017    if (procs == null || procs.length <= 0) {
-1018  return;
-1019}
-1020
-1021// Prepare procedure
-1022    for (int i = 0; i < procs.length; ++i) {
-1023  
prepareProcedure(procs[i]).setProcId(nextProcId());
-1024}
-1025
-1026// Commit the transaction
-1027store.insert(procs);
-1028if (LOG.isDebugEnabled()) {
-1029  LOG.debug("Stored " + 
Arrays.toString(procs));
-1030}
-1031
-1032// Add the procedure to the 
executor
-1033    for (int i = 0; i < procs.length; ++i) {
-1034  pushProcedure(procs[i]);
-1035}
-1036  }
-1037
-1038  private Procedure<TEnvironment> prepareProcedure(Procedure<TEnvironment> proc) {
-1039
Preconditions.checkArgument(proc.getState() == ProcedureState.INITIALIZING);
-1040
Preconditions.checkArgument(!proc.hasParent(), "unexpected parent", proc);
-1041if (this.checkOwnerSet) {
-1042  
Preconditions.checkArgument(proc.hasOwner(), "missing owner");
-1043}
-1044return proc;
-1045  }
-1046
-1047  private long pushProcedure(Procedure<TEnvironment> proc) {
-1048final long currentProcId = 
proc.getProcId();
+977   * Bypass a procedure. If the procedure 
is set to bypass, all the logic in
+978   * execute/rollback will be ignored and 
it will return success, whatever.
+979   * It is used to recover buggy stuck 
procedures, releasing the lock resources
+980   * and letting other procedures to run. 
Bypassing one procedure (and its ancestors will
+981   * be bypassed automatically) may leave 
the cluster in a middle state, e.g. region
+982   * not assigned, or some hdfs files 
left behind. After getting rid of those stuck procedures,
+983   * the operators may have to do some 
clean up on hdfs or schedule some assign procedures
+984   * to let region online. DO AT YOUR OWN 
RISK.
+985   * <p>
+986   * A procedure can be bypassed only 
if
+987   * 1. The procedure is in state of 
RUNNABLE, WAITING, WAITING_TIMEOUT
+988   * or it is a root procedure without 
any child.
+989   * 2. No other worker thread is 
executing it
+990   * 3. No child procedure has been 
submitted
+991   *
+992   * <p>
+993   * If all the requirements are meet, 
the procedure and its ancestors will be
+994   * bypassed and persisted to WAL.
+995   *
+996   * <p>
+997   * If the procedure is in WAITING 
state, will set it to RUNNABLE add it to run queue.
+998   * TODO: What about WAITING_TIMEOUT?
+999   * @param id the procedure id
+1000   * @param lockWait time to wait lock
+1001   * @param force if force set to true, 
we will bypass the procedure even if it is 

[09/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html 
b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
index 1372fe5..1983a9e 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
@@ -1175,466 +1175,470 @@
 
 
 static HBaseClassTestRule
-TestServerLoadDurability.CLASS_RULE
+TestHbck.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAllowPartialScanResultCache.CLASS_RULE
+TestServerLoadDurability.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestFromClientSide3.CLASS_RULE
+TestAllowPartialScanResultCache.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestHTableMultiplexerFlushCache.CLASS_RULE
+TestFromClientSide3.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestRawAsyncTableScan.CLASS_RULE
+TestHTableMultiplexerFlushCache.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestIncrementFromClientSideWithCoprocessor.CLASS_RULE
+TestRawAsyncTableScan.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestFromClientSide.CLASS_RULE
+TestIncrementFromClientSideWithCoprocessor.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestConnectionImplementation.CLASS_RULE
+TestFromClientSide.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestScanWithoutFetchingData.CLASS_RULE
+TestConnectionImplementation.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncTableGetMultiThreaded.CLASS_RULE
+TestScanWithoutFetchingData.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestSnapshotCloneIndependence.CLASS_RULE
+TestAsyncTableGetMultiThreaded.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestCIDeleteRpcTimeout.CLASS_RULE
+TestSnapshotCloneIndependence.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestZKAsyncRegistry.CLASS_RULE
+TestCIDeleteRpcTimeout.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncProcedureAdminApi.CLASS_RULE
+TestZKAsyncRegistry.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncClusterAdminApi2.CLASS_RULE
+TestAsyncProcedureAdminApi.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestCIGetOperationTimeout.CLASS_RULE
+TestAsyncClusterAdminApi2.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAlwaysSetScannerId.CLASS_RULE
+TestCIGetOperationTimeout.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestSnapshotFromClient.CLASS_RULE
+TestAlwaysSetScannerId.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestRestoreSnapshotFromClient.CLASS_RULE
+TestSnapshotFromClient.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncSingleRequestRpcRetryingCaller.CLASS_RULE
+TestRestoreSnapshotFromClient.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAdmin2.CLASS_RULE
+TestAsyncSingleRequestRpcRetryingCaller.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestCloneSnapshotFromClient.CLASS_RULE
+TestAdmin2.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncRegionAdminApi.CLASS_RULE
+TestCloneSnapshotFromClient.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestPutDeleteEtcCellIteration.CLASS_RULE
+TestAsyncRegionAdminApi.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestMvccConsistentScanner.CLASS_RULE
+TestPutDeleteEtcCellIteration.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncTableBatch.CLASS_RULE
+TestMvccConsistentScanner.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestCompleteResultScanResultCache.CLASS_RULE
+TestAsyncTableBatch.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestCIGetRpcTimeout.CLASS_RULE
+TestCompleteResultScanResultCache.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestScannersFromClientSide2.CLASS_RULE
+TestCIGetRpcTimeout.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncTableScanAll.CLASS_RULE
+TestScannersFromClientSide2.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncTableAdminApi.CLASS_RULE
+TestAsyncTableScanAll.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestBlockEvictionFromClient.CLASS_RULE
+TestAsyncTableAdminApi.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestCIBadHostname.CLASS_RULE
+TestBlockEvictionFromClient.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncTableAdminApi2.CLASS_RULE
+TestCIBadHostname.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncQuotaAdminApi.CLASS_RULE
+TestAsyncTableAdminApi2.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncMetaRegionLocator.CLASS_RULE
+TestAsyncQuotaAdminApi.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncTableNoncedRetry.CLASS_RULE
+TestAsyncMetaRegionLocator.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestConnectionUtils.CLASS_RULE
+TestAsyncTableNoncedRetry.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncTableScanMetrics.CLASS_RULE
+TestConnectionUtils.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestFromClientSideScanExcpetionWithCoprocessor.CLASS_RULE
+TestAsyncTableScanMetrics.CLASS_RULE
 
 
 static HBaseClassTestRule
-TestAsyncDecommissionAdminApi.CLASS_RULE
+TestFromClientSideScanExcpetionWithCoprocessor.CLASS_RULE
 
 
 static 
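
Every CLASS_RULE entry in the cross-reference above follows the same declaration pattern, mirroring the TestRestoreSnapshotFromClient source earlier in this digest (the class and category here are placeholders):

  // The declaration behind each CLASS_RULE listed above.
  @Category({ MediumTests.class })
  public class TestSomething {                    // placeholder test class
    @ClassRule
    public static final HBaseClassTestRule CLASS_RULE =
        HBaseClassTestRule.forClass(TestSomething.class);
  }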

[09/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index 7c60d28..54d332f 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -276,9 +276,9 @@
 
 org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType
 org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.State
-org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory
-org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches
 org.apache.hadoop.hbase.io.hfile.BlockType
+org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches
+org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory
 org.apache.hadoop.hbase.io.hfile.BlockPriority
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
index c12ae36..91a2ed4 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
@@ -353,9 +353,9 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl.SourceStorage
 org.apache.hadoop.hbase.ipc.CallEvent.Type
 org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.BufferCallAction
+org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl.SourceStorage
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
index deb19ba..dfed8e7 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
@@ -293,10 +293,10 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.mapreduce.CellCounter.CellCounterMapper.Counters
-org.apache.hadoop.hbase.mapreduce.SyncTable.SyncMapper.Counter
 org.apache.hadoop.hbase.mapreduce.RowCounter.RowCounterMapper.Counters
 org.apache.hadoop.hbase.mapreduce.TableSplit.Version
+org.apache.hadoop.hbase.mapreduce.CellCounter.CellCounterMapper.Counters
+org.apache.hadoop.hbase.mapreduce.SyncTable.SyncMapper.Counter
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index d1b7e01..3ca26df 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 

[09/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInPrepRegion.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInPrepRegion.html
 
b/testdevapidocs/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInPrepRegion.html
index 501420c..ee94889 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInPrepRegion.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/TestIOFencing.BlockCompactionsInPrepRegion.html
@@ -233,7 +233,7 @@ extends 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/org/apache/hadoop/hbase/TestIOFencing.CompactionBlockerRegion.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/TestIOFencing.CompactionBlockerRegion.html
 
b/testdevapidocs/org/apache/hadoop/hbase/TestIOFencing.CompactionBlockerRegion.html
index e2ab629..abb9e50 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/TestIOFencing.CompactionBlockerRegion.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/TestIOFencing.CompactionBlockerRegion.html
@@ -260,7 +260,7 @@ extends org.apache.hadoop.hbase.regionserver.HRegion
 
 
 Methods inherited from class org.apache.hadoop.hbase.regionserver.HRegion
-addRegionToSnapshot, append, append, areWritesEnabled, batchMutate, 
batchMutate, batchMutate, batchReplay, blockUpdates, bulkLoadHFiles, 
bulkLoadHFiles, checkAndMutate, checkAndRowMutate, checkFamilies, 
checkReadOnly, checkReadsEnabled, checkSplit, checkTimestamps, close, close, 
closeRegionOperation, closeRegionOperation, compact, compactStores, 
computeHDFSBlocksDistribution, computeHDFSBlocksDistribution, createHRegion, 
createHRegion, decrementCompactionsQueuedCount, delete, deregisterChildren, 
doRegionCompactionPrep, equals, execService, flush, flushcache, get, get, get, 
getBlockedRequestsCount, getCellComparator, getCheckAndMutateChecksFailed, 
getCheckAndMutateChecksPassed, getCompactionState, getCompactPriority, 
getCoprocessorHost, getCpRequestsCount, getDataInMemoryWithoutWAL, 
getEarliestFlushTimeForAllStores, getEffectiveDurability, getFilesystem, 
getFilteredReadRequestsCount, getHDFSBlocksDistribution, getLoadStatistics, 
getLockedRows, getMaxFlushedSeqId, getMaxStoreSeqId, getMemStoreDataSize, getMemStoreFlushSize, getMemStoreHeapSize, 
getMemStoreOffHeapSize, getMetrics, getMVCC, getNextSequenceId, 
getNumMutationsWithoutWAL, getOldestHfileTs, getOldestSeqIdOfStore, 
getOpenSeqNum, getReadLockCount, getReadPoint, getReadPoint, 
getReadRequestsCount, getRegionDir, getRegionDir, getRegionFileSystem, 
getRegionInfo, getRegionServicesForStores, getReplicationScope, getRowLock, 
getRowLock, getRowLockInternal, getScanner, getScanner, getSmallestReadPoint, 
getSplitPolicy, getStore, getStoreFileList, getStoreFileOpenAndCloseThreadPool, 
getStoreOpenAndCloseThreadPool, getStores, getTableDescriptor, getWAL, 
getWriteRequestsCount, hashCode, hasReferences, heapSize, increment, increment, 
incrementCompactionsQueuedCount, incrementFlushesQueuedCount, initialize, 
instantiateHStore, instantiateRegionScanner, instantiateRegionScanner, 
internalFlushcache, internalFlushCacheAndCommit, internalPrepareFlushCache, 
isAvailable, isClosed, isClosing, isLoadingCfsOnDemandDefault, isMergeable, isReadOnly, isSplittable, mutateRow, mutateRowsWithLocks, 
onConfigurationChange, openHRegion, openHRegion, openHRegion, openHRegion, 
openHRegion, openHRegion, openHRegion, openHRegion, openHRegion, openHRegion, 
openReadOnlyFileSystemHRegion, prepareDelete, prepareDeleteTimestamps, 
processRowsWithLocks, processRowsWithLocks, processRowsWithLocks, put, 
refreshStoreFiles, refreshStoreFiles, registerChildren, registerService, 
replayRecoveredEditsIfAny, reportCompactionRequestEnd, 
reportCompactionRequestFailure, reportCompactionRequestStart, 
requestCompaction, requestCompaction, requestFlush, restoreEdit, rowIsInRange, 
rowIsInRange, setClosing, setCoprocessorHost, setReadsEnabled, 
setTimeoutForWriteLock, startRegionOperation, startRegionOperation, toString, 
unblockUpdates, waitForFlushes, waitForFlushes, waitForFlushesAndCompactions, 
warmupHRegion, writeRegionOpenMarker
+addRegionToSnapshot, append, append, areWritesEnabled, batchMutate, 
batchMutate, batchMutate, batchReplay, blockUpdates, bulkLoadHFiles, 
bulkLoadHFiles, checkAndMutate, checkAndRowMutate, checkFamilies, 
checkReadOnly, checkReadsEnabled, checkSplit, checkTimestamps, close, close, 
closeRegionOperation, closeRegionOperation, compact, compactStores, 
computeHDFSBlocksDistribution, computeHDFSBlocksDistribution, createHRegion, 
createHRegion, decrementCompactionsQueuedCount, delete, deregisterChildren, 
doRegionCompactionPrep, dropMemStoreContents, equals, execService, flush, 
flushcache, get, get, get, getBlockedRequestsCount, 

[09/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
index 9ec6338..c0da1d3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class ProcedureExecutor.Testing
+public static class ProcedureExecutor.Testing
 extends java.lang.Object
 Class with parameters describing how to fail/die when in testing-context.
 
@@ -148,14 +148,18 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 protected boolean
-killIfSuspended
+killIfHasParent
 
 
 protected boolean
-toggleKillAfterStoreUpdate
+killIfSuspended
 
 
 protected boolean
+toggleKillAfterStoreUpdate
+
+
+protected boolean
 toggleKillBeforeStoreUpdate
 
 
@@ -204,7 +208,8 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 protected boolean
-shouldKillBeforeStoreUpdate(boolean isSuspended)
+shouldKillBeforeStoreUpdate(boolean isSuspended,
+    boolean hasParent)
 
 
 
@@ -228,13 +233,22 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 Field Detail
+
+
+
+
+
+killIfHasParent
+protected boolean killIfHasParent
+
+
 
 
 
 
 
 killIfSuspended
-protected boolean killIfSuspended
+protected boolean killIfSuspended
 
 
 
@@ -243,7 +257,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 killBeforeStoreUpdate
-protected boolean killBeforeStoreUpdate
+protected boolean killBeforeStoreUpdate
 Kill the PE BEFORE we store state to the WAL. Good for 
figuring out if a Procedure is
  persisting all the state it needs to recover after a crash.
 
@@ -254,7 +268,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 toggleKillBeforeStoreUpdate
-protected boolean toggleKillBeforeStoreUpdate
+protected boolean toggleKillBeforeStoreUpdate
 
 
 
@@ -263,7 +277,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 killAfterStoreUpdate
-protected boolean killAfterStoreUpdate
+protected boolean killAfterStoreUpdate
 Set when we want to fail AFTER state has been stored into 
the WAL. Rarely used. HBASE-20978
  is about a case where memory-state was being set after store to WAL where a 
crash could
  cause us to get stuck. This flag allows killing at what was a vulnerable 
time.
@@ -275,7 +289,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 toggleKillAfterStoreUpdate
-protected boolean toggleKillAfterStoreUpdate
+protected boolean toggleKillAfterStoreUpdate
 
 
 
@@ -292,7 +306,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 Testing
-public Testing()
+public Testing()
 
 
 
@@ -309,16 +323,17 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 shouldKillBeforeStoreUpdate
-protected boolean shouldKillBeforeStoreUpdate()
+protected boolean shouldKillBeforeStoreUpdate()
 
 
-
+
 
 
 
 
 shouldKillBeforeStoreUpdate
-protected boolean shouldKillBeforeStoreUpdate(boolean isSuspended)
+protected boolean shouldKillBeforeStoreUpdate(boolean isSuspended,
+    boolean hasParent)
 
 
 
@@ -327,7 +342,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 shouldKillAfterStoreUpdate
-protectedbooleanshouldKillAfterStoreUpdate()
+protectedbooleanshouldKillAfterStoreUpdate()
 
 
 
@@ -336,7 +351,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 shouldKillAfterStoreUpdate
-protectedbooleanshouldKillAfterStoreUpdate(booleanisSuspended)
+protectedbooleanshouldKillAfterStoreUpdate(booleanisSuspended)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
index 09af38d..b73cd3a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class ProcedureExecutor.WorkerMonitor
+private final class ProcedureExecutor.WorkerMonitor
 extends InlineChore
 
 
@@ -277,7 

[09/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName name of the directory used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir) && fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
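The hunk above replaces the hard-coded "/tmp" with the caller-supplied tmpDirName. A small sketch of that sideline step against the Hadoop FileSystem API; the helper class and method names are illustrative assumptions, not HBase code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: resolve and create the directory used to sideline an
// old recovered-edits file, instead of assuming a fixed "/tmp".
final class SidelineDirExample {
  static Path ensureSidelineDir(Configuration conf, String tmpDirName) throws java.io.IOException {
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path(tmpDirName);
    if (!fs.exists(tmp)) {
      fs.mkdirs(tmp); // mirrors the patched code path above
    }
    return tmp;
  }
}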
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a writer for that path. Caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Map<byte[], Long> maxSeqIdInStores =
-1536  regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayList<Cell> keptCells = new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List<Entry> entries = buffer.entryBuffer;
-1570  if (entries.isEmpty()) {
-1571LOG.warn("got an empty buffer, 
skipping");

[09/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
index d2d8da1..5bbbf0c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
@@ -90,391 +90,392 @@
 082  static final String 
DEFAULT_WAL_PROVIDER = Providers.defaultProvider.name();
 083
 084  public static final String 
META_WAL_PROVIDER = "hbase.wal.meta_provider";
-085  static final String 
DEFAULT_META_WAL_PROVIDER = Providers.defaultProvider.name();
-086
-087  final String factoryId;
-088  private final WALProvider provider;
-089  // The meta updates are written to a 
different wal. If this
-090  // regionserver holds meta regions, 
then this ref will be non-null.
-091  // lazily initialized; most RegionServers don't deal with META
-092  private final AtomicReference<WALProvider> metaProvider = new AtomicReference<>();
-093
-094  /**
-095   * Configuration-specified WAL Reader 
used when a custom reader is requested
-096   */
-097  private final Class<? extends AbstractFSWALProvider.Reader> logReaderClass;
-098
-099  /**
-100   * How long to attempt opening 
in-recovery wals
-101   */
-102  private final int timeoutMillis;
-103
-104  private final Configuration conf;
-105
-106  // Used for the singleton WALFactory, 
see below.
-107  private WALFactory(Configuration conf) 
{
-108// this code is duplicated here so we 
can keep our members final.
-109// until we've moved reader/writer 
construction down into providers, this initialization must
-110// happen prior to provider 
initialization, in case they need to instantiate a reader/writer.
-111timeoutMillis = 
conf.getInt("hbase.hlog.open.timeout", 30);
-112/* TODO Both of these are probably 
specific to the fs wal provider */
-113logReaderClass = 
conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
-114  
AbstractFSWALProvider.Reader.class);
-115this.conf = conf;
-116// end required early 
initialization
-117
-118// this instance can't create wals, 
just reader/writers.
-119provider = null;
-120factoryId = SINGLETON_ID;
-121  }
-122
-123  @VisibleForTesting
-124  public Class<? extends WALProvider> getProviderClass(String key, String defaultValue) {
-125try {
-126  Providers provider = 
Providers.valueOf(conf.get(key, defaultValue));
-127  if (provider != 
Providers.defaultProvider) {
-128// User gives a wal provider 
explicitly, just use that one
-129return provider.clazz;
-130  }
-131  // AsyncFSWAL has better 
performance in most cases, and also uses less resources, we will try
-132  // to use it if possible. But it deeply hacks into the internals of DFSClient so will be easily
-133  // broken when upgrading hadoop. If 
it is broken, then we fall back to use FSHLog.
-134  if (AsyncFSWALProvider.load()) {
-135return 
AsyncFSWALProvider.class;
-136  } else {
-137return FSHLogProvider.class;
-138  }
-139} catch (IllegalArgumentException 
exception) {
-140  // Fall back to them specifying a 
class name
-141  // Note that the passed default 
class shouldn't actually be used, since the above only fails
-142  // when there is a config value 
present.
-143  return conf.getClass(key, 
Providers.defaultProvider.clazz, WALProvider.class);
-144}
-145  }
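A rough, self-contained model of the selection logic above (the enum values mirror WALFactory.Providers shown in this file; the returned class names and the "does AsyncFSWALProvider.load() succeed" probe are simplified to strings and a boolean):

// Explicitly configured provider wins; otherwise prefer the async provider
// and fall back to the FSHLog-based one when it cannot be loaded.
final class WalProviderChoiceModel {
  enum Providers { defaultProvider, filesystem, multiwal, asyncfs }

  static String choose(String configured, boolean asyncLoads) {
    if (configured != null && !configured.equals(Providers.defaultProvider.name())) {
      return configured;                       // user asked for this one
    }
    return asyncLoads ? "AsyncFSWALProvider" : "FSHLogProvider";
  }

  public static void main(String[] args) {
    System.out.println(choose(null, true));          // AsyncFSWALProvider
    System.out.println(choose("filesystem", true));  // filesystem
    System.out.println(choose(null, false));         // FSHLogProvider
  }
}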
-146
-147  static WALProvider createProvider(Class<? extends WALProvider> clazz) throws IOException {
-148LOG.info("Instantiating WALProvider 
of type {}", clazz);
-149try {
-150  return 
clazz.getDeclaredConstructor().newInstance();
-151} catch (Exception e) {
-152  LOG.error("couldn't set up 
WALProvider, the configured class is " + clazz);
-153  LOG.debug("Exception details for 
failure to load WALProvider.", e);
-154  throw new IOException("couldn't set 
up WALProvider", e);
-155}
-156  }
-157
-158  /**
-159   * @param conf must not be null, will 
keep a reference to read params in later reader/writer
-160   *  instances.
-161   * @param factoryId a unique identifier 
for this factory. used i.e. by filesystem implementations
-162   *  to make a directory
-163   */
-164  public WALFactory(Configuration conf, 
String factoryId) throws IOException {
-165// default 
enableSyncReplicationWALProvider is true, only disable 
SyncReplicationWALProvider
-166// for HMaster or HRegionServer which 
take system table only. See HBASE-1
-167this(conf, factoryId, true);
-168  }
-169
-170  /**
-171   * @param conf must not be null, will 
keep a reference to read params in later reader/writer
-172   *  

[09/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index 95f2a65..073d0d0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -931,7 +931,7 @@
 923InitMetaProcedure initMetaProc = 
null;
 924if 
(assignmentManager.getRegionStates().getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO)
 925  .isOffline()) {
-926  Optional<Procedure<?>> optProc = procedureExecutor.getProcedures().stream()
+926  Optional<Procedure<MasterProcedureEnv>> optProc = procedureExecutor.getProcedures().stream()
 927.filter(p -> p instanceof InitMetaProcedure).findAny();
 928  if (optProc.isPresent()) {
 929initMetaProc = 
(InitMetaProcedure) optProc.get();
@@ -3210,566 +3210,567 @@
 3202  cpHost.preGetProcedures();
 3203}
 3204
-3205final List<Procedure<?>> procList = this.procedureExecutor.getProcedures();
-3206
-3207if (cpHost != null) {
-3208  
cpHost.postGetProcedures(procList);
-3209}
-3210
-3211return procList;
-3212  }
-3213
-3214  @Override
-3215  public List<LockedResource> getLocks() throws IOException {
-3216if (cpHost != null) {
-3217  cpHost.preGetLocks();
-3218}
-3219
-3220MasterProcedureScheduler 
procedureScheduler =
-3221  
procedureExecutor.getEnvironment().getProcedureScheduler();
-3222
-3223final List<LockedResource> lockedResources = procedureScheduler.getLocks();
-3224
-3225if (cpHost != null) {
-3226  
cpHost.postGetLocks(lockedResources);
-3227}
-3228
-3229return lockedResources;
-3230  }
-3231
-3232  /**
-3233   * Returns the list of table 
descriptors that match the specified request
-3234   * @param namespace the namespace to 
query, or null if querying for all
-3235   * @param regex The regular expression 
to match against, or null if querying for all
-3236   * @param tableNameList the list of 
table names, or null if querying for all
-3237   * @param includeSysTables False to 
match only against userspace tables
-3238   * @return the list of table 
descriptors
-3239   */
-3240  public List<TableDescriptor> listTableDescriptors(final String namespace, final String regex,
-3241  final List<TableName> tableNameList, final boolean includeSysTables)
-3242  throws IOException {
-3243List<TableDescriptor> htds = new ArrayList<>();
-3244if (cpHost != null) {
-3245  
cpHost.preGetTableDescriptors(tableNameList, htds, regex);
-3246}
-3247htds = getTableDescriptors(htds, 
namespace, regex, tableNameList, includeSysTables);
-3248if (cpHost != null) {
-3249  
cpHost.postGetTableDescriptors(tableNameList, htds, regex);
-3250}
-3251return htds;
-3252  }
-3253
-3254  /**
-3255   * Returns the list of table names 
that match the specified request
-3256   * @param regex The regular expression 
to match against, or null if querying for all
-3257   * @param namespace the namespace to 
query, or null if querying for all
-3258   * @param includeSysTables False to 
match only against userspace tables
-3259   * @return the list of table names
-3260   */
-3261  public List<TableName> listTableNames(final String namespace, final String regex,
-3262  final boolean includeSysTables) throws IOException {
-3263List<TableDescriptor> htds = new ArrayList<>();
-3264if (cpHost != null) {
-3265  cpHost.preGetTableNames(htds, 
regex);
-3266}
-3267htds = getTableDescriptors(htds, 
namespace, regex, null, includeSysTables);
-3268if (cpHost != null) {
-3269  cpHost.postGetTableNames(htds, 
regex);
-3270}
-3271List<TableName> result = new ArrayList<>(htds.size());
-3272for (TableDescriptor htd: htds) result.add(htd.getTableName());
-3273return result;
-3274  }
-3275
-3276  /**
-3277   * @return list of table descriptors after filtering by regex and whether to include system
-3278   *tables, etc.
-3279   * @throws IOException
-3280   */
-3281  private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds,
-3282  final String namespace, final String regex, final List<TableName> tableNameList,
-3283  final boolean includeSysTables)
-3284  throws IOException {
-3285if (tableNameList == null || 
tableNameList.isEmpty()) {
-3286  // request for all 
TableDescriptors
-3287  Collection<TableDescriptor> allHtds;
-3288  if (namespace != null && namespace.length() > 0) {
-3289// Do a check on the namespace 
existence. Will fail if does not exist.
-3290
this.clusterSchemaService.getNamespace(namespace);
-3291

[09/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
@@ -540,1205 +540,1204 @@
 532  sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
 534if (rLoads == null) {
-535  // There was nothing there
-536  rLoads = new ArrayDeque<>();
-537} else if (rLoads.size() >= numRegionLoadsToRemember) {
-538  rLoads.remove();
-539}
-540rLoads.add(new 
BalancerRegionLoad(rm));
-541
loads.put(Bytes.toString(regionName), rLoads);
-542  });
-543});
-544
-545for(CostFromRegionLoadFunction cost : 
regionLoadFunctions) {
-546  cost.setLoads(loads);
-547}
-548  }
-549
-550  protected void initCosts(Cluster 
cluster) {
-551for (CostFunction c:costFunctions) 
{
-552  c.init(cluster);
-553}
-554  }
-555
-556  protected void 
updateCostsWithAction(Cluster cluster, Action action) {
-557for (CostFunction c : costFunctions) 
{
-558  c.postAction(action);
-559}
-560  }
-561
-562  /**
-563   * Get the names of the cost 
functions
-564   */
-565  public String[] getCostFunctionNames() 
{
-566if (costFunctions == null) return 
null;
-567String[] ret = new 
String[costFunctions.length];
-568for (int i = 0; i < costFunctions.length; i++) {
-569  CostFunction c = 
costFunctions[i];
-570  ret[i] = 
c.getClass().getSimpleName();
-571}
-572
-573return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It 
will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be 
combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the 
cluster
-581   * @param previousCost the previous 
cost. This is used as an early out.
-582   * @return a double of a cost 
associated with the proposed cluster state.  This cost is an
-583   * aggregate of all individual 
cost functions.
-584   */
-585  protected double computeCost(Cluster 
cluster, double previousCost) {
-586double total = 0;
-587
-588for (int i = 0; i < costFunctions.length; i++) {
-589  CostFunction c = 
costFunctions[i];
-590  this.tempFunctionCosts[i] = 0.0;
-591
-592  if (c.getMultiplier() <= 0) {
-593continue;
-594  }
-595
-596  Float multiplier = 
c.getMultiplier();
-597  Double cost = c.cost();
-598
-599  this.tempFunctionCosts[i] = 
multiplier*cost;
-600  total += 
this.tempFunctionCosts[i];
-601
-602  if (total > previousCost) {
-603break;
-604  }
-605}
-606
-607return total;
-608  }
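The aggregation above reduces to: skip any cost function with a non-positive multiplier, accumulate multiplier * cost, and stop early once the running total exceeds the previous cost. A self-contained sketch of just that arithmetic, with plain arrays standing in for the CostFunction objects:

// Early-out weighted-sum aggregation, modelled on computeCost above.
final class WeightedCostModel {
  static double computeCost(double[] multipliers, double[] costs, double previousCost) {
    double total = 0;
    for (int i = 0; i < multipliers.length; i++) {
      if (multipliers[i] <= 0) {
        continue;                      // disabled cost function
      }
      total += multipliers[i] * costs[i];
      if (total > previousCost) {
        break;                         // early out: candidate already worse
      }
    }
    return total;
  }

  public static void main(String[] args) {
    double[] m = {1.0, 0.0, 2.0};
    double[] c = {0.2, 9.9, 0.3};
    System.out.println(computeCost(m, c, 10.0)); // ~0.8 = 1.0*0.2 + 2.0*0.3
  }
}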
-609
-610  /** Generates a candidate action to be 
applied to the cluster for cost function search */
-611  abstract static class 
CandidateGenerator {
-612abstract Cluster.Action 
generate(Cluster cluster);
-613
-614/**
-615 * From a list of regions pick a 
random one. Null can be returned which
-616 * {@link 
StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region 
move
-617 * rather than swap.
-618 *
-619 * @param clusterThe state of 
the cluster
-620 * @param server index of the 
server
-621 * @param chanceOfNoSwap Chance that 
this will decide to try a move rather
-622 *   than a 
swap.
-623 * @return a random {@link 
RegionInfo} or null if an asymmetrical move is
-624 * suggested.
-625 */
-626protected int 
pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627  // Check to see if this is just a 
move.
-628  if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
-629// signal a move only.
-630return -1;
-631  }
-632  int rand = 
RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633  return 
cluster.regionsPerServer[server][rand];
-634
-635}
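A self-contained model of the pick above, assuming an int[][] as a stand-in for Cluster.regionsPerServer: with probability chanceOfNoSwap (or when the server hosts no regions) it returns -1 to signal a plain move rather than a swap.

import java.util.Random;

final class RandomRegionPickModel {
  static final Random RANDOM = new Random();

  static int pickRandomRegion(int[][] regionsPerServer, int server, double chanceOfNoSwap) {
    if (regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
      return -1; // move-only signal
    }
    int rand = RANDOM.nextInt(regionsPerServer[server].length);
    return regionsPerServer[server][rand];
  }

  public static void main(String[] args) {
    int[][] cluster = { {7, 9, 11} };
    // Roughly half the time prints -1 (move), otherwise one of 7, 9, 11.
    System.out.println(pickRandomRegion(cluster, 0, 0.5));
  }
}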
-636protected int 
pickRandomServer(Cluster cluster) {
-637  if (cluster.numServers < 1) {
-638return -1;
-639  }
-640
-641  return 
RANDOM.nextInt(cluster.numServers);
-642}
-643
-644protected int pickRandomRack(Cluster 
cluster) {
-645  if (cluster.numRacks < 1) {
-646return -1;
-647  }
-648

[09/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html 
b/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
index 4729dfd..503a09a 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
@@ -1,10 +1,10 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
-接口 org.apache.hadoop.hbase.CellBuilder的使用 (Apache HBase 
3.0.0-SNAPSHOT API)
+Uses of Interface org.apache.hadoop.hbase.CellBuilder (Apache HBase 
3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个
-下一个
+Prev
+Next
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 
 
-

接口的使用
org.apache.hadoop.hbase.CellBuilder

+

Uses of Interface
org.apache.hadoop.hbase.CellBuilder

@@ -94,70 +94,70 @@
-使用CellBuilder的程序包
+Packages that use CellBuilder
-程序包说明
+PackageDescription
-org.apache.hadoop.hbase中CellBuilder的使用
+Uses of CellBuilder in org.apache.hadoop.hbase
-返回CellBuilder的org.apache.hadoop.hbase中的方法
+Methods in org.apache.hadoop.hbase that return CellBuilder
-限定符和类型方法和说明
+Modifier and TypeMethod and Description
CellBuilder

[09/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/class-use/Cell.html 
    b/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
    index 5cca62a..37e55ec 100644
    --- a/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
    +++ b/apidocs/org/apache/hadoop/hbase/class-use/Cell.html
    @@ -1,10 +1,10 @@
     http://www.w3.org/TR/html4/loose.dtd;>
     
    -
    +
     
     
     
    -Uses of Interface org.apache.hadoop.hbase.Cell (Apache HBase 
    3.0.0-SNAPSHOT API)
    +接口 org.apache.hadoop.hbase.Cell的使用 (Apache HBase 
    3.0.0-SNAPSHOT API)
     
     
     
    @@ -12,7 +12,7 @@
     
     
     
    -JavaScript is disabled on your browser.
    +您的浏览器已禁用 JavaScript。
     
     
     
     
     
    -Skip navigation links
    +跳过导航链接
     
     
     
    -
    -Overview
    -Package
    -Class
    -Use
    -Tree
    -Deprecated
    -Index
    -Help
    +
    +概览
    +程序包
+类
    +使用
    +树
    +已过时
    +索引
    +帮助
     
     
     
     
    -Prev
    -Next
    +上一个
    +下一个
     
     
    -Frames
    -NoFrames
    +框架
    +无框架
     
     
    -AllClasses
    +所有类
     
     
     

    [09/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.TransparentCryptoHelper.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.TransparentCryptoHelper.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.TransparentCryptoHelper.html
    index 05e032c..40ef9f4 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.TransparentCryptoHelper.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.TransparentCryptoHelper.html
    @@ -25,767 +25,805 @@
     017 */
     018package 
    org.apache.hadoop.hbase.io.asyncfs;
     019
    -020import static 
    org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
    -021import static 
    org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
    +020import static 
    org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
    +021import static 
    org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
     022
    -023import 
    org.apache.hbase.thirdparty.com.google.common.base.Charsets;
    -024import 
    org.apache.hbase.thirdparty.com.google.common.base.Throwables;
    -025import 
    org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
    -026import 
    org.apache.hbase.thirdparty.com.google.common.collect.Maps;
    -027import 
    com.google.protobuf.CodedOutputStream;
    -028
    -029import 
    org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
    -030import 
    org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream;
    -031import 
    org.apache.hbase.thirdparty.io.netty.buffer.CompositeByteBuf;
    -032import 
    org.apache.hbase.thirdparty.io.netty.buffer.Unpooled;
    -033import 
    org.apache.hbase.thirdparty.io.netty.channel.Channel;
    -034import 
    org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler;
    -035import 
    org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
    -036import 
    org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandlerAdapter;
    -037import 
    org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline;
    -038import 
    org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise;
    -039import 
    org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler;
    -040import 
    org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder;
    -041import 
    org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToByteEncoder;
    -042import 
    org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufDecoder;
    -043import 
    org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
    -044import 
    org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent;
    -045import 
    org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
    -046import 
    org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
    -047
    -048import java.io.IOException;
    -049import java.lang.reflect.Field;
    -050import 
    java.lang.reflect.InvocationTargetException;
    -051import java.lang.reflect.Method;
    -052import java.net.InetAddress;
    -053import java.net.InetSocketAddress;
    -054import java.nio.ByteBuffer;
    -055import 
    java.security.GeneralSecurityException;
    -056import java.util.Arrays;
    -057import java.util.Collections;
    -058import java.util.List;
    -059import java.util.Map;
    -060import java.util.Set;
    -061import java.util.concurrent.TimeUnit;
    -062import 
    java.util.concurrent.atomic.AtomicBoolean;
    -063
    -064import 
    javax.security.auth.callback.Callback;
    -065import 
    javax.security.auth.callback.CallbackHandler;
    -066import 
    javax.security.auth.callback.NameCallback;
    -067import 
    javax.security.auth.callback.PasswordCallback;
    -068import 
    javax.security.auth.callback.UnsupportedCallbackException;
    -069import 
    javax.security.sasl.RealmCallback;
    -070import 
    javax.security.sasl.RealmChoiceCallback;
    -071import javax.security.sasl.Sasl;
    -072import javax.security.sasl.SaslClient;
    -073import 
    javax.security.sasl.SaslException;
    -074
    -075import 
    org.apache.commons.codec.binary.Base64;
    -076import 
    org.apache.commons.lang3.StringUtils;
    -077import 
    org.apache.hadoop.conf.Configuration;
    -078import 
    org.apache.hadoop.crypto.CipherOption;
    -079import 
    org.apache.hadoop.crypto.CipherSuite;
    -080import 
    org.apache.hadoop.crypto.CryptoCodec;
    -081import 
    org.apache.hadoop.crypto.Decryptor;
    -082import 
    org.apache.hadoop.crypto.Encryptor;
    -083import 
    org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
    -084import 
    org.apache.hadoop.fs.FileEncryptionInfo;
    -085import 
    org.apache.yetus.audience.InterfaceAudience;
    -086import org.slf4j.Logger;
    -087import org.slf4j.LoggerFactory;
    -088
    -089import com.google.protobuf.ByteString;
    -090import 
    org.apache.hadoop.hdfs.DFSClient;
    -091import 
    org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    -092import 
    

    [09/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
    index c10cfbf..a3e2f4a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
    @@ -3371,7 +3371,7 @@
     3363private V result = null;
     3364
     3365private final HBaseAdmin admin;
    -3366private final Long procId;
    +3366protected final Long procId;
     3367
     3368public ProcedureFuture(final 
    HBaseAdmin admin, final Long procId) {
     3369  this.admin = admin;
    @@ -3653,653 +3653,651 @@
     3645 * @return a description of the 
    operation
     3646 */
     3647protected String getDescription() 
    {
    -3648  return "Operation: " + 
    getOperationType() + ", "
    -3649  + "Table Name: " + 
    tableName.getNameWithNamespaceInclAsString();
    -3650
    -3651}
    -3652
    -3653protected abstract class 
    TableWaitForStateCallable implements WaitForStateCallable {
    -3654  @Override
    -3655  public void 
    throwInterruptedException() throws InterruptedIOException {
    -3656throw new 
    InterruptedIOException("Interrupted while waiting for operation: "
    -3657+ getOperationType() + " on 
    table: " + tableName.getNameWithNamespaceInclAsString());
    -3658  }
    -3659
    -3660  @Override
    -3661  public void 
    throwTimeoutException(long elapsedTime) throws TimeoutException {
    -3662throw new TimeoutException("The 
    operation: " + getOperationType() + " on table: " +
    -3663tableName.getNameAsString() 
    + " has not completed after " + elapsedTime + "ms");
    -3664  }
    -3665}
    -3666
    -3667@Override
    -3668protected V 
    postOperationResult(final V result, final long deadlineTs)
    -3669throws IOException, 
    TimeoutException {
    -3670  LOG.info(getDescription() + " 
    completed");
    -3671  return 
    super.postOperationResult(result, deadlineTs);
    -3672}
    -3673
    -3674@Override
    -3675protected V 
    postOperationFailure(final IOException exception, final long deadlineTs)
    -3676throws IOException, 
    TimeoutException {
    -3677  LOG.info(getDescription() + " 
    failed with " + exception.getMessage());
    -3678  return 
    super.postOperationFailure(exception, deadlineTs);
    -3679}
    -3680
    -3681protected void 
    waitForTableEnabled(final long deadlineTs)
    -3682throws IOException, 
    TimeoutException {
    -3683  waitForState(deadlineTs, new 
    TableWaitForStateCallable() {
    -3684@Override
    -3685public boolean checkState(int 
    tries) throws IOException {
    -3686  try {
    -3687if 
    (getAdmin().isTableAvailable(tableName)) {
    -3688  return true;
    -3689}
    -3690  } catch 
    (TableNotFoundException tnfe) {
    -3691LOG.debug("Table " + 
    tableName.getNameWithNamespaceInclAsString()
    -3692+ " was not enabled, 
    sleeping. tries=" + tries);
    -3693  }
    -3694  return false;
    -3695}
    -3696  });
    -3697}
    -3698
    -3699protected void 
    waitForTableDisabled(final long deadlineTs)
    -3700throws IOException, 
    TimeoutException {
    -3701  waitForState(deadlineTs, new 
    TableWaitForStateCallable() {
    -3702@Override
    -3703public boolean checkState(int 
    tries) throws IOException {
    -3704  return 
    getAdmin().isTableDisabled(tableName);
    -3705}
    -3706  });
    -3707}
    -3708
    -3709protected void 
    waitTableNotFound(final long deadlineTs)
    -3710throws IOException, 
    TimeoutException {
    -3711  waitForState(deadlineTs, new 
    TableWaitForStateCallable() {
    -3712@Override
    -3713public boolean checkState(int 
    tries) throws IOException {
    -3714  return 
    !getAdmin().tableExists(tableName);
    -3715}
    -3716  });
    -3717}
    -3718
    -3719protected void 
    waitForSchemaUpdate(final long deadlineTs)
    -3720throws IOException, 
    TimeoutException {
    -3721  waitForState(deadlineTs, new 
    TableWaitForStateCallable() {
    -3722@Override
    -3723public boolean checkState(int 
    tries) throws IOException {
    -3724  return 
    getAdmin().getAlterStatus(tableName).getFirst() == 0;
    -3725}
    -3726  });
    -3727}
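All these waitForXxx helpers share one shape: poll a checkState predicate until it returns true or the deadline passes. A self-contained model of that pattern with simplified interfaces (the real TableWaitForStateCallable also carries the interrupt/timeout hooks shown above):

// Minimal poll-until-true-or-deadline model of waitForState.
final class WaitForStateModel {
  interface StateCheck { boolean check(int tries) throws Exception; }

  static void waitForState(long deadlineMillis, StateCheck callable) throws Exception {
    for (int tries = 0; System.currentTimeMillis() < deadlineMillis; tries++) {
      if (callable.check(tries)) {
        return;                 // state reached
      }
      Thread.sleep(100);        // the real code backs off based on tries
    }
    throw new java.util.concurrent.TimeoutException("state not reached");
  }

  public static void main(String[] args) throws Exception {
    long deadline = System.currentTimeMillis() + 1000;
    waitForState(deadline, tries -> tries >= 2); // succeeds on the third poll
    System.out.println("done");
  }
}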
    -3728
    -3729protected void 
    waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
    -3730throws IOException, 
    TimeoutException {
    -3731  final TableDescriptor desc = 
    getTableDescriptor();
    -3732  final AtomicInteger actualRegCount 
    = new AtomicInteger(0);
    -3733  final MetaTableAccessor.Visitor 
    visitor = new MetaTableAccessor.Visitor() {
    -3734@Override
    -3735public boolean visit(Result 
    

    [09/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
    index 82070ed..7c0c94d 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.CleanerTask.html
    @@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -private class CleanerChore.CleanerTask
    +private class CleanerChore.CleanerTask
     extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/RecursiveTask.html?is-external=true;
     title="class or interface in java.util.concurrent">RecursiveTaskhttps://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
     title="class or interface in java.lang">Boolean
Attempts to clean up a directory, its subdirectories, and files.
 Return value is true if everything was deleted, false on partial / total failures.
    @@ -259,7 +259,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
     
     
     dir
    -private finalorg.apache.hadoop.fs.Path dir
    +private finalorg.apache.hadoop.fs.Path dir
     
     
     
    @@ -268,7 +268,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
     
     
     root
    -private finalboolean root
    +private finalboolean root
     
     
     
    @@ -285,7 +285,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
     
     
     CleanerTask
    -CleanerTask(org.apache.hadoop.fs.FileStatusdir,
    +CleanerTask(org.apache.hadoop.fs.FileStatusdir,
     booleanroot)
     
     
    @@ -295,7 +295,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
     
     
     CleanerTask
    -CleanerTask(org.apache.hadoop.fs.Pathdir,
    +CleanerTask(org.apache.hadoop.fs.Pathdir,
     booleanroot)
     
     
    @@ -313,7 +313,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
     
     
     compute
    -protectedhttps://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
     title="class or interface in java.lang">Booleancompute()
    +protectedhttps://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
     title="class or interface in java.lang">Booleancompute()
     
     Specified by:
     https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/RecursiveTask.html?is-external=true#compute--;
     title="class or interface in java.util.concurrent">computein 
    classhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/RecursiveTask.html?is-external=true;
     title="class or interface in java.util.concurrent">RecursiveTaskhttps://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
     title="class or interface in java.lang">Boolean
    @@ -326,16 +326,14 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
     
     
     getFilteredStatus
    -privatehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in 
    java.util">Listorg.apache.hadoop.fs.FileStatusgetFilteredStatus(org.apache.hbase.thirdparty.com.google.common.base.Predicateorg.apache.hadoop.fs.FileStatusfunction)
    +privatehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in 
    java.util">Listorg.apache.hadoop.fs.FileStatusgetFilteredStatus(org.apache.hbase.thirdparty.com.google.common.base.Predicateorg.apache.hadoop.fs.FileStatusfunction)
      throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
    -Get FileStatus with filter.
    - Pay attention that FSUtils #listStatusWithStatusFilter would return null,
    - even though status is empty but not null.
    +Get FileStatus with filter.
     
     Parameters:
     function - a filter function
     Returns:
    -filtered FileStatus or null if dir doesn't exist
    +filtered FileStatus or empty list if dir doesn't exist
     Throws:
     https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException - if there's an 
    error other than dir not existing
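The new contract above (empty list, never null, when the directory is missing) is plain null-normalization; a minimal self-contained sketch, with a generic helper that is illustrative rather than the HBase method:

import java.util.Collections;
import java.util.List;

// Normalize a possibly-null listing (as FSUtils#listStatusWithStatusFilter
// may return) to an empty list so callers never need a null check.
final class FilteredStatusExample {
  static <T> List<T> nonNull(List<T> maybeNull) {
    return maybeNull == null ? Collections.<T>emptyList() : maybeNull;
  }

  public static void main(String[] args) {
    System.out.println(nonNull(null).size()); // 0, not a NullPointerException
  }
}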
     
    @@ -347,7 +345,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/
     
     
     deleteAction
    -privatebooleandeleteAction(CleanerChore.Actionhttps://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
     title="class or interface in java.lang">Booleandeletion,
    +privatebooleandeleteAction(CleanerChore.Actionhttps://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
     title="class or interface in java.lang">Booleandeletion,
      https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringtype)
     

    [09/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.html
    index 683aed9..92e4f86 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.html
    @@ -686,7 +686,7 @@ implements 
     
     hashCode
    -publicinthashCode()
    +publicinthashCode()
     
     Overrides:
     https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
     title="class or interface in java.lang">hashCodein 
    classhttps://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
    @@ -699,7 +699,7 @@ implements 
     
     equals
    -publicbooleanequals(https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Objectobj)
    +publicbooleanequals(https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Objectobj)
     
     Overrides:
     https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
     title="class or interface in java.lang">equalsin 
    classhttps://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
    index 91b1ef5..41a1aa4 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = 
    {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6};
    +var methods = 
    {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6};
     var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],4:["t3","Abstract Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -133,123 +133,129 @@ public interface 
     long
    +getCpRequestCount()
    +Get the total number of CoprocessorService requests that 
    have been issued against this region
    +
    +
    +
    +long
     getFilteredReadRequestCount()
     Get the total number of filtered read requests that have 
    been issued against this region
     
     
    -
    +
     long
     getLastMajorCompactionAge()
     
    -
    +
     long
     getMaxCompactionQueueSize()
     Note that this metric is updated periodically and hence 
    might miss some data points.
     
     
    -
    +
     long
     getMaxFlushQueueSize()
     Note that this metric is updated periodically and hence 
    might miss some data points.
     
     
    -
    +
     long
     getMaxStoreFileAge()
     
    -
    +
     long
     getMemStoreSize()
     Get the size of the memstore on this region server.
     
     
    -
    +
     long
     getMinStoreFileAge()
     
    -
    +
     https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String
     getNamespace()
     Get the name of the namespace this table is in.
     
     
    -
    +
     long
     getNumBytesCompacted()
     
    -
    +
     long
     getNumCompactionsCompleted()
     
    -
    +
     long
     getNumCompactionsFailed()
     Returns the total number of compactions that have been 
    reported as failed on this region.
     
     
    -
    +
     long
     getNumCompactionsQueued()
     
    -
    +
     long
     getNumFilesCompacted()
     
    -
    +
     long
     getNumFlushesQueued()
     
    -
    +
     long
     getNumReferenceFiles()
     
    -
    +
     long
     getNumStoreFiles()
     Get the number of store files hosted on this region 
    server.
     
     
    -
    +
     long
     getNumStores()
     Get the number of stores hosted on this region server.
     
     
    -
    +
     long
     getReadRequestCount()
     Get the total number of read requests that have been issued 
    against this region
     
     
    -
    +
     int
     getRegionHashCode()
     
    -
    +
     https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String
     getRegionName()
     Get the name of the region.
     
     
    -
    +
     int
     getReplicaId()
     Get the replica id of this region.
     
     
    -
    +
     long
     getStoreFileSize()
     Get the total size of the store files this region server is 
    serving from.
     
     
    -
    +
     https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String
     getTableName()
     Get the name of the 

    [09/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
    index 74bacd8..546d2b6 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
    @@ -2249,1468 +2249,1484 @@
     2241  }
     2242
     2243  @Override
    -2244  public long addColumn(
    -2245  final TableName tableName,
    -2246  final ColumnFamilyDescriptor 
    column,
    -2247  final long nonceGroup,
    -2248  final long nonce)
    -2249  throws IOException {
    -2250checkInitialized();
    -2251checkTableExists(tableName);
    -2252
    -2253TableDescriptor old = 
    getTableDescriptors().get(tableName);
    -2254if 
    (old.hasColumnFamily(column.getName())) {
    -2255  throw new 
    InvalidFamilyOperationException("Column family '" + column.getNameAsString()
    -2256  + "' in table '" + tableName + 
    "' already exists so cannot be added");
    -2257}
    +2244  public long addColumn(final TableName 
    tableName, final ColumnFamilyDescriptor column,
    +2245  final long nonceGroup, final long 
    nonce) throws IOException {
    +2246checkInitialized();
    +2247checkTableExists(tableName);
    +2248
    +2249return modifyTable(tableName, new 
    TableDescriptorGetter() {
    +2250
    +2251  @Override
    +2252  public TableDescriptor get() 
    throws IOException {
    +2253TableDescriptor old = 
    getTableDescriptors().get(tableName);
    +2254if 
    (old.hasColumnFamily(column.getName())) {
    +2255  throw new 
    InvalidFamilyOperationException("Column family '" + column.getNameAsString()
    +2256  + "' in table '" + 
    tableName + "' already exists so cannot be added");
    +2257}
     2258
    -2259TableDescriptor newDesc = 
    TableDescriptorBuilder
    -2260
    .newBuilder(old).setColumnFamily(column).build();
    -2261return modifyTable(tableName, 
    newDesc, nonceGroup, nonce);
    +2259return 
    TableDescriptorBuilder.newBuilder(old).setColumnFamily(column).build();
    +2260  }
    +2261}, nonceGroup, nonce);
     2262  }
     2263
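The refactor above stops computing the new TableDescriptor eagerly and instead hands modifyTable a TableDescriptorGetter callback to evaluate inside the procedure submission. A self-contained model of that shape (String stands in for TableDescriptor; names are simplified):

// Callback-based descriptor construction, modelled on the hunk above.
final class ModifyTableCallbackModel {
  interface TableDescriptorGetter { String get(); }

  static long modifyTable(String table, TableDescriptorGetter getter) {
    // The getter runs here, inside the "procedure", not at the call site.
    System.out.println("modifying " + table + " -> " + getter.get());
    return 42L; // stand-in procedure id
  }

  static long addColumn(String table, String column) {
    return modifyTable(table, () -> "old descriptor + family " + column);
  }

  public static void main(String[] args) {
    addColumn("t1", "cf2");
  }
}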
    -2264  @Override
    -2265  public long modifyColumn(
    -2266  final TableName tableName,
    -2267  final ColumnFamilyDescriptor 
    descriptor,
    -2268  final long nonceGroup,
    -2269  final long nonce)
    -2270  throws IOException {
    -2271checkInitialized();
    -2272checkTableExists(tableName);
    -2273
    -2274TableDescriptor old = 
    getTableDescriptors().get(tableName);
    -2275if (! 
    old.hasColumnFamily(descriptor.getName())) {
    -2276  throw new 
    InvalidFamilyOperationException("Family '" + descriptor.getNameAsString()
    -2277  + "' does not exist, so it 
    cannot be modified");
    -2278}
    -2279
    -2280TableDescriptor td = 
    TableDescriptorBuilder
    -2281.newBuilder(old)
    -2282
    .modifyColumnFamily(descriptor)
    -2283.build();
    -2284
    -2285return modifyTable(tableName, td, 
    nonceGroup, nonce);
    -2286  }
    -2287
    -2288  @Override
    -2289  public long deleteColumn(
    -2290  final TableName tableName,
    -2291  final byte[] columnName,
    -2292  final long nonceGroup,
    -2293  final long nonce)
    -2294  throws IOException {
    -2295checkInitialized();
    -2296checkTableExists(tableName);
    -2297
    -2298TableDescriptor old = 
    getTableDescriptors().get(tableName);
    -2299
    -2300if (! 
    old.hasColumnFamily(columnName)) {
    -2301  throw new 
    InvalidFamilyOperationException("Family '" + Bytes.toString(columnName)
    -2302  + "' does not exist, so it 
    cannot be deleted");
    -2303}
    -2304if (old.getColumnFamilyCount() == 1) 
    {
    -2305  throw new 
    InvalidFamilyOperationException("Family '" + Bytes.toString(columnName)
    -2306  + "' is the only column family 
    in the table, so it cannot be deleted");
    -2307}
    -2308
    -2309TableDescriptor td = 
    TableDescriptorBuilder
    -2310
    .newBuilder(old).removeColumnFamily(columnName).build();
    -2311return modifyTable(tableName, td, 
    nonceGroup, nonce);
    -2312  }
    -2313
    -2314  @Override
    -2315  public long enableTable(final 
    TableName tableName, final long nonceGroup, final long nonce)
    -2316  throws IOException {
    -2317checkInitialized();
    -2318
    -2319return 
    MasterProcedureUtil.submitProcedure(
    -2320new 
    MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
    -2321  @Override
    -2322  protected void run() throws 
    IOException {
    -2323
    getMaster().getMasterCoprocessorHost().preEnableTable(tableName);
    -2324
    -2325// Normally, it would make sense 
    for this authorization check to exist inside
    -2326// AccessController, but because 
    the authorization check is done based on internal state
    -2327// (rather than 

    [09/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
    index e73540e..7b680e9 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterMetaBootstrap.html
    @@ -28,135 +28,100 @@
     020
     021import java.io.IOException;
     022import java.util.List;
    -023import java.util.Set;
    -024import 
    org.apache.hadoop.hbase.HConstants;
    -025import 
    org.apache.hadoop.hbase.ServerName;
    -026import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -027import 
    org.apache.hadoop.hbase.client.RegionInfoBuilder;
    -028import 
    org.apache.hadoop.hbase.client.RegionReplicaUtil;
    -029import 
    org.apache.hadoop.hbase.master.assignment.AssignmentManager;
    -030import 
    org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
    -031import 
    org.apache.hadoop.hbase.zookeeper.ZKUtil;
    -032import 
    org.apache.hadoop.hbase.zookeeper.ZKWatcher;
    -033import 
    org.apache.yetus.audience.InterfaceAudience;
    -034import 
    org.apache.zookeeper.KeeperException;
    -035import org.slf4j.Logger;
    -036import org.slf4j.LoggerFactory;
    -037
    -038/**
    -039 * Used by the HMaster on startup to 
    split meta logs and assign the meta table.
    -040 */
    -041@InterfaceAudience.Private
    -042public class MasterMetaBootstrap {
    -043  private static final Logger LOG = 
    LoggerFactory.getLogger(MasterMetaBootstrap.class);
    -044
    -045  private final HMaster master;
    -046
    -047  public MasterMetaBootstrap(HMaster 
    master) {
    -048this.master = master;
    -049  }
    -050
    -051  public void recoverMeta() throws 
    InterruptedException, IOException {
    -052// This is a blocking call that waits 
    until hbase:meta is deployed.
    -053master.recoverMeta();
    -054// Now we can start the 
    TableStateManager. It is backed by hbase:meta.
    -055
    master.getTableStateManager().start();
    -056// Enable server crash procedure 
    handling
    -057enableCrashedServerProcessing();
    -058  }
    -059
    -060  public void processDeadServers() {
    -061// get a list for previously failed 
    RS which need log splitting work
    -062// we recover hbase:meta region 
    servers inside master initialization and
    -063// handle other failed servers in SSH 
    in order to start up master node ASAP
-064Set<ServerName> previouslyFailedServers =
-065master.getMasterWalManager().getFailedServersFromLogFolders();
    -066
    -067// Master has recovered hbase:meta 
    region server and we put
    -068// other failed region servers in a 
    queue to be handled later by SSH
    -069for (ServerName tmpServer : 
    previouslyFailedServers) {
    -070  
    master.getServerManager().processDeadServer(tmpServer, true);
    -071}
    -072  }
    -073
    -074  /**
    -075   * For assigning hbase:meta replicas 
    only.
    -076   * TODO: The way this assign runs, 
    nothing but chance to stop all replicas showing up on same
    -077   * server as the hbase:meta region.
    -078   */
    -079  protected void assignMetaReplicas()
    -080  throws IOException, 
    InterruptedException, KeeperException {
    -081int numReplicas = 
    master.getConfiguration().getInt(HConstants.META_REPLICAS_NUM,
    -082   
    HConstants.DEFAULT_META_REPLICA_NUM);
-083if (numReplicas <= 1) {
-084  // No replicas to assign. Return.
    -085  return;
    -086}
    -087final AssignmentManager 
    assignmentManager = master.getAssignmentManager();
    -088if 
    (!assignmentManager.isMetaInitialized()) {
    -089  throw new 
    IllegalStateException("hbase:meta must be initialized first before we can " +
    -090  "assign out its replicas");
    -091}
    -092ServerName metaServername =
    -093
    this.master.getMetaTableLocator().getMetaRegionLocation(this.master.getZooKeeper());
-094for (int i = 1; i < numReplicas; i++) {
    -095  // Get current meta state for 
    replica from zk.
    -096  RegionState metaState = 
    MetaTableLocator.getMetaRegionState(master.getZooKeeper(), i);
    -097  RegionInfo hri = 
    RegionReplicaUtil.getRegionInfoForReplica(
    -098  
    RegionInfoBuilder.FIRST_META_REGIONINFO, i);
    -099  
    LOG.debug(hri.getRegionNameAsString() + " replica region state from zookeeper=" 
    + metaState);
    -100  if 
    (metaServername.equals(metaState.getServerName())) {
    -101metaState = null;
    -102
    LOG.info(hri.getRegionNameAsString() +
    -103  " old location is same as 
    current hbase:meta location; setting location as null...");
    -104  }
    -105  // These assigns run inline. All is 
    blocked till they complete. Only interrupt is shutting
    -106  // down hosting server which calls 
    AM#stop.
-107  if (metaState != null && metaState.getServerName() != null) {
    -108// Try to retain old 
    assignment.
    -109assignmentManager.assign(hri, 
    metaState.getServerName());
    -110  } else {
    

    [09/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.EnsureKvEncoder.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.EnsureKvEncoder.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.EnsureKvEncoder.html
    index 83c17c0..9df0225 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.EnsureKvEncoder.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.EnsureKvEncoder.html
    @@ -54,323 +54,362 @@
     046import org.apache.hadoop.io.IOUtils;
     047
     048import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
    -049
    +049import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
     050
    -051/**
    -052 * Compression in this class is lifted off Compressor/KeyValueCompression.
    -053 * This is a pure coincidence... they are independent and don't have to be compatible.
    -054 *
    -055 * This codec is used at server side for writing cells to WAL as well as for sending edits
    -056 * as part of the distributed splitting process.
    -057 */
    -058@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC,
    -059  HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG})
    -060public class WALCellCodec implements Codec {
    -061  /** Configuration key for the class to use when encoding cells in the WAL */
    -062  public static final String WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec";
    -063
    -064  protected final CompressionContext compression;
    -065  protected final ByteStringUncompressor statelessUncompressor = new ByteStringUncompressor() {
    -066    @Override
    -067    public byte[] uncompress(ByteString data, Dictionary dict) throws IOException {
    -068      return WALCellCodec.uncompressByteString(data, dict);
    -069    }
    -070  };
    -071
    -072  /**
    -073   * <b>All subclasses must implement a no argument constructor</b>
    -074   */
    -075  public WALCellCodec() {
    -076    this.compression = null;
    -077  }
    -078
    -079  /**
    -080   * Default constructor - <b>all subclasses must implement a constructor with this signature </b>
    -081   * if they are to be dynamically loaded from the {@link Configuration}.
    -082   * @param conf configuration to configure <tt>this</tt>
    -083   * @param compression compression the codec should support, can be <tt>null</tt> to indicate no
    -084   *          compression
    -085   */
    -086  public WALCellCodec(Configuration conf, CompressionContext compression) {
    -087    this.compression = compression;
    -088  }
    -089
    -090  public static String getWALCellCodecClass(Configuration conf) {
    -091    return conf.get(WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
    -092  }
    -093
    -094  /**
    -095   * Create and setup a {@link WALCellCodec} from the {@code cellCodecClsName} and
    -096   * CompressionContext, if {@code cellCodecClsName} is specified.
    -097   * Otherwise Cell Codec classname is read from {@link Configuration}.
    -098   * Fully prepares the codec for use.
    -099   * @param conf {@link Configuration} to read for the user-specified codec. If none is specified,
    -100   *          uses a {@link WALCellCodec}.
    -101   * @param cellCodecClsName name of codec
    -102   * @param compression compression the codec should use
    -103   * @return a {@link WALCellCodec} ready for use.
    -104   * @throws UnsupportedOperationException if the codec cannot be instantiated
    -105   */
    -106
    -107  public static WALCellCodec create(Configuration conf, String cellCodecClsName,
    -108      CompressionContext compression) throws UnsupportedOperationException {
    -109    if (cellCodecClsName == null) {
    -110      cellCodecClsName = getWALCellCodecClass(conf);
    -111    }
    -112    return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]
    -113        { Configuration.class, CompressionContext.class }, new Object[] { conf, compression });
    -114  }
    -115
    -116  /**
    -117   * Create and setup a {@link WALCellCodec} from the
    -118   * CompressionContext.
    -119   * Cell Codec classname is read from {@link Configuration}.
    -120   * Fully prepares the codec for use.
    -121   * @param conf {@link Configuration} to read for the user-specified codec. If none is specified,
    -122   *          uses a {@link WALCellCodec}.
    -123   * @param compression compression the codec should use
    -124   * @return a {@link WALCellCodec} ready for use.
    -125   * @throws UnsupportedOperationException if the codec cannot be instantiated
    -126   */
    -127  public static WALCellCodec create(Configuration conf,
    -128      CompressionContext compression) throws UnsupportedOperationException {
    -129    String cellCodecClsName = getWALCellCodecClass(conf);
    -130    return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]
    -131        { Configuration.class, CompressionContext.class }, new Object[] { conf, compression });
    -132  }
    -133
    -134
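
    The javadoc above spells out the dynamic-loading contract: a replacement codec is named under hbase.regionserver.wal.codec and must expose a (Configuration, CompressionContext) constructor, which the reflection-based create(...) methods then invoke. A hedged sketch of a conforming subclass and its wiring (the class name MyWALCellCodec is hypothetical):

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
        import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;

        // Hypothetical subclass; the two-arg constructor signature is the part
        // that WALCellCodec.create(...) above requires for dynamic loading.
        public class MyWALCellCodec extends WALCellCodec {
          public MyWALCellCodec(Configuration conf, CompressionContext compression) {
            super(conf, compression);
          }
        }

        // Wiring it in: point the codec key at the subclass, then let create() load it.
        //   Configuration conf = HBaseConfiguration.create();
        //   conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, MyWALCellCodec.class.getName());
        //   WALCellCodec codec = WALCellCodec.create(conf, null);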

    [09/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
    index 49f4e5a..e1f22c3 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.html
    @@ -30,188 +30,191 @@
     022import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
     023import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
     024import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.ServerOperation;
    -025import org.apache.hadoop.hbase.procedure2.Procedure;
    -026import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
    -027import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
    -028import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
    -029import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
    -030import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
    -031import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
    -032import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
    -033import org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable;
    -034import org.apache.yetus.audience.InterfaceAudience;
    -035import org.slf4j.Logger;
    -036import org.slf4j.LoggerFactory;
    -037
    -038import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -039import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationType;
    -040import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerParameter;
    -041import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerStateData;
    -042
    -043@InterfaceAudience.Private
    -044public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
    -045    implements PeerProcedureInterface, RemoteProcedure<MasterProcedureEnv, ServerName> {
    -046
    -047  private static final Logger LOG = LoggerFactory.getLogger(RefreshPeerProcedure.class);
    -048
    -049  private String peerId;
    -050
    -051  private PeerOperationType type;
    -052
    -053  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC",
    -054      justification = "Will never change after construction")
    -055  private ServerName targetServer;
    -056
    -057  private boolean dispatched;
    -058
    -059  private ProcedureEvent<?> event;
    -060
    -061  private boolean succ;
    -062
    -063  public RefreshPeerProcedure() {
    -064  }
    -065
    -066  public RefreshPeerProcedure(String peerId, PeerOperationType type, ServerName targetServer) {
    -067    this.peerId = peerId;
    -068    this.type = type;
    -069    this.targetServer = targetServer;
    -070  }
    -071
    -072  @Override
    -073  public String getPeerId() {
    -074    return peerId;
    -075  }
    -076
    -077  @Override
    -078  public PeerOperationType getPeerOperationType() {
    -079    return PeerOperationType.REFRESH;
    -080  }
    -081
    -082  private static PeerModificationType toPeerModificationType(PeerOperationType type) {
    -083    switch (type) {
    -084      case ADD:
    -085        return PeerModificationType.ADD_PEER;
    -086      case REMOVE:
    -087        return PeerModificationType.REMOVE_PEER;
    -088      case ENABLE:
    -089        return PeerModificationType.ENABLE_PEER;
    -090      case DISABLE:
    -091        return PeerModificationType.DISABLE_PEER;
    -092      case UPDATE_CONFIG:
    -093        return PeerModificationType.UPDATE_PEER_CONFIG;
    -094      default:
    -095        throw new IllegalArgumentException("Unknown type: " + type);
    -096    }
    -097  }
    -098
    -099  private static PeerOperationType toPeerOperationType(PeerModificationType type) {
    -100    switch (type) {
    -101      case ADD_PEER:
    -102        return PeerOperationType.ADD;
    -103      case REMOVE_PEER:
    -104        return PeerOperationType.REMOVE;
    -105      case ENABLE_PEER:
    -106        return PeerOperationType.ENABLE;
    -107      case DISABLE_PEER:
    -108        return PeerOperationType.DISABLE;
    -109      case UPDATE_PEER_CONFIG:
    -110        return PeerOperationType.UPDATE_CONFIG;
    -111      default:
    -112        throw new IllegalArgumentException("Unknown type: " + type);
    -113    }
    -114  }
    -115
    -116  @Override
    -117  public RemoteOperation remoteCallBuild(MasterProcedureEnv env, ServerName remote) {
    -118    assert targetServer.equals(remote);
    -119    return new ServerOperation(this, getProcId(), RefreshPeerCallable.class,
    -120        RefreshPeerParameter.newBuilder().setPeerId(peerId).setType(toPeerModificationType(type))
    -121            .setTargetServer(ProtobufUtil.toServerName(remote)).build().toByteArray());
    -122  }
    -123
    -124  private void complete(MasterProcedureEnv
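
    The two switch-based converters above are meant to be mutual inverses between the procedure-level operation type and the protobuf wire type. A hedged standalone sketch of the same round-trip idea, written against stand-in enums so it runs on its own (the real types live in PeerProcedureInterface and the generated MasterProcedureProtos):

        // Stand-in enums for illustration only; not the HBase-internal types.
        enum OpType { ADD, REMOVE, ENABLE, DISABLE, UPDATE_CONFIG }
        enum WireType { ADD_PEER, REMOVE_PEER, ENABLE_PEER, DISABLE_PEER, UPDATE_PEER_CONFIG }

        public class PeerTypeRoundTrip {
          static WireType toWire(OpType t) {
            switch (t) {
              case ADD: return WireType.ADD_PEER;
              case REMOVE: return WireType.REMOVE_PEER;
              case ENABLE: return WireType.ENABLE_PEER;
              case DISABLE: return WireType.DISABLE_PEER;
              case UPDATE_CONFIG: return WireType.UPDATE_PEER_CONFIG;
              default: throw new IllegalArgumentException("Unknown type: " + t);
            }
          }

          static OpType fromWire(WireType t) {
            switch (t) {
              case ADD_PEER: return OpType.ADD;
              case REMOVE_PEER: return OpType.REMOVE;
              case ENABLE_PEER: return OpType.ENABLE;
              case DISABLE_PEER: return OpType.DISABLE;
              case UPDATE_PEER_CONFIG: return OpType.UPDATE_CONFIG;
              default: throw new IllegalArgumentException("Unknown type: " + t);
            }
          }

          public static void main(String[] args) {
            // Round-trip every value; a missing case would surface here.
            for (OpType t : OpType.values()) {
              if (fromWire(toWire(t)) != t) {
                throw new AssertionError("round trip broken for " + t);
              }
            }
          }
        }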

    [09/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html
    index cbf76ed..1756883 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.html
    @@ -103,93 +103,98 @@
     095    done.run(null);
     096  }
     097
    -098  org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest
    -099      convert(PrepareBulkLoadRequest request)
    -100  throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException {
    -101    byte [] bytes = request.toByteArray();
    -102    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest.Builder
    -103      builder =
    -104        org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest.
    -105          newBuilder();
    -106    builder.mergeFrom(bytes);
    -107    return builder.build();
    -108  }
    -109
    -110  @Override
    -111  public void cleanupBulkLoad(RpcController controller, CleanupBulkLoadRequest request,
    -112      RpcCallback<CleanupBulkLoadResponse> done) {
    -113    try {
    -114      SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager();
    -115      secureBulkLoadManager.cleanupBulkLoad((HRegion) this.env.getRegion(), convert(request));
    -116      done.run(CleanupBulkLoadResponse.newBuilder().build());
    -117    } catch (IOException e) {
    -118      CoprocessorRpcUtils.setControllerException(controller, e);
    -119    }
    -120    done.run(null);
    -121  }
    -122
    -123  /**
    -124   * Convert from CPEP protobuf 2.5 to internal protobuf 3.3.
    -125   * @throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException
    -126   */
    -127  org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest
    -128  convert(CleanupBulkLoadRequest request)
    -129  throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException {
    -130    byte [] bytes = request.toByteArray();
    -131    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest.Builder
    -132    builder =
    -133      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest.
    -134      newBuilder();
    -135    builder.mergeFrom(bytes);
    -136    return builder.build();
    -137  }
    -138
    -139  @Override
    -140  public void secureBulkLoadHFiles(RpcController controller, SecureBulkLoadHFilesRequest request,
    -141      RpcCallback<SecureBulkLoadHFilesResponse> done) {
    -142    boolean loaded = false;
    -143    Map<byte[], List<Path>> map = null;
    -144    try {
    -145      SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager();
    -146      BulkLoadHFileRequest bulkLoadHFileRequest = ConvertSecureBulkLoadHFilesRequest(request);
    -147      map = secureBulkLoadManager.secureBulkLoadHFiles((HRegion) this.env.getRegion(),
    -148          convert(bulkLoadHFileRequest));
    -149      loaded = map != null && !map.isEmpty();
    -150    } catch (IOException e) {
    -151      CoprocessorRpcUtils.setControllerException(controller, e);
    -152    }
    -153    done.run(SecureBulkLoadHFilesResponse.newBuilder().setLoaded(loaded).build());
    -154  }
    -155
    -156  org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest
    -157  convert(BulkLoadHFileRequest request)
    -158  throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException {
    -159    byte [] bytes = request.toByteArray();
    -160    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder
    -161    builder =
    -162      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.
    -163        newBuilder();
    -164    builder.mergeFrom(bytes);
    -165    return builder.build();
    -166  }
    -167
    -168  private BulkLoadHFileRequest ConvertSecureBulkLoadHFilesRequest(
    -169      SecureBulkLoadHFilesRequest request) {
    -170    BulkLoadHFileRequest.Builder bulkLoadHFileRequest = BulkLoadHFileRequest.newBuilder();
    -171    RegionSpecifier region =
    -172        ProtobufUtil.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, this.env
    -173            .getRegionInfo().getRegionName());
    -174    bulkLoadHFileRequest.setRegion(region).setFsToken(request.getFsToken())
    -175        .setBulkToken(request.getBulkToken()).setAssignSeqNum(request.getAssignSeqNum())
    -176        .addAllFamilyPath(request.getFamilyPathList());
    -177    return bulkLoadHFileRequest.build();
    -178  }
    -179
    -180  @Override
    -181  public Iterable<Service> getServices() {
    -182    return Collections.singleton(this);
    +098  /**
    +099   *
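
    All of the convert(...) helpers above use the same trick for crossing protobuf runtimes: serialize a message with one runtime and re-parse the bytes with the other, since both sides share the same wire schema. A hedged generic sketch of the pattern; for brevity it stays within one protobuf runtime, whereas in SecureBulkLoadEndpoint the source and target classes come from two different (unshaded vs. shaded) runtimes, which is exactly why raw bytes are the bridge:

        import com.google.protobuf.InvalidProtocolBufferException;
        import com.google.protobuf.Message;

        // Generic form of the byte round-trip used by the convert(...) methods above;
        // works for any target builder whose schema matches the source message.
        public final class ProtoRuntimeBridge {
          private ProtoRuntimeBridge() {
          }

          @SuppressWarnings("unchecked")
          public static <T extends Message> T rebuild(Message source, Message.Builder targetBuilder)
              throws InvalidProtocolBufferException {
            // Same wire format on both sides, so bytes written by one runtime
            // parse cleanly in the other.
            return (T) targetBuilder.mergeFrom(source.toByteArray()).build();
          }
        }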

    [09/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
    index efd186d..30a1259 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
    @@ -58,1008 +58,1012 @@
     050  public final static int NEXT_BIT_MASK = 1 << 7;
     051  @VisibleForTesting
     052  final static boolean UNSAFE_AVAIL = UnsafeAvailChecker.isAvailable();
    -053  @VisibleForTesting
    -054  final static boolean UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
    -055
    -056  private ByteBufferUtils() {
    -057  }
    -058
    -059  /**
    -060   * Similar to {@link WritableUtils#writeVLong(java.io.DataOutput, long)},
    -061   * but writes to a {@link ByteBuffer}.
    -062   */
    -063  public static void writeVLong(ByteBuffer out, long i) {
    -064    if (i >= -112 && i <= 127) {
    -065      out.put((byte) i);
    -066      return;
    -067    }
    -068
    -069    int len = -112;
    -070    if (i < 0) {
    -071      i ^= -1L; // take one's complement
    -072      len = -120;
    -073    }
    -074
    -075    long tmp = i;
    -076    while (tmp != 0) {
    -077      tmp = tmp >> 8;
    -078      len--;
    -079    }
    -080
    -081    out.put((byte) len);
    -082
    -083    len = (len < -120) ? -(len + 120) : -(len + 112);
    -084
    -085    for (int idx = len; idx != 0; idx--) {
    -086      int shiftbits = (idx - 1) * 8;
    -087      long mask = 0xFFL << shiftbits;
    -088      out.put((byte) ((i & mask) >> shiftbits));
    -089    }
    -090  }
    -091
    -092  /**
    -093   * Similar to {@link WritableUtils#readVLong(DataInput)} but reads from a
    -094   * {@link ByteBuffer}.
    -095   */
    -096  public static long readVLong(ByteBuffer in) {
    -097    byte firstByte = in.get();
    -098    int len = WritableUtils.decodeVIntSize(firstByte);
    -099    if (len == 1) {
    -100      return firstByte;
    -101    }
    -102    long i = 0;
    -103    for (int idx = 0; idx < len-1; idx++) {
    -104      byte b = in.get();
    -105      i = i << 8;
    -106      i = i | (b & 0xFF);
    -107    }
    -108    return (WritableUtils.isNegativeVInt(firstByte) ? (i ^ -1L) : i);
    -109  }
    +053  public final static boolean UNSAFE_UNALIGNED = UnsafeAvailChecker.unaligned();
    +054
    +055  private ByteBufferUtils() {
    +056  }
    +057
    +058  /**
    +059   * Similar to {@link WritableUtils#writeVLong(java.io.DataOutput, long)},
    +060   * but writes to a {@link ByteBuffer}.
    +061   */
    +062  public static void writeVLong(ByteBuffer out, long i) {
    +063    if (i >= -112 && i <= 127) {
    +064      out.put((byte) i);
    +065      return;
    +066    }
    +067
    +068    int len = -112;
    +069    if (i < 0) {
    +070      i ^= -1L; // take one's complement
    +071      len = -120;
    +072    }
    +073
    +074    long tmp = i;
    +075    while (tmp != 0) {
    +076      tmp = tmp >> 8;
    +077      len--;
    +078    }
    +079
    +080    out.put((byte) len);
    +081
    +082    len = (len < -120) ? -(len + 120) : -(len + 112);
    +083
    +084    for (int idx = len; idx != 0; idx--) {
    +085      int shiftbits = (idx - 1) * 8;
    +086      long mask = 0xFFL << shiftbits;
    +087      out.put((byte) ((i & mask) >> shiftbits));
    +088    }
    +089  }
    +090
    +091  /**
    +092   * Similar to {@link WritableUtils#readVLong(DataInput)} but reads from a
    +093   * {@link ByteBuffer}.
    +094   */
    +095  public static long readVLong(ByteBuffer in) {
    +096    byte firstByte = in.get();
    +097    int len = WritableUtils.decodeVIntSize(firstByte);
    +098    if (len == 1) {
    +099      return firstByte;
    +100    }
    +101    long i = 0;
    +102    for (int idx = 0; idx < len-1; idx++) {
    +103      byte b = in.get();
    +104      i = i << 8;
    +105      i = i | (b & 0xFF);
    +106    }
    +107    return (WritableUtils.isNegativeVInt(firstByte) ? (i ^ -1L) : i);
    +108  }
    +109
     110
    -111
    -112  /**
    -113   * Put in buffer integer using 7 bit encoding. For each written byte:
    -114   * 7 bits are used to store value
    -115   * 1 bit is used to indicate whether there is next bit.
    -116   * @param value Int to be compressed.
    -117   * @param out Where to put compressed data
    -118   * @return Number of bytes written.
    -119   * @throws IOException on stream error
    -120   */
    -121   public static int putCompressedInt(OutputStream out, final int value)
    -122      throws IOException {
    -123    int i = 0;
    -124    int tmpvalue = value;
    -125    do {
    -126      byte b = (byte) (tmpvalue & VALUE_MASK);
    -127      tmpvalue >>>= NEXT_BIT_SHIFT;
    -128      if (tmpvalue != 0) {
    -129        b |= (byte) NEXT_BIT_MASK;
    -130      }
    -131      out.write(b);
    -132      i++;
    -133    } while (tmpvalue != 0);
    -134    return i;
    -135  }
    -136
    -137   /**
    -138    * Put in output stream 32 bit integer (Big Endian byte order).
    -139    * @param out Where to put integer.
    -140    * @param value Value of integer.
    -141    * @throws IOException On stream error.
    -142    */
    -143   public static void putInt(OutputStream out, final int value)
    -144
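
    As a concrete check of the 7-bit encoding described in the javadoc: each output byte carries seven value bits, and the high bit says whether another byte follows, so 300 (binary 100101100) encodes as 0xAC 0x02. A small self-contained sketch using the same loop, with the constants assumed to be 0x7f, 7, and 1 << 7 to match the fields shown above:

        import java.io.ByteArrayOutputStream;
        import java.io.IOException;
        import java.io.OutputStream;

        public class CompressedIntDemo {
          // Constants mirroring ByteBufferUtils (assumed values).
          static final int VALUE_MASK = 0x7f;
          static final int NEXT_BIT_SHIFT = 7;
          static final int NEXT_BIT_MASK = 1 << 7;

          // Same loop as putCompressedInt above: low 7 bits per byte,
          // high bit set while more bytes follow.
          static int putCompressedInt(OutputStream out, final int value) throws IOException {
            int i = 0;
            int tmpvalue = value;
            do {
              byte b = (byte) (tmpvalue & VALUE_MASK);
              tmpvalue >>>= NEXT_BIT_SHIFT;
              if (tmpvalue != 0) {
                b |= (byte) NEXT_BIT_MASK;
              }
              out.write(b);
              i++;
            } while (tmpvalue != 0);
            return i;
          }

          public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            putCompressedInt(bos, 300); // 300 = 0b10_0101100
            for (byte b : bos.toByteArray()) {
              System.out.printf("%02x ", b); // prints: ac 02
            }
          }
        }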

    [09/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
    --
    diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
    index 3f8844b..cdb9398 100644
    --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
    +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncSequentialWriteTest.html
    @@ -140,2712 +140,2713 @@
     132public class PerformanceEvaluation extends Configured implements Tool {
     133  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
     134  static final String RANDOM_READ = "randomRead";
    -135  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
    -136  private static final ObjectMapper MAPPER = new ObjectMapper();
    -137  static {
    -138    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
    -139  }
    -140
    -141  public static final String TABLE_NAME = "TestTable";
    -142  public static final String FAMILY_NAME_BASE = "info";
    -143  public static final byte[] FAMILY_ZERO = Bytes.toBytes("info0");
    -144  public static final byte[] COLUMN_ZERO = Bytes.toBytes("" + 0);
    -145  public static final int DEFAULT_VALUE_LENGTH = 1000;
    -146  public static final int ROW_LENGTH = 26;
    -147
    -148  private static final int ONE_GB = 1024 * 1024 * 1000;
    -149  private static final int DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
    -150  // TODO : should we make this configurable
    -151  private static final int TAG_LENGTH = 256;
    -152  private static final DecimalFormat FMT = new DecimalFormat("0.##");
    -153  private static final MathContext CXT = MathContext.DECIMAL64;
    -154  private static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
    -155  private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
    -156  private static final TestOptions DEFAULT_OPTS = new TestOptions();
    -157
    -158  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
    -159  private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
    -160
    -161  static {
    -162    addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
    -163        "Run async random read test");
    -164    addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
    -165        "Run async random write test");
    -166    addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
    -167        "Run async sequential read test");
    -168    addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
    -169        "Run async sequential write test");
    -170    addCommandDescriptor(AsyncScanTest.class, "asyncScan",
    -171        "Run async scan test (read every row)");
    -172    addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
    -173      "Run random read test");
    -174    addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
    -175      "Run random seek and scan 100 test");
    -176    addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
    -177      "Run random seek scan with both start and stop row (max 10 rows)");
    -178    addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
    -179      "Run random seek scan with both start and stop row (max 100 rows)");
    -180    addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
    -181      "Run random seek scan with both start and stop row (max 1000 rows)");
    -182    addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
    -183      "Run random seek scan with both start and stop row (max 1 rows)");
    -184    addCommandDescriptor(RandomWriteTest.class, "randomWrite",
    -185      "Run random write test");
    -186    addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
    -187      "Run sequential read test");
    -188    addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
    -189      "Run sequential write test");
    -190    addCommandDescriptor(ScanTest.class, "scan",
    -191      "Run scan test (read every row)");
    -192    addCommandDescriptor(FilteredScanTest.class, "filterScan",
    -193      "Run scan test using a filter to find a specific row based on it's value " +
    -194      "(make sure to use --rows=20)");
    -195    addCommandDescriptor(IncrementTest.class, "increment",
    -196      "Increment on each row; clients overlap on keyspace so some concurrent operations");
    -197    addCommandDescriptor(AppendTest.class, "append",
    -198      "Append on each row; clients overlap on keyspace so some concurrent operations");
    -199    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
    -200      "CheckAndMutate on each row; clients overlap on keyspace so some concurrent

    [09/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
    index 4c42811..0bc3ddb 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
    @@ -563,381 +563,390 @@
     555        // If this is first time we've been put off, then emit a log message.
     556        if (fqe.getRequeueCount() <= 0) {
     557          // Note: We don't impose blockingStoreFiles constraint on meta regions
    -558          LOG.warn("Region " + region.getRegionInfo().getEncodedName() + " has too many " +
    -559            "store files; delaying flush up to " + this.blockingWaitTime + "ms");
    -560          if (!this.server.compactSplitThread.requestSplit(region)) {
    -561            try {
    -562              this.server.compactSplitThread.requestSystemCompaction(region,
    -563                  Thread.currentThread().getName());
    -564            } catch (IOException e) {
    -565              e = e instanceof RemoteException ?
    -566                  ((RemoteException)e).unwrapRemoteException() : e;
    -567              LOG.error("Cache flush failed for region " +
    -568                  Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e);
    -569            }
    -570          }
    -571        }
    -572
    -573        // Put back on the queue.  Have it come back out of the queue
    -574        // after a delay of this.blockingWaitTime / 100 ms.
    -575        this.flushQueue.add(fqe.requeue(this.blockingWaitTime / 100));
    -576        // Tell a lie, it's not flushed but it's ok
    -577        return true;
    -578      }
    -579    }
    -580    return flushRegion(region, false, fqe.isForceFlushAllStores(), fqe.getTracker());
    -581  }
    -582
    -583  /**
    -584   * Flush a region.
    -585   * @param region Region to flush.
    -586   * @param emergencyFlush Set if we are being force flushed. If true the region
    -587   * needs to be removed from the flush queue. If false, when we were called
    -588   * from the main flusher run loop and we got the entry to flush by calling
    -589   * poll on the flush queue (which removed it).
    -590   * @param forceFlushAllStores whether we want to flush all store.
    -591   * @return true if the region was successfully flushed, false otherwise. If
    -592   * false, there will be accompanying log messages explaining why the region was
    -593   * not flushed.
    -594   */
    -595  private boolean flushRegion(HRegion region, boolean emergencyFlush, boolean forceFlushAllStores,
    -596      FlushLifeCycleTracker tracker) {
    -597    synchronized (this.regionsInQueue) {
    -598      FlushRegionEntry fqe = this.regionsInQueue.remove(region);
    -599      // Use the start time of the FlushRegionEntry if available
    -600      if (fqe != null && emergencyFlush) {
    -601        // Need to remove from region from delay queue. When NOT an
    -602        // emergencyFlush, then item was removed via a flushQueue.poll.
    -603        flushQueue.remove(fqe);
    -604      }
    -605    }
    -606
    -607    tracker.beforeExecution();
    -608    lock.readLock().lock();
    -609    try {
    -610      notifyFlushRequest(region, emergencyFlush);
    -611      FlushResult flushResult = region.flushcache(forceFlushAllStores, false, tracker);
    -612      boolean shouldCompact = flushResult.isCompactionNeeded();
    -613      // We just want to check the size
    -614      boolean shouldSplit = region.checkSplit() != null;
    -615      if (shouldSplit) {
    -616        this.server.compactSplitThread.requestSplit(region);
    -617      } else if (shouldCompact) {
    -618        server.compactSplitThread.requestSystemCompaction(region, Thread.currentThread().getName());
    -619      }
    -620    } catch (DroppedSnapshotException ex) {
    -621      // Cache flush can fail in a few places. If it fails in a critical
    -622      // section, we get a DroppedSnapshotException and a replay of wal
    -623      // is required. Currently the only way to do this is a restart of
    -624      // the server. Abort because hdfs is probably bad (HBASE-644 is a case
    -625      // where hdfs was bad but passed the hdfs check).
    -626      server.abort("Replay of WAL required. Forcing server shutdown", ex);
    -627      return false;
    -628    } catch (IOException ex) {
    -629      ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
    -630      LOG.error(
    -631        "Cache flush failed"
    -632            + (region != null ? (" for region " +
    -633                Bytes.toStringBinary(region.getRegionInfo().getRegionName()))
    -634              : ""), ex);
    -635      if (!server.checkFileSystem()) {
    -636        return false;
    -637
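
    The requeue path above leans on a delay queue: a put-off flush goes back onto flushQueue with its due time pushed blockingWaitTime/100 ms into the future, so the flusher thread simply polls and only sees the entry again once the delay elapses. A hedged standalone sketch of that pattern using java.util.concurrent.DelayQueue (all names here are illustrative, not the HBase types):

        import java.util.concurrent.DelayQueue;
        import java.util.concurrent.Delayed;
        import java.util.concurrent.TimeUnit;

        public class RequeueDemo {
          // Illustrative stand-in for MemStoreFlusher.FlushRegionEntry.
          static class Entry implements Delayed {
            final String region;
            final long expireAtMs;

            Entry(String region, long delayMs) {
              this.region = region;
              this.expireAtMs = System.currentTimeMillis() + delayMs;
            }

            @Override
            public long getDelay(TimeUnit unit) {
              return unit.convert(expireAtMs - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
            }

            @Override
            public int compareTo(Delayed other) {
              return Long.compare(getDelay(TimeUnit.MILLISECONDS),
                  other.getDelay(TimeUnit.MILLISECONDS));
            }
          }

          public static void main(String[] args) throws InterruptedException {
            DelayQueue<Entry> flushQueue = new DelayQueue<>();
            // "Requeue" with a 100 ms delay; take() blocks until the delay elapses,
            // mirroring how a put-off flush re-emerges from the flusher's queue.
            flushQueue.add(new Entry("regionA", 100));
            Entry e = flushQueue.take();
            System.out.println("flush " + e.region + " is due again");
          }
        }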

    [09/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
    --
    diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
    index 2510283..418c60c 100644
    --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
    +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRangeTest.html
    @@ -77,77 +77,77 @@
     069import org.apache.hadoop.hbase.client.RowMutations;
     070import org.apache.hadoop.hbase.client.Scan;
     071import org.apache.hadoop.hbase.client.Table;
    -072import org.apache.hadoop.hbase.filter.BinaryComparator;
    -073import org.apache.hadoop.hbase.filter.Filter;
    -074import org.apache.hadoop.hbase.filter.FilterAllFilter;
    -075import org.apache.hadoop.hbase.filter.FilterList;
    -076import org.apache.hadoop.hbase.filter.PageFilter;
    -077import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    -078import org.apache.hadoop.hbase.filter.WhileMatchFilter;
    -079import org.apache.hadoop.hbase.io.compress.Compression;
    -080import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    -081import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
    -082import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    -083import org.apache.hadoop.hbase.regionserver.BloomType;
    -084import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
    -085import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
    -086import org.apache.hadoop.hbase.trace.SpanReceiverHost;
    -087import org.apache.hadoop.hbase.trace.TraceUtil;
    -088import org.apache.hadoop.hbase.util.ByteArrayHashKey;
    -089import org.apache.hadoop.hbase.util.Bytes;
    -090import org.apache.hadoop.hbase.util.Hash;
    -091import org.apache.hadoop.hbase.util.MurmurHash;
    -092import org.apache.hadoop.hbase.util.Pair;
    -093import org.apache.hadoop.hbase.util.YammerHistogramUtils;
    -094import org.apache.hadoop.io.LongWritable;
    -095import org.apache.hadoop.io.Text;
    -096import org.apache.hadoop.mapreduce.Job;
    -097import org.apache.hadoop.mapreduce.Mapper;
    -098import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
    -099import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
    -100import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
    -101import org.apache.hadoop.util.Tool;
    -102import org.apache.hadoop.util.ToolRunner;
    -103import org.apache.htrace.core.ProbabilitySampler;
    -104import org.apache.htrace.core.Sampler;
    -105import org.apache.htrace.core.TraceScope;
    -106import org.apache.yetus.audience.InterfaceAudience;
    -107import org.slf4j.Logger;
    -108import org.slf4j.LoggerFactory;
    -109import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
    -110import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
    -111
    -112/**
    -113 * Script used evaluating HBase performance and scalability.  Runs a HBase
    -114 * client that steps through one of a set of hardcoded tests or 'experiments'
    -115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
    -116 * command-line which test to run and how many clients are participating in
    -117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
    -118 *
    -119 * <p>This class sets up and runs the evaluation programs described in
    -120 * Section 7, <i>Performance Evaluation</i>, of the <a
    -121 * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
    -122 * paper, pages 8-10.
    -123 *
    -124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
    -125 * client. Can also run as a non-mapreduce, multithreaded application by
    -126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
    -127 * specified otherwise.
    -128 */
    -129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
    -130public class PerformanceEvaluation extends Configured implements Tool {
    -131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
    -132  static final String RANDOM_READ = "randomRead";
    -133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
    -134  private static final ObjectMapper MAPPER = new ObjectMapper();
    -135  static {
    -136    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
    -137  }
    -138
    -139  public static final String TABLE_NAME = "TestTable";
    -140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
    -141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
    -142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
    +072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
    +073import org.apache.hadoop.hbase.filter.BinaryComparator;
    +074import org.apache.hadoop.hbase.filter.
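
    The class javadoc above describes the driver's two modes (MapReduce by default, multithreaded with --nomapred). A hedged sketch of invoking it programmatically through the standard Hadoop Tool plumbing; the flag values are illustrative, and --help prints the authoritative usage:

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.hbase.HBaseConfiguration;
        import org.apache.hadoop.hbase.PerformanceEvaluation;
        import org.apache.hadoop.util.ToolRunner;

        public class RunPE {
          public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Multithreaded (no MapReduce) sequential-write run; the trailing
            // integer is the client count, per the usage text.
            int rc = ToolRunner.run(conf, new PerformanceEvaluation(conf),
                new String[] { "--nomapred", "sequentialWrite", "1" });
            System.exit(rc);
          }
        }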
    

    [09/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html
    --
    diff --git a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html
    index 386285f..ae1e807 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestReplicator.FailureInjectingReplicationEndpointForTest.html
    @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
     Prev Class
    -Next Class
    +Next Class
     Frames
    @@ -74,14 +74,14 @@ var activeTableTab = "activeTableTab";
     Summary:
    -Nested |
    -Field |
    +Nested |
    +Field |
     Constr |
     Method
     Detail:
    -Field |
    +Field |
     Constr |
     Method
    @@ -142,7 +142,7 @@ var activeTableTab = "activeTableTab";
    -public static class TestReplicator.FailureInjectingReplicationEndpointForTest
    +public static class TestReplicator.FailureInjectingReplicationEndpointForTest
     extends TestReplicator.ReplicationEndpointForTest
    @@ -156,35 +156,6 @@ extends
    -Nested Classes
    -Modifier and Type / Class and Description
    -(package private) static class
    -TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingBlockingInterface
    -class
    -TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingReplicatorForTest
    -Nested classes/interfaces inherited from class org.apache.hadoop.hbase.replication.regionserver.TestReplicator.ReplicationEndpointForTest
    -TestReplicator.ReplicationEndpointForTest.ReplicatorForTest
    -Nested classes/interfaces inherited from class org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint
    -org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.Replicator
    @@ -214,6 +185,24 @@ extends
    +Fields
    +Modifier and Type / Field and Description
    +private AtomicBoolean
    +failNext
    +Fields inherited from class org.apache.hadoop.hbase.replication.regionserver.TestReplicator.ReplicationEndpointForTest
    +batchCount, entriesCount
    @@ -253,7 +242,7 @@ extends Method and Description
    -protected org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.Replicator
    +protected Callable<Integer>
     createReplicator(List<org.apache.hadoop.hbase.wal.WAL.Entry> entries,
                      int ordinal)
    @@ -270,7 +259,7 @@ extends
    @@ -315,6 +304,23 @@ extends
    +Field Detail
    +failNext
    +private final AtomicBoolean failNext
    @@ -327,7 +333,7 @@ extends
     FailureInjectingReplicationEndpointForTest
    -public FailureInjectingReplicationEndpointForTest()
    +public FailureInjectingReplicationEndpointForTest()
    @@ -344,8 +350,8 @@ extends
     createReplicator
    -protected org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint.Replicator createReplicator(List<org.apache.hadoop.hbase.wal.WAL.Entry> entries,
    -                                             int ordinal)
    +protected Callable<Integer> createReplicator(List<org.apache.hadoop.hbase.wal.WAL.Entry> entries,
    +                                             int ordinal)
     Overrides:
     createReplicator in class TestReplicator.ReplicationEndpointForTest
    @@ -381,7 +387,7 @@ extends
     Prev Class
    -Next Class
    +Next Class

    [09/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
    index 8302e28..c370eb9 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionBoundariesInformation.html
    @@ -2113,3031 +2113,3033 @@
     2105          errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
     2106              tableName + " unable to delete dangling table state " + tableState);
     2107        }
    -2108      } else {
    -2109        errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
    -2110            tableName + " has dangling table state " + tableState);
    -2111      }
    -2112    }
    -2113    }
    -2114    // check that all tables have states
    -2115    for (TableName tableName : tablesInfo.keySet()) {
    -2116      if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
    -2117        if (fixMeta) {
    -2118          MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED);
    -2119          TableState newState = MetaTableAccessor.getTableState(connection, tableName);
    -2120          if (newState == null) {
    -2121            errors.reportError(ERROR_CODE.NO_TABLE_STATE,
    -2122                "Unable to change state for table " + tableName + " in meta ");
    -2123          }
    -2124        } else {
    -2125          errors.reportError(ERROR_CODE.NO_TABLE_STATE,
    -2126              tableName + " has no state in meta ");
    -2127        }
    -2128      }
    -2129    }
    -2130  }
    -2131
    -2132  private void preCheckPermission() throws IOException, AccessDeniedException {
    -2133    if (shouldIgnorePreCheckPermission()) {
    -2134      return;
    -2135    }
    -2136
    -2137    Path hbaseDir = FSUtils.getRootDir(getConf());
    -2138    FileSystem fs = hbaseDir.getFileSystem(getConf());
    -2139    UserProvider userProvider = UserProvider.instantiate(getConf());
    -2140    UserGroupInformation ugi = userProvider.getCurrent().getUGI();
    -2141    FileStatus[] files = fs.listStatus(hbaseDir);
    -2142    for (FileStatus file : files) {
    -2143      try {
    -2144        FSUtils.checkAccess(ugi, file, FsAction.WRITE);
    -2145      } catch (AccessDeniedException ace) {
    -2146        LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
    -2147        errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
    -2148          + " does not have write perms to " + file.getPath()
    -2149          + ". Please rerun hbck as hdfs user " + file.getOwner());
    -2150        throw ace;
    -2151      }
    -2152    }
    -2153  }
    -2154
    -2155  /**
    -2156   * Deletes region from meta table
    -2157   */
    -2158  private void deleteMetaRegion(HbckInfo hi) throws IOException {
    -2159    deleteMetaRegion(hi.metaEntry.getRegionName());
    -2160  }
    -2161
    -2162  /**
    -2163   * Deletes region from meta table
    -2164   */
    -2165  private void deleteMetaRegion(byte[] metaKey) throws IOException {
    -2166    Delete d = new Delete(metaKey);
    -2167    meta.delete(d);
    -2168    LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" );
    -2169  }
    -2170
    -2171  /**
    -2172   * Reset the split parent region info in meta table
    -2173   */
    -2174  private void resetSplitParent(HbckInfo hi) throws IOException {
    -2175    RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
    -2176    Delete d = new Delete(hi.metaEntry.getRegionName());
    -2177    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
    -2178    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
    -2179    mutations.add(d);
    -2180
    -2181    RegionInfo hri = RegionInfoBuilder.newBuilder(hi.metaEntry)
    -2182        .setOffline(false)
    -2183        .setSplit(false)
    -2184        .build();
    -2185    Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
    -2186    mutations.add(p);
    -2187
    -2188    meta.mutateRow(mutations);
    -2189    LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
    -2190  }
    -2191
    -2192  /**
    -2193   * This backwards-compatibility wrapper for permanently offlining a region
    -2194   * that should not be alive.  If the region server does not support the
    -2195   * "offline" method, it will use the closest unassign method instead.  This
    -2196   * will basically work until one attempts to disable or delete the affected
    -2197   * table.  The problem has to do with in-memory only master state, so
    -2198   * restarting the HMaster or failing over to another should fix this.
    -2199   */
    -2200  private void offline(byte[] regionName) throws IOException {
    -2201    String regionString =
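
    resetSplitParent above shows a pattern worth calling out: a Delete of the SPLITA/SPLITB qualifiers and a Put of the rewritten RegionInfo are bundled into one RowMutations, so the meta row changes atomically rather than in two visible steps. A hedged sketch of the same pattern against an ordinary table (table, family, and qualifier names here are illustrative):

        import org.apache.hadoop.hbase.TableName;
        import org.apache.hadoop.hbase.client.Connection;
        import org.apache.hadoop.hbase.client.Delete;
        import org.apache.hadoop.hbase.client.Put;
        import org.apache.hadoop.hbase.client.RowMutations;
        import org.apache.hadoop.hbase.client.Table;
        import org.apache.hadoop.hbase.util.Bytes;

        public class AtomicRowUpdate {
          static void resetRow(Connection conn, byte[] row) throws Exception {
            try (Table t = conn.getTable(TableName.valueOf("example"))) {
              RowMutations mutations = new RowMutations(row);
              Delete d = new Delete(row);
              d.addColumn(Bytes.toBytes("info"), Bytes.toBytes("stale"));
              mutations.add(d);
              Put p = new Put(row);
              p.addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"), Bytes.toBytes("ENABLED"));
              mutations.add(p);
              // Both mutations apply to the same row in one atomic step.
              t.mutateRow(mutations);
            }
          }
        }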
    

    [09/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    index 79bf967..c8b113b 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    @@ -115,3514 +115,3517 @@
     107import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
     108import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
     109import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
    -110import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
    -111import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
    -112import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
    -113import org.apache.hadoop.hbase.master.locking.LockManager;
    -114import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
    -115import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
    -116import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
    -117import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
    -118import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
    -119import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
    -120import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
    -121import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
    -122import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
    -123import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
    -124import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
    -125import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
    -126import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
    -127import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
    -128import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
    -129import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
    -130import org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure;
    -131import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
    -132import org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
    -133import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
    -134import org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
    -135import org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure;
    -136import org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
    -137import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
    -138import org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
    -139import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
    -140import org.apache.hadoop.hbase.mob.MobConstants;
    -141import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
    -142import org.apache.hadoop.hbase.monitoring.MonitoredTask;
    -143import org.apache.hadoop.hbase.monitoring.TaskMonitor;
    -144import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
    -145import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
    -146import org.apache.hadoop.hbase.procedure2.LockedResource;
    -147import org.apache.hadoop.hbase.procedure2.Procedure;
    -148import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
    -149import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
    -150import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
    -151import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
    -152import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
    -153import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
    -154import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
    -155import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
    -156import org.apache.hadoop.hbase.quotas.QuotaUtil;
    -157import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
    -158import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
    -159import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
    -160import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
    -161import org.apache.hadoop.hbase.regionserver.HRegionServer;
    -162import org.apache.hadoop.hbase.regionserver.HStore;
    -163import org.apache.hadoop.hbase.regionserver.RSRpcServices;
    -164import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
    -165import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
    -166import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
    -167import
    

    [09/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html 
    b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
    index 1643124..a0f8712 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableImpl.html
    @@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
     
     
     @InterfaceAudience.Private
    -class AsyncTableImpl
    +class AsyncTableImpl
     extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     implements AsyncTableScanResultConsumer
     Just a wrapper of RawAsyncTableImpl. The 
    difference is that users need to provide a
    @@ -384,7 +384,7 @@ implements 
     
     rawTable
    -private finalAsyncTableAdvancedScanResultConsumer rawTable
    +private finalAsyncTableAdvancedScanResultConsumer rawTable
     
     
     
    @@ -393,7 +393,7 @@ implements 
     
     pool
    -private finalhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
     title="class or interface in java.util.concurrent">ExecutorService pool
    +private finalhttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
     title="class or interface in java.util.concurrent">ExecutorService pool
     
     
     
    @@ -410,7 +410,7 @@ implements 
     
     AsyncTableImpl
    -AsyncTableImpl(AsyncConnectionImplconn,
    +AsyncTableImpl(AsyncConnectionImplconn,
    AsyncTableAdvancedScanResultConsumerrawTable,
    https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">ExecutorServicepool)
     
    @@ -429,7 +429,7 @@ implements 
     
     getName
    -publicTableNamegetName()
    +publicTableNamegetName()
     Description copied from 
    interface:AsyncTable
     Gets the fully qualified table name instance of this 
    table.
     
    @@ -444,7 +444,7 @@ implements 
     
     getConfiguration
    -publicorg.apache.hadoop.conf.ConfigurationgetConfiguration()
    +publicorg.apache.hadoop.conf.ConfigurationgetConfiguration()
     Description copied from 
    interface:AsyncTable
     Returns the Configuration object used by this 
    instance.
      
    @@ -461,7 +461,7 @@ implements 
     
     getRpcTimeout
    -publiclonggetRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
    +publiclonggetRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
     title="class or interface in 
    java.util.concurrent">TimeUnitunit)
     Description copied from 
    interface:AsyncTable
     Get timeout of each rpc request in this Table instance. It 
    will be overridden by a more
      specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
    @@ -484,7 +484,7 @@ implements 
     
     getReadRpcTimeout
-public long getReadRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+public long getReadRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
     Description copied from 
    interface:AsyncTable
     Get timeout of each rpc read request in this Table 
    instance.
     
    @@ -503,7 +503,7 @@ implements 
     
     getWriteRpcTimeout
-public long getWriteRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+public long getWriteRpcTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
     Description copied from 
    interface:AsyncTable
     Get timeout of each rpc write request in this Table 
    instance.
     
    @@ -522,7 +522,7 @@ implements 
     
     getOperationTimeout
-public long getOperationTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+public long getOperationTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
     Description copied from 
    interface:AsyncTable
     Get timeout of each operation in Table instance.
     
    @@ -541,7 +541,7 @@ implements 
     
     getScanTimeout
-public long getScanTimeout(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
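The class javadoc above describes AsyncTableImpl as a thin wrapper around RawAsyncTableImpl that completes returned futures on a caller-supplied ExecutorService. A minimal, hedged sketch of that re-dispatch pattern (PoolDispatchingWrapper and its names are illustrative, not the HBase implementation):

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    // Hedged sketch: the raw table completes futures on internal RPC threads;
    // the wrapper moves completion onto a caller-provided pool.
    final class PoolDispatchingWrapper {
      private final ExecutorService pool;

      PoolDispatchingWrapper(ExecutorService pool) {
        this.pool = pool;
      }

      // Completes the returned future on the pool, regardless of which
      // thread completed the raw future.
      <T> CompletableFuture<T> wrap(CompletableFuture<T> rawFuture) {
        CompletableFuture<T> out = new CompletableFuture<>();
        rawFuture.whenComplete((value, error) -> pool.execute(() -> {
          if (error != null) {
            out.completeExceptionally(error);
          } else {
            out.complete(value);
          }
        }));
        return out;
      }

      public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        PoolDispatchingWrapper w = new PoolDispatchingWrapper(pool);
        w.wrap(CompletableFuture.supplyAsync(() -> 42))
            .thenAccept(v -> System.out.println("completed on pool: " + v))
            .join();
        pool.shutdown();
      }
    }

Re-dispatching keeps user callbacks off the RPC threads, so a slow callback cannot stall the connection's I/O.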
    

    [09/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html 
    b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
    index ef30022..abeccf1 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
    @@ -162,11 +162,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     ImmutableBytesWritable
    -TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
    +TableRecordReader.createKey()
     
     
     ImmutableBytesWritable
    -TableRecordReader.createKey()
    +TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
     
     
     ImmutableBytesWritable
    @@ -183,9 +183,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
-TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
org.apache.hadoop.mapred.JobConf job,
-   
org.apache.hadoop.mapred.Reporter reporter)
+   org.apache.hadoop.mapred.Reporter reporter)
    +Builds a TableRecordReader.
    +
     
     
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
    @@ -195,11 +197,9 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
-TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
org.apache.hadoop.mapred.JobConf job,
-   org.apache.hadoop.mapred.Reporter reporter)
-Builds a TableRecordReader.
-
+   
org.apache.hadoop.mapred.Reporter reporter)
     
     
     
    @@ -218,10 +218,12 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-   Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+   Result value,
org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-   org.apache.hadoop.mapred.Reporter reporter)
+   org.apache.hadoop.mapred.Reporter reporter)
    +Pass the key, value to reduce
    +
     
     
     void
    @@ -234,21 +236,19 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     void
-IdentityTableMap.map(ImmutableBytesWritable key,
-   Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+   Result values,
org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-   org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+   org.apache.hadoop.mapred.Reporter reporter)
     
     
     boolean
-TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
+TableRecordReader.next(ImmutableBytesWritable key,
 Result value)
 
 
 boolean
-TableRecordReader.next(ImmutableBytesWritable key,
+TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
 Result value)
     
     
    @@ -281,10 +281,12 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-   Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+   Result value,
org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-   org.apache.hadoop.mapred.Reporter reporter)
+   org.apache.hadoop.mapred.Reporter reporter)
    +Pass the key, value to reduce
    +
     
     
     void
    @@ -297,12 +299,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     void
-IdentityTableMap.map(ImmutableBytesWritable key,
-   Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+   Result values,
org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-   org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+   org.apache.hadoop.mapred.Reporter reporter)
     
     
     void
    @@ -349,7 +349,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     private ImmutableBytesWritable
    -TableRecordReaderImpl.key
    +MultithreadedTableMapper.SubMapRecordReader.key
     
     
     private ImmutableBytesWritable
    @@ -357,7 +357,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     private ImmutableBytesWritable
    -MultithreadedTableMapper.SubMapRecordReader.key
    +TableRecordReaderImpl.key
     
     
     (package private) ImmutableBytesWritable
    @@ -427,33 +427,33 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     ImmutableBytesWritable
    -TableSnapshotInputFormat.TableSnapshotRegionRecordReader.getCurrentKey()
    

    [09/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
     
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
    index 09c0b9c..0e0fe7d 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
    @@ -113,7 +113,8 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public class WriteHeavyIncrementObserver
    +@InterfaceAudience.Private
    +public class WriteHeavyIncrementObserver
     extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     implements RegionCoprocessor, RegionObserver
 An example of implementing a counter that is read much 
less often than it is written, i.e., write heavy.
    @@ -360,7 +361,7 @@ implements 
     
     mask
-private final int mask
+private final int mask
     
     
     
    @@ -369,7 +370,7 @@ implements 
     
     lastTimestamps
-private final org.apache.commons.lang3.mutable.MutableLong[] lastTimestamps
+private final org.apache.commons.lang3.mutable.MutableLong[] lastTimestamps
     
     
     
    @@ -386,7 +387,7 @@ implements 
     
     WriteHeavyIncrementObserver
-public WriteHeavyIncrementObserver()
+public WriteHeavyIncrementObserver()
     
     
     
    @@ -403,7 +404,7 @@ implements 
     
     getRegionObserver
-public https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optional<RegionObserver> getRegionObserver()
+public https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">Optional<RegionObserver> getRegionObserver()
     
     Specified by:
     getRegionObserverin
     interfaceRegionCoprocessor
    @@ -416,7 +417,7 @@ implements 
     
     preFlushScannerOpen
-public void preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
+public void preFlushScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
 Store store,
 ScanOptions options,
 FlushLifeCycleTracker tracker)
    @@ -442,7 +443,7 @@ implements 
     
     createCell
-private Cell createCell(byte[] row,
+private Cell createCell(byte[] row,
 byte[] family,
 byte[] qualifier,
 long ts,
    @@ -455,7 +456,7 @@ implements 
     
     wrap
-private InternalScanner wrap(byte[] family,
+private InternalScanner wrap(byte[] family,
  InternalScanner scanner)
     
     
    @@ -465,7 +466,7 @@ implements 
     
     preFlush
-public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> c,
+public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> c,
 Store store,
 InternalScanner scanner,
 FlushLifeCycleTracker tracker)
    @@ -494,7 +495,7 @@ implements 
     
     preCompactScannerOpen
-public void preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
+public void preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
   Store store,
   ScanType scanType,
   ScanOptions options,
    @@ -525,7 +526,7 @@ implements 
     
     preCompact
-public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
+public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
   Store store,
   InternalScanner scanner,
   ScanType scanType,
    @@ -564,7 +565,7 @@ implements 
     
     preMemStoreCompactionCompactScannerOpen
-public void preMemStoreCompactionCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
+public void preMemStoreCompactionCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
 Store store,
 ScanOptions options)
      throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
    @@ -591,7 +592,7 @@ implements 
     
     preMemStoreCompactionCompact
-public InternalScanner preMemStoreCompactionCompact(ObserverContext<RegionCoprocessorEnvironment> c,
+public InternalScanner preMemStoreCompactionCompact(ObserverContext<RegionCoprocessorEnvironment> c,
 Store store,
 InternalScanner scanner)
      throws 
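The method list above (scanner wrapping in preFlush/preCompact, plus createCell) suggests the write-heavy counter idea: record each increment as its own entry and fold entries into one value lazily. A hedged, self-contained sketch of that idea away from the coprocessor API (WriteHeavyCounter is an illustrative name):

    import java.util.ArrayList;
    import java.util.List;

    // Hedged sketch: writers append deltas, readers sum them, and a periodic
    // "compaction" collapses the deltas, which is roughly the role the wrapped
    // flush/compaction scanners appear to play in the observer above.
    final class WriteHeavyCounter {
      private final List<Long> deltas = new ArrayList<>();

      // Write path: append-only, no read-modify-write.
      synchronized void increment(long delta) {
        deltas.add(delta);
      }

      // Read path: fold all recorded deltas into one value.
      synchronized long read() {
        long sum = 0;
        for (long d : deltas) {
          sum += d;
        }
        return sum;
      }

      // "Compaction": replace the accumulated deltas with their sum.
      synchronized void compact() {
        long sum = read();
        deltas.clear();
        deltas.add(sum);
      }

      public static void main(String[] args) {
        WriteHeavyCounter c = new WriteHeavyCounter();
        for (int i = 0; i < 1000; i++) {
          c.increment(1);
        }
        c.compact();
        System.out.println(c.read()); // 1000
      }
    }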

    [09/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.SyncRunner.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.SyncRunner.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.SyncRunner.html
    index 9971079..03c8b000 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.SyncRunner.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.SyncRunner.html
    @@ -49,1067 +49,1082 @@
     041import org.apache.hadoop.fs.Path;
     042import 
    org.apache.hadoop.hbase.HConstants;
     043import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -044import 
    org.apache.hadoop.hbase.trace.TraceUtil;
    -045import 
    org.apache.hadoop.hbase.util.Bytes;
    -046import 
    org.apache.hadoop.hbase.util.ClassSize;
    -047import 
    org.apache.hadoop.hbase.util.FSUtils;
    -048import 
    org.apache.hadoop.hbase.util.HasThread;
    -049import 
    org.apache.hadoop.hbase.util.Threads;
    -050import 
    org.apache.hadoop.hbase.wal.FSHLogProvider;
    -051import 
    org.apache.hadoop.hbase.wal.WALEdit;
    -052import 
    org.apache.hadoop.hbase.wal.WALKeyImpl;
    -053import 
    org.apache.hadoop.hbase.wal.WALProvider.Writer;
    -054import 
    org.apache.hadoop.hdfs.DFSOutputStream;
    -055import 
    org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    -056import 
    org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    -057import 
    org.apache.htrace.core.TraceScope;
    -058import 
    org.apache.yetus.audience.InterfaceAudience;
    -059import org.slf4j.Logger;
    -060import org.slf4j.LoggerFactory;
    -061import 
    org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
    -062
    -063/**
    -064 * The default implementation of FSWAL.
    -065 */
    -066@InterfaceAudience.Private
    -067public class FSHLog extends 
AbstractFSWAL<Writer> {
    -068  // IMPLEMENTATION NOTES:
    -069  //
    -070  // At the core is a ring buffer. Our 
    ring buffer is the LMAX Disruptor. It tries to
    -071  // minimize synchronizations and 
    volatile writes when multiple contending threads as is the case
    -072  // here appending and syncing on a 
    single WAL. The Disruptor is configured to handle multiple
    -073  // producers but it has one consumer 
    only (the producers in HBase are IPC Handlers calling append
    -074  // and then sync). The single 
    consumer/writer pulls the appends and syncs off the ring buffer.
    -075  // When a handler calls sync, it is 
    given back a future. The producer 'blocks' on the future so
    -076  // it does not return until the sync 
    completes. The future is passed over the ring buffer from
    -077  // the producer/handler to the consumer 
    thread where it does its best to batch up the producer
    -078  // syncs so one WAL sync actually spans 
    multiple producer sync invocations. How well the
    -079  // batching works depends on the write 
    rate; i.e. we tend to batch more in times of
    -080  // high writes/syncs.
    -081  //
    -082  // Calls to append now also wait until 
    the append has been done on the consumer side of the
    -083  // disruptor. We used to not wait but 
    it makes the implementation easier to grok if we have
    -084  // the region edit/sequence id after 
    the append returns.
    -085  //
    -086  // TODO: Handlers need to coordinate 
    appending AND syncing. Can we have the threads contend
    -087  // once only? Probably hard given syncs 
    take way longer than an append.
    -088  //
    -089  // The consumer threads pass the syncs 
    off to multiple syncing threads in a round robin fashion
    -090  // to ensure we keep up back-to-back FS 
    sync calls (FS sync calls are the long poll writing the
    -091  // WAL). The consumer thread passes the 
    futures to the sync threads for it to complete
    -092  // the futures when done.
    -093  //
    -094  // The 'sequence' in the below is the 
    sequence of the append/sync on the ringbuffer. It
    -095  // acts as a sort-of transaction id. It 
    is always incrementing.
    -096  //
    -097  // The RingBufferEventHandler class 
    hosts the ring buffer consuming code. The threads that
    -098  // do the actual FS sync are 
    implementations of SyncRunner. SafePointZigZagLatch is a
    -099  // synchronization class used to halt 
    the consumer at a safe point -- just after all outstanding
    -100  // syncs and appends have completed -- 
    so the log roller can swap the WAL out under it.
    -101  //
    -102  // We use ring buffer sequence as txid 
    of FSWALEntry and SyncFuture.
    -103  private static final Logger LOG = 
    LoggerFactory.getLogger(FSHLog.class);
    -104
    -105  /**
    -106   * The nexus at which all incoming 
    handlers meet. Does appends and sync with an ordering. Appends
    -107   * and syncs are each put on the ring 
    which means handlers need to smash up against the ring twice
    -108   * (can we make it once only? ... maybe 
    not since time to append is so different from time to sync
    -109   * and sometimes we don't want to sync 
    or we want to async the sync). The ring is where we make
    -110   * sure of our ordering and it is also 
    where we do batching up of 
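The notes break off mid-sentence in this partial archive, but the batching idea they describe stands on its own: producers block on futures while a single consumer drains whatever sync requests have queued up and completes the whole batch with one filesystem sync. A hedged sketch with a plain BlockingQueue standing in for the LMAX Disruptor (BatchingSyncer is an illustrative name):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.LinkedBlockingQueue;

    final class BatchingSyncer {
      private final BlockingQueue<CompletableFuture<Void>> pending =
          new LinkedBlockingQueue<>();

      // Producer side: handlers call sync() and block on the returned future.
      CompletableFuture<Void> sync() {
        CompletableFuture<Void> f = new CompletableFuture<>();
        pending.add(f);
        return f;
      }

      // Consumer side: one thread loops here; each pass completes every sync
      // request queued so far, so one (simulated) FS sync spans many sync() calls.
      void runOnce() throws InterruptedException {
        List<CompletableFuture<Void>> batch = new ArrayList<>();
        batch.add(pending.take());   // wait for at least one request
        pending.drainTo(batch);      // grab everything else already queued
        // ... a real fs.hflush()/hsync() would go here ...
        batch.forEach(f -> f.complete(null));
      }

      public static void main(String[] args) {
        BatchingSyncer s = new BatchingSyncer();
        Thread consumer = new Thread(() -> {
          try {
            while (true) {
              s.runOnce();
            }
          } catch (InterruptedException ignored) {
          }
        });
        consumer.setDaemon(true);
        consumer.start();
        s.sync().join();
        System.out.println("synced");
      }
    }

The busier the producers, the larger each drained batch, matching the note that batching improves at high write/sync rates.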

    [09/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
    index 3bc66bb..97aa79c 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureRetainer.html
    @@ -1435,459 +1435,460 @@
     1427   */
     1428  private void execProcedure(final 
    RootProcedureState procStack,
     1429  final 
Procedure<TEnvironment> procedure) {
    -1430
    Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
    -1431
    -1432// Procedures can suspend 
    themselves. They skip out by throwing a ProcedureSuspendedException.
    -1433// The exception is caught below and 
    then we hurry to the exit without disturbing state. The
    -1434// idea is that the processing of 
    this procedure will be unsuspended later by an external event
    -1435// such the report of a region open. 
    TODO: Currently, its possible for two worker threads
    -1436// to be working on the same 
    procedure concurrently (locking in procedures is NOT about
    -1437// concurrency but about tying an 
    entity to a procedure; i.e. a region to a particular
    -1438// procedure instance). This can 
    make for issues if both threads are changing state.
    -1439// See 
    env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
    -1440// in 
    RegionTransitionProcedure#reportTransition for example of Procedure putting
    -1441// itself back on the scheduler 
    making it possible for two threads running against
    -1442// the one Procedure. Might be ok if 
    they are both doing different, idempotent sections.
    -1443boolean suspended = false;
    -1444
    -1445// Whether to 're-' -execute; run 
    through the loop again.
    -1446boolean reExecute = false;
    -1447
-1448Procedure<TEnvironment>[] 
    subprocs = null;
    -1449do {
    -1450  reExecute = false;
    -1451  try {
    -1452subprocs = 
    procedure.doExecute(getEnvironment());
-1453if (subprocs != null && 
    subprocs.length == 0) {
    -1454  subprocs = null;
    -1455}
    -1456  } catch 
    (ProcedureSuspendedException e) {
    -1457if (LOG.isTraceEnabled()) {
    -1458  LOG.trace("Suspend " + 
    procedure);
    -1459}
    -1460suspended = true;
    -1461  } catch (ProcedureYieldException 
    e) {
    -1462if (LOG.isTraceEnabled()) {
    -1463  LOG.trace("Yield " + procedure 
    + ": " + e.getMessage(), e);
    -1464}
    -1465scheduler.yield(procedure);
    -1466return;
    -1467  } catch (InterruptedException e) 
    {
    -1468if (LOG.isTraceEnabled()) {
    -1469  LOG.trace("Yield interrupt " + 
    procedure + ": " + e.getMessage(), e);
    -1470}
    -1471
    handleInterruptedException(procedure, e);
    -1472scheduler.yield(procedure);
    -1473return;
    -1474  } catch (Throwable e) {
    -1475// Catch NullPointerExceptions 
    or similar errors...
    -1476String msg = "CODE-BUG: Uncaught 
    runtime exception: " + procedure;
    -1477LOG.error(msg, e);
    -1478procedure.setFailure(new 
    RemoteProcedureException(msg, e));
    -1479  }
    -1480
    -1481  if (!procedure.isFailed()) {
    -1482if (subprocs != null) {
-1483  if (subprocs.length == 1 && 
subprocs[0] == procedure) {
    -1484// Procedure returned 
    itself. Quick-shortcut for a state machine-like procedure;
    -1485// i.e. we go around this 
    loop again rather than go back out on the scheduler queue.
    -1486subprocs = null;
    -1487reExecute = true;
    -1488if (LOG.isTraceEnabled()) 
    {
    -1489  LOG.trace("Short-circuit 
    to next step on pid=" + procedure.getProcId());
    -1490}
    -1491  } else {
    -1492// Yield the current 
    procedure, and make the subprocedure runnable
    -1493// subprocs may come back 
    'null'.
    -1494subprocs = 
    initializeChildren(procStack, procedure, subprocs);
    -1495LOG.info("Initialized 
    subprocedures=" +
    -1496  (subprocs == null? null:
    -1497
    Stream.of(subprocs).map(e - "{" + e.toString() + "}").
    -1498
    collect(Collectors.toList()).toString()));
    -1499  }
    -1500} else if (procedure.getState() 
    == ProcedureState.WAITING_TIMEOUT) {
    -1501  if (LOG.isTraceEnabled()) {
    -1502LOG.trace("Added to 
    timeoutExecutor " + procedure);
    -1503  }
    -1504  
    timeoutExecutor.add(procedure);
    -1505} else if (!suspended) {
    -1506  // No subtask, so we are 
    done
    -1507  
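The loop above short-circuits when doExecute() returns the procedure itself: reExecute is set and the next state runs inline rather than bouncing back through the scheduler queue. A hedged, toy-sized rendering of that control flow (MiniExecutor and Step are illustrative names, not the HBase API):

    // A step returns the next step to run inline, or null when done.
    final class MiniExecutor {
      interface Step {
        Step execute() throws Exception;
      }

      static void run(Step procedure) {
        Step current = procedure;
        boolean reExecute;
        do {
          reExecute = false;
          try {
            Step next = current.execute();
            if (next != null) {
              // "Returned itself": loop in place, like the pid short-circuit
              // above; a distinct child would be scheduled in the real code.
              current = next;
              reExecute = true;
            }
          } catch (Exception e) {
            System.err.println("CODE-BUG style catch-all: " + e);
          }
        } while (reExecute);
      }

      public static void main(String[] args) {
        int[] steps = {0};
        Step counter = new Step() {
          public Step execute() {
            steps[0]++;
            return steps[0] < 3 ? this : null; // return itself twice, then stop
          }
        };
        run(counter);
        System.out.println("steps run: " + steps[0]); // 3
      }
    }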
    

    [09/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/util/FSTableDescriptors.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/util/FSTableDescriptors.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/util/FSTableDescriptors.html
    index 7226d18..f065ddb 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/util/FSTableDescriptors.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/FSTableDescriptors.html
    @@ -45,733 +45,736 @@
     037import org.apache.hadoop.fs.FileSystem;
     038import org.apache.hadoop.fs.Path;
     039import org.apache.hadoop.fs.PathFilter;
    -040import 
    org.apache.yetus.audience.InterfaceAudience;
    -041import org.slf4j.Logger;
    -042import org.slf4j.LoggerFactory;
    -043import 
    org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    -044import 
    org.apache.hadoop.hbase.client.TableDescriptor;
    -045import 
    org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    -046import 
    org.apache.hadoop.hbase.Coprocessor;
    -047import 
    org.apache.hadoop.hbase.exceptions.DeserializationException;
    -048import 
    org.apache.hadoop.hbase.HConstants;
    -049import 
    org.apache.hadoop.hbase.regionserver.BloomType;
    -050import 
    org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
    -051import 
    org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
    -052import 
    org.apache.hadoop.hbase.TableDescriptors;
    -053import 
    org.apache.hadoop.hbase.TableInfoMissingException;
    -054import 
    org.apache.hadoop.hbase.TableName;
    -055
    -056/**
    -057 * Implementation of {@link 
    TableDescriptors} that reads descriptors from the
    -058 * passed filesystem.  It expects 
    descriptors to be in a file in the
    -059 * {@link #TABLEINFO_DIR} subdir of the 
    table's directory in FS.  Can be read-only
    -060 *  -- i.e. does not modify the 
    filesystem or can be read and write.
    -061 *
-062 * <p>Also has utility for keeping 
    up the table descriptors tableinfo file.
    -063 * The table schema file is kept in the 
    {@link #TABLEINFO_DIR} subdir
    -064 * of the table directory in the 
    filesystem.
    -065 * It has a {@link 
    #TABLEINFO_FILE_PREFIX} and then a suffix that is the
    -066 * edit sequenceid: e.g. 
<code>.tableinfo.03</code>.  This sequenceid
    -067 * is always increasing.  It starts at 
    zero.  The table schema file with the
    -068 * highest sequenceid has the most recent 
    schema edit. Usually there is one file
    -069 * only, the most recent but there may be 
    short periods where there are more
    -070 * than one file. Old files are 
    eventually cleaned.  Presumption is that there
    -071 * will not be lots of concurrent clients 
    making table schema edits.  If so,
    -072 * the below needs a bit of a reworking 
    and perhaps some supporting api in hdfs.
    -073 */
    -074@InterfaceAudience.Private
    -075public class FSTableDescriptors 
    implements TableDescriptors {
    -076  private static final Logger LOG = 
    LoggerFactory.getLogger(FSTableDescriptors.class);
    -077  private final FileSystem fs;
    -078  private final Path rootdir;
    -079  private final boolean fsreadonly;
    -080  private volatile boolean usecache;
    -081  private volatile boolean fsvisited;
    -082
    -083  @VisibleForTesting
    -084  long cachehits = 0;
    +040import 
    org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
    +041import 
    org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
    +042import 
    org.apache.yetus.audience.InterfaceAudience;
    +043import org.slf4j.Logger;
    +044import org.slf4j.LoggerFactory;
    +045import 
    org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    +046import 
    org.apache.hadoop.hbase.client.TableDescriptor;
    +047import 
    org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    +048import 
    org.apache.hadoop.hbase.Coprocessor;
    +049import 
    org.apache.hadoop.hbase.exceptions.DeserializationException;
    +050import 
    org.apache.hadoop.hbase.HConstants;
    +051import 
    org.apache.hadoop.hbase.regionserver.BloomType;
    +052import 
    org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
    +053import 
    org.apache.hbase.thirdparty.com.google.common.primitives.Ints;
    +054import 
    org.apache.hadoop.hbase.TableDescriptors;
    +055import 
    org.apache.hadoop.hbase.TableInfoMissingException;
    +056import 
    org.apache.hadoop.hbase.TableName;
    +057
    +058/**
    +059 * Implementation of {@link 
    TableDescriptors} that reads descriptors from the
    +060 * passed filesystem.  It expects 
    descriptors to be in a file in the
    +061 * {@link #TABLEINFO_DIR} subdir of the 
    table's directory in FS.  Can be read-only
    +062 *  -- i.e. does not modify the 
    filesystem or can be read and write.
    +063 *
+064 * <p>Also has utility for keeping 
    up the table descriptors tableinfo file.
    +065 * The table schema file is kept in the 
    {@link #TABLEINFO_DIR} subdir
    +066 * of the table directory in the 
    filesystem.
    +067 * It has a {@link 
    #TABLEINFO_FILE_PREFIX} and then a suffix that is the
    +068 * edit sequenceid: e.g. 
<code>.tableinfo.03</code>.  This sequenceid
    +069 * is always increasing.  It 
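The listing breaks off here, but the rule in this javadoc is complete: the tableinfo file with the highest numeric suffix carries the current schema. A hedged sketch of that selection over a listing of file names (TableInfoPicker and the names are illustrative):

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.Optional;

    final class TableInfoPicker {
      static final String PREFIX = ".tableinfo.";

      // Parse the always-increasing edit sequenceid out of the suffix.
      static long sequenceIdOf(String fileName) {
        return Long.parseLong(fileName.substring(PREFIX.length()));
      }

      // "The table schema file with the highest sequenceid has the most
      // recent schema edit."
      static Optional<String> newest(String[] fileNames) {
        return Arrays.stream(fileNames)
            .filter(n -> n.startsWith(PREFIX))
            .max(Comparator.comparingLong(TableInfoPicker::sequenceIdOf));
      }

      public static void main(String[] args) {
        String[] dir = {
            ".tableinfo.0000000001",
            ".tableinfo.0000000003",
            ".tableinfo.0000000002"
        };
        System.out.println(newest(dir).orElse("none")); // .tableinfo.0000000003
      }
    }

Old files are eventually cleaned, so most of the time the listing holds a single entry and the max is trivial.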

    [09/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
    index 0f17e58..7ae1c82 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":9,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":9,"i35":10,"i36":9,"i37":9,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":41,"i93":41,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
     
    :10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10};
    +var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":9,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":9,"i35":10,"i36":9,"i37":9,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":41,"i94":41,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
     
    :10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10};
     var tabs = {65535:["t0","All Methods"],1:["t1","Static 
    Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
    Methods"],32:["t6","Deprecated Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
     
     
     @InterfaceAudience.Private
    -public class HStore
    +public class HStore
     extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver
 A Store holds a column family in a Region.  It's a memstore 
    and a set of zero
    @@ -242,103 +242,111 @@ implements cryptoContext
     
     
    +private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicInteger.html?is-external=true;
     title="class or interface in 
    java.util.concurrent.atomic">AtomicInteger
    +currentParallelPutCount
    +
    +
     private HFileDataBlockEncoder
     dataBlockEncoder
     
    -
    +
     static long
     DEEP_OVERHEAD
     
    -
    +
     static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String
     DEFAULT_BLOCK_STORAGE_POLICY
     
    -
    +
     static int
     DEFAULT_BLOCKING_STOREFILE_COUNT
     
    -
    +
     static int
     DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER
     
    -
    +
     private static int
     DEFAULT_FLUSH_RETRIES_NUMBER
     
    -
    +
     private ColumnFamilyDescriptor
     family
     
    -
    +
     private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListHStoreFile
     filesCompacting
     
    -
    +
     static long
     FIXED_OVERHEAD
     
    -
    +
     private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
     title="class or interface in 
    

    [09/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    index c7d05d1..abcb738 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    @@ -143,18 +143,18 @@
     
     
     void
-HFileDataBlockEncoderImpl.saveMetadata(HFile.Writer writer)
-
-
-void
 NoOpDataBlockEncoder.saveMetadata(HFile.Writer writer)
 
-
+
 void
 HFileDataBlockEncoder.saveMetadata(HFile.Writer writer)
 Save metadata in HFile which will be written to disk
 
 
+
+void
+HFileDataBlockEncoderImpl.saveMetadata(HFile.Writer writer)
    +
     
     
     
    @@ -203,18 +203,18 @@
     
     
     
    -void
-RowColBloomContext.addLastBloomKey(HFile.Writer writer)
+abstract void
+BloomContext.addLastBloomKey(HFile.Writer writer)
+Adds the last bloom key to the HFile Writer as part of 
StorefileWriter close.
+
 
 
 void
 RowBloomContext.addLastBloomKey(HFile.Writer writer)
 
 
-abstract void
-BloomContext.addLastBloomKey(HFile.Writer writer)
-Adds the last bloom key to the HFile Writer as part of 
StorefileWriter close.
-
+void
+RowColBloomContext.addLastBloomKey(HFile.Writer writer)
     
     
     static BloomFilterWriter
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    index b55ecd8..e1139cc 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    @@ -106,15 +106,15 @@
     
     
     
    +private HFileBlock.Writer
    +HFileBlockIndex.BlockIndexWriter.blockWriter
    +
    +
     protected HFileBlock.Writer
     HFileWriterImpl.blockWriter
     block writer
     
     
    -
    -private HFileBlock.Writer
    -HFileBlockIndex.BlockIndexWriter.blockWriter
    -
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    index 29c8b1e..fabd03f 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    @@ -136,15 +136,15 @@
     
     
     HFileContext
    -HFileBlockDecodingContext.getHFileContext()
    +HFileBlockEncodingContext.getHFileContext()
     
     
     HFileContext
    -HFileBlockDefaultDecodingContext.getHFileContext()
    +HFileBlockDecodingContext.getHFileContext()
     
     
     HFileContext
    -HFileBlockEncodingContext.getHFileContext()
    +HFileBlockDefaultDecodingContext.getHFileContext()
     
     
     HFileContext
    @@ -224,23 +224,23 @@
     
     
     private HFileContext
    +HFile.WriterFactory.fileContext
    +
    +
    +private HFileContext
     HFileBlock.fileContext
     Meta data that holds meta information on the 
    hfileblock.
     
     
    -
    +
     private HFileContext
     HFileBlock.Writer.fileContext
     Meta data that holds information about the hfileblock
     
     
    -
    -private HFileContext
    -HFileBlock.FSReaderImpl.fileContext
    -
     
     private HFileContext
    -HFile.WriterFactory.fileContext
    +HFileBlock.FSReaderImpl.fileContext
     
     
     private HFileContext
    @@ -277,20 +277,20 @@
     
     
     HFileContext
    -HFileWriterImpl.getFileContext()
    -
    -
    -HFileContext
     HFile.Writer.getFileContext()
     Return the file context for the HFile this writer belongs 
    to
     
     
    -
    +
     HFileContext
     HFile.Reader.getFileContext()
     Return the file context of the HFile this reader belongs 
    to
     
     
    +
    +HFileContext
    +HFileWriterImpl.getFileContext()
    +
     
     HFileContext
     HFileReaderImpl.getFileContext()
    @@ -323,35 +323,35 @@
     
     
     HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
-
-
-HFileBlockDecodingContext
 NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)
 
-
+
 HFileBlockDecodingContext
 HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
 create an encoder-specific decoding context for 
reading.
 
 
-
-HFileBlockEncodingContext
-HFileDataBlockEncoderImpl.newDataBlockEncodingContext(byte[] dummyHeader,
-   HFileContext fileContext)
-
 
+HFileBlockDecodingContext
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
+
+
 HFileBlockEncodingContext
 NoOpDataBlockEncoder.newDataBlockEncodingContext(byte[] dummyHeader,
HFileContext meta)
     
    -
    +
     HFileBlockEncodingContext
     

    [09/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html 
    b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
    index abeccf1..ef30022 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
    @@ -162,11 +162,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     ImmutableBytesWritable
    -TableRecordReader.createKey()
    +TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
     
     
     ImmutableBytesWritable
    -TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
    +TableRecordReader.createKey()
     
     
     ImmutableBytesWritable
    @@ -183,11 +183,9 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
-TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
org.apache.hadoop.mapred.JobConf job,
-   org.apache.hadoop.mapred.Reporter reporter)
-Builds a TableRecordReader.
-
+   
org.apache.hadoop.mapred.Reporter reporter)
     
     
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
    @@ -197,9 +195,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
-TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
org.apache.hadoop.mapred.JobConf job,
-   
org.apache.hadoop.mapred.Reporter reporter)
+   org.apache.hadoop.mapred.Reporter reporter)
    +Builds a TableRecordReader.
    +
     
     
     
    @@ -218,12 +218,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     void
-IdentityTableMap.map(ImmutableBytesWritable key,
-   Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+   Result values,
org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-   org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+   org.apache.hadoop.mapred.Reporter reporter)
     
     
     void
    @@ -236,19 +234,21 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-   Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+   Result value,
org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-   org.apache.hadoop.mapred.Reporter reporter)
+   org.apache.hadoop.mapred.Reporter reporter)
    +Pass the key, value to reduce
    +
     
     
     boolean
-TableRecordReader.next(ImmutableBytesWritable key,
+TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
 Result value)
 
 
 boolean
-TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
+TableRecordReader.next(ImmutableBytesWritable key,
 Result value)
     
     
    @@ -281,12 +281,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     void
-IdentityTableMap.map(ImmutableBytesWritable key,
-   Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+   Result values,
org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-   org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+   org.apache.hadoop.mapred.Reporter reporter)
     
     
     void
    @@ -299,10 +297,12 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-   Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+   Result value,
org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-   org.apache.hadoop.mapred.Reporter reporter)
+   org.apache.hadoop.mapred.Reporter reporter)
    +Pass the key, value to reduce
    +
     
     
     void
    @@ -349,7 +349,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     private ImmutableBytesWritable
    -MultithreadedTableMapper.SubMapRecordReader.key
    +TableRecordReaderImpl.key
     
     
     private ImmutableBytesWritable
    @@ -357,7 +357,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     private ImmutableBytesWritable
    -TableRecordReaderImpl.key
    +MultithreadedTableMapper.SubMapRecordReader.key
     
     
     (package private) ImmutableBytesWritable
    @@ -427,33 +427,33 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     ImmutableBytesWritable
    -MultithreadedTableMapper.SubMapRecordReader.getCurrentKey()
    

    [09/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
    index 8e07c04..8c36b0f 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
    @@ -16,11 +16,11 @@
     008@InterfaceAudience.Private
     009public class Version {
     010  public static final String version = 
    "3.0.0-SNAPSHOT";
    -011  public static final String revision = 
    "485af49e53cb38e2af4635f2c3bc0b33e15ba0a1";
    +011  public static final String revision = 
    "b7b86839250bf9b295ebc1948826f43a88736d6c";
     012  public static final String user = 
    "jenkins";
    -013  public static final String date = "Sun 
    Mar  4 05:45:33 UTC 2018";
    +013  public static final String date = "Mon 
    Mar  5 14:41:23 UTC 2018";
     014  public static final String url = 
    "git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
    -015  public static final String srcChecksum 
    = "ae056300970e553626dc6912e61066bd";
    +015  public static final String srcChecksum 
    = "a96590ff72b740efcf08fb6bef3bd5d5";
     016}
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptor.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptor.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptor.html
    index 1538cfd..b437410 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptor.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptor.html
    @@ -240,69 +240,75 @@
     232  boolean 
    hasRegionMemStoreReplication();
     233
     234  /**
    -235   * Check if the compaction enable flag 
    of the table is true. If flag is false
    -236   * then no minor/major compactions will 
actually be run.
    -237   *
    -238   * @return true if table compaction 
    enabled
    -239   */
    -240  boolean isCompactionEnabled();
    -241
    -242  /**
-243   * Checks if this table is <code> 
hbase:meta </code> region.
    -244   *
    -245   * @return true if this table is 
<code> hbase:meta </code> region
    -246   */
    -247  boolean isMetaRegion();
    -248
    -249  /**
    -250   * Checks if the table is a 
<code>hbase:meta</code> table
    -251   *
    -252   * @return true if table is 
<code> hbase:meta </code> region.
    -253   */
    -254  boolean isMetaTable();
    -255
    -256  /**
    -257   * Check if normalization enable flag 
    of the table is true. If flag is false
-258   * then the region normalizer won't 
    attempt to normalize this table.
    -259   *
    -260   * @return true if region normalization 
    is enabled for this table
    -261   */
    -262  boolean isNormalizationEnabled();
    -263
    -264  /**
    -265   * Check if the readOnly flag of the 
    table is set. If the readOnly flag is set
    -266   * then the contents of the table can 
    only be read from but not modified.
    -267   *
    -268   * @return true if all columns in the 
    table should be read only
    -269   */
    -270  boolean isReadOnly();
    -271
    -272  /**
    -273   * Check if the table's cfs' 
    replication scope matched with the replication state
    -274   * @param enabled replication state
    -275   * @return true if matched, otherwise 
    false
    -276   */
    -277  default boolean 
    matchReplicationScope(boolean enabled) {
    -278boolean hasEnabled = false;
    -279boolean hasDisabled = false;
    -280
    -281for (ColumnFamilyDescriptor cf : 
    getColumnFamilies()) {
    -282  if (cf.getScope() != 
    HConstants.REPLICATION_SCOPE_GLOBAL) {
    -283hasDisabled = true;
    -284  } else {
    -285hasEnabled = true;
    -286  }
    -287}
    -288
-289if (hasEnabled && 
    hasDisabled) {
    -290  return false;
    -291}
    -292if (hasEnabled) {
    -293  return enabled;
    -294}
    -295return !enabled;
    -296  }
    -297}
+235   * @return true if there is at least 
    one cf whose replication scope is serial.
    +236   */
    +237  boolean hasSerialReplicationScope();
    +238
    +239  /**
    +240   * Check if the compaction enable flag 
    of the table is true. If flag is false
    +241   * then no minor/major compactions will 
actually be run.
    +242   *
    +243   * @return true if table compaction 
    enabled
    +244   */
    +245  boolean isCompactionEnabled();
    +246
    +247  /**
+248   * Checks if this table is <code> 
hbase:meta </code> region.
    +249   *
    +250   * @return true if this table is 
<code> hbase:meta </code> region
    +251   */
    +252  boolean isMetaRegion();
    +253
    +254  /**
    +255   * Checks if the table is a 
<code>hbase:meta</code> table
    +256   *
    +257   * @return true if table is 
<code> hbase:meta </code> region.
    +258   */
    +259  boolean isMetaTable();
    +260
    +261  /**
    +262   * Check if normalization enable flag 
    of the table is true. If flag is false
+263   * then the region normalizer won't 
    attempt to normalize this table.
    +264   *
    +265   * @return true if region normalization 
    is 
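The hunk breaks off above, but the removed side shows matchReplicationScope in full, so its logic can be rendered standalone: mixed per-family scopes never match, otherwise the uniform scope must equal the requested state. A sketch with a plain int standing in for ColumnFamilyDescriptor#getScope() (1 plays HConstants.REPLICATION_SCOPE_GLOBAL; the class name is illustrative):

    import java.util.List;

    final class ReplicationScopeCheck {
      static boolean matchReplicationScope(List<Integer> cfScopes, boolean enabled) {
        boolean hasEnabled = false;
        boolean hasDisabled = false;
        for (int scope : cfScopes) {
          if (scope != 1) {          // not REPLICATION_SCOPE_GLOBAL
            hasDisabled = true;
          } else {
            hasEnabled = true;
          }
        }
        if (hasEnabled && hasDisabled) {
          return false;              // mixed scopes match neither state
        }
        return hasEnabled ? enabled : !enabled;
      }

      public static void main(String[] args) {
        System.out.println(matchReplicationScope(List.of(1, 1), true));  // true
        System.out.println(matchReplicationScope(List.of(0, 1), true));  // false
        System.out.println(matchReplicationScope(List.of(0, 0), false)); // true
      }
    }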

    [09/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/RetriesExhaustedException.html
    --
    diff --git 
    a/apidocs/org/apache/hadoop/hbase/client/RetriesExhaustedException.html 
    b/apidocs/org/apache/hadoop/hbase/client/RetriesExhaustedException.html
    index f98f198..dcee4f2 100644
    --- a/apidocs/org/apache/hadoop/hbase/client/RetriesExhaustedException.html
    +++ b/apidocs/org/apache/hadoop/hbase/client/RetriesExhaustedException.html
    @@ -91,16 +91,16 @@
     
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">java.lang.Object
    +https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">java.lang.Object
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
     title="class or interface in java.lang">java.lang.Throwable
    +https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
     title="class or interface in java.lang">java.lang.Throwable
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
     title="class or interface in java.lang">java.lang.Exception
    +https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
     title="class or interface in java.lang">java.lang.Exception
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">java.io.IOException
    +https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">java.io.IOException
     
     
     org.apache.hadoop.hbase.client.RetriesExhaustedException
    @@ -118,7 +118,7 @@
     
     
     All Implemented Interfaces:
    -http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
     title="class or interface in java.io">Serializable
    +https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
     title="class or interface in java.io">Serializable
     
     
     Direct Known Subclasses:
    @@ -128,7 +128,7 @@
     
     @InterfaceAudience.Public
     public class RetriesExhaustedException
    -extends http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
    +extends https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     Exception thrown by HTable methods when an attempt to do 
    something (like
      commit changes) fails after a bunch of retries.
     
    @@ -154,23 +154,23 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/IOException.ht
     
     
 RetriesExhaustedException(int numRetries,
- http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">List<org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext> exceptions)
+ https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">List<org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext> exceptions)
     Create a new RetriesExhaustedException from the list of 
    prior failures.
     
     
     
-RetriesExhaustedException(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String msg)
+RetriesExhaustedException(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String msg)
     
     
-RetriesExhaustedException(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String callableVitals,
+RetriesExhaustedException(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String callableVitals,
  int numTries,
- http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in 
java.lang">Throwable> exceptions)
+ https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in 
java.lang">Throwable> exceptions)
     Create a new RetriesExhaustedException from the list of 
    prior failures.
     
     
     
    -RetriesExhaustedException(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringmsg,
    - 
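All of the constructors above carry a try count and the list of prior failures, which implies a retry loop that collects them as it goes. A hedged sketch of such a loop (RetryRunner is illustrative, not the HBase client's retry machinery):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;

    final class RetryRunner {
      // Retries op up to numRetries times, surfacing every prior failure in
      // the final exception, much as RetriesExhaustedException does.
      static <T> T runWithRetries(Callable<T> op, int numRetries) throws IOException {
        List<Throwable> exceptions = new ArrayList<>();
        for (int tries = 0; tries < numRetries; tries++) {
          try {
            return op.call();
          } catch (Exception e) {
            exceptions.add(e);
          }
        }
        throw new IOException("Failed after " + numRetries + " tries: " + exceptions);
      }

      public static void main(String[] args) throws IOException {
        int[] attempts = {0};
        String result = runWithRetries(() -> {
          if (++attempts[0] < 3) {
            throw new IOException("transient failure");
          }
          return "ok";
        }, 5);
        System.out.println(result + " after " + attempts[0] + " attempts");
      }
    }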

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/client/MasterCallable.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/MasterCallable.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/MasterCallable.html
    index 9d27237..d2c2295 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/MasterCallable.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/MasterCallable.html
    @@ -28,129 +28,128 @@
 020
 021import java.io.Closeable;
 022import java.io.IOException;
-023
-024import org.apache.hadoop.hbase.HRegionInfo;
-025import org.apache.hadoop.hbase.TableName;
-026import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-027import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-028import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-029import org.apache.hadoop.hbase.util.Bytes;
-030import org.apache.yetus.audience.InterfaceAudience;
-031
-032/**
-033 * A RetryingCallable for Master RPC operations.
-034 * Implement the #rpcCall method. It will be retried on error. See its javadoc and the javadoc of
-035 * #call(int). See {@link HBaseAdmin} for examples of how this is used. To get at the
-036 * rpcController that has been created and configured to make this rpc call, use getRpcController().
-037 * We are trying to contain all protobuf references including references to rpcController so we
-038 * don't pollute codebase with protobuf references; keep the protobuf references contained and only
-039 * present in a few classes rather than all about the code base.
-040 * <p>Like {@link RegionServerCallable} only in here, we can safely be PayloadCarryingRpcController
-041 * all the time. This is not possible in the similar {@link RegionServerCallable} Callable because
-042 * it has to deal with Coprocessor Endpoints.
-043 * @param <V> return type
-044 */
-045@InterfaceAudience.Private
-046abstract class MasterCallable<V> implements RetryingCallable<V>, Closeable {
-047  protected final ClusterConnection connection;
-048  protected MasterKeepAliveConnection master;
-049  private final HBaseRpcController rpcController;
-050
-051  MasterCallable(final Connection connection, final RpcControllerFactory rpcConnectionFactory) {
-052    this.connection = (ClusterConnection) connection;
-053    this.rpcController = rpcConnectionFactory.newController();
-054  }
-055
-056  @Override
-057  public void prepare(boolean reload) throws IOException {
-058    this.master = this.connection.getKeepAliveMasterService();
-059  }
-060
-061  @Override
-062  public void close() throws IOException {
-063    // The above prepare could fail but this would still be called though masterAdmin is null
-064    if (this.master != null) {
-065      this.master.close();
-066      this.master = null;
-067    }
-068  }
-069
-070  @Override
-071  public void throwable(Throwable t, boolean retrying) {
-072  }
-073
-074  @Override
-075  public String getExceptionMessageAdditionalDetail() {
-076    return "";
-077  }
-078
-079  @Override
-080  public long sleep(long pause, int tries) {
-081    return ConnectionUtils.getPauseTime(pause, tries);
-082  }
-083
-084  /**
-085   * Override that changes the {@link java.util.concurrent.Callable#call()} Exception from {@link Exception} to
-086   * {@link IOException}. It also does setup of an rpcController and calls through to the rpcCall()
-087   * method which callers are expected to implement. If rpcController is an instance of
-088   * PayloadCarryingRpcController, we will set a timeout on it.
-089   */
-090  @Override
-091  // Same trick as in RegionServerCallable so users don't have to copy/paste so much boilerplate
-092  // and so we contain references to protobuf. We can't set priority on the rpcController as
-093  // we do in RegionServerCallable because we don't always have a Table when we call.
-094  public V call(int callTimeout) throws IOException {
-095    try {
-096      if (this.rpcController != null) {
-097        this.rpcController.reset();
-098        this.rpcController.setCallTimeout(callTimeout);
-099      }
-100      return rpcCall();
-101    } catch (Exception e) {
-102      throw ProtobufUtil.handleRemoteException(e);
-103    }
-104  }
-105
-106  /**
-107   * Run the RPC call. Implement this method. To get at the rpcController that has been created
-108   * and configured to make this rpc call, use getRpcController(). We are trying to contain
-109   * rpcController references so we don't pollute codebase with protobuf references; keep the
-110   * protobuf references contained and only present in a few classes rather than all about the
-111   * code base.
-112   * @throws Exception
-113   */
-114  protected abstract V rpcCall() throws Exception;
-115
-116  HBaseRpcController getRpcController() {
-117    return this.rpcController;
-118  }
-119
-120  void setPriority(final
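The class javadoc above describes the division of labor: call(int) owns controller setup, timeout plumbing and exception translation, while subclasses supply only rpcCall(). A self-contained sketch of the same shape, with every name invented for illustration (this is not HBase's class):

import java.io.IOException;

// Sketch of the MasterCallable pattern: the base class translates all
// failures to IOException and would configure its rpc controller before
// delegating; subclasses implement only rpcCall().
abstract class RetryableMasterOp<V> {
  public V call(int callTimeoutMs) throws IOException {
    try {
      // a real implementation resets/configures its rpc controller here
      return rpcCall();
    } catch (Exception e) {
      // mirror call(int) above: surface everything as IOException
      throw (e instanceof IOException) ? (IOException) e : new IOException(e);
    }
  }

  protected abstract V rpcCall() throws Exception;
}

public class RetryableMasterOpDemo {
  public static void main(String[] args) throws IOException {
    RetryableMasterOp<String> op = new RetryableMasterOp<String>() {
      @Override protected String rpcCall() { return "ok"; }
    };
    System.out.println(op.call(5000));
  }
}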

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    index 802b925..a3e80ab 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
    @@ -73,229 +73,229 @@
 065import java.util.concurrent.TimeoutException;
 066import java.util.concurrent.atomic.AtomicBoolean;
 067import java.util.concurrent.atomic.AtomicInteger;
-068import java.util.concurrent.atomic.AtomicLong;
-069import java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import java.util.concurrent.locks.ReadWriteLock;
-072import java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import org.apache.hadoop.hbase.CellBuilderType;
-081import org.apache.hadoop.hbase.CellComparator;
-082import org.apache.hadoop.hbase.CellComparatorImpl;
-083import org.apache.hadoop.hbase.CellScanner;
-084import org.apache.hadoop.hbase.CellUtil;
-085import org.apache.hadoop.hbase.CompareOperator;
-086import org.apache.hadoop.hbase.CompoundConfiguration;
-087import org.apache.hadoop.hbase.DoNotRetryIOException;
-088import org.apache.hadoop.hbase.DroppedSnapshotException;
-089import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import org.apache.hadoop.hbase.HConstants;
-091import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import org.apache.hadoop.hbase.HRegionInfo;
-094import org.apache.hadoop.hbase.KeyValue;
-095import org.apache.hadoop.hbase.KeyValueUtil;
-096import org.apache.hadoop.hbase.NamespaceDescriptor;
-097import org.apache.hadoop.hbase.NotServingRegionException;
-098import org.apache.hadoop.hbase.PrivateCellUtil;
-099import org.apache.hadoop.hbase.RegionTooBusyException;
-100import org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import org.apache.hadoop.hbase.UnknownScannerException;
-104import org.apache.hadoop.hbase.client.Append;
-105import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import org.apache.hadoop.hbase.client.CompactionState;
-107import org.apache.hadoop.hbase.client.Delete;
-108import org.apache.hadoop.hbase.client.Durability;
-109import org.apache.hadoop.hbase.client.Get;
-110import org.apache.hadoop.hbase.client.Increment;
-111import org.apache.hadoop.hbase.client.IsolationLevel;
-112import org.apache.hadoop.hbase.client.Mutation;
-113import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import org.apache.hadoop.hbase.client.Put;
-115import org.apache.hadoop.hbase.client.RegionInfo;
-116import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import org.apache.hadoop.hbase.client.Result;
-118import org.apache.hadoop.hbase.client.RowMutations;
-119import org.apache.hadoop.hbase.client.Scan;
-120import org.apache.hadoop.hbase.client.TableDescriptor;
-121import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import org.apache.hadoop.hbase.filter.FilterWrapper;
-131import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import org.apache.hadoop.hbase.io.HFileLink;
-133import org.apache.hadoop.hbase.io.HeapSize;
-134import org.apache.hadoop.hbase.io.TimeRange;
-135import org.apache.hadoop.hbase.io.hfile.HFile;
-136import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import org.apache.hadoop.hbase.ipc.RpcCall;
-139import org.apache.hadoop.hbase.ipc.RpcServer;
-140import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
    index bd13b53..802b925 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
    @@ -900,7600 +900,7598 @@
 892    if (this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
 893      status.setStatus("Writing region info on filesystem");
 894      fs.checkRegionInfoOnFilesystem();
-895    } else {
-896      if (LOG.isDebugEnabled()) {
-897        LOG.debug("Skipping creation of .regioninfo file for " + this.getRegionInfo());
-898      }
-899    }
-900
-901    // Initialize all the HStores
-902    status.setStatus("Initializing all the Stores");
-903    long maxSeqId = initializeStores(reporter, status);
-904    this.mvcc.advanceTo(maxSeqId);
-905    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-906      Collection<HStore> stores = this.stores.values();
-907      try {
-908        // update the stores that we are replaying
-909        stores.forEach(HStore::startReplayingFromWAL);
-910        // Recover any edits if available.
-911        maxSeqId = Math.max(maxSeqId,
-912          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-913        // Make sure mvcc is up to max.
-914        this.mvcc.advanceTo(maxSeqId);
-915      } finally {
-916        // update the stores that we are done replaying
-917        stores.forEach(HStore::stopReplayingFromWAL);
-918      }
-919    }
-920    this.lastReplayedOpenRegionSeqId = maxSeqId;
+895    }
+896
+897    // Initialize all the HStores
+898    status.setStatus("Initializing all the Stores");
+899    long maxSeqId = initializeStores(reporter, status);
+900    this.mvcc.advanceTo(maxSeqId);
+901    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+902      Collection<HStore> stores = this.stores.values();
+903      try {
+904        // update the stores that we are replaying
+905        stores.forEach(HStore::startReplayingFromWAL);
+906        // Recover any edits if available.
+907        maxSeqId = Math.max(maxSeqId,
+908          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+909        // Make sure mvcc is up to max.
+910        this.mvcc.advanceTo(maxSeqId);
+911      } finally {
+912        // update the stores that we are done replaying
+913        stores.forEach(HStore::stopReplayingFromWAL);
+914      }
+915    }
+916    this.lastReplayedOpenRegionSeqId = maxSeqId;
+917
+918    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+919    this.writestate.flushRequested = false;
+920    this.writestate.compacting.set(0);
 921
-922    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-923    this.writestate.flushRequested = false;
-924    this.writestate.compacting.set(0);
-925
-926    if (this.writestate.writesEnabled) {
-927      // Remove temporary data left over from old regions
-928      status.setStatus("Cleaning up temporary data from old regions");
-929      fs.cleanupTempDir();
-930    }
-931
-932    if (this.writestate.writesEnabled) {
-933      status.setStatus("Cleaning up detritus from prior splits");
-934      // Get rid of any splits or merges that were lost in-progress.  Clean out
-935      // these directories here on open.  We may be opening a region that was
-936      // being split but we crashed in the middle of it all.
-937      fs.cleanupAnySplitDetritus();
-938      fs.cleanupMergesDir();
-939    }
-940
-941    // Initialize split policy
-942    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-943
-944    // Initialize flush policy
-945    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-946
-947    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-948    for (HStore store: stores.values()) {
-949      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-950    }
-951
-952    // Use maximum of log sequenceid or that which was found in stores
-953    // (particularly if no recovered edits, seqid will be -1).
-954    long nextSeqid = maxSeqId;
-955    if (this.writestate.writesEnabled) {
-956      nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
-957          this.fs.getRegionDir(), nextSeqid, 1);
-958    } else {
-959      nextSeqid++;
-960    }
-961
-962    LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
-963      "; next sequenceid=" + nextSeqid);
+922    if (this.writestate.writesEnabled) {
+923      // Remove temporary data left
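Worth noting in the hunk above: WAL replay is bracketed in try/finally so every store leaves replay mode even if recovery throws. A standalone sketch of that bracket, with all names hypothetical:

import java.util.Arrays;
import java.util.List;

// Sketch of the replay bracket: flip every store into replay mode, do the
// recovery work (which may throw), and always flip the stores back.
public class ReplayBracketDemo {
  static class Store {
    boolean replaying;
    void startReplayingFromWAL() { replaying = true; }
    void stopReplayingFromWAL() { replaying = false; }
  }

  static long replayRecoveredEdits(List<Store> stores, long maxSeqId) {
    try {
      stores.forEach(Store::startReplayingFromWAL);
      // ... recover edits here; may throw ...
      return Math.max(maxSeqId, 42L); // placeholder recovered sequence id
    } finally {
      stores.forEach(Store::stopReplayingFromWAL);
    }
  }

  public static void main(String[] args) {
    System.out.println(replayRecoveredEdits(Arrays.asList(new Store(), new Store()), 7L));
  }
}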

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
    index 3628d68..bd2f966 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
    @@ -152,27 +152,27 @@ the order they are declared.
     
     
     PeerProcedureInterface.PeerOperationType
    -RefreshPeerProcedure.getPeerOperationType()
    +DisablePeerProcedure.getPeerOperationType()
     
     
     PeerProcedureInterface.PeerOperationType
    -DisablePeerProcedure.getPeerOperationType()
    +RemovePeerProcedure.getPeerOperationType()
     
     
     PeerProcedureInterface.PeerOperationType
    -UpdatePeerConfigProcedure.getPeerOperationType()
    +EnablePeerProcedure.getPeerOperationType()
     
     
     PeerProcedureInterface.PeerOperationType
    -AddPeerProcedure.getPeerOperationType()
    +RefreshPeerProcedure.getPeerOperationType()
     
     
     PeerProcedureInterface.PeerOperationType
    -EnablePeerProcedure.getPeerOperationType()
    +AddPeerProcedure.getPeerOperationType()
     
     
     PeerProcedureInterface.PeerOperationType
    -RemovePeerProcedure.getPeerOperationType()
    +UpdatePeerConfigProcedure.getPeerOperationType()
     
     
     private static PeerProcedureInterface.PeerOperationType
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
    index 269bc46..f7a6279 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
    @@ -125,11 +125,11 @@
     
     
     private ProcedurePrepareLatch
    -RecoverMetaProcedure.syncLatch
    +AbstractStateMachineTableProcedure.syncLatch
     
     
     private ProcedurePrepareLatch
    -AbstractStateMachineTableProcedure.syncLatch
    +RecoverMetaProcedure.syncLatch
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
    index 5e8085c..8b6ceb7 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
    @@ -104,14 +104,14 @@
     
     
     ServerProcedureInterface.ServerOperationType
    -ServerCrashProcedure.getServerOperationType()
    -
    -
    -ServerProcedureInterface.ServerOperationType
     ServerProcedureInterface.getServerOperationType()
     Given an operation type we can take decisions about what to 
    do with pending operations.
     
     
    +
    +ServerProcedureInterface.ServerOperationType
    +ServerCrashProcedure.getServerOperationType()
    +
     
     static ServerProcedureInterface.ServerOperationType
ServerProcedureInterface.ServerOperationType.valueOf(String name)
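valueOf(String) here is the lookup the Java compiler generates for every enum; for example (the constant name is an assumption, not taken from this page):

// Standard generated-enum lookup from the row above; throws
// IllegalArgumentException for unknown names. CRASH_HANDLER is assumed
// to be one of the declared constants.
ServerProcedureInterface.ServerOperationType op =
    ServerProcedureInterface.ServerOperationType.valueOf("CRASH_HANDLER");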
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
    index 046295e..e736f37 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
    @@ -112,19 +112,19 @@
     
     
     TableProcedureInterface.TableOperationType
    -MoveRegionProcedure.getTableOperationType()
    +UnassignProcedure.getTableOperationType()
     
     
     TableProcedureInterface.TableOperationType
    -GCMergedRegionsProcedure.getTableOperationType()
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
    index b8ce496..570fb68 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
@@ -168,27 +168,39 @@

 void
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)
+
+void
 BlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)
   Add block to cache (defaults to not in-memory).

 void
 LruBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)
   Cache the block with the specified name and buffer.

 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)
+MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)

+void
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory)
+
 void
-MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)
+InclusiveCombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory)

 void
@@ -208,18 +220,6 @@

 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory)
-
-void
-InclusiveCombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory)
-
-void
 MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory)
@@ -232,21 +232,21 @@

 boolean
-BlockCache.evictBlock(BlockCacheKey cacheKey)
-  Evict block from cache.
+CombinedBlockCache.evictBlock(BlockCacheKey cacheKey)

 boolean
-LruBlockCache.evictBlock(BlockCacheKey cacheKey)
+InclusiveCombinedBlockCache.evictBlock(BlockCacheKey cacheKey)

 boolean
-CombinedBlockCache.evictBlock(BlockCacheKey cacheKey)
+BlockCache.evictBlock(BlockCacheKey cacheKey)
+  Evict block from cache.

 boolean
-InclusiveCombinedBlockCache.evictBlock(BlockCacheKey cacheKey)
+LruBlockCache.evictBlock(BlockCacheKey cacheKey)

 boolean
@@ -254,35 +254,35 @@

 Cacheable
-BlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
-  Fetch block from cache.
+CombinedBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)

 Cacheable
-LruBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
-  Get the buffer of the block with the specified name.
+InclusiveCombinedBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)

 Cacheable
-CombinedBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
+BlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
+  Fetch block from cache.

 Cacheable
-InclusiveCombinedBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
+LruBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
+  Get the buffer of the block with the specified name.

 Cacheable
@@ -308,6 +308,11 @@
 CombinedBlockCache.getRefCount(BlockCacheKey cacheKey)

+void
+CombinedBlockCache.returnBlock(BlockCacheKey cacheKey, Cacheable block)
+
 default void
 BlockCache.returnBlock(BlockCacheKey cacheKey, Cacheable block)
@@ -315,11 +320,6 @@
   is over.

-void
-CombinedBlockCache.returnBlock(BlockCacheKey cacheKey, Cacheable block)
-
@@ -497,13 +497,13 @@

 void
-CachedEntryQueue.add(java.util.Map.Entry<BlockCacheKey, BucketCache.BucketEntry> entry)
-  Attempt to add the specified entry to this queue.
+BucketCache.BucketEntryGroup.add(java.util.Map.Entry<BlockCacheKey, BucketCache.BucketEntry> block)

 void
-BucketCache.BucketEntryGroup.add(java.util.Map.Entry<BlockCacheKey, BucketCache.BucketEntry> block)
+CachedEntryQueue.add(java.util.Map.Entry<BlockCacheKey, BucketCache.BucketEntry> entry)
+  Attempt to add the specified entry to this queue.
+
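The rows above spell out the BlockCache contract (cacheBlock / getBlock / evictBlock) and show CombinedBlockCache implementing the same methods. A self-contained sketch of a two-tier cache in that shape; the types below are simplified stand-ins, not HBase's interfaces:

import java.util.HashMap;
import java.util.Map;

// Simplified stand-in for the BlockCache contract shown above. The
// CombinedCache mirrors how a combined cache can consult a first tier
// and fall through to a second one.
interface SimpleBlockCache {
  void cacheBlock(String cacheKey, byte[] buf);
  byte[] getBlock(String cacheKey);
  boolean evictBlock(String cacheKey);
}

class MapBackedCache implements SimpleBlockCache {
  private final Map<String, byte[]> blocks = new HashMap<>();
  public void cacheBlock(String k, byte[] b) { blocks.put(k, b); }
  public byte[] getBlock(String k) { return blocks.get(k); }
  public boolean evictBlock(String k) { return blocks.remove(k) != null; }
}

class CombinedCache implements SimpleBlockCache {
  private final SimpleBlockCache l1 = new MapBackedCache(); // e.g. an LRU tier
  private final SimpleBlockCache l2 = new MapBackedCache(); // e.g. a bucket tier
  public void cacheBlock(String k, byte[] b) { l2.cacheBlock(k, b); }
  public byte[] getBlock(String k) {
    byte[] b = l1.getBlock(k);
    return b != null ? b : l2.getBlock(k);
  }
  public boolean evictBlock(String k) {
    // evict from both tiers; report true if either held the block
    boolean e1 = l1.evictBlock(k), e2 = l2.evictBlock(k);
    return e1 || e2;
  }
}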
     
     
     
    
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html 
    b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
    index d2c9cca..146b426 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
@@ -282,7 +282,10 @@

 List<RegionPlan>
-FavoredStochasticBalancer.balanceCluster(Map<ServerName, List<RegionInfo>> clusterState)
+SimpleLoadBalancer.balanceCluster(Map<ServerName, List<RegionInfo>> clusterMap)
+  Generate a global load balancing plan according to the specified map of
+  server information to the most loaded regions of each server.

 List<RegionPlan>
@@ -292,19 +295,16 @@

 List<RegionPlan>
-SimpleLoadBalancer.balanceCluster(Map<ServerName, List<RegionInfo>> clusterMap)
-  Generate a global load balancing plan according to the specified map of
-  server information to the most loaded regions of each server.
+FavoredStochasticBalancer.balanceCluster(Map<ServerName, List<RegionInfo>> clusterState)

 List<RegionPlan>
-StochasticLoadBalancer.balanceCluster(TableName tableName,
+SimpleLoadBalancer.balanceCluster(TableName tableName,
   Map<ServerName, List<RegionInfo>> clusterState)

 List<RegionPlan>
-SimpleLoadBalancer.balanceCluster(TableName tableName,
+StochasticLoadBalancer.balanceCluster(TableName tableName,
   Map<ServerName, List<RegionInfo>> clusterState)
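The balanceCluster javadoc above states the contract: map each server to its regions, get back a list of RegionPlans describing moves. A naive, self-contained sketch honoring that contract (simplified types, not the real LoadBalancer API):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// RegionPlanLite stands in for org.apache.hadoop.hbase.master.RegionPlan.
class RegionPlanLite {
  final String region, source, destination;
  RegionPlanLite(String region, String source, String destination) {
    this.region = region; this.source = source; this.destination = destination;
  }
}

// Greedy illustration of the balanceCluster contract described above:
// repeatedly move one region from the most-loaded to the least-loaded server.
class GreedyBalancer {
  List<RegionPlanLite> balanceCluster(Map<String, List<String>> clusterState) {
    String max = null, min = null;
    for (String server : clusterState.keySet()) {
      if (max == null || clusterState.get(server).size() > clusterState.get(max).size()) max = server;
      if (min == null || clusterState.get(server).size() < clusterState.get(min).size()) min = server;
    }
    List<RegionPlanLite> plans = new ArrayList<>();
    while (max != null && min != null
        && clusterState.get(max).size() - clusterState.get(min).size() > 1) {
      String region = clusterState.get(max).remove(0);
      clusterState.get(min).add(region);
      plans.add(new RegionPlanLite(region, max, min));
    }
    return plans;
  }
}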
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionState.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionState.html 
    b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionState.html
    index 00f46c3..75603cc 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionState.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionState.html
@@ -143,7 +143,7 @@

 List<RegionState>
-ClusterMetrics.getRegionStatesInTransition()
+ClusterMetricsBuilder.ClusterMetricsImpl.getRegionStatesInTransition()

 List<RegionState>
@@ -153,7 +153,7 @@

 List<RegionState>
-ClusterMetricsBuilder.ClusterMetricsImpl.getRegionStatesInTransition()
+ClusterMetrics.getRegionStatesInTransition()
     
     
     
    
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
     
    b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
    index 7161108..fe5ef34 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
@@ -166,27 +166,27 @@

 DataBlockEncoder.EncodedSeeker
-CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+RowIndexCodecV1.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)

 DataBlockEncoder.EncodedSeeker
-PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)

 DataBlockEncoder.EncodedSeeker
-FastDiffDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+DiffKeyDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)

 DataBlockEncoder.EncodedSeeker
-DiffKeyDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+FastDiffDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)

 DataBlockEncoder.EncodedSeeker
-RowIndexCodecV1.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)

@@ -198,13 +198,13 @@

 ByteBuffer
-BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source, HFileBlockDecodingContext blkDecodingCtx)
+RowIndexCodecV1.decodeKeyValues(DataInputStream source, HFileBlockDecodingContext decodingCtx)

 ByteBuffer
-RowIndexCodecV1.decodeKeyValues(DataInputStream source, HFileBlockDecodingContext decodingCtx)
+BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source, HFileBlockDecodingContext blkDecodingCtx)

@@ -279,17 +279,17 @@

 HFileBlockDecodingContext
-NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)

 HFileBlockDecodingContext
-HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
-  create a encoder specific decoding context for reading.
+NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)

 HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
+HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
+  create a encoder specific decoding context for reading.
+
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
     
    b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
    index 79b047f..66443b9 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
@@ -116,36 +116,36 @@
   HFileBlockDefaultDecodingContext decodingCtx)

-protected abstract ByteBuffer
-BufferedDataBlockEncoder.internalDecodeKeyValues(DataInputStream source,
+protected ByteBuffer
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html 
    b/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
    index 3117787..ba0cca5 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
    @@ -121,11 +121,11 @@
     
     
     TableStateManager
    -MasterServices.getTableStateManager()
    +HMaster.getTableStateManager()
     
     
     TableStateManager
    -HMaster.getTableStateManager()
    +MasterServices.getTableStateManager()
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html 
    b/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
    index b5b7703..a444123 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
    @@ -117,11 +117,11 @@
     
     
     LockManager
    -MasterServices.getLockManager()
    +HMaster.getLockManager()
     
     
     LockManager
    -HMaster.getLockManager()
    +MasterServices.getLockManager()
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
    index f476c74..d589aaa 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
    @@ -104,15 +104,15 @@
     
     
     NormalizationPlan.PlanType
    -NormalizationPlan.getType()
    +MergeNormalizationPlan.getType()
     
     
     NormalizationPlan.PlanType
    -SplitNormalizationPlan.getType()
    +NormalizationPlan.getType()
     
     
     NormalizationPlan.PlanType
    -MergeNormalizationPlan.getType()
    +SplitNormalizationPlan.getType()
     
     
     NormalizationPlan.PlanType
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
    index d8fb2f6..ad4e9b4 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
    @@ -125,11 +125,11 @@
     
     
     RegionNormalizer
    -MasterServices.getRegionNormalizer()
    +HMaster.getRegionNormalizer()
     
     
     RegionNormalizer
    -HMaster.getRegionNormalizer()
    +MasterServices.getRegionNormalizer()
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
    index d1757b3..529d712 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/package-tree.html
    @@ -332,11 +332,11 @@
     
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)

-org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode
-org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
 org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective
+org.apache.hadoop.hbase.master.MetricsMasterSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.master.RegionState.State
+org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus
+org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode
     
     
     
    
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
    index 4d04e3e..9c13a58 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -133,11 +133,11 @@

 ProcedureExecutor<MasterProcedureEnv>
-HMaster.getMasterProcedureExecutor()
+MasterServices.getMasterProcedureExecutor()

 ProcedureExecutor<MasterProcedureEnv>
-MasterServices.getMasterProcedureExecutor()
+HMaster.getMasterProcedureExecutor()

 private RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv, ?>
@@ -194,15 +194,15 @@

 protected Procedure.LockState
-RegionTransitionProcedure.acquireLock(MasterProcedureEnv env)
+GCRegionProcedure.acquireLock(MasterProcedureEnv env)

 protected Procedure.LockState
-GCRegionProcedure.acquireLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.acquireLock(MasterProcedureEnv env)

 protected Procedure.LockState
-MergeTableRegionsProcedure.acquireLock(MasterProcedureEnv env)
+RegionTransitionProcedure.acquireLock(MasterProcedureEnv env)

 protected boolean
@@ -295,7 +295,7 @@

 protected void
-UnassignProcedure.finishTransition(MasterProcedureEnv env, RegionStates.RegionStateNode regionNode)
+AssignProcedure.finishTransition(MasterProcedureEnv env, RegionStates.RegionStateNode regionNode)

@@ -305,7 +305,7 @@

 protected void
-AssignProcedure.finishTransition(MasterProcedureEnv env, RegionStates.RegionStateNode regionNode)
+UnassignProcedure.finishTransition(MasterProcedureEnv env, RegionStates.RegionStateNode regionNode)

@@ -314,7 +314,7 @@

 protected ProcedureMetrics
-UnassignProcedure.getProcedureMetrics(MasterProcedureEnv env)
+AssignProcedure.getProcedureMetrics(MasterProcedureEnv env)

 protected ProcedureMetrics
@@ -326,7 +326,7 @@

 protected ProcedureMetrics
-AssignProcedure.getProcedureMetrics(MasterProcedureEnv env)
+UnassignProcedure.getProcedureMetrics(MasterProcedureEnv env)

 (package private) static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
@@ -357,7 +357,7 @@

 ServerName
-UnassignProcedure.getServer(MasterProcedureEnv env)
+AssignProcedure.getServer(MasterProcedureEnv env)

 abstract ServerName
@@ -367,7 +367,7 @@

 ServerName
-AssignProcedure.getServer(MasterProcedureEnv env)
+UnassignProcedure.getServer(MasterProcedureEnv env)

 private ServerName
@@ -384,19 +384,19 @@

 protected boolean
-RegionTransitionProcedure.hasLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.hasLock(MasterProcedureEnv env)

 protected boolean
-MergeTableRegionsProcedure.hasLock(MasterProcedureEnv env)
+RegionTransitionProcedure.hasLock(MasterProcedureEnv env)

 protected boolean
-RegionTransitionProcedure.holdLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.holdLock(MasterProcedureEnv env)

 protected boolean
-MergeTableRegionsProcedure.holdLock(MasterProcedureEnv env)
+RegionTransitionProcedure.holdLock(MasterProcedureEnv env)

 private boolean
@@ -510,15 +510,15 @@

 protected void
-RegionTransitionProcedure.releaseLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.releaseLock(MasterProcedureEnv env)

 protected void
-MergeTableRegionsProcedure.releaseLock(MasterProcedureEnv env)
+RegionTransitionProcedure.releaseLock(MasterProcedureEnv env)

 RemoteProcedureDispatcher.RemoteOperation
-UnassignProcedure.remoteCallBuild(MasterProcedureEnv env, ServerName serverName)
+AssignProcedure.remoteCallBuild(MasterProcedureEnv env, ServerName serverName)

@@ -528,12 +528,12 @@

 RemoteProcedureDispatcher.RemoteOperation
-AssignProcedure.remoteCallBuild(MasterProcedureEnv env, ServerName serverName)
+UnassignProcedure.remoteCallBuild(MasterProcedureEnv env, ServerName serverName)

 protected boolean
-UnassignProcedure.remoteCallFailed(MasterProcedureEnv env, RegionStates.RegionStateNode regionNode, IOException exception)
+AssignProcedure.remoteCallFailed(MasterProcedureEnv env, RegionStates.RegionStateNode regionNode, IOException exception)

@@ -545,7 +545,7 @@

 protected boolean
-AssignProcedure.remoteCallFailed(MasterProcedureEnv env, RegionStates.RegionStateNode regionNode, IOException exception)
+UnassignProcedure.remoteCallFailed(MasterProcedureEnv env, RegionStates.RegionStateNode regionNode, IOException exception)

@@ -566,10 +566,10 @@

 protected void
-UnassignProcedure.reportTransition(MasterProcedureEnv env,
+AssignProcedure.reportTransition(MasterProcedureEnv env,
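The acquireLock/releaseLock/holdLock rows above are the procedure framework's locking hooks. A hedged sketch of the usual override pattern; the scheduler methods and LockState constants shown are assumptions about that API, not taken from this page:

// Typical shape of the locking hooks listed above (names assumed; this is
// not a verbatim HBase implementation).
@Override
protected Procedure.LockState acquireLock(MasterProcedureEnv env) {
  // Ask the scheduler for the needed region lock; park the procedure on an
  // event if it is unavailable, otherwise proceed with the lock held.
  if (env.getProcedureScheduler().waitRegions(this, getTableName(), getRegion())) {
    return Procedure.LockState.LOCK_EVENT_WAIT;
  }
  return Procedure.LockState.LOCK_ACQUIRED;
}

@Override
protected void releaseLock(MasterProcedureEnv env) {
  // Wake anyone queued behind the lock we held.
  env.getProcedureScheduler().wakeRegions(this, getTableName(), getRegion());
}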
     

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
    index 7515d7b..3c4825d 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -762,7 +762,7 @@
 754    boolean wasUp = this.clusterStatusTracker.isClusterUp();
 755    if (!wasUp) this.clusterStatusTracker.setClusterUp();
 756
-757    LOG.info("Server active/primary master=" + this.serverName +
+757    LOG.info("Active/primary master=" + this.serverName +
 758        ", sessionid=0x" +
 759        Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
 760        ", setting cluster-up flag (Was=" + wasUp + ")");
@@ -1161,7 +1161,7 @@
 1153   startProcedureExecutor();
 1154
 1155   // Start log cleaner thread
-1156   int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
+1156   int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 600 * 1000);
 1157   this.logCleaner =
 1158      new LogCleaner(cleanerInterval,
 1159         this, conf, getMasterWalManager().getFileSystem(),
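The second hunk above raises the default log-cleaner period from one minute (60 * 1000 ms) to ten minutes (600 * 1000 ms). Because the literal is only the fallback of conf.getInt, operators can keep the old cadence by setting the property explicitly:

// Value is in milliseconds; the second argument is only the fallback, so an
// explicit hbase.master.cleaner.interval in hbase-site.xml still wins.
int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 600 * 1000); // 10 min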
@@ -1227,2368 +1227,2369 @@
 1219    procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
 1220    configurationManager.registerObserver(procEnv);
 1221
-1222    final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
-1223        Math.max(Runtime.getRuntime().availableProcessors(),
-1224          MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
-1225    final boolean abortOnCorruption = conf.getBoolean(
-1226        MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
-1227        MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
-1228    procedureStore.start(numThreads);
-1229    procedureExecutor.start(numThreads, abortOnCorruption);
-1230    procEnv.getRemoteDispatcher().start();
-1231  }
-1232
-1233  private void stopProcedureExecutor() {
-1234    if (procedureExecutor != null) {
-1235      configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
-1236      procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
-1237      procedureExecutor.stop();
-1238      procedureExecutor.join();
-1239      procedureExecutor = null;
-1240    }
-1241
-1242    if (procedureStore != null) {
-1243      procedureStore.stop(isAborted());
-1244      procedureStore = null;
-1245    }
-1246  }
-1247
-1248  private void stopChores() {
-1249    if (this.expiredMobFileCleanerChore != null) {
-1250      this.expiredMobFileCleanerChore.cancel(true);
-1251    }
-1252    if (this.mobCompactChore != null) {
-1253      this.mobCompactChore.cancel(true);
-1254    }
-1255    if (this.balancerChore != null) {
-1256      this.balancerChore.cancel(true);
-1257    }
-1258    if (this.normalizerChore != null) {
-1259      this.normalizerChore.cancel(true);
-1260    }
-1261    if (this.clusterStatusChore != null) {
-1262      this.clusterStatusChore.cancel(true);
-1263    }
-1264    if (this.catalogJanitorChore != null) {
-1265      this.catalogJanitorChore.cancel(true);
-1266    }
-1267    if (this.clusterStatusPublisherChore != null){
-1268      clusterStatusPublisherChore.cancel(true);
-1269    }
-1270    if (this.mobCompactThread != null) {
-1271      this.mobCompactThread.close();
-1272    }
-1273
-1274    if (this.quotaObserverChore != null) {
-1275      quotaObserverChore.cancel();
-1276    }
-1277    if (this.snapshotQuotaChore != null) {
-1278      snapshotQuotaChore.cancel();
-1279    }
-1280  }
-1281
-1282  /**
-1283   * @return Get remote side's InetAddress
-1284   */
-1285  InetAddress getRemoteInetAddress(final int port,
-1286      final long serverStartCode) throws UnknownHostException {
-1287    // Do it out here in its own little method so can fake an address when
-1288    // mocking up in tests.
-1289    InetAddress ia = RpcServer.getRemoteIp();
-1290
-1291    // The call could be from the local regionserver,
-1292    // in which case, there is no remote address.
-1293    if (ia == null && serverStartCode == startcode) {
-1294      InetSocketAddress isa = rpcServices.getSocketAddress();
-1295      if (isa != null && isa.getPort() == port) {
-1296        ia = isa.getAddress();
-1297      }
-1298    }
-1299    return ia;
-1300  }
-1301
-1302  /**
-1303   * @return Maximum time we should run balancer for
-1304   */
-1305  private int getMaxBalancingTime() {
-1306    int maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, -1);
-1307    if (maxBalancingTime == -1) {
-1308

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    index 4a87b9d..7515d7b 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -25,3542 +25,3570 @@
 017 */
 018package org.apache.hadoop.hbase.master;
 019
-020import com.google.protobuf.Descriptors;
-021import com.google.protobuf.Service;
-022import java.io.IOException;
-023import java.io.InterruptedIOException;
-024import java.lang.reflect.Constructor;
-025import java.lang.reflect.InvocationTargetException;
-026import java.net.InetAddress;
-027import java.net.InetSocketAddress;
-028import java.net.UnknownHostException;
-029import java.util.ArrayList;
-030import java.util.Arrays;
-031import java.util.Collection;
-032import java.util.Collections;
-033import java.util.Comparator;
-034import java.util.EnumSet;
-035import java.util.HashMap;
-036import java.util.Iterator;
-037import java.util.List;
-038import java.util.Map;
-039import java.util.Map.Entry;
-040import java.util.Objects;
-041import java.util.Set;
-042import java.util.concurrent.ExecutionException;
-043import java.util.concurrent.Future;
-044import java.util.concurrent.TimeUnit;
-045import java.util.concurrent.TimeoutException;
-046import java.util.concurrent.atomic.AtomicInteger;
-047import java.util.concurrent.atomic.AtomicReference;
-048import java.util.function.Function;
-049import java.util.regex.Pattern;
-050import java.util.stream.Collectors;
-051import javax.servlet.ServletException;
-052import javax.servlet.http.HttpServlet;
-053import javax.servlet.http.HttpServletRequest;
-054import javax.servlet.http.HttpServletResponse;
-055import org.apache.commons.lang3.StringUtils;
-056import org.apache.hadoop.conf.Configuration;
-057import org.apache.hadoop.fs.Path;
-058import org.apache.hadoop.hbase.ClusterId;
-059import org.apache.hadoop.hbase.ClusterMetrics;
-060import org.apache.hadoop.hbase.ClusterMetrics.Option;
-061import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-062import org.apache.hadoop.hbase.CoordinatedStateException;
-063import org.apache.hadoop.hbase.DoNotRetryIOException;
-064import org.apache.hadoop.hbase.HBaseIOException;
-065import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-066import org.apache.hadoop.hbase.HConstants;
-067import org.apache.hadoop.hbase.InvalidFamilyOperationException;
-068import org.apache.hadoop.hbase.MasterNotRunningException;
-069import org.apache.hadoop.hbase.MetaTableAccessor;
-070import org.apache.hadoop.hbase.NamespaceDescriptor;
-071import org.apache.hadoop.hbase.PleaseHoldException;
-072import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-073import org.apache.hadoop.hbase.ServerLoad;
-074import org.apache.hadoop.hbase.ServerMetricsBuilder;
-075import org.apache.hadoop.hbase.ServerName;
-076import org.apache.hadoop.hbase.TableDescriptors;
-077import org.apache.hadoop.hbase.TableName;
-078import org.apache.hadoop.hbase.TableNotDisabledException;
-079import org.apache.hadoop.hbase.TableNotFoundException;
-080import org.apache.hadoop.hbase.UnknownRegionException;
-081import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-082import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-083import org.apache.hadoop.hbase.client.MasterSwitchType;
-084import org.apache.hadoop.hbase.client.RegionInfo;
-085import org.apache.hadoop.hbase.client.Result;
-086import org.apache.hadoop.hbase.client.TableDescriptor;
-087import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-088import org.apache.hadoop.hbase.client.TableState;
-089import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-090import org.apache.hadoop.hbase.exceptions.DeserializationException;
-091import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-092import org.apache.hadoop.hbase.executor.ExecutorType;
-093import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-094import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-095import org.apache.hadoop.hbase.http.InfoServer;
-096import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-097import org.apache.hadoop.hbase.ipc.RpcServer;
-098import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-099import org.apache.hadoop.hbase.log.HBaseMarkers;
-100import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-101import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-102import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-103import org.apache.hadoop.hbase.master.assignment.RegionStates;
-104import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-105import
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
    index 866db4a..e622a21 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIBadHostname.html
    @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-public class TestCIBadHostname
+public class TestCIBadHostname
 extends java.lang.Object
 Tests that we fail fast when hostname resolution is not working and do not cache
 unresolved InetSocketAddresses.
    @@ -132,10 +132,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     Field and Description
     
     
    +static HBaseClassTestRule
    +CLASS_RULE
    +
    +
     private static 
    org.apache.hadoop.hbase.client.ConnectionImplementation
     conn
     
    -
    +
     private static HBaseTestingUtility
     testUtil
     
    @@ -209,13 +213,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     Field Detail
    +
    +
    +
    +
    +
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
    +
    +
     
     
     
     
     
 testUtil
-private static HBaseTestingUtility testUtil
+private static HBaseTestingUtility testUtil
     
     
     
    @@ -224,7 +237,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 conn
-private static org.apache.hadoop.hbase.client.ConnectionImplementation conn
+private static org.apache.hadoop.hbase.client.ConnectionImplementation conn
     
     
     
    @@ -241,7 +254,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 TestCIBadHostname
-public TestCIBadHostname()
+public TestCIBadHostname()
     
     
     
    @@ -258,7 +271,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 setupBeforeClass
-public static void setupBeforeClass()
+public static void setupBeforeClass()
                       throws java.lang.Exception
     
     Throws:
    @@ -272,7 +285,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 teardownAfterClass
-public static void teardownAfterClass()
+public static void teardownAfterClass()
                        throws java.lang.Exception
     
     Throws:
    @@ -286,7 +299,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 testGetAdminBadHostname
-public void testGetAdminBadHostname()
+public void testGetAdminBadHostname()
                       throws java.lang.Exception
     
     Throws:
    @@ -300,7 +313,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 testGetClientBadHostname
-public void testGetClientBadHostname()
+public void testGetClientBadHostname()
                        throws java.lang.Exception
     
     Throws:
    
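The fail-fast behavior described above can be sketched outside the test harness. A minimal, hedged example (the class name, hostname, and port are illustrative, not taken from the test): java.net.InetSocketAddress attempts resolution eagerly in its constructor, so an unresolvable name is detectable immediately and should not be cached for later RPC attempts.

  import java.net.InetSocketAddress;

  public class FailFastOnBadHostname {
    public static void main(String[] args) {
      // ".invalid" is reserved (RFC 2606) and never resolves; the constructor
      // attempts resolution eagerly, so the failure is visible right away.
      InetSocketAddress addr = new InetSocketAddress("regionserver.invalid", 16020);
      if (addr.isUnresolved()) {
        // Fail fast rather than caching the unresolved address.
        throw new IllegalArgumentException("unresolvable hostname: " + addr);
      }
    }
  }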
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.html
    index fee2ec8..ee543bf 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestCIDeleteOperationTimeout.html
    @@ -75,13 +75,13 @@ var activeTableTab = "activeTableTab";
     
     Summary:
     Nested|
    -Field|
    +Field|
     Constr|
     Method
     
     
     Detail:
    -Field|
    +Field|
     Constr|
     Method
     
    @@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public class TestCIDeleteOperationTimeout
    +public class TestCIDeleteOperationTimeout
     extends AbstractTestCIOperationTimeout
     
     
    @@ -148,6 +148,17 @@ extends 
    +Fields
    +
    +Modifier and Type
    +Field and Description
    +
    +
    +static HBaseClassTestRule
    +CLASS_RULE
    +
    +
     
     
     
    @@ -220,6 +231,23 @@ extends 
     
     
    +
    +
    +
    +
    +
    +Field Detail
    +
    +
    +
    +
    +
    +CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
    +
    +
    +
    +
     
     
     
    @@ -232,7 +260,7 @@ extends 
     
     TestCIDeleteOperationTimeout
-public TestCIDeleteOperationTimeout()
+public TestCIDeleteOperationTimeout()
     
     
     
    @@ -249,7 +277,7 @@ extends 
     
     execute
-protected void execute(org.apache.hadoop.hbase.client.Table table)
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.Stage.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.Stage.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.Stage.html
    index add44d1..efa6d95 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.Stage.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.Stage.html
@@ -43,13 +43,13 @@
 035import org.apache.hadoop.hbase.backup.BackupType;
 036import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 037import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
-038import org.apache.yetus.audience.InterfaceAudience;
-039import org.slf4j.Logger;
-040import org.slf4j.LoggerFactory;
-041import org.apache.hadoop.hbase.client.Admin;
-042import org.apache.hadoop.hbase.client.Connection;
-043import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-044import org.apache.hadoop.hbase.util.FSUtils;
+038import org.apache.hadoop.hbase.client.Admin;
+039import org.apache.hadoop.hbase.client.Connection;
+040import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+041import org.apache.hadoop.hbase.util.FSUtils;
+042import org.apache.yetus.audience.InterfaceAudience;
+043import org.slf4j.Logger;
+044import org.slf4j.LoggerFactory;
 045
 046import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 047
@@ -88,360 +88,355 @@
 080  }
 081
 082  public void init(final Connection conn, final String backupId, BackupRequest request)
-083  throws IOException
-084  {
-085    if (request.getBackupType() == BackupType.FULL) {
-086      backupManager = new BackupManager(conn, conn.getConfiguration());
-087    } else {
-088      backupManager = new IncrementalBackupManager(conn, conn.getConfiguration());
-089    }
-090    this.backupId = backupId;
-091    this.tableList = request.getTableList();
-092    this.conn = conn;
-093    this.conf = conn.getConfiguration();
-094    this.fs = FSUtils.getCurrentFileSystem(conf);
-095    backupInfo =
-096        backupManager.createBackupInfo(backupId, request.getBackupType(), tableList,
-097          request.getTargetRootDir(), request.getTotalTasks(), request.getBandwidth());
-098    if (tableList == null || tableList.isEmpty()) {
-099      this.tableList = new ArrayList<>(backupInfo.getTables());
-100    }
-101    // Start new session
-102    backupManager.startBackupSession();
-103  }
-104
-105  /**
-106   * Begin the overall backup.
-107   * @param backupInfo backup info
-108   * @throws IOException exception
-109   */
-110  protected void beginBackup(BackupManager backupManager, BackupInfo backupInfo)
-111      throws IOException {
-112
-113    BackupSystemTable.snapshot(conn);
-114    backupManager.setBackupInfo(backupInfo);
-115    // set the start timestamp of the overall backup
-116    long startTs = EnvironmentEdgeManager.currentTime();
-117    backupInfo.setStartTs(startTs);
-118    // set overall backup status: ongoing
-119    backupInfo.setState(BackupState.RUNNING);
-120    backupInfo.setPhase(BackupPhase.REQUEST);
-121    LOG.info("Backup " + backupInfo.getBackupId() + " started at " + startTs + ".");
-122
-123    backupManager.updateBackupInfo(backupInfo);
-124    if (LOG.isDebugEnabled()) {
-125      LOG.debug("Backup session " + backupInfo.getBackupId() + " has been started.");
-126    }
-127  }
-128
-129  protected String getMessage(Exception e) {
-130    String msg = e.getMessage();
-131    if (msg == null || msg.equals("")) {
-132      msg = e.getClass().getName();
-133    }
-134    return msg;
-135  }
-136
-137  /**
-138   * Delete HBase snapshot for backup.
-139   * @param backupInfo backup info
-140   * @throws Exception exception
-141   */
-142  protected static void deleteSnapshots(final Connection conn, BackupInfo backupInfo, Configuration conf)
-143      throws IOException {
-144    LOG.debug("Trying to delete snapshot for full backup.");
-145    for (String snapshotName : backupInfo.getSnapshotNames()) {
-146      if (snapshotName == null) {
-147        continue;
-148      }
-149      LOG.debug("Trying to delete snapshot: " + snapshotName);
-150
-151      try (Admin admin = conn.getAdmin()) {
-152        admin.deleteSnapshot(snapshotName);
-153      }
-154      LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + backupInfo.getBackupId()
-155          + " succeeded.");
-156    }
-157  }
-158
-159  /**
-160   * Clean up directories with prefix "exportSnapshot-", which are generated when exporting
-161   * snapshots.
-162   * @throws IOException exception
-163   */
-164  protected static void cleanupExportSnapshotLog(Configuration conf) throws IOException {
-165    FileSystem fs =
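A note on the deleteSnapshots() loop above: it opens a fresh Admin in a try-with-resources block for every snapshot. A hedged sketch of the same deletion, under the assumption that one Admin can serve the whole loop (conn and backupInfo as in the class above; Admin.deleteSnapshot(String) is the public client API):

  // Assumes an open org.apache.hadoop.hbase.client.Connection `conn`.
  try (Admin admin = conn.getAdmin()) {
    for (String snapshotName : backupInfo.getSnapshotNames()) {
      if (snapshotName == null) {
        continue; // skip unset entries, as the original loop does
      }
      admin.deleteSnapshot(snapshotName); // one Admin reused across iterations
    }
  }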

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.html
    index 00ed486..dd532d4 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestStorageClusterVersionModel.html
    @@ -100,12 +100,6 @@ var activeTableTab = "activeTableTab";
     http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">java.lang.Object
     
     
    -junit.framework.Assert
    -
    -
    -junit.framework.TestCase
    -
    -
 org.apache.hadoop.hbase.rest.model.TestModelBase<org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel>
     
     
    @@ -115,20 +109,12 @@ var activeTableTab = "activeTableTab";
     
     
     
    -
    -
    -
    -
     
     
     
    -
    -All Implemented Interfaces:
    -junit.framework.Test
    -
     
     
-public class TestStorageClusterVersionModel
+public class TestStorageClusterVersionModel
 extends TestModelBase<org.apache.hadoop.hbase.rest.model.StorageClusterVersionModel>
     
     
@@ -212,18 +198,11 @@ extends fromJSON, fromPB, fromXML, testBuildModel, testFromJSON, testFromXML, testToJSON, testToXML, toJSON, toPB, toXML
     
     
    -
    -
    -
-Methods inherited from class junit.framework.TestCase
-assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, assertFalse, assertFalse, assertNotNull, assertNotNull, assertNotSame, assertNotSame, assertNull, assertNull, assertSame, assertSame, assertTrue, assertTrue, countTestCases, createResult, fail, fail, failNotEquals, failNotSame, failSame, format, getName, run, run, runBare, runTest, setName, setUp, tearDown, toString
-
    -
     
     
     
 Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait,

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
    index 50cc17f..c270b81 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
    @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-static class HRegion.WriteState
+static class HRegion.WriteState
 extends java.lang.Object
     
     
    @@ -239,7 +239,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 flushing
-volatile boolean flushing
+volatile boolean flushing
     
     
     
    @@ -248,7 +248,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 flushRequested
-volatile boolean flushRequested
+volatile boolean flushRequested
     
     
     
    @@ -257,7 +257,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 compacting
-java.util.concurrent.atomic.AtomicInteger compacting
+java.util.concurrent.atomic.AtomicInteger compacting
     
     
     
    @@ -266,7 +266,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 writesEnabled
-volatile boolean writesEnabled
+volatile boolean writesEnabled
     
     
     
    @@ -275,7 +275,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 readOnly
-volatile boolean readOnly
+volatile boolean readOnly
     
     
     
    @@ -284,7 +284,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 readsEnabled
-volatile boolean readsEnabled
+volatile boolean readsEnabled
     
     
     
    @@ -293,7 +293,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 HEAP_SIZE
-static final long HEAP_SIZE
+static final long HEAP_SIZE
     
     
     
    @@ -310,7 +310,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     WriteState
    -WriteState()
    +WriteState()
     
     
     
    @@ -327,7 +327,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     setReadOnly
-void setReadOnly(boolean onOff)
+void setReadOnly(boolean onOff)
     Set flags that make this region read-only.
     
     Parameters:
    @@ -341,7 +341,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     isReadOnly
-boolean isReadOnly()
+boolean isReadOnly()
     
     
     
    @@ -350,7 +350,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     isFlushRequested
-boolean isFlushRequested()
+boolean isFlushRequested()
     
     
     
    @@ -359,7 +359,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     setReadsEnabled
-void setReadsEnabled(boolean readsEnabled)
+void setReadsEnabled(boolean readsEnabled)
     
     
     
    
    
    
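The field types above hint at the coordination pattern: the status flags are plain volatile booleans (one writer, many readers, visibility only), while compacting is an AtomicInteger, presumably so that several concurrent compactions can be counted race-free. That reading is an assumption, since the diff does not show the call sites. A minimal sketch of the pattern, with hypothetical method names:

  import java.util.concurrent.atomic.AtomicInteger;

  class WriteStateSketch {
    volatile boolean flushing;                            // visibility only: single writer
    final AtomicInteger compacting = new AtomicInteger(); // race-free counter

    void compactionStarted()  { compacting.incrementAndGet(); }
    void compactionFinished() { compacting.decrementAndGet(); }

    boolean isBusy() { return flushing || compacting.get() > 0; }
  }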

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSCrashExecutor.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSCrashExecutor.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSCrashExecutor.html
    index f1db5ca..d8515d7 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSCrashExecutor.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSCrashExecutor.html
@@ -32,813 +32,820 @@
 024import static org.junit.Assert.fail;
 025
 026import java.io.IOException;
-027import java.net.SocketTimeoutException;
-028import java.util.NavigableMap;
-029import java.util.Random;
-030import java.util.Set;
-031import java.util.SortedSet;
-032import java.util.concurrent.ConcurrentSkipListMap;
-033import java.util.concurrent.ConcurrentSkipListSet;
-034import java.util.concurrent.ExecutionException;
-035import java.util.concurrent.Executors;
-036import java.util.concurrent.Future;
-037import java.util.concurrent.ScheduledExecutorService;
-038import java.util.concurrent.TimeUnit;
-039
-040import org.apache.hadoop.conf.Configuration;
-041import org.apache.hadoop.hbase.CategoryBasedTimeout;
-042import org.apache.hadoop.hbase.DoNotRetryIOException;
-043import org.apache.hadoop.hbase.HBaseTestingUtility;
-044import org.apache.hadoop.hbase.NotServingRegionException;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.TableName;
-047import org.apache.hadoop.hbase.client.RegionInfo;
-048import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-049import org.apache.hadoop.hbase.client.RetriesExhaustedException;
-050import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-051import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-052import org.apache.hadoop.hbase.master.MasterServices;
-053import org.apache.hadoop.hbase.master.RegionState.State;
-054import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-055import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-056import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
-057import org.apache.hadoop.hbase.procedure2.Procedure;
-058import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-059import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-060import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-061import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-062import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-063import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-064import org.apache.hadoop.hbase.testclassification.MasterTests;
-065import org.apache.hadoop.hbase.testclassification.MediumTests;
-066import org.apache.hadoop.hbase.util.Bytes;
-067import org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.ipc.RemoteException;
-069import org.junit.After;
-070import org.junit.Before;
-071import org.junit.Ignore;
-072import org.junit.Rule;
-073import org.junit.Test;
-074import org.junit.experimental.categories.Category;
-075import org.junit.rules.ExpectedException;
-076import org.junit.rules.TestName;
-077import org.junit.rules.TestRule;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-092
-093@Category({MasterTests.class, MediumTests.class})
-094public class TestAssignmentManager {
-095  private static final Logger LOG =

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html
    index 232ef56..bc3a6d0 100644
    --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html
    +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCell.html
@@ -29,610 +29,626 @@
 021import static org.junit.Assert.assertEquals;
 022import static org.junit.Assert.assertFalse;
 023import static org.junit.Assert.assertTrue;
-024
-025import java.io.ByteArrayOutputStream;
-026import java.io.IOException;
-027import java.math.BigDecimal;
-028import java.nio.ByteBuffer;
-029import java.util.ArrayList;
-030import java.util.List;
-031import java.util.NavigableMap;
-032import java.util.TreeMap;
-033import org.apache.hadoop.hbase.testclassification.MiscTests;
-034import org.apache.hadoop.hbase.testclassification.SmallTests;
-035import org.apache.hadoop.hbase.util.Bytes;
-036import org.junit.Assert;
-037import org.junit.Test;
-038import org.junit.experimental.categories.Category;
-039
-040@Category({MiscTests.class, SmallTests.class})
-041public class TestCellUtil {
-042  /**
-043   * CellScannable used in test. Returns a {@link TestCellScanner}
-044   */
-045  private static class TestCellScannable implements CellScannable {
-046    private final int cellsCount;
-047    TestCellScannable(final int cellsCount) {
-048      this.cellsCount = cellsCount;
-049    }
-050    @Override
-051    public CellScanner cellScanner() {
-052      return new TestCellScanner(this.cellsCount);
-053    }
-054  }
-055
-056  /**
-057   * CellScanner used in test.
-058   */
-059  private static class TestCellScanner implements CellScanner {
-060    private int count = 0;
-061    private Cell current = null;
-062    private final int cellsCount;
-063
-064    TestCellScanner(final int cellsCount) {
-065      this.cellsCount = cellsCount;
-066    }
-067
-068    @Override
-069    public Cell current() {
-070      return this.current;
-071    }
-072
-073    @Override
-074    public boolean advance() throws IOException {
-075      if (this.count < cellsCount) {
-076        this.current = new TestCell(this.count);
-077        this.count++;
-078        return true;
-079      }
-080      return false;
-081    }
-082  }
-083
-084  /**
-085   * Cell used in test. Has row only.
-086   */
-087  private static class TestCell implements Cell {
-088    private final byte [] row;
-089
-090    TestCell(final int i) {
-091      this.row = Bytes.toBytes(i);
-092    }
-093
-094    @Override
-095    public byte[] getRowArray() {
-096      return this.row;
-097    }
-098
-099    @Override
-100    public int getRowOffset() {
-101      return 0;
-102    }
-103
-104    @Override
-105    public short getRowLength() {
-106      return (short)this.row.length;
-107    }
-108
-109    @Override
-110    public byte[] getFamilyArray() {
-111      // TODO Auto-generated method stub
-112      return null;
-113    }
-114
-115    @Override
-116    public int getFamilyOffset() {
-117      // TODO Auto-generated method stub
-118      return 0;
-119    }
-120
-121    @Override
-122    public byte getFamilyLength() {
-123      // TODO Auto-generated method stub
-124      return 0;
-125    }
-126
-127    @Override
-128    public byte[] getQualifierArray() {
-129      // TODO Auto-generated method stub
-130      return null;
-131    }
-132
-133    @Override
-134    public int getQualifierOffset() {
-135      // TODO Auto-generated method stub
-136      return 0;
-137    }
-138
-139    @Override
-140    public int getQualifierLength() {
-141      // TODO Auto-generated method stub
-142      return 0;
-143    }
-144
-145    @Override
-146    public long getTimestamp() {
-147      // TODO Auto-generated method stub
-148      return 0;
-149    }
-150
-151    @Override
-152    public byte getTypeByte() {
-153      // TODO Auto-generated method stub
-154      return 0;
-155    }
-156
-157    @Override
-158    public byte[] getValueArray() {
-159      // TODO Auto-generated method stub
-160      return null;
-161    }
-162
-163    @Override
-164    public int getValueOffset() {
-165      // TODO Auto-generated method stub
-166      return 0;
-167    }
-168
-169    @Override
-170    public int getValueLength() {
-171      // TODO Auto-generated method stub
-172      return 0;
-173    }
-174
-175    @Override
-176    public byte[] getTagsArray() {
-177      // TODO Auto-generated method stub
-178      return null;
-179    }
-180
-181    @Override
-182    public int getTagsOffset() {
-183      // TODO Auto-generated method stub
-184      return 0;
-185    }
-186
-187    @Override
-188    public long getSequenceId() {
-189      // TODO Auto-generated method
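The two helper classes above implement the CellScanner protocol: advance() positions the scanner on the next Cell and returns false once exhausted, and current() is only valid after a successful advance(). A short usage sketch (drain is a hypothetical helper, not part of the test):

  import java.io.IOException;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.CellScanner;

  final class CellScannerSketch {
    static int drain(CellScanner scanner) throws IOException {
      int n = 0;
      while (scanner.advance()) {      // false when no cells remain
        Cell cell = scanner.current(); // valid only after advance() returned true
        n++;
      }
      return n;
    }
  }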

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html 
    b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
    index 9694561..2bcceb6 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.html
    @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-public class TestHStore
+public class TestHStore
 extends java.lang.Object
     Test class for the HStore
     
    @@ -613,7 +613,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
     
     
     
    @@ -622,7 +622,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
     
     
     
    @@ -631,7 +631,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     region
    -org.apache.hadoop.hbase.regionserver.HRegion region
    +org.apache.hadoop.hbase.regionserver.HRegion region
     
     
     
    @@ -640,7 +640,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     store
    -org.apache.hadoop.hbase.regionserver.HStore store
    +org.apache.hadoop.hbase.regionserver.HStore store
     
     
     
    @@ -649,7 +649,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     table
    -byte[] table
    +byte[] table
     
     
     
    @@ -658,7 +658,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     family
    -byte[] family
    +byte[] family
     
     
     
    @@ -667,7 +667,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     row
    -byte[] row
    +byte[] row
     
     
     
    @@ -676,7 +676,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     row2
    -byte[] row2
    +byte[] row2
     
     
     
    @@ -685,7 +685,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     qf1
    -byte[] qf1
    +byte[] qf1
     
     
     
    @@ -694,7 +694,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     qf2
    -byte[] qf2
    +byte[] qf2
     
     
     
    @@ -703,7 +703,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     qf3
    -byte[] qf3
    +byte[] qf3
     
     
     
    @@ -712,7 +712,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     qf4
    -byte[] qf4
    +byte[] qf4
     
     
     
    @@ -721,7 +721,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     qf5
    -byte[] qf5
    +byte[] qf5
     
     
     
    @@ -730,7 +730,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     qf6
    -byte[] qf6
    +byte[] qf6
     
     
     
    @@ -739,7 +739,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 qualifiers
-java.util.NavigableSet<byte[]> qualifiers
+java.util.NavigableSet<byte[]> qualifiers
     
     
     
    @@ -748,7 +748,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 expected
-java.util.List<org.apache.hadoop.hbase.Cell> expected
+java.util.List<org.apache.hadoop.hbase.Cell> expected
     
     
     
    @@ -757,7 +757,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 result
-java.util.List<org.apache.hadoop.hbase.Cell> result
+java.util.List<org.apache.hadoop.hbase.Cell> result
     
     
     
    @@ -766,7 +766,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     id
    -long id
    +long id
     
     
     
    @@ -775,7 +775,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     get
    -org.apache.hadoop.hbase.client.Get get
    +org.apache.hadoop.hbase.client.Get get
     
     
     
    @@ -784,7 +784,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL
     
     
     
    @@ -793,7 +793,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     DIR
-private static final java.lang.String DIR
    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html 
    b/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
    index 1301239..59c238b 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.html
    @@ -303,7 +303,9 @@ extends 
     RemoteProcedureDispatcher.RemoteOperation
 remoteCallBuild(MasterProcedureEnv env,
-   ServerName serverName)
+   ServerName serverName)
    +For building the remote operation.
    +
     
     
     protected boolean
    @@ -350,7 +352,7 @@ extends RegionTransitionProcedure
-abort, acquireLock, addToRemoteDispatcher, execute, getRegionInfo, getRegionState, getTableName, getTransitionState, hasLock, holdLock, isMeta, isServerOnline, isServerOnline, releaseLock, remoteCallCompleted, remoteCallFailed, reportTransition, rollback, setRegionInfo, setTransitionState, shouldWaitClientAck, toStringState
+abort, acquireLock, addToRemoteDispatcher, execute, getRegionInfo, getRegionState, getTableName, getTransitionState, hasLock, holdLock, isMeta, isServerOnline, isServerOnline, releaseLock, remoteCallFailed, remoteOperationCompleted, remoteOperationFailed, reportTransition, rollback, setRegionInfo, setTransitionState, shouldWaitClientAck, toStringState
     
     
     
@@ -600,6 +602,8 @@ extends RemoteProcedureDispatcher.RemoteOperation remoteCallBuild(MasterProcedureEnv env,
 ServerName serverName)
+Description copied from interface: RemoteProcedureDispatcher.RemoteProcedure
+For building the remote operation.
     
     Specified by:
     remoteCallBuildin
     interfaceRemoteProcedureDispatcher.RemoteProcedureMasterProcedureEnv,ServerName
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
    index b99a6b3..f82f773 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
    @@ -197,8 +197,8 @@
     
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
     
    -org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType
     org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type
    +org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.html 
    b/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.html
    index f9e5288..f47901d 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationMetaCleaner.html
    @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
     
     
     PrevClass
    -NextClass
    +NextClass
     
     
     Frames
    @@ -344,7 +344,7 @@ extends 
     
     PrevClass
    -NextClass
    +NextClass
     
     
     Frames
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html
    deleted file mode 100644
    index 9b47c21..000
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/cleaner/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html
    +++ /dev/null
    @@ -1,350 +0,0 @@
-
-ReplicationZKNodeCleaner.ReplicationQueueDeletor (Apache HBase 3.0.0-SNAPSHOT API)
-
    +org.apache.hadoop.hbase.io.encoding
    +Class BufferedDataBlockEncoder.OffheapDecodedExtendedCell
    +
    +
    +
+java.lang.Object
    +
    +
    +org.apache.hadoop.hbase.ByteBufferExtendedCell
    +
    +
    +org.apache.hadoop.hbase.io.encoding.BufferedDataBlockEncoder.OffheapDecodedExtendedCell
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +All Implemented Interfaces:
+java.lang.Cloneable, Cell, ExtendedCell, HeapSize, RawCell
    +
    +
    +Enclosing class:
    +BufferedDataBlockEncoder
    +
    +
    +
    +protected static class BufferedDataBlockEncoder.OffheapDecodedExtendedCell
    +extends ByteBufferExtendedCell
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Nested Class Summary
    +
    +
    +
    +
    +Nested classes/interfaces inherited from 
    interfaceorg.apache.hadoop.hbase.Cell
    +Cell.Type
    +
    +
    +
    +
    +
    +
    +
    +
    +Field Summary
    +
    +Fields
    +
    +Modifier and Type
    +Field and Description
    +
    +
    +private byte
    +familyLength
    +
    +
    +private int
    +familyOffset
    +
    +
    +private static long
    +FIXED_OVERHEAD
    +
    +
+private java.nio.ByteBuffer
    +keyBuffer
    +
    +
    +private int
    +qualifierLength
    +
    +
    +private int
    +qualifierOffset
    +
    +
    +private short
    +rowLength
    +
    +
    +private long
    +seqId
    +
    +
+private java.nio.ByteBuffer
    +tagsBuffer
    +
    +
    +private int
    +tagsLength
    +
    +
    +private int
    +tagsOffset
    +
    +
    +private long
    +timestamp
    +
    +
    +private byte
    +typeByte
    +
    +
+private java.nio.ByteBuffer
    +valueBuffer
    +
    +
    +private int
    +valueLength
    +
    +
    +private int
    +valueOffset
    +
    +
    +
    +
    +
    +
    +Fields inherited from interfaceorg.apache.hadoop.hbase.ExtendedCell
    +CELL_NOT_BASED_ON_CHUNK
    +
    +
    +
    +
    +
    +Fields inherited from interfaceorg.apache.hadoop.hbase.RawCell
    +MAX_TAGS_LENGTH
    +
    +
    +
    +
    +
    +
    +
    +
    +Constructor Summary
    +
    +Constructors
    +
    +Modifier
    +Constructor and Description
    +
    +
+protected 
+OffheapDecodedExtendedCell(java.nio.ByteBuffer keyBuffer,
+  short rowLength,
+  int familyOffset,
+  byte familyLength,
+  int qualOffset,
+  int qualLength,
+  long timeStamp,
+  byte typeByte,
+  java.nio.ByteBuffer valueBuffer,
+  

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
    index 3400507..2baa140 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
@@ -28,3034 +28,2926 @@
 020import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
 021import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
 022
-023import com.google.common.annotations.VisibleForTesting;
-024
-025import java.io.DataOutput;
-026import java.io.DataOutputStream;
-027import java.io.IOException;
-028import java.io.OutputStream;
-029import java.math.BigDecimal;
-030import java.nio.ByteBuffer;
-031import java.util.ArrayList;
-032import java.util.Iterator;
-033import java.util.List;
-034import java.util.Optional;
-035
-036import org.apache.hadoop.hbase.KeyValue.Type;
-037import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-038import org.apache.hadoop.hbase.io.HeapSize;
-039import org.apache.hadoop.hbase.io.TagCompressionContext;
-040import org.apache.hadoop.hbase.io.util.Dictionary;
-041import org.apache.hadoop.hbase.io.util.StreamUtils;
-042import org.apache.hadoop.hbase.util.ByteBufferUtils;
-043import org.apache.hadoop.hbase.util.ByteRange;
-044import org.apache.hadoop.hbase.util.Bytes;
-045import org.apache.hadoop.hbase.util.ClassSize;
-046import org.apache.yetus.audience.InterfaceAudience;
-047
-048
-049/**
-050 * Utility methods helpful slinging {@link Cell} instances. It has more powerful and
-051 * rich set of APIs than those in {@link CellUtil} for internal usage.
-052 */
-053@InterfaceAudience.Private
-054public final class PrivateCellUtil {
-055
-056  /**
-057   * Private constructor to keep this class from being instantiated.
-058   */
-059  private PrivateCellUtil() {
-060  }
+023import java.io.DataOutput;
+024import java.io.DataOutputStream;
+025import java.io.IOException;
+026import java.io.OutputStream;
+027import java.math.BigDecimal;
+028import java.nio.ByteBuffer;
+029import java.util.ArrayList;
+030import java.util.Iterator;
+031import java.util.List;
+032import java.util.Optional;
+033import org.apache.hadoop.hbase.KeyValue.Type;
+034import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+035import org.apache.hadoop.hbase.io.HeapSize;
+036import org.apache.hadoop.hbase.io.TagCompressionContext;
+037import org.apache.hadoop.hbase.io.util.Dictionary;
+038import org.apache.hadoop.hbase.io.util.StreamUtils;
+039import org.apache.hadoop.hbase.util.ByteBufferUtils;
+040import org.apache.hadoop.hbase.util.ByteRange;
+041import org.apache.hadoop.hbase.util.Bytes;
+042import org.apache.hadoop.hbase.util.ClassSize;
+043import org.apache.yetus.audience.InterfaceAudience;
+044
+045import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+046
+047/**
+048 * Utility methods helpful slinging {@link Cell} instances. It has more powerful and
+049 * rich set of APIs than those in {@link CellUtil} for internal usage.
+050 */
+051@InterfaceAudience.Private
+052public final class PrivateCellUtil {
+053
+054  /**
+055   * Private constructor to keep this class from being instantiated.
+056   */
+057  private PrivateCellUtil() {
+058  }
+059
+060  /*** ByteRange ***/
 061
-062  /*** ByteRange ***/
-063
-064  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
-065    return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-066  }
-067
-068  public static ByteRange fillFamilyRange(Cell cell, ByteRange range) {
-069    return range.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
-070  }
-071
-072  public static ByteRange fillQualifierRange(Cell cell, ByteRange range) {
-073    return range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
-074      cell.getQualifierLength());
-075  }
-076
-077  public static ByteRange fillValueRange(Cell cell, ByteRange range) {
-078    return range.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
-079  }
-080
-081  public static ByteRange fillTagRange(Cell cell, ByteRange range) {
-082    return range.set(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
-083  }
+062  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
+063    return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
+064  }
+065
+066  public static ByteRange fillFamilyRange(Cell cell, ByteRange range) {
+067    return range.set(cell.getFamilyArray(),
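A hedged usage sketch for the fill*Range helpers above: they point a reusable ByteRange at a slice of the cell's backing array without copying. Note that PrivateCellUtil is @InterfaceAudience.Private, so this is internal-API illustration only; SimpleMutableByteRange is one concrete ByteRange implementation, and inspectRow is a hypothetical caller:

  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.PrivateCellUtil;
  import org.apache.hadoop.hbase.util.ByteRange;
  import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

  final class ByteRangeSketch {
    // One range reused across many cells avoids a per-cell allocation.
    private final ByteRange rowRange = new SimpleMutableByteRange();

    void inspectRow(Cell cell) {
      PrivateCellUtil.fillRowRange(cell, rowRange);
      // rowRange now views the cell's row bytes; no copy was made.
    }
  }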

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    index 9d49b9a..c36fdce 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    @@ -165,3380 +165,3375 @@
 157import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 158import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 159import org.apache.hadoop.hbase.replication.ReplicationException;
-160import org.apache.hadoop.hbase.replication.ReplicationFactory;
-161import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-162import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-163import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-164import org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
-165import org.apache.hadoop.hbase.replication.regionserver.Replication;
-166import org.apache.hadoop.hbase.security.AccessDeniedException;
-167import org.apache.hadoop.hbase.security.UserProvider;
-168import org.apache.hadoop.hbase.trace.TraceUtil;
-169import org.apache.hadoop.hbase.util.Addressing;
-170import org.apache.hadoop.hbase.util.Bytes;
-171import org.apache.hadoop.hbase.util.CompressionTest;
-172import org.apache.hadoop.hbase.util.EncryptionTest;
-173import org.apache.hadoop.hbase.util.FSUtils;
-174import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-175import org.apache.hadoop.hbase.util.HasThread;
-176import org.apache.hadoop.hbase.util.IdLock;
-177import org.apache.hadoop.hbase.util.ModifyRegionUtils;
-178import org.apache.hadoop.hbase.util.Pair;
-179import org.apache.hadoop.hbase.util.Threads;
-180import org.apache.hadoop.hbase.util.VersionInfo;
-181import org.apache.hadoop.hbase.util.ZKDataMigrator;
-182import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
-183import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-184import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
-185import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
-186import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-187import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-188import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-189import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-190import org.apache.yetus.audience.InterfaceAudience;
-191import org.apache.zookeeper.KeeperException;
-192import org.eclipse.jetty.server.Server;
-193import org.eclipse.jetty.server.ServerConnector;
-194import org.eclipse.jetty.servlet.ServletHolder;
-195import org.eclipse.jetty.webapp.WebAppContext;
-196import org.slf4j.Logger;
-197import org.slf4j.LoggerFactory;
-198
-199import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-200import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-201import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-202import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-203import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
-204import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-205import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-206import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
-207import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-208import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
-209
-210/**
-211 * HMaster is the "master server" for HBase. An HBase cluster has one active
-212 * master.  If many masters are started, all compete.  Whichever wins goes on to
-213 * run the cluster.  All others park themselves in their constructor until
-214 * master or cluster shutdown or until the active master loses its lease in
-215 * zookeeper.  Thereafter, all running masters jostle to take over the master role.
-216 *
-217 * <p>The Master can be asked to shut down the cluster. See {@link #shutdown()}.  In
-218 * this case it will tell all regionservers to go down and then wait on them
-219 * all reporting in that they are down.  This master will then shut itself down.
+160import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+161import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+162import org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
+163import org.apache.hadoop.hbase.replication.regionserver.Replication;
+164import 

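The deleted HMaster class comment above documents the active-master contract: many masters start, one wins, the winner holds an ephemeral lease in ZooKeeper, and the losers park until that lease disappears. As a rough, hedged illustration of that compete-and-park pattern (this is not HBase's ActiveMasterManager; the znode path, connect string, and master name are invented for the example), a bare-ZooKeeper sketch:

    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class ToyMasterElection {
      // Illustrative path only; HBase uses its own znode layout.
      private static final String MASTER_ZNODE = "/toy/master";

      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("localhost:2181", 30_000, event -> { });
        byte[] myName = "master-1".getBytes(StandardCharsets.UTF_8);
        while (true) {
          try {
            // The ephemeral znode is the "lease": it vanishes when our session dies.
            zk.create(MASTER_ZNODE, myName, ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            System.out.println("won election, running as active master");
            return; // a real master would now run the cluster (zk left open for the sketch)
          } catch (KeeperException.NodeExistsException e) {
            // Somebody else is active: park until the znode goes away, then compete again.
            CountDownLatch gone = new CountDownLatch(1);
            if (zk.exists(MASTER_ZNODE, we -> {
                  if (we.getType() == Watcher.Event.EventType.NodeDeleted) {
                    gone.countDown();
                  }
                }) == null) {
              continue; // already deleted between create() and exists(); retry immediately
            }
            gone.await();
          }
        }
      }
    }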
    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
index 6fecbc9..2accda0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
    @@ -34,4140 +34,4141 @@
 026import java.nio.charset.StandardCharsets;
 027import java.util.ArrayList;
 028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.EnumSet;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.LinkedList;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Set;
-037import java.util.concurrent.Callable;
-038import java.util.concurrent.ExecutionException;
-039import java.util.concurrent.Future;
-040import java.util.concurrent.TimeUnit;
-041import java.util.concurrent.TimeoutException;
-042import java.util.concurrent.atomic.AtomicInteger;
-043import java.util.concurrent.atomic.AtomicReference;
-044import java.util.regex.Pattern;
-045import java.util.stream.Collectors;
-046import java.util.stream.Stream;
-047import org.apache.hadoop.conf.Configuration;
-048import org.apache.hadoop.hbase.Abortable;
-049import org.apache.hadoop.hbase.CacheEvictionStats;
-050import org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import org.apache.hadoop.hbase.ClusterStatus;
-053import org.apache.hadoop.hbase.DoNotRetryIOException;
-054import org.apache.hadoop.hbase.HBaseConfiguration;
-055import org.apache.hadoop.hbase.HConstants;
-056import org.apache.hadoop.hbase.HRegionInfo;
-057import org.apache.hadoop.hbase.HRegionLocation;
-058import org.apache.hadoop.hbase.HTableDescriptor;
-059import org.apache.hadoop.hbase.MasterNotRunningException;
-060import org.apache.hadoop.hbase.MetaTableAccessor;
-061import org.apache.hadoop.hbase.NamespaceDescriptor;
-062import org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import org.apache.hadoop.hbase.NotServingRegionException;
-064import org.apache.hadoop.hbase.RegionLoad;
-065import org.apache.hadoop.hbase.RegionLocations;
-066import org.apache.hadoop.hbase.ServerName;
-067import org.apache.hadoop.hbase.TableExistsException;
-068import org.apache.hadoop.hbase.TableName;
-069import org.apache.hadoop.hbase.TableNotDisabledException;
-070import org.apache.hadoop.hbase.TableNotFoundException;
-071import org.apache.hadoop.hbase.UnknownRegionException;
-072import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-073import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-074import org.apache.hadoop.hbase.client.replication.TableCFs;
-075import org.apache.hadoop.hbase.client.security.SecurityCapability;
-076import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-077import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-078import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-079import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-080import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-081import org.apache.hadoop.hbase.quotas.QuotaFilter;
-082import org.apache.hadoop.hbase.quotas.QuotaRetriever;
-083import org.apache.hadoop.hbase.quotas.QuotaSettings;
-084import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-085import org.apache.hadoop.hbase.replication.ReplicationException;
-086import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-087import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-088import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-090import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-091import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-092import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-093import org.apache.hadoop.hbase.util.Addressing;
-094import org.apache.hadoop.hbase.util.Bytes;
-095import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-096import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-097import org.apache.hadoop.hbase.util.Pair;
-098import org.apache.hadoop.ipc.RemoteException;
-099import org.apache.hadoop.util.StringUtils;
-100import org.apache.yetus.audience.InterfaceAudience;
-101import org.apache.yetus.audience.InterfaceStability;
-102import org.slf4j.Logger;
-103import org.slf4j.LoggerFactory;
-104
-105import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-106import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-107import 

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/org/apache/hadoop/hbase/ClusterMetricsBuilder.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/ClusterMetricsBuilder.html b/devapidocs/org/apache/hadoop/hbase/ClusterMetricsBuilder.html
    new file mode 100644
    index 000..42ae20b
    --- /dev/null
    +++ b/devapidocs/org/apache/hadoop/hbase/ClusterMetricsBuilder.html
    @@ -0,0 +1,709 @@
+ClusterMetricsBuilder (Apache HBase 3.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase
+Class ClusterMetricsBuilder
+
+java.lang.Object
+  org.apache.hadoop.hbase.ClusterMetricsBuilder
+
+@InterfaceAudience.Private
+public final class ClusterMetricsBuilder
+extends java.lang.Object
+
+Nested Class Summary
+Modifier and Type     Class and Description
+private static class  ClusterMetricsBuilder.ClusterMetricsImpl
+
+Field Summary
+Modifier and Type                      Field and Description
+private List<ServerName>               backupMasterNames
+private Boolean                        balancerOn
+private String                         clusterId
+private List<ServerName>               deadServerNames
+private String                         hbaseVersion
+private Map<ServerName,ServerMetrics>  liveServerMetrics
+private List<String>                   masterCoprocessorNames
+private int                            masterInfoPort
+private ServerName                     masterName
+private List<RegionState>              regionsInTransition
+
+Constructor Summary
+Modifier  Constructor and Description
+private   ClusterMetricsBuilder()
+
+Method Summary
+Modifier and Type             Method and Description
+ClusterMetrics                build()
+static ClusterMetricsBuilder  newBuilder()
+ClusterMetricsBuilder         setBackerMasterNames(List<ServerName> value)
+ClusterMetricsBuilder         setBalancerOn(Boolean value)
+ClusterMetricsBuilder         setClusterId(String value)
+ClusterMetricsBuilder         
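A hedged usage sketch for the builder summarized above: only newBuilder(), build(), and the setters listed in the Method Summary (including the oddly spelled setBackerMasterNames, kept verbatim from the page) are taken from the excerpt; the argument values and chaining straight to build() are illustrative assumptions, and the class is @InterfaceAudience.Private in any case.

    import java.util.Collections;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.ClusterMetricsBuilder;

    public class ClusterMetricsBuilderExample {
      public static void main(String[] args) {
        // Each setter returns the builder (per the Method Summary), so calls chain.
        ClusterMetrics metrics = ClusterMetricsBuilder.newBuilder()
            .setClusterId("test-cluster")                  // String
            .setBalancerOn(Boolean.TRUE)                   // Boolean
            .setBackerMasterNames(Collections.emptyList()) // List<ServerName>
            .build();
        System.out.println(metrics);
      }
    }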
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html b/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
    index 2db8dcc..22a1749 100644
    --- a/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
    +++ b/devapidocs/org/apache/hadoop/hbase/backup/HFileArchiver.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class HFileArchiver
+public class HFileArchiver
 extends java.lang.Object
 Utility class to handle the removal of HFiles (or the respective StoreFiles)
 for a HRegion from the FileSystem. The hfiles will be archived or deleted, depending on
@@ -198,7 +198,7 @@ extends java.lang.Object
 FUNC_FILE_TO_PATH
 
 
-private static org.apache.commons.logging.Log
+private static org.slf4j.Logger
 LOG
 
 
@@ -366,7 +366,7 @@ extends java.lang.Object
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -375,7 +375,7 @@ extends java.lang.Object
 
 
 SEPARATOR
-private static final String SEPARATOR
+private static final String SEPARATOR
 
 See Also:
 Constant Field Values
@@ -388,7 +388,7 @@ extends java.lang.Object
 
 
 DEFAULT_RETRIES_NUMBER
-private static final int DEFAULT_RETRIES_NUMBER
+private static final int DEFAULT_RETRIES_NUMBER
 Number of retries in case of fs operation failure
 
 See Also:
@@ -402,7 +402,7 @@ extends java.lang.Object
 
 
 FUNC_FILE_TO_PATH
-private static final Function<HFileArchiver.File,org.apache.hadoop.fs.Path> FUNC_FILE_TO_PATH
+private static final Function<HFileArchiver.File,org.apache.hadoop.fs.Path> FUNC_FILE_TO_PATH
 
 
 
@@ -419,7 +419,7 @@ extends java.lang.Object
 
 
 HFileArchiver
-private HFileArchiver()
+private HFileArchiver()
 
 
 
@@ -436,7 +436,7 @@ extends java.lang.Object
 
 
 exists
-public static boolean exists(org.apache.hadoop.conf.Configuration conf,
+public static boolean exists(org.apache.hadoop.conf.Configuration conf,
                              org.apache.hadoop.fs.FileSystem fs,
                              RegionInfo info)
                       throws java.io.IOException
@@ -454,7 +454,7 @@ extends java.lang.Object
 
 
 archiveRegion
-public static void archiveRegion(org.apache.hadoop.conf.Configuration conf,
+public static void archiveRegion(org.apache.hadoop.conf.Configuration conf,
                                  org.apache.hadoop.fs.FileSystem fs,
                                  RegionInfo info)
                           throws java.io.IOException
@@ -476,7 +476,7 @@ extends java.lang.Object
 
 
 archiveRegion
-public static boolean archiveRegion(org.apache.hadoop.fs.FileSystem fs,
+public static boolean archiveRegion(org.apache.hadoop.fs.FileSystem fs,
                                     org.apache.hadoop.fs.Path rootdir,
                                     org.apache.hadoop.fs.Path tableDir,
                                     org.apache.hadoop.fs.Path regionDir)
@@ -503,7 +503,7 @@ extends java.lang.Object
 
 
 archiveFamily
-public static void archiveFamily(org.apache.hadoop.fs.FileSystem fs,
+public static void archiveFamily(org.apache.hadoop.fs.FileSystem fs,
                                  org.apache.hadoop.conf.Configuration conf,
                                  RegionInfo parent,
                                  org.apache.hadoop.fs.Path tableDir,
@@ -529,7 +529,7 @@ extends java.lang.Object
 
 
 archiveFamilyByFamilyDir
-public static void archiveFamilyByFamilyDir(org.apache.hadoop.fs.FileSystem fs,
+public static void archiveFamilyByFamilyDir(org.apache.hadoop.fs.FileSystem fs,
    

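A hedged sketch of driving the static helpers whose signatures appear above. HFileArchiver is @InterfaceAudience.Private, so this is illustration of the API shape rather than a supported recipe; the Configuration, FileSystem, and table name are placeholders, not working values.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.HFileArchiver;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;

    public class ArchiveRegionExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        RegionInfo region = RegionInfoBuilder.newBuilder(TableName.valueOf("t1")).build();
        if (HFileArchiver.exists(conf, fs, region)) {
          // Moves the region's HFiles under the archive directory instead of deleting them.
          HFileArchiver.archiveRegion(conf, fs, region);
        }
      }
    }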
    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/types/TerminatedWrapper.html
    --
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/types/TerminatedWrapper.html b/apidocs/src-html/org/apache/hadoop/hbase/types/TerminatedWrapper.html
index 8ca79a6..5ed25c3 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/types/TerminatedWrapper.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/types/TerminatedWrapper.html
@@ -25,11 +25,11 @@
 017 */
 018package org.apache.hadoop.hbase.types;
 019
-020import org.apache.yetus.audience.InterfaceAudience;
-021import org.apache.hadoop.hbase.util.Bytes;
-022import org.apache.hadoop.hbase.util.Order;
-023import org.apache.hadoop.hbase.util.PositionedByteRange;
-024import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;
+020import org.apache.hadoop.hbase.util.Bytes;
+021import org.apache.hadoop.hbase.util.Order;
+022import org.apache.hadoop.hbase.util.PositionedByteRange;
+023import org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange;
+024import org.apache.yetus.audience.InterfaceAudience;
 025
 026/**
 027 * Wraps an existing {@code DataType} implementation as a terminated
@@ -48,124 +48,145 @@
 040   * @throws IllegalArgumentException when {@code term} is null or empty.
 041   */
 042  public TerminatedWrapper(DataType<T> wrapped, byte[] term) {
-043    if (null == term || term.length == 0)
+043    if (null == term || term.length == 0) {
 044      throw new IllegalArgumentException("terminator must be non-null and non-empty.");
-045    this.wrapped = wrapped;
-046    wrapped.getOrder().apply(term);
-047    this.term = term;
-048  }
-049
-050  /**
-051   * Create a terminated version of the {@code wrapped}.
-052   * {@code term} is converted to a {@code byte[]} using
-053   * {@link Bytes#toBytes(String)}.
-054   * @throws IllegalArgumentException when {@code term} is null or empty.
-055   */
-056  public TerminatedWrapper(DataType<T> wrapped, String term) {
-057    this(wrapped, Bytes.toBytes(term));
-058  }
-059
-060  @Override
-061  public boolean isOrderPreserving() { return wrapped.isOrderPreserving(); }
-062
-063  @Override
-064  public Order getOrder() { return wrapped.getOrder(); }
+045    }
+046    this.wrapped = wrapped;
+047    wrapped.getOrder().apply(term);
+048    this.term = term;
+049  }
+050
+051  /**
+052   * Create a terminated version of the {@code wrapped}.
+053   * {@code term} is converted to a {@code byte[]} using
+054   * {@link Bytes#toBytes(String)}.
+055   * @throws IllegalArgumentException when {@code term} is null or empty.
+056   */
+057  public TerminatedWrapper(DataType<T> wrapped, String term) {
+058    this(wrapped, Bytes.toBytes(term));
+059  }
+060
+061  @Override
+062  public boolean isOrderPreserving() {
+063    return wrapped.isOrderPreserving();
+064  }
 065
 066  @Override
-067  public boolean isNullable() { return wrapped.isNullable(); }
-068
-069  @Override
-070  public boolean isSkippable() { return true; }
-071
-072  @Override
-073  public int encodedLength(T val) {
-074    return wrapped.encodedLength(val) + term.length;
-075  }
-076
-077  @Override
-078  public Class<T> encodedClass() { return wrapped.encodedClass(); }
-079
-080  /**
-081   * Return the position at which {@code term} begins within {@code src},
-082   * or {@code -1} if {@code term} is not found.
-083   */
-084  protected int terminatorPosition(PositionedByteRange src) {
-085    byte[] a = src.getBytes();
-086    final int offset = src.getOffset();
-087    int i;
-088    SKIP: for (i = src.getPosition(); i < src.getLength(); i++) {
-089      if (a[offset + i] != term[0]) continue;
-090      int j;
-091      for (j = 1; j < term.length && offset + j < src.getLength(); j++) {
-092        if (a[offset + i + j] != term[j]) continue SKIP;
-093      }
-094      if (j == term.length) return i; // success
-095    }
-096    return -1;
-097  }
-098
-099  /**
-100   * Skip {@code src}'s position forward over one encoded value.
-101   * @param src the buffer containing the encoded value.
-102   * @return number of bytes skipped.
-103   * @throws IllegalArgumentException when the terminator sequence is not found.
-104   */
-105  @Override
-106  public int skip(PositionedByteRange src) {
-107    if (wrapped.isSkippable()) {
-108      int ret = wrapped.skip(src);
-109      src.setPosition(src.getPosition() + term.length);
-110      return ret + term.length;
-111    } else {
-112      // find the terminator position
-113      final int start = src.getPosition();
-114      int skipped = terminatorPosition(src);
-115      if (-1 == skipped) throw new IllegalArgumentException("Terminator sequence not found.");
-116      skipped += term.length;
-117      src.setPosition(skipped);
-118      return skipped - start;
-119    }
-120  }
-121
+067  public Order getOrder() {
+068    return 

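The non-skippable branch of skip() above leans on terminatorPosition(), a labeled-loop scan for the first occurrence of the terminator bytes. A standalone sketch of that scan over plain arrays (PositionedByteRange swapped out for a byte[] plus a start index; the sample bytes are invented):

    public class TerminatorScan {
      // Return the index at which term begins within a (searching from 'from'), or -1.
      static int terminatorPosition(byte[] a, int from, byte[] term) {
        SKIP: for (int i = from; i <= a.length - term.length; i++) {
          for (int j = 0; j < term.length; j++) {
            if (a[i + j] != term[j]) {
              continue SKIP; // mismatch: restart the match at the next offset
            }
          }
          return i; // full terminator matched at i
        }
        return -1;
      }

      public static void main(String[] args) {
        byte[] encoded = {'f', 'o', 'o', 0x00, 'b', 'a', 'r'};
        byte[] term = {0x00};
        // Prints 3; skip() would then advance the position past 3 + term.length.
        System.out.println(terminatorPosition(encoded, 0, term));
      }
    }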
    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
index a9c1142..12ade22 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
@@ -388,196 +388,200 @@
 380
 381  @Override
 382  public DatanodeInfo[] getPipeline() {
-383    State state = this.state;
-384    return state == State.STREAMING || state == State.CLOSING ? locations : new DatanodeInfo[0];
-385  }
-386
-387  private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf,
-388      long nextPacketOffsetInBlock, boolean syncBlock) {
-389    int dataLen = dataBuf.readableBytes();
-390    int chunkLen = summer.getBytesPerChecksum();
-391    int trailingPartialChunkLen = dataLen % chunkLen;
-392    int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
-393    int checksumLen = numChecks * summer.getChecksumSize();
-394    ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
-395    summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
-396    checksumBuf.writerIndex(checksumLen);
-397    PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock,
-398        nextPacketSeqno, false, dataLen, syncBlock);
-399    int headerLen = header.getSerializedSize();
-400    ByteBuf headerBuf = alloc.buffer(headerLen);
-401    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
-402    headerBuf.writerIndex(headerLen);
-403    Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeList);
-404    waitingAckQueue.addLast(c);
-405    // recheck again after we pushed the callback to queue
-406    if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) {
-407      future.completeExceptionally(new IOException("stream already broken"));
-408      // it's the one we have just pushed or just a no-op
-409      waitingAckQueue.removeFirst();
-410      return;
-411    }
-412    datanodeList.forEach(ch -> {
-413      ch.write(headerBuf.retainedDuplicate());
-414      ch.write(checksumBuf.retainedDuplicate());
-415      ch.writeAndFlush(dataBuf.retainedDuplicate());
-416    });
-417    checksumBuf.release();
-418    headerBuf.release();
-419    dataBuf.release();
-420    nextPacketSeqno++;
-421  }
-422
-423  private void flush0(CompletableFuture<Long> future, boolean syncBlock) {
-424    if (state != State.STREAMING) {
-425      future.completeExceptionally(new IOException("stream already broken"));
-426      return;
-427    }
-428    int dataLen = buf.readableBytes();
-429    if (dataLen == trailingPartialChunkLength) {
-430      // no new data
-431      long lengthAfterFlush = nextPacketOffsetInBlock + dataLen;
-432      Callback lastFlush = waitingAckQueue.peekLast();
-433      if (lastFlush != null) {
-434        Callback c = new Callback(future, lengthAfterFlush, Collections.emptyList());
-435        waitingAckQueue.addLast(c);
-436        // recheck here if we have already removed the previous callback from the queue
-437        if (waitingAckQueue.peekFirst() == c) {
-438          // all previous callbacks have been removed
-439          // notice that this does mean we will always win here because the background thread may
-440          // have already started to mark the future here as completed in the completed or failed
-441          // methods but haven't removed it from the queue yet. That's also why the removeFirst
-442          // call below may be a no-op.
-443          if (state != State.STREAMING) {
-444            future.completeExceptionally(new IOException("stream already broken"));
-445          } else {
-446            future.complete(lengthAfterFlush);
-447          }
-448          // it's the one we have just pushed or just a no-op
-449          waitingAckQueue.removeFirst();
-450        }
-451      } else {
-452        // we must have acked all the data so the ackedBlockLength must be same with
-453        // lengthAfterFlush
-454        future.complete(lengthAfterFlush);
-455      }
-456      return;
-457    }
-458
-459    if (encryptor != null) {
-460      ByteBuf encryptBuf = alloc.directBuffer(dataLen);
-461      buf.readBytes(encryptBuf, trailingPartialChunkLength);
-462      int toEncryptLength = dataLen - trailingPartialChunkLength;
-463      try {
-464        encryptor.encrypt(buf.nioBuffer(trailingPartialChunkLength, toEncryptLength),
-465            encryptBuf.nioBuffer(trailingPartialChunkLength, toEncryptLength));
-466  

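flushBuffer() above sizes each outgoing packet from the checksum chunking. A worked example of that arithmetic, assuming the common HDFS defaults of 512-byte chunks and 4-byte checksums (the real values come from whatever DataChecksum the "summer" field holds):

    public class PacketMath {
      public static void main(String[] args) {
        int dataLen = 1300;     // bytes buffered for this packet (illustrative)
        int chunkLen = 512;     // summer.getBytesPerChecksum()
        int checksumSize = 4;   // summer.getChecksumSize()

        int trailingPartialChunkLen = dataLen % chunkLen;                            // 276
        int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0); // 3
        int checksumLen = numChecks * checksumSize;                                  // 12

        // PacketHeader's first argument: 4-byte length prefix + checksums + data.
        int payloadLen = 4 + checksumLen + dataLen;                                  // 1316
        System.out.println(numChecks + " chunks, " + checksumLen
            + "B of checksums, payload " + payloadLen + "B");
      }
    }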
    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/executor/EventType.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/executor/EventType.html b/devapidocs/src-html/org/apache/hadoop/hbase/executor/EventType.html
index 851e7d3..97311b3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/executor/EventType.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/executor/EventType.html
@@ -7,322 +7,318 @@
 
 
 001/**
-002 *
-003 * Licensed to the Apache Software Foundation (ASF) under one
-004 * or more contributor license agreements.  See the NOTICE file
-005 * distributed with this work for additional information
-006 * regarding copyright ownership.  The ASF licenses this file
-007 * to you under the Apache License, Version 2.0 (the
-008 * "License"); you may not use this file except in compliance
-009 * with the License.  You may obtain a copy of the License at
-010 *
-011 * http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or agreed to in writing, software
-014 * distributed under the License is distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-016 * See the License for the specific language governing permissions and
-017 * limitations under the License.
-018 */
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 * http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.executor;
 019
-020package org.apache.hadoop.hbase.executor;
+020import org.apache.yetus.audience.InterfaceAudience;
 021
-022import org.apache.yetus.audience.InterfaceAudience;
-023
-024/**
-025 * List of all HBase event handler types.  Event types are named by a
-026 * convention: event type names specify the component from which the event
-027 * originated and then where it's destined -- e.g. RS2ZK_ prefix means the
-028 * event came from a regionserver destined for zookeeper -- and then what
-029 * the event is; e.g. REGION_OPENING.
-030 *
-031 * <p>We give the enums indices so we can add types later and keep them
-032 * grouped together rather than have to add them always to the end as we
-033 * would have to if we used raw enum ordinals.
-034 */
-035@InterfaceAudience.Private
-036public enum EventType {
-037  // Messages originating from RS (NOTE: there is NO direct communication from
-038  // RS to Master). These are a result of RS updates into ZK.
-039  // RS_ZK_REGION_CLOSING(1),   // It is replaced by M_ZK_REGION_CLOSING(HBASE-4739)
-040
-041  /**
-042   * RS_ZK_REGION_CLOSED<br>
-043   *
-044   * RS has finished closing a region.
-045   */
-046  RS_ZK_REGION_CLOSED   (2, ExecutorType.MASTER_CLOSE_REGION),
-047  /**
-048   * RS_ZK_REGION_OPENING<br>
-049   *
-050   * RS is in process of opening a region.
-051   */
-052  RS_ZK_REGION_OPENING  (3, null),
-053  /**
-054   * RS_ZK_REGION_OPENED<br>
-055   *
-056   * RS has finished opening a region.
-057   */
-058  RS_ZK_REGION_OPENED   (4, ExecutorType.MASTER_OPEN_REGION),
-059  /**
-060   * RS_ZK_REGION_SPLITTING<br>
-061   *
-062   * RS has started a region split after master says it's ok to move on.
-063   */
-064  RS_ZK_REGION_SPLITTING(5, null),
-065  /**
-066   * RS_ZK_REGION_SPLIT<br>
-067   *
-068   * RS split has completed and is notifying the master.
-069   */
-070  RS_ZK_REGION_SPLIT    (6, ExecutorType.MASTER_SERVER_OPERATIONS),
-071  /**
-072   * RS_ZK_REGION_FAILED_OPEN<br>
-073   *
-074   * RS failed to open a region.
-075   */
-076  RS_ZK_REGION_FAILED_OPEN  (7, ExecutorType.MASTER_CLOSE_REGION),
-077  /**
-078   * RS_ZK_REGION_MERGING<br>
-079   *
-080   * RS has started merging regions after master says it's ok to move on.
-081   */
-082  RS_ZK_REGION_MERGING  (8, null),
-083  /**
-084   * RS_ZK_REGION_MERGE<br>
-085   *
-086   * RS region merge has completed and is notifying the master.
-087   */
-088  RS_ZK_REGION_MERGED   (9, ExecutorType.MASTER_SERVER_OPERATIONS),
-089  /**
-090   * RS_ZK_REQUEST_REGION_SPLIT<br>
-091   *
-092   * RS has requested to split a region. This 

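The EventType comment above explains why each constant carries an explicit index instead of relying on ordinal(): new types can be inserted next to related ones without shifting persisted values. A minimal sketch of that idiom with an invented enum (the constants and codes here are not HBase's):

    public enum ToyEventType {
      RS_REGION_CLOSED(2),
      RS_REGION_OPENING(3),
      RS_REGION_OPENED(4);

      private final int code;

      ToyEventType(int code) {
        this.code = code;
      }

      public int getCode() {
        return code; // stable even if constants are reordered or new ones inserted
      }

      public static ToyEventType fromCode(int code) {
        for (ToyEventType t : values()) {
          if (t.code == code) {
            return t;
          }
        }
        throw new IllegalArgumentException("unknown code: " + code);
      }

      public static void main(String[] args) {
        System.out.println(fromCode(3)); // RS_REGION_OPENING
      }
    }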
    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html b/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
index 8c8c306..d6a8ea3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
@@ -26,7 +26,7 @@
 018
 019package org.apache.hadoop.hbase;
 020
-021import org.apache.hadoop.hbase.util.ArrayUtils;
+021import org.apache.commons.lang3.ArrayUtils;
 022import org.apache.hadoop.hbase.util.Bytes;
 023import org.apache.hadoop.hbase.util.ClassSize;
 024import org.apache.yetus.audience.InterfaceAudience;
@@ -69,12 +69,12 @@
 061
 062  public IndividualBytesFieldCell(byte[] row, byte[] family, byte[] qualifier,
 063      long timestamp, KeyValue.Type type, long seqId, byte[] value, byte[] tags) {
-064    this(row, 0, ArrayUtils.length(row),
-065        family, 0, ArrayUtils.length(family),
-066        qualifier, 0, ArrayUtils.length(qualifier),
+064    this(row, 0, ArrayUtils.getLength(row),
+065        family, 0, ArrayUtils.getLength(family),
+066        qualifier, 0, ArrayUtils.getLength(qualifier),
 067        timestamp, type, seqId,
-068        value, 0, ArrayUtils.length(value),
-069        tags, 0, ArrayUtils.length(tags));
+068        value, 0, ArrayUtils.getLength(value),
+069        tags, 0, ArrayUtils.getLength(tags));
 070  }
 071
 072  public IndividualBytesFieldCell(byte[] row, int rOffset, int rLength,
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 65a682f..80d98d6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = "3.0.0-SNAPSHOT";
-011  public static final String revision = "2e813f106f2647f8704378efbf3531051c5aea78";
+011  public static final String revision = "7f586995a8bd2888721e2d9210368494bf8fc957";
 012  public static final String user = "jenkins";
-013  public static final String date = "Tue Dec 12 14:42:35 UTC 2017";
+013  public static final String date = "Wed Dec 13 14:42:16 UTC 2017";
 014  public static final String url = "git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "5e16f126f518d8df265d748b4f2d28a5";
+015  public static final String srcChecksum = "22e0e51cc8c8efa2537ec94a00d6a348";
 016}
     
     
    
    
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html b/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
    index 5832fe5..736c955 100644
    --- a/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
    +++ b/devapidocs/org/apache/hadoop/hbase/constraint/ConstraintProcessor.html
@@ -271,7 +271,7 @@ implements RegionObserver
-postAppend, postBatchMutate, postBatchMutateIndispensably, postBulkLoadHFile, postCheckAndDelete, postCheckAndPut, postClose, postCloseRegionOperation, postCommitStoreFile, postCompact, postCompactSelection, postDelete, postExists, postFlush, postFlush, postGetOp, postIncrement, postInstantiateDeleteTracker, postMemStoreCompaction, postMutationBeforeWAL, postOpen, postPut, postReplayWALs, postScannerClose, postScannerNext, postScannerOpen, postStartRegionOperation, postStoreFileReaderOpen, postWALRestore, preAppend, preAppendAfterRowLock, preBatchMutate, preBulkLoadHFile, preCheckAndDelete, preCheckAndDeleteAfterRowLock, preCheckAndPut, preCheckAndPutAfterRowLock, preClose, preCommitStoreFile, preCompact, preCompactScannerOpen, preCompactSelection, preDelete, preExists, preFlush, preFlush, preFlushScannerOpen, preGetOp, preIncrement, preIncrementAfterRowLock, preMemStoreCompaction, preMemStoreCompactionCompact, preMemStoreCompactionCompactScannerOpen, preOpen, prePrepareTimeStampForDeleteVersion, preReplayWALs, preScannerClose, preScannerNext, preScannerOpen, preStoreFileReaderOpen, preStoreScannerOpen, preWALRestore
+postAppend, postBatchMutate, postBatchMutateIndispensably, postBulkLoadHFile, postCheckAndDelete, postCheckAndPut, postClose, postCloseRegionOperation, postCommitStoreFile, postCompact, postCompactSelection, postDelete, postExists, postFlush, postFlush, postGetOp, postIncrement, postInstantiateDeleteTracker, postMemStoreCompaction, postMutationBeforeWAL, postOpen, postPut, postReplayWALs, postScannerClose, postScannerNext, postScannerOpen, postStartRegionOperation, postStoreFileReaderOpen, postWALRestore, preAppend, preAppendAfterRowLock, preBatchMutate, preBulkLoadHFile, preCheckAndDelete, preCheckAndDeleteAfterRowLock, preCheckAndPut, preCheckAndPutAfterRowLock, preClose, preCommitStoreFile, preCompact, preCompactScannerOpen, preCompactSelection, preDelete, preExists, preFlush, preFlush, preFlushScannerOpen, preGetOp, preIncrement, preIncrementAfterRowLock, preMemStoreCompaction, preMemStoreCompactionCompact, preMemStoreCompactionCompactScannerOpen, preOpen, prePrepareTimeStampForDeleteVersion, preReplayWALs, preScannerClose, preScannerNext, preScannerOpen, preStoreFileReaderOpen, preStoreScannerOpen, preWALRestore
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.html
index 3cecc49..4a64995 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/BaseEnvironment.html
@@ -501,7 +501,8 @@ implements 
 Specified by:
 getConfiguration in interface CoprocessorEnvironment<C extends Coprocessor>
 Returns:
-the configuration
+a Read-only Configuration; throws UnsupportedOperationException if you try
+   to set a configuration.
     
     
     
    
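The updated getConfiguration() contract above says a coprocessor gets a read-only Configuration and any mutation throws UnsupportedOperationException. A hedged sketch of that contract with a stand-in wrapper (this is not HBase's actual ReadOnlyConfiguration class, which guards many more mutators):

    import org.apache.hadoop.conf.Configuration;

    public class ReadOnlyConfDemo {
      static class ReadOnlyConfiguration extends Configuration {
        ReadOnlyConfiguration(Configuration conf) {
          super(conf); // copy-construct so reads see the original values
        }
        @Override
        public void set(String name, String value) {
          throw new UnsupportedOperationException("Read-only Configuration");
        }
      }

      public static void main(String[] args) {
        Configuration conf = new ReadOnlyConfiguration(new Configuration());
        System.out.println(conf.get("hbase.zookeeper.quorum", "localhost")); // reads are fine
        conf.set("hbase.zookeeper.quorum", "other"); // throws UnsupportedOperationException
      }
    }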
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
index be05971..141225d 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.html
    @@ -134,13 +134,13 @@ extends 
     Connection
     

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
    --
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
index cfdb581..836f61d 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependencies.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Exemplar for hbase-client archetype  Project 
    Dependencies
     
@@ -924,216 +924,223 @@
 (columns: GroupId, ArtifactId, Version, Classifier, Type, License)
 jar  Apache License, Version 2.0
+org.apache.hbase  hbase-zookeeper  3.0.0-SNAPSHOT  tests  test-jar  Apache License, Version 2.0
 org.apache.htrace  htrace-core  3.2.0-incubating  -  jar  The Apache Software License, Version 2.0
 org.apache.zookeeper  zookeeper  3.4.6  tests  test-jar  -
 org.codehaus.jackson  jackson-core-asl  1.9.13  -  jar  The Apache Software License, Version 2.0
 org.codehaus.jackson  jackson-mapper-asl  1.9.13  -  jar  The Apache Software License, Version 2.0
 org.eclipse.jetty  jetty-http  9.3.19.v20170502  -  jar  Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
 org.eclipse.jetty  jetty-io  9.3.19.v20170502  -  jar  Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
 org.eclipse.jetty  jetty-security  9.3.19.v20170502  -  jar  Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
 org.eclipse.jetty  jetty-server  9.3.19.v20170502  -  jar  Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
 org.eclipse.jetty  jetty-servlet  9.3.19.v20170502  -  jar  Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
 org.eclipse.jetty  jetty-util  9.3.19.v20170502  -  jar  Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
 org.eclipse.jetty  jetty-util-ajax  9.3.19.v20170502  -  jar  Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
 org.eclipse.jetty  jetty-webapp  9.3.19.v20170502  -  jar  Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
 org.eclipse.jetty  jetty-xml  9.3.19.v20170502  -  jar  Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
 org.fusesource.leveldbjni  leveldbjni-all  1.8  -  jar  The BSD 3-Clause License
 org.glassfish  javax.el  3.0.1-b08  -  jar  CDDL + GPLv2 with classpath exception
 org.glassfish.hk2  hk2-api  2.5.0-b32  -  jar  CDDL + GPLv2 with classpath exception
 org.glassfish.hk2  hk2-locator  2.5.0-b32  -  jar  CDDL + GPLv2 with classpath exception
 org.glassfish.hk2  hk2-utils  2.5.0-b32  -  jar  CDDL + GPLv2 with classpath exception

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-archetypes/hbase-client-project/license.html
    --
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/license.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/license.html
index af8b9cf..8344654 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/license.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/license.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Exemplar for hbase-client archetype  Project 
    Licenses
     
    @@ -326,7 +326,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
-  Last Published: 2017-12-02
+  Last Published: 2017-12-03
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-archetypes/hbase-client-project/mail-lists.html
    --
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/mail-lists.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/mail-lists.html
index afe6a90..ec3a91a 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/mail-lists.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/mail-lists.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Exemplar for hbase-client archetype  Project 
    Mailing Lists
     
    @@ -176,7 +176,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
-  Last Published: 2017-12-02
+  Last Published: 2017-12-03
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugin-management.html
    --
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugin-management.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugin-management.html
index 0191301..bea329e 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugin-management.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugin-management.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Exemplar for hbase-client archetype  Project 
    Plugin Management
     
    @@ -271,7 +271,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
-  Last Published: 2017-12-02
+  Last Published: 2017-12-03
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugins.html
    --
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugins.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugins.html
index 967ed67..374e080 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugins.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/plugins.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Exemplar for hbase-client archetype  Project 
    Plugins
     
    @@ -226,7 +226,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
-  Last Published: 2017-12-02
+  Last Published: 2017-12-03
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-info.html
    --
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-info.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-info.html
index 7f55382..19b1cd2 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-info.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-info.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Exemplar for hbase-client archetype  Project 
    Information
     
    @@ -167,7 +167,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
       

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
index 25e368d..d0f781f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
@@ -25,798 +25,798 @@
 017 */
 018package org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import com.google.protobuf.CodedOutputStream;
-034
-035import org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import org.apache.commons.logging.LogFactory;
-067import org.apache.hadoop.conf.Configuration;
-068import org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import org.apache.hadoop.fs.UnresolvedLinkException;
-075import 
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
    index d438f22..7c59e27 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.MergeTableRegionsFuture.html
    @@ -1290,8 +1290,8 @@
 1282      CompactType compactType) throws IOException {
 1283    switch (compactType) {
 1284      case MOB:
-1285        compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), major,
-1286          columnFamily);
+1285        compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName),
+1286          major, columnFamily);
 1287        break;
 1288      case NORMAL:
 1289        checkTableExists(tableName);
    @@ -3248,7 +3248,7 @@
 3240      new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
 3241        @Override
 3242        public AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243          RegionInfo info = getMobRegionInfo(tableName);
+3243          RegionInfo info = RegionInfo.createMobRegionInfo(tableName);
 3244          GetRegionInfoRequest request =
 3245            RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246          GetRegionInfoResponse response = masterAdmin.getRegionInfo(rpcController, request);
    @@ -3312,7 +3312,7 @@
     3304}
     3305break;
     3306  default:
-3307        throw new IllegalArgumentException("Unknowne compactType: " + compactType);
+3307        throw new IllegalArgumentException("Unknown compactType: " + compactType);
 3308    }
 3309    if (state != null) {
 3310      return ProtobufUtil.createCompactionState(state);
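
Both hunks above move HBaseAdmin off its private getMobRegionInfo helper and onto
RegionInfo.createMobRegionInfo. For readers following the change, here is a minimal
sketch of the construction involved, lifted from the helper that the next hunk
deletes (RegionInfoBuilder and Bytes imports assumed; the factory method is assumed
to produce the same pseudo-region):

    // A table's MOB data lives in a synthetic region: same table name,
    // start key ".mob", fixed region id 0. Compaction RPCs issued with
    // CompactType.MOB are addressed to this pseudo-region on the master.
    RegionInfo mobRegion = RegionInfoBuilder.newBuilder(tableName)
        .setStartKey(Bytes.toBytes(".mob"))
        .setRegionId(0)
        .build();

Centralizing the construction in RegionInfo means every caller builds the MOB
pseudo-region identically instead of keeping a private copy of the recipe.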
    @@ -3847,325 +3847,320 @@
     3839});
     3840  }
     3841
-3842  private RegionInfo getMobRegionInfo(TableName tableName) {
-3843    return RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844        .build();
-3845  }
-3846
-3847  private RpcControllerFactory getRpcControllerFactory() {
-3848    return this.rpcControllerFactory;
-3849  }
    -3850
    -3851  @Override
-3852  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853      throws IOException {
-3854    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3855      @Override
-3856      protected Void rpcCall() throws Exception {
-3857        master.addReplicationPeer(getRpcController(),
-3858          RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled));
-3859        return null;
-3860      }
-3861    });
-3862  }
    -3863
    -3864  @Override
-3865  public void removeReplicationPeer(String peerId) throws IOException {
-3866    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3867      @Override
-3868      protected Void rpcCall() throws Exception {
-3869        master.removeReplicationPeer(getRpcController(),
-3870          RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871        return null;
-3872      }
-3873    });
-3874  }
    -3875
    -3876  @Override
-3877  public void enableReplicationPeer(final String peerId) throws IOException {
-3878    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3879      @Override
-3880      protected Void rpcCall() throws Exception {
-3881        master.enableReplicationPeer(getRpcController(),
-3882          RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883        return null;
-3884      }
-3885    });
-3886  }
    -3887
    -3888  @Override
-3889  public void disableReplicationPeer(final String peerId) throws IOException {
-3890    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3891      @Override
-3892      protected Void rpcCall() throws Exception {
-3893        master.disableReplicationPeer(getRpcController(),
-3894          RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895        return null;
-3896      }
-3897    });
-3898  }
    -3899
    -3900  @Override
-3901  public ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException {
-3902    return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(),
-3903        getRpcControllerFactory()) {
-3904      @Override
-3905      protected ReplicationPeerConfig rpcCall() throws Exception {
-3906        GetReplicationPeerConfigResponse response = master.getReplicationPeerConfig(
-3907          getRpcController(), 
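
The run of removed methods above is the Admin-side replication-peer API; each one
wraps a single master RPC in a MasterCallable. A hedged usage sketch against the
public Admin interface follows (the peer id and cluster key are invented for
illustration; admin is assumed to be an org.apache.hadoop.hbase.client.Admin handle):

    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
    peerConfig.setClusterKey("zk1.example.com:2181:/hbase"); // hypothetical peer
    admin.addReplicationPeer("peer_1", peerConfig, true);    // add and enable
    admin.disableReplicationPeer("peer_1");
    admin.enableReplicationPeer("peer_1");
    ReplicationPeerConfig fetched = admin.getReplicationPeerConfig("peer_1");
    admin.removeReplicationPeer("peer_1");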
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
    index 29ea7b3..6ed75c9 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
    @@ -1313,7093 +1313,7082 @@
     1305
     1306  @Override
     1307  public boolean isSplittable() {
-1308    boolean result = isAvailable() && !hasReferences();
-1309    LOG.info("ASKED IF SPLITTABLE " + result + " " + getRegionInfo().getShortNameToLog(),
-1310      new Throwable("LOGGING: REMOVE"));
-1311    // REMOVE BELOW
-1312    LOG.info("DEBUG LIST ALL FILES");
-1313    for (HStore store : this.stores.values()) {
-1314      LOG.info("store " + store.getColumnFamilyName());
-1315      for (HStoreFile sf : store.getStorefiles()) {
-1316        LOG.info(sf.toStringDetailed());
-1317      }
-1318    }
-1319    return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324    if (!isAvailable()) {
-1325      LOG.debug("Region " + this
-1326          + " is not mergeable because it is closing or closed");
-1327      return false;
-1328    }
-1329    if (hasReferences()) {
-1330      LOG.debug("Region " + this
-1331          + " is not mergeable because it has references");
-1332      return false;
-1333    }
-1334
-1335    return true;
+1308    return isAvailable() && !hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313    if (!isAvailable()) {
+1314      LOG.debug("Region " + this
+1315          + " is not mergeable because it is closing or closed");
+1316      return false;
+1317    }
+1318    if (hasReferences()) {
+1319      LOG.debug("Region " + this
+1320          + " is not mergeable because it has references");
+1321      return false;
+1322    }
+1323
+1324    return true;
+1325  }
    +1326
+1327  public boolean areWritesEnabled() {
+1328    synchronized (this.writestate) {
+1329      return this.writestate.writesEnabled;
+1330    }
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl getMVCC() {
+1335    return mvcc;
     1336  }
     1337
-1338  public boolean areWritesEnabled() {
-1339    synchronized (this.writestate) {
-1340      return this.writestate.writesEnabled;
-1341    }
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl getMVCC() {
-1346    return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351    return maxFlushedSeqId;
    +1338  @Override
    +1339  public long getMaxFlushedSeqId() {
+1340    return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long getReadPoint(IsolationLevel isolationLevel) {
+1347    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348      // This scan can read even uncommitted transactions
+1349      return Long.MAX_VALUE;
+1350    }
+1351    return mvcc.getReadPoint();
     1352  }
     1353
-1354  /**
-1355   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long getReadPoint(IsolationLevel isolationLevel) {
-1358    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359      // This scan can read even uncommitted transactions
-1360      return Long.MAX_VALUE;
-1361    }
-1362    return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean isLoadingCfsOnDemandDefault() {
-1366    return this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage files that the HRegion's component
-1377   * HStores make use of.  It's a list of all StoreFile objects. Returns empty
-1378   * vector if already closed and null if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException Thrown when replay of wal is required
-1382   * because a Snapshot was not properly persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386    return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new Object();
-1390
-1391  /** Conf key for the periodic flush interval */
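
The getReadPoint hunk above encodes the isolation-level contract: a READ_UNCOMMITTED
scan bypasses the MVCC read point entirely, while anything else (including null) sees
only committed edits. A brief client-side illustration using the standard Scan API
(table and scan setup omitted):

    // Request READ_UNCOMMITTED; server-side this maps to the
    // Long.MAX_VALUE read point in getReadPoint(IsolationLevel).
    Scan scan = new Scan();
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    // Leaving the isolation level unset keeps the default
    // committed-only read point from mvcc.getReadPoint().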
    

    [09/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
    index d98042d..d549086 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
    @@ -42,2537 +42,2536 @@
     034
     035import org.apache.commons.logging.Log;
 036import org.apache.commons.logging.LogFactory;
-037import org.apache.yetus.audience.InterfaceAudience;
+037import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 038import org.apache.hadoop.hbase.util.ByteBufferUtils;
 039import org.apache.hadoop.hbase.util.Bytes;
 040import org.apache.hadoop.hbase.util.ClassSize;
 041import org.apache.hadoop.io.RawComparator;
-042
-043import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-044/**
-045 * An HBase Key/Value. This is the fundamental HBase Type.
-046 * <p>
-047 * HBase applications and users should use the Cell interface and avoid directly using KeyValue and
-048 * member functions not defined in Cell.
-049 * <p>
-050 * If being used client-side, the primary methods to access individual fields are
-051 * {@link #getRowArray()}, {@link #getFamilyArray()}, {@link #getQualifierArray()},
-052 * {@link #getTimestamp()}, and {@link #getValueArray()}. These methods allocate new byte arrays
-053 * and return copies. Avoid their use server-side.
-054 * <p>
-055 * Instances of this class are immutable. They do not implement Comparable but Comparators are
-056 * provided. Comparators change with context, whether user table or a catalog table comparison. Its
-057 * critical you use the appropriate comparator. There are Comparators for normal HFiles, Meta's
-058 * Hfiles, and bloom filter keys.
-059 * <p>
-060 * KeyValue wraps a byte array and takes offsets and lengths into passed array at where to start
-061 * interpreting the content as KeyValue. The KeyValue format inside a byte array is:
-062 * <code>&lt;keylength&gt; &lt;valuelength&gt; &lt;key&gt; &lt;value&gt;</code> Key is further
-063 * decomposed as: <code>&lt;rowlength&gt; &lt;row&gt; &lt;columnfamilylength&gt;
-064 * &lt;columnfamily&gt; &lt;columnqualifier&gt;
-065 * &lt;timestamp&gt; &lt;keytype&gt;</code> The <code>rowlength</code> maximum is
-066 * <code>Short.MAX_SIZE</code>, column family length maximum is <code>Byte.MAX_SIZE</code>, and
-067 * column qualifier + key length must be &lt; <code>Integer.MAX_SIZE</code>. The column does not
-068 * contain the family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER}<br>
-069 * KeyValue can optionally contain Tags. When it contains tags, it is added in the byte array after
-070 * the value part. The format for this part is: <code>&lt;tagslength&gt;&lt;tagsbytes&gt;</code>.
-071 * <code>tagslength</code> maximum is <code>Short.MAX_SIZE</code>. The <code>tagsbytes</code>
-072 * contain one or more tags where as each tag is of the form
-073 * <code>&lt;taglength&gt;&lt;tagtype&gt;&lt;tagbytes&gt;</code>. <code>tagtype</code> is one byte
-074 * and <code>taglength</code> maximum is <code>Short.MAX_SIZE</code> and it includes 1 byte type
-075 * length and actual tag bytes length.
-076 */
-077@InterfaceAudience.Private
-078public class KeyValue implements ExtendedCell {
-079  private static final ArrayList<Tag> EMPTY_ARRAY_LIST = new ArrayList<>();
-080
-081  private static final Log LOG = LogFactory.getLog(KeyValue.class);
-082
-083  public static final long FIXED_OVERHEAD = ClassSize.OBJECT + // the KeyValue object itself
-084      ClassSize.REFERENCE + // pointer to "bytes"
-085      2 * Bytes.SIZEOF_INT + // offset, length
-086      Bytes.SIZEOF_LONG;// memstoreTS
-087
-088  /**
-089   * Colon character in UTF-8
-090   */
-091  public static final char COLUMN_FAMILY_DELIMITER = ':';
-092
-093  public static final byte[] COLUMN_FAMILY_DELIM_ARRAY =
-094    new byte[]{COLUMN_FAMILY_DELIMITER};
-095
-096  /**
-097   * Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion
-098   * of KeyValue only.
-099   * @deprecated Use {@link CellComparator#getInstance()} instead. Deprecated for hbase 2.0, remove for hbase 3.0.
-100   */
-101  @Deprecated
-102  public static final KVComparator COMPARATOR = new KVComparator();
-103  /**
-104   * A {@link KVComparator} for <code>hbase:meta</code> catalog table
-105   * {@link KeyValue}s.
-106   * @deprecated Use {@link CellComparatorImpl#META_COMPARATOR} instead. Deprecated for hbase 2.0, remove for hbase 3.0.
-107   */
-108  @Deprecated
-109  public static final KVComparator META_COMPARATOR = new MetaComparator();
-110
-111  /** Size of the key length field in bytes*/
-112  public static final int KEY_LENGTH_SIZE = Bytes.SIZEOF_INT;
-113
-114  /** Size of the key type field in bytes */
    -115  public 
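
The class javadoc above fully specifies the serialized layout, so every field offset
can be recovered with simple arithmetic. A minimal sketch follows, not HBase's own
parser (KeyValue and PrivateCellUtil do this internally); buf and offset are
hypothetical inputs and optional tags are ignored:

    import org.apache.hadoop.hbase.util.Bytes;

    static void describeKeyValue(byte[] buf, int offset) {
      int keyLen   = Bytes.toInt(buf, offset);               // <keylength>
      int valueLen = Bytes.toInt(buf, offset + 4);           // <valuelength>
      int keyOff   = offset + 8;                             // <key> starts here
      short rowLen = Bytes.toShort(buf, keyOff);             // <rowlength>
      int famOff   = keyOff + 2 + rowLen;                    // <columnfamilylength>
      byte famLen  = buf[famOff];
      // The qualifier occupies whatever remains of the key after the
      // fixed-size timestamp (8 bytes) and key type (1 byte) at the end.
      int qualLen  = keyLen - 2 - rowLen - 1 - famLen - 9;
      long ts      = Bytes.toLong(buf, keyOff + keyLen - 9); // <timestamp>
      byte type    = buf[keyOff + keyLen - 1];               // <keytype>
      System.out.printf("row=%s family=%s qualifierLen=%d ts=%d type=%d valueLen=%d%n",
          Bytes.toString(buf, keyOff + 2, rowLen),
          Bytes.toString(buf, famOff + 1, famLen),
          qualLen, ts, type, valueLen);
    }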
