[03/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
index f6f6104..845f1d3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
@@ -1688,9 +1688,9 @@
 1680   *  Meta recovery WAL directory inside WAL directory path.
 1681   */
 1682  private void removeHBCKMetaRecoveryWALDir(String walFactoryId) throws IOException {
-1683    Path rootdir = FSUtils.getRootDir(getConf());
-1684    Path walLogDir = new Path(new Path(rootdir, HConstants.HREGION_LOGDIR_NAME), walFactoryId);
-1685    FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
+1683    Path walLogDir = new Path(new Path(CommonFSUtils.getWALRootDir(getConf()),
+1684      HConstants.HREGION_LOGDIR_NAME), walFactoryId);
+1685    FileSystem fs = CommonFSUtils.getWALFileSystem(getConf());
 1686    FileStatus[] walFiles = FSUtils.listStatus(fs, walLogDir, null);
 1687    if (walFiles == null || walFiles.length == 0) {
 1688      LOG.info("HBCK meta recovery WAL directory is empty, removing it now.");
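For context, the patched method reassembled from the hunk above (a sketch only; everything past line 1688 is elided in this partial diff, and the surrounding class members are assumed):

    private void removeHBCKMetaRecoveryWALDir(String walFactoryId) throws IOException {
      // Resolve the recovery WAL dir against the WAL root, which can live on a
      // different filesystem than the HBase root directory.
      Path walLogDir = new Path(new Path(CommonFSUtils.getWALRootDir(getConf()),
          HConstants.HREGION_LOGDIR_NAME), walFactoryId);
      FileSystem fs = CommonFSUtils.getWALFileSystem(getConf());
      FileStatus[] walFiles = FSUtils.listStatus(fs, walLogDir, null);
      if (walFiles == null || walFiles.length == 0) {
        LOG.info("HBCK meta recovery WAL directory is empty, removing it now.");
        // ... (directory removal elided in this partial diff)
      }
    }

The point of the change: the old code resolved the directory under FSUtils.getRootDir() and used the root filesystem, which goes wrong when hbase.wal.dir points somewhere other than hbase.rootdir; the new code asks CommonFSUtils for the WAL root directory and WAL filesystem instead.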

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/downloads.html
--
diff --git a/downloads.html b/downloads.html
index 8616034..ea697d2 100644
--- a/downloads.html
+++ b/downloads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
<title>Apache HBase – Apache HBase Downloads</title>
 
@@ -329,7 +329,7 @@ under the License. -->
 
   
 
-2.1.1
+2.1.2
   
   
 
@@ -337,21 +337,21 @@ under the License. -->
   
   
 
-<a href="https://apache.org/dist/hbase/2.1.1/compatibility_report_2.1.0_vs_2.1.1.html">2.1.0 vs 2.1.1</a>
+<a href="https://apache.org/dist/hbase/2.1.2/compatibility_report_2.1.1vs2.1.2.html">2.1.1 vs 2.1.2</a>
  
  

-<a href="https://apache.org/dist/hbase/2.1.1/CHANGES.md">Changes</a>
+<a href="https://apache.org/dist/hbase/2.1.2/CHANGES.md">Changes</a>
  
  

-<a href="https://apache.org/dist/hbase/2.1.1/RELEASENOTES.md">Release Notes</a>
+<a href="https://apache.org/dist/hbase/2.1.2/RELEASENOTES.md">Release Notes</a>
  
  

-<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.1/hbase-2.1.1-src.tar.gz">src</a> (<a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-src.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-src.tar.gz.asc">asc</a>)
-<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.1/hbase-2.1.1-bin.tar.gz">bin</a> (<a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-bin.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-bin.tar.gz.asc">asc</a>)
-<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.1/hbase-2.1.1-client-bin.tar.gz">client-bin</a> (<a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-client-bin.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.1/hbase-2.1.1-client-bin.tar.gz.asc">asc</a>)
+<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.2/hbase-2.1.2-src.tar.gz">src</a> (<a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-src.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-src.tar.gz.asc">asc</a>)
+<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.2/hbase-2.1.2-bin.tar.gz">bin</a> (<a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-bin.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-bin.tar.gz.asc">asc</a>)
+<a href="https://www.apache.org/dyn/closer.lua/hbase/2.1.2/hbase-2.1.2-client-bin.tar.gz">client-bin</a> (<a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-client-bin.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.1.2/hbase-2.1.2-client-bin.tar.gz.asc">asc</a>)
   
 
 
@@ -489,7 +489,7 @@ under the License. -->
 <a href="https://www.apache.org/">The Apache Software Foundation</a>.
 All rights reserved.  

-  Last Published: 2019-01-08
+  Last Published: 2019-01-09
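Each artifact above ships with a .sha512 checksum and an .asc signature. As a hedged, JDK-only sketch (the file name is a placeholder; the ASF release docs describe the full GPG verification), a downloaded tarball can be checked against its .sha512 file like this:

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.security.MessageDigest;

    public class Sha512Check {
      public static void main(String[] args) throws Exception {
        // args[0]: path to e.g. hbase-2.1.2-bin.tar.gz
        MessageDigest md = MessageDigest.getInstance("SHA-512");
        try (InputStream in = Files.newInputStream(Paths.get(args[0]))) {
          byte[] buf = new byte[8192];
          for (int n; (n = in.read(buf)) > 0; ) {
            md.update(buf, 0, n);  // stream the tarball through the digest
          }
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md.digest()) {
          hex.append(String.format("%02x", b));
        }
        // Compare this value with the contents of the matching .sha512 file.
        System.out.println(hex);
      }
    }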
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 290f089..975b832 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
<title>Apache HBase – Export Control</title>
@@ -341,7 +341,7 @@ for more details.
 <a href="https://www.apache.org/">The Apache Software Foundation</a>.
 All rights reserved.  

-  Last Published: 2019-01-08
+  Last Published: 2019-01-09
 
 
 


[03/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
index 9e61fc1..8dd6360 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessController.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":9,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109
 
":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10};
+var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":9,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109
 
":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -1007,25 +1007,31 @@ implements 
 void
+preIsRpcThrottleEnabled(ObserverContext<MasterCoprocessorEnvironment> ctx)
+Called before getting if is rpc throttle enabled.
+
+
+
+void
 preListDecommissionedRegionServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
 Called before list decommissioned region servers.
 
 
-
+
 void
 preListReplicationPeers(ObserverContext<MasterCoprocessorEnvironment> ctx,
 String regex)
 Called before list replication peers.
 
 
-
+
 void
 preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
 SnapshotDescription snapshot)
 Called before listSnapshots request has been processed.
 
 
-
+
 void
 preLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
 TableName tableName,
@@ -1033,14 +1039,14 @@ implements Called before heartbeat to a lock.
 
 
-
+
 void
 preMergeRegions(ObserverContext<MasterCoprocessorEnvironment> ctx,
 RegionInfo[] regionsToMerge)
 Called before merge regions request.
 
 
-
+
 void
 preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
 NamespaceDescriptor currentNsDesc,
@@ -1048,7 +1054,7 @@ implements Called prior to modifying a namespace's properties.
 
 
-
+
 TableDescriptor
 preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
 TableName tableName,
@@ -1057,7 +1063,7 @@ implements Called
@@ -1057,7 +1063,7 @@ implements Called 

[03/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
index 736388b..197b99d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
@@ -26,3624 +26,3599 @@
 018package org.apache.hadoop.hbase.client;
 019
 020import static 
org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-021
-022import com.google.protobuf.Message;
-023import com.google.protobuf.RpcChannel;
-024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.Arrays;
-027import java.util.Collections;
-028import java.util.EnumSet;
-029import java.util.HashMap;
-030import java.util.List;
-031import java.util.Map;
-032import java.util.Optional;
-033import java.util.Set;
-034import 
java.util.concurrent.CompletableFuture;
-035import 
java.util.concurrent.ConcurrentHashMap;
-036import java.util.concurrent.TimeUnit;
-037import 
java.util.concurrent.atomic.AtomicReference;
-038import java.util.function.BiConsumer;
-039import java.util.function.Function;
-040import java.util.function.Supplier;
-041import java.util.regex.Pattern;
-042import java.util.stream.Collectors;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import 
org.apache.hadoop.conf.Configuration;
-046import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-047import 
org.apache.hadoop.hbase.CacheEvictionStats;
-048import 
org.apache.hadoop.hbase.CacheEvictionStatsAggregator;
-049import 
org.apache.hadoop.hbase.ClusterMetrics;
-050import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-051import 
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-052import 
org.apache.hadoop.hbase.HConstants;
-053import 
org.apache.hadoop.hbase.HRegionLocation;
-054import 
org.apache.hadoop.hbase.MetaTableAccessor;
-055import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-056import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-057import 
org.apache.hadoop.hbase.RegionLocations;
-058import 
org.apache.hadoop.hbase.RegionMetrics;
-059import 
org.apache.hadoop.hbase.RegionMetricsBuilder;
-060import 
org.apache.hadoop.hbase.ServerName;
-061import 
org.apache.hadoop.hbase.TableExistsException;
-062import 
org.apache.hadoop.hbase.TableName;
-063import 
org.apache.hadoop.hbase.TableNotDisabledException;
-064import 
org.apache.hadoop.hbase.TableNotEnabledException;
-065import 
org.apache.hadoop.hbase.TableNotFoundException;
-066import 
org.apache.hadoop.hbase.UnknownRegionException;
-067import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-070import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-071import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-072import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-073import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-074import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import 
org.apache.hadoop.hbase.replication.ReplicationException;
-080import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import 
org.apache.hadoop.hbase.replication.SyncReplicationState;
-083import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-084import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-085import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-086import 
org.apache.hadoop.hbase.util.Bytes;
-087import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-088import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-089import 
org.apache.yetus.audience.InterfaceAudience;
-090import org.slf4j.Logger;
-091import org.slf4j.LoggerFactory;
-092
-093import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-094import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-095import 
org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
-096import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-097import 

[03/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
index f71c161..a8cb53c 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
@@ -147,9 +147,9 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.wal.TestWALSplit.Corruptions
 org.apache.hadoop.hbase.wal.IOTestProvider.AllowedOperations
 org.apache.hadoop.hbase.wal.FaultyFSLog.FailureType
+org.apache.hadoop.hbase.wal.TestWALSplit.Corruptions
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/testdevapidocs/overview-tree.html
--
diff --git a/testdevapidocs/overview-tree.html 
b/testdevapidocs/overview-tree.html
index fb914b9..bf2fa0d 100644
--- a/testdevapidocs/overview-tree.html
+++ b/testdevapidocs/overview-tree.html
@@ -1176,6 +1176,15 @@
 org.apache.hadoop.hbase.util.HBaseHomePath
 org.apache.hadoop.hbase.security.HBaseKerberosUtils
 org.apache.hadoop.hbase.rest.HBaseRESTTestingUtility
+org.apache.hadoop.hbase.thrift.HBaseServiceHandler
+
+org.apache.hadoop.hbase.thrift.ThriftHBaseServiceHandler 
(implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface)
+
+org.apache.hadoop.hbase.thrift.TestThriftServer.MySlowHBaseHandler (implements 
org.apache.hadoop.hbase.thrift.generated.Hbase.Iface)
+
+
+
+
 org.apache.hadoop.hbase.HBaseTestingUtility.PortAllocator
 org.apache.hadoop.hbase.HBaseTestingUtility.SeenRowTracker
 org.apache.hadoop.hbase.util.hbck.HbckTestingUtil
@@ -3426,6 +3435,7 @@
 org.apache.hadoop.hbase.filter.TestRegexComparator.TestCase
 org.apache.hadoop.hbase.master.assignment.TestRegionAssignedToMultipleRegionServers
 org.apache.hadoop.hbase.master.assignment.TestRegionBypass
+org.apache.hadoop.hbase.coprocessor.TestRegionCoprocessorHost
 org.apache.hadoop.hbase.regionserver.TestRegionFavoredNodes
 org.apache.hadoop.hbase.regionserver.TestRegionIncrement
 org.apache.hadoop.hbase.regionserver.TestRegionInfoBuilder
@@ -3936,11 +3946,16 @@
 org.apache.hadoop.hbase.thrift2.TestThriftHBaseServiceHandlerWithReadOnly
 org.apache.hadoop.hbase.thrift.TestThriftHttpServer
 
+org.apache.hadoop.hbase.thrift2.TestThrift2HttpServer
 org.apache.hadoop.hbase.thrift.TestThriftSpnegoHttpServer
 
 
 org.apache.hadoop.hbase.thrift.TestThriftServer
-org.apache.hadoop.hbase.thrift.TestThriftServerCmdLine
+org.apache.hadoop.hbase.thrift.TestThriftServerCmdLine
+
+org.apache.hadoop.hbase.thrift2.TestThrift2ServerCmdLine
+
+
 org.apache.hadoop.hbase.io.hadoopbackport.TestThrottledInputStream
 org.apache.hadoop.hbase.quotas.TestThrottleSettings
 org.apache.hadoop.hbase.TestTimeout
@@ -4134,11 +4149,6 @@
 org.apache.hadoop.hbase.zookeeper.TestZKNodeTracker.WaitToGetDataThread
 
 
-org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler 
(implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface)
-
-org.apache.hadoop.hbase.thrift.TestThriftServer.MySlowHBaseHandler (implements 
org.apache.hadoop.hbase.thrift.generated.Hbase.Iface)
-
-
 java.lang.Throwable (implements java.io.Serializable)
 
 java.lang.Exception

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/testdevapidocs/serialized-form.html
--
diff --git a/testdevapidocs/serialized-form.html 
b/testdevapidocs/serialized-form.html
index da45683..ff3fb46 100644
--- a/testdevapidocs/serialized-form.html
+++ b/testdevapidocs/serialized-form.html
@@ -2307,41 +2307,41 @@
 0L
 
 
-
+
 
 
-Class org.apache.hadoop.hbase.thrift.ThriftHttpServlet extends 
org.apache.thrift.server.TServlet implements Serializable
-
-serialVersionUID:
-1L
-
+Class 
org.apache.hadoop.hbase.thrift.ThriftHBaseServiceHandler.IOErrorWithCause 
extends org.apache.hadoop.hbase.thrift.generated.IOError implements 
Serializable
 
 
 Serialized Fields
 
-
-doAsEnabled
-boolean doAsEnabled
-
 
-securityEnabled

[03/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
index 033f804..a314549 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RackLocalityCostFunction.html
@@ -369,6 +369,6 @@ extends
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
index 797c5a6..18c8df9 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
@@ -287,6 +287,6 @@ extends
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
index 509725e..737ba38 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
@@ -377,6 +377,6 @@ extends
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
index 826bbae..ac8e713 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
@@ -366,6 +366,6 @@ extends
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
index 61da737..28f42d3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html
@@ -360,6 +360,6 @@ extends
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html

[03/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 0f5a095..50bf692 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -78,8712 +78,8714 @@
 070import 
java.util.concurrent.locks.ReadWriteLock;
 071import 
java.util.concurrent.locks.ReentrantReadWriteLock;
 072import java.util.function.Function;
-073import 
org.apache.hadoop.conf.Configuration;
-074import org.apache.hadoop.fs.FileStatus;
-075import org.apache.hadoop.fs.FileSystem;
-076import 
org.apache.hadoop.fs.LocatedFileStatus;
-077import org.apache.hadoop.fs.Path;
-078import org.apache.hadoop.hbase.Cell;
-079import 
org.apache.hadoop.hbase.CellBuilderType;
-080import 
org.apache.hadoop.hbase.CellComparator;
-081import 
org.apache.hadoop.hbase.CellComparatorImpl;
-082import 
org.apache.hadoop.hbase.CellScanner;
-083import 
org.apache.hadoop.hbase.CellUtil;
-084import 
org.apache.hadoop.hbase.CompareOperator;
-085import 
org.apache.hadoop.hbase.CompoundConfiguration;
-086import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-087import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-088import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-089import 
org.apache.hadoop.hbase.HConstants;
-090import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-091import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-092import 
org.apache.hadoop.hbase.KeyValue;
-093import 
org.apache.hadoop.hbase.KeyValueUtil;
-094import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-095import 
org.apache.hadoop.hbase.NotServingRegionException;
-096import 
org.apache.hadoop.hbase.PrivateCellUtil;
-097import 
org.apache.hadoop.hbase.RegionTooBusyException;
-098import org.apache.hadoop.hbase.Tag;
-099import org.apache.hadoop.hbase.TagUtil;
-100import 
org.apache.hadoop.hbase.UnknownScannerException;
-101import 
org.apache.hadoop.hbase.client.Append;
-102import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-103import 
org.apache.hadoop.hbase.client.CompactionState;
-104import 
org.apache.hadoop.hbase.client.Delete;
-105import 
org.apache.hadoop.hbase.client.Durability;
-106import 
org.apache.hadoop.hbase.client.Get;
-107import 
org.apache.hadoop.hbase.client.Increment;
-108import 
org.apache.hadoop.hbase.client.IsolationLevel;
-109import 
org.apache.hadoop.hbase.client.Mutation;
-110import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-111import 
org.apache.hadoop.hbase.client.Put;
-112import 
org.apache.hadoop.hbase.client.RegionInfo;
-113import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-114import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-115import 
org.apache.hadoop.hbase.client.Result;
-116import 
org.apache.hadoop.hbase.client.RowMutations;
-117import 
org.apache.hadoop.hbase.client.Scan;
-118import 
org.apache.hadoop.hbase.client.TableDescriptor;
-119import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-120import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-121import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-122import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-123import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-124import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-125import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-126import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-127import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-128import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-129import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-130import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-131import 
org.apache.hadoop.hbase.io.HFileLink;
-132import 
org.apache.hadoop.hbase.io.HeapSize;
-133import 
org.apache.hadoop.hbase.io.TimeRange;
-134import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.mob.MobFileCache;
-141import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-142import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-143import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-144import 
org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-145import 

[03/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
index 93aa689..8634180 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.html
@@ -550,301 +550,302 @@
 542   *  the cache key.
 543   */
 544  private void doCacheOnWrite(long 
offset) {
-545HFileBlock cacheFormatBlock = 
blockWriter.getBlockForCaching(cacheConf);
-546
cacheConf.getBlockCache().cacheBlock(
-547new BlockCacheKey(name, offset, 
true, cacheFormatBlock.getBlockType()),
-548cacheFormatBlock);
-549  }
-550
-551  /**
-552   * Ready a new block for writing.
-553   *
-554   * @throws IOException
-555   */
-556  protected void newBlock() throws 
IOException {
-557// This is where the next block 
begins.
-558
blockWriter.startWriting(BlockType.DATA);
-559firstCellInBlock = null;
-560if (lastCell != null) {
-561  lastCellOfPreviousBlock = 
lastCell;
-562}
-563  }
-564
-565  /**
-566   * Add a meta block to the end of the 
file. Call before close(). Metadata
-567   * blocks are expensive. Fill one with 
a bunch of serialized data rather than
-568   * do a metadata block per metadata 
instance. If metadata is small, consider
-569   * adding to file info using {@link 
#appendFileInfo(byte[], byte[])}
-570   *
-571   * @param metaBlockName
-572   *  name of the block
-573   * @param content
-574   *  will call readFields to get 
data later (DO NOT REUSE)
-575   */
-576  @Override
-577  public void appendMetaBlock(String 
metaBlockName, Writable content) {
-578byte[] key = 
Bytes.toBytes(metaBlockName);
-579int i;
-580for (i = 0; i < metaNames.size(); ++i) {
-581  // stop when the current key is 
greater than our own
-582  byte[] cur = metaNames.get(i);
-583  if (Bytes.BYTES_RAWCOMPARATOR.compare(cur, 0, cur.length, key, 0,
-584      key.length) > 0) {
-585break;
-586  }
-587}
-588metaNames.add(i, key);
-589metaData.add(i, content);
-590  }
-591
-592  @Override
-593  public void close() throws IOException 
{
-594if (outputStream == null) {
-595  return;
-596}
-597// Save data block encoder metadata 
in the file info.
-598blockEncoder.saveMetadata(this);
-599// Write out the end of the data 
blocks, then write meta data blocks.
-600// followed by fileinfo, data block 
index and meta block index.
-601
-602finishBlock();
-603writeInlineBlocks(true);
-604
-605FixedFileTrailer trailer = new 
FixedFileTrailer(getMajorVersion(), getMinorVersion());
-606
-607// Write out the metadata blocks if 
any.
-608if (!metaNames.isEmpty()) {
-609  for (int i = 0; i < metaNames.size(); ++i) {
-610// store the beginning offset
-611long offset = 
outputStream.getPos();
-612// write the metadata content
-613DataOutputStream dos = 
blockWriter.startWriting(BlockType.META);
-614metaData.get(i).write(dos);
-615
-616
blockWriter.writeHeaderAndData(outputStream);
-617totalUncompressedBytes += 
blockWriter.getUncompressedSizeWithHeader();
-618
-619// Add the new meta block to the 
meta index.
-620
metaBlockIndexWriter.addEntry(metaNames.get(i), offset,
-621
blockWriter.getOnDiskSizeWithHeader());
-622  }
-623}
-624
-625// Load-on-open section.
-626
-627// Data block index.
-628//
-629// In version 2, this section of the 
file starts with the root level data
-630// block index. We call a function 
that writes intermediate-level blocks
-631// first, then root level, and 
returns the offset of the root level block
-632// index.
-633
-634long rootIndexOffset = 
dataBlockIndexWriter.writeIndexBlocks(outputStream);
-635
trailer.setLoadOnOpenOffset(rootIndexOffset);
-636
-637// Meta block index.
-638
metaBlockIndexWriter.writeSingleLevelIndex(blockWriter.startWriting(
-639BlockType.ROOT_INDEX), "meta");
-640
blockWriter.writeHeaderAndData(outputStream);
-641totalUncompressedBytes += 
blockWriter.getUncompressedSizeWithHeader();
-642
-643if 
(this.hFileContext.isIncludesMvcc()) {
-644  appendFileInfo(MAX_MEMSTORE_TS_KEY, 
Bytes.toBytes(maxMemstoreTS));
-645  appendFileInfo(KEY_VALUE_VERSION, 
Bytes.toBytes(KEY_VALUE_VER_WITH_MEMSTORE));
-646}
-647
-648// File info
-649writeFileInfo(trailer, 
blockWriter.startWriting(BlockType.FILE_INFO));
-650
blockWriter.writeHeaderAndData(outputStream);
-651totalUncompressedBytes += 
blockWriter.getUncompressedSizeWithHeader();
-652
-653// Load-on-open data supplied by 
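The appendMetaBlock javadoc above says to call it before close(), which then lays the file out as data blocks, meta blocks, file info, the block indexes, and the trailer. A hedged usage sketch (path and values are placeholders; the writer-factory calls are assumed from the same org.apache.hadoop.hbase.io.hfile package, unchanged by this diff):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.BytesWritable;

    public class HFileWriteExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.hfile");  // hypothetical output path
        HFileContext context = new HFileContextBuilder().build();
        HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
            .withPath(fs, path)
            .withFileContext(context)
            .create();
        try {
          // Cells must be appended in key order.
          writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
              Bytes.toBytes("q"), Bytes.toBytes("v")));
          // One meta block carrying a batch of serialized data, per the javadoc.
          writer.appendMetaBlock("exampleMeta", new BytesWritable(Bytes.toBytes("payload")));
          writer.appendFileInfo(Bytes.toBytes("exampleKey"), Bytes.toBytes("exampleValue"));
        } finally {
          writer.close();  // writes meta blocks, file info, indexes, then the trailer
        }
      }
    }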

[03/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
-168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
-171 *
-172 * <p>
-173 * Region consistency checks verify that hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS (.regioninfo files) all are in
-175 * accordance.
-176 * <p>
-177 * Table integrity checks verify that all possible row keys resolve to exactly
-178 * one region of a table.  This means there are no individual degenerate
-179 * or backwards regions; no holes between regions; and that there are no
-180 * overlapping regions.
-181 * <p>
-182 * The general repair strategy works in two phases:
-183 * <ol>
-184 * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
-185 * <li> Repair Region Consistency with hbase:meta and assignments
-186 * </ol>
-187 * <p>
-188 * For table integrity repairs, the tables' region directories are scanned
-189 * for .regioninfo files.  Each table's integrity is then verified.  If there
-190 * are any orphan regions (regions with no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
-193 * a new region is created and all data is merged into the new region.
-194 * <p>
-195 * Table integrity repairs deal solely with HDFS and could potentially be done
-196 * offline -- the hbase region servers or master do not need to be running.
-197 * This phase can eventually be used to completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * <p>
-200 * Region consistency requires three conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at the regionserver that was assigned to
-203 * with proper state in the master.
-204 * <p>
-205 * Region consistency repairs require hbase to be online so that hbck can
-206 * contact the HBase master and region servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of the region consistency information
-208 * is transient and less risky to repair.
-209 * <p>
-210 * If hbck is run from the command line, there are a

[03/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
index 16c2238..a626878 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
@@ -228,1032 +228,1032 @@
 220RESERVED_KEYWORDS.add(IS_META_KEY);
 221  }
 222
-223  @InterfaceAudience.Private
-224  public final static String 
NAMESPACE_FAMILY_INFO = "info";
-225  @InterfaceAudience.Private
-226  public final static byte[] 
NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
+223  /**
+224   * @deprecated namespace table has been 
folded into the ns family in meta table, do not use this
+225   * any more.
+226   */
 227  @InterfaceAudience.Private
-228  public final static byte[] 
NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
-229
-230  /**
-231   * pre
-232   * Pattern that matches a coprocessor 
specification. Form is:
-233   * {@code coprocessor jar file 
location '|' class name ['|' priority ['|' 
arguments]]}
-234   * where arguments are {@code 
KEY '=' VALUE [,...]}
-235   * For example: {@code 
hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
-236   * /pre
-237   */
-238  private static final Pattern 
CP_HTD_ATTR_VALUE_PATTERN =
-239
Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");
-240
-241  private static final String 
CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
-242  private static final String 
CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
-243  private static final Pattern 
CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
-244"(" + 
CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
-245  
CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
-246  private static final Pattern 
CP_HTD_ATTR_KEY_PATTERN =
-247
Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
-248  /**
-249   * Table descriptor for namespace 
table
-250   */
-251  // TODO We used to set CacheDataInL1 
for NS table. When we have BucketCache in file mode, now the
-252  // NS data goes to File mode BC only. 
Test how that affect the system. If too much, we have to
-253  // rethink about adding back the 
setCacheDataInL1 for NS table.
-254  public static final TableDescriptor 
NAMESPACE_TABLEDESC
-255= 
TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
-256  
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
-257// Ten is arbitrary number.  Keep 
versions to help debugging.
-258.setMaxVersions(10)
-259.setInMemory(true)
-260.setBlocksize(8 * 1024)
-261
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-262.build())
-263  .build();
-264  private final ModifyableTableDescriptor 
desc;
+228  @Deprecated
+229  public final static String 
NAMESPACE_FAMILY_INFO = "info";
+230
+231  /**
+232   * @deprecated namespace table has been 
folded into the ns family in meta table, do not use this
+233   * any more.
+234   */
+235  @InterfaceAudience.Private
+236  @Deprecated
+237  public final static byte[] 
NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
+238
+239  /**
+240   * @deprecated namespace table has been 
folded into the ns family in meta table, do not use this
+241   * any more.
+242   */
+243  @InterfaceAudience.Private
+244  @Deprecated
+245  public final static byte[] 
NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
+246
+247  /**
+248   * <pre>
+249   * Pattern that matches a coprocessor specification. Form is:
+250   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
+251   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
+252   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
+253   * </pre>
+254   */
+255  private static final Pattern 
CP_HTD_ATTR_VALUE_PATTERN =
+256
Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");
+257
+258  private static final String 
CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+";
+259  private static final String 
CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+";
+260  private static final Pattern 
CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile(
+261"(" + 
CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" +
+262  
CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
+263  private static final Pattern 
CP_HTD_ATTR_KEY_PATTERN =
+264
Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
 265
 266  /**
-267   * @param desc The table descriptor to 
serialize
-268   * @return This instance serialized 
with pb with pb magic prefix
-269  

[03/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadHelper.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadHelper.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadHelper.html
index 5062e9b..23b4be7 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadHelper.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/security/access/TestAccessController.BulkLoadHelper.html
@@ -282,7 +282,7 @@
 274  public static void tearDownAfterClass() 
throws Exception {
 275cleanUp();
 276TEST_UTIL.shutdownMiniCluster();
-277int total = 
TableAuthManager.getTotalRefCount();
+277int total = 
AuthManager.getTotalRefCount();
 278assertTrue("Unexpected reference 
count: " + total, total == 0);
 279  }
 280
@@ -1642,12 +1642,12 @@
 1634  }
 1635
 1636  UserPermission ownerperm =
-1637  new 
UserPermission(Bytes.toBytes(USER_OWNER.getName()), tableName, null, 
Action.values());
+1637  new 
UserPermission(USER_OWNER.getName(), tableName, Action.values());
 1638  assertTrue("Owner should have all 
permissions on table",
 1639
hasFoundUserPermission(ownerperm, perms));
 1640
 1641  User user = 
User.createUserForTesting(TEST_UTIL.getConfiguration(), "user", new 
String[0]);
-1642  byte[] userName = 
Bytes.toBytes(user.getShortName());
+1642  String userName = 
user.getShortName();
 1643
 1644  UserPermission up =
 1645  new UserPermission(userName, 
tableName, family1, qualifier, Permission.Action.READ);
@@ -1733,7 +1733,7 @@
 1725  }
 1726
 1727  UserPermission newOwnerperm =
-1728  new 
UserPermission(Bytes.toBytes(newOwner.getName()), tableName, null, 
Action.values());
+1728  new 
UserPermission(newOwner.getName(), tableName, Action.values());
 1729  assertTrue("New owner should have 
all permissions on table",
 1730
hasFoundUserPermission(newOwnerperm, perms));
 1731} finally {
@@ -1757,1888 +1757,1898 @@
 1749
 1750CollectionString superUsers 
= Superusers.getSuperUsers();
 1751ListUserPermission 
adminPerms = new ArrayList(superUsers.size() + 1);
-1752adminPerms.add(new 
UserPermission(Bytes.toBytes(USER_ADMIN.getShortName()),
-1753  AccessControlLists.ACL_TABLE_NAME, 
null, null, Bytes.toBytes("ACRW")));
-1754
-1755for(String user: superUsers) {
-1756  adminPerms.add(new 
UserPermission(Bytes.toBytes(user), AccessControlLists.ACL_TABLE_NAME,
-1757  null, null, 
Action.values()));
-1758}
-1759assertTrue("Only super users, global 
users and user admin has permission on table hbase:acl " +
-1760"per setup", perms.size() == 5 + 
superUsers.size() 
-1761
hasFoundUserPermission(adminPerms, perms));
-1762  }
-1763
-1764  /** global operations */
-1765  private void 
verifyGlobal(AccessTestAction action) throws Exception {
-1766verifyAllowed(action, SUPERUSER);
-1767
-1768verifyDenied(action, USER_CREATE, 
USER_RW, USER_NONE, USER_RO);
-1769  }
-1770
-1771  @Test
-1772  public void testCheckPermissions() 
throws Exception {
-1773// 
--
-1774// test global permissions
-1775AccessTestAction globalAdmin = new 
AccessTestAction() {
-1776  @Override
-1777  public Void run() throws Exception 
{
-1778checkGlobalPerms(TEST_UTIL, 
Permission.Action.ADMIN);
-1779return null;
-1780  }
-1781};
-1782// verify that only superuser can 
admin
-1783verifyGlobal(globalAdmin);
-1784
-1785// 
--
-1786// test multiple permissions
-1787AccessTestAction globalReadWrite = 
new AccessTestAction() {
-1788  @Override
-1789  public Void run() throws Exception 
{
-1790checkGlobalPerms(TEST_UTIL, 
Permission.Action.READ, Permission.Action.WRITE);
-1791return null;
-1792  }
-1793};
+1752adminPerms.add(new 
UserPermission(USER_ADMIN.getShortName(), Bytes.toBytes("ACRW")));
+1753for(String user: superUsers) {
+1754  // Global permission
+1755  adminPerms.add(new 
UserPermission(user, Action.values()));
+1756}
+1757assertTrue("Only super users, global 
users and user admin has permission on table hbase:acl " +
+1758"per setup", perms.size() == 5 + 
superUsers.size() 
+1759
hasFoundUserPermission(adminPerms, perms));
+1760  }
+1761
+1762  /** global operations */
+1763  private void 
verifyGlobal(AccessTestAction action) throws Exception {
+1764verifyAllowed(action, SUPERUSER);
+1765
+1766verifyDenied(action, USER_CREATE, 
USER_RW, USER_NONE, USER_RO);
+1767  }
+1768
+1769  @Test
+1770  public void testCheckPermissions() 
throws 

[03/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestRegionProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestRegionProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestRegionProcedure.html
index c9e0e55..f1b9105 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestRegionProcedure.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.TestRegionProcedure.html
@@ -37,157 +37,157 @@
 029import java.util.Arrays;
 030import java.util.List;
 031import 
org.apache.hadoop.hbase.HBaseClassTestRule;
-032import 
org.apache.hadoop.hbase.HRegionInfo;
-033import 
org.apache.hadoop.hbase.ServerName;
-034import 
org.apache.hadoop.hbase.TableName;
-035import 
org.apache.hadoop.hbase.client.RegionInfo;
-036import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-037import 
org.apache.hadoop.hbase.master.locking.LockProcedure;
-038import 
org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
-039import 
org.apache.hadoop.hbase.procedure2.LockType;
-040import 
org.apache.hadoop.hbase.procedure2.LockedResource;
-041import 
org.apache.hadoop.hbase.procedure2.LockedResourceType;
-042import 
org.apache.hadoop.hbase.procedure2.Procedure;
-043import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-044import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
-045import 
org.apache.hadoop.hbase.testclassification.MasterTests;
-046import 
org.apache.hadoop.hbase.testclassification.SmallTests;
-047import 
org.apache.hadoop.hbase.util.Bytes;
-048import org.junit.After;
-049import org.junit.Before;
-050import org.junit.ClassRule;
-051import org.junit.Rule;
-052import org.junit.Test;
-053import 
org.junit.experimental.categories.Category;
-054import org.junit.rules.TestName;
-055import org.slf4j.Logger;
-056import org.slf4j.LoggerFactory;
-057
-058@Category({MasterTests.class, 
SmallTests.class})
-059public class TestMasterProcedureScheduler 
{
-060
-061  @ClassRule
-062  public static final HBaseClassTestRule 
CLASS_RULE =
-063  
HBaseClassTestRule.forClass(TestMasterProcedureScheduler.class);
-064
-065  private static final Logger LOG = 
LoggerFactory.getLogger(TestMasterProcedureScheduler.class);
-066
-067  private MasterProcedureScheduler 
queue;
-068
-069  @Rule
-070  public TestName name = new 
TestName();
-071
-072  @Before
-073  public void setUp() throws IOException 
{
-074queue = new 
MasterProcedureScheduler();
-075queue.start();
-076  }
-077
-078  @After
-079  public void tearDown() throws 
IOException {
-080assertEquals("proc-queue expected to 
be empty", 0, queue.size());
-081queue.stop();
-082queue.clear();
-083  }
-084
-085  /**
-086   * Verify simple 
create/insert/fetch/delete of the table queue.
-087   */
-088  @Test
-089  public void testSimpleTableOpsQueues() 
throws Exception {
-090final int NUM_TABLES = 10;
-091final int NUM_ITEMS = 10;
-092
-093int count = 0;
-094for (int i = 1; i <= NUM_TABLES; ++i) {
-095  TableName tableName = 
TableName.valueOf(String.format("test-%04d", i));
-096  // insert items
-097  for (int j = 1; j <= NUM_ITEMS; ++j) {
-098queue.addBack(new 
TestTableProcedure(i * 1000 + j, tableName,
-099  
TableProcedureInterface.TableOperationType.EDIT));
-100assertEquals(++count, 
queue.size());
-101  }
-102}
-103assertEquals(NUM_TABLES * NUM_ITEMS, 
queue.size());
-104
-105for (int j = 1; j <= NUM_ITEMS; ++j) {
-106  for (int i = 1; i <= NUM_TABLES; ++i) {
-107Procedure proc = queue.poll();
-108assertTrue(proc != null);
-109TableName tableName = 
((TestTableProcedure)proc).getTableName();
-110
queue.waitTableExclusiveLock(proc, tableName);
-111
queue.wakeTableExclusiveLock(proc, tableName);
-112queue.completionCleanup(proc);
-113assertEquals(--count, 
queue.size());
-114assertEquals(i * 1000 + j, 
proc.getProcId());
-115  }
-116}
-117assertEquals(0, queue.size());
-118
-119for (int i = 1; i <= NUM_TABLES; ++i) {
-120  final TableName tableName = 
TableName.valueOf(String.format("test-%04d", i));
-121  final TestTableProcedure dummyProc 
= new TestTableProcedure(100, tableName,
-122
TableProcedureInterface.TableOperationType.DELETE);
-123  // complete the table deletion
-124  
assertTrue(queue.markTableAsDeleted(tableName, dummyProc));
-125}
-126  }
-127
-128  /**
-129   * Check that the table queue is not 
deletable until every procedure
-130   * in-progress is completed (this is a 
special case for write-locks).

[03/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.Iter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.Iter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.Iter.html
index 8ef0ff5..9545561 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.Iter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.Iter.html
@@ -153,158 +153,161 @@
 145
 146  private void collectStackId(Entry entry, Map<Integer, List<Entry>> stackId2Proc,
 147      MutableInt maxStackId) {
-148    for (int i = 0, n = entry.proc.getStackIdCount(); i < n; i++) {
-149      int stackId = entry.proc.getStackId(i);
-150      if (stackId > maxStackId.intValue()) {
-151        maxStackId.setValue(stackId);
-152      }
-153      stackId2Proc.computeIfAbsent(stackId, k -> new ArrayList<>()).add(entry);
-154    }
-155    entry.subProcs.forEach(e -> collectStackId(e, stackId2Proc, maxStackId));
-156  }
-157
-158  private void addAllToCorruptedAndRemoveFromProcMap(Entry entry,
-159      Map<Long, Entry> remainingProcMap) {
-160    corruptedProcs.add(new ProtoAndProc(entry.proc));
-161    remainingProcMap.remove(entry.proc.getProcId());
-162    for (Entry e : entry.subProcs) {
-163      addAllToCorruptedAndRemoveFromProcMap(e, remainingProcMap);
-164    }
-165  }
-166
-167  private void addAllToValidAndRemoveFromProcMap(Entry entry, Map<Long, Entry> remainingProcMap) {
-168    validProcs.add(new ProtoAndProc(entry.proc));
-169    remainingProcMap.remove(entry.proc.getProcId());
-170    for (Entry e : entry.subProcs) {
-171      addAllToValidAndRemoveFromProcMap(e, remainingProcMap);
-172    }
-173  }
-174
-175  // In this method first we will check whether the given root procedure and all its sub procedures
-176  // are valid, through the procedure stack. And we will also remove all these procedures from the
-177  // remainingProcMap, so at last, if there are still procedures in the map, we know that there are
-178  // orphan procedures.
-179  private void checkReady(Entry rootEntry, Map<Long, Entry> remainingProcMap) {
-180    if (isFinished(rootEntry.proc)) {
-181      if (!rootEntry.subProcs.isEmpty()) {
-182        LOG.error("unexpected active children for root-procedure: {}", rootEntry);
-183        rootEntry.subProcs.forEach(e -> LOG.error("unexpected active children: {}", e));
-184        addAllToCorruptedAndRemoveFromProcMap(rootEntry, remainingProcMap);
-185      } else {
-186        addAllToValidAndRemoveFromProcMap(rootEntry, remainingProcMap);
-187      }
-188      return;
-189    }
-190    Map<Integer, List<Entry>> stackId2Proc = new HashMap<>();
-191    MutableInt maxStackId = new MutableInt(Integer.MIN_VALUE);
-192    collectStackId(rootEntry, stackId2Proc, maxStackId);
-193    // the stack ids should start from 0 and increase by one every time
-194    boolean valid = true;
-195    for (int i = 0; i <= maxStackId.intValue(); i++) {
-196      List<Entry> entries = stackId2Proc.get(i);
-197      if (entries == null) {
-198        LOG.error("Missing stack id {}, max stack id is {}, root procedure is {}", i, maxStackId,
-199          rootEntry);
-200        valid = false;
-201      } else if (entries.size() > 1) {
-202        LOG.error("Multiple procedures {} have the same stack id {}, max stack id is {}," +
-203          " root procedure is {}", entries, i, maxStackId, rootEntry);
-204        valid = false;
-205      }
-206    }
-207    if (valid) {
-208      addAllToValidAndRemoveFromProcMap(rootEntry, remainingProcMap);
-209    } else {
-210      addAllToCorruptedAndRemoveFromProcMap(rootEntry, remainingProcMap);
-211    }
-212  }
-213
-214  private void checkOrphan(Map<Long, Entry> procMap) {
-215    procMap.values().forEach(entry -> {
-216      LOG.error("Orphan procedure: {}", entry);
-217      corruptedProcs.add(new ProtoAndProc(entry.proc));
-218    });
-219  }
-220
-221  private static final class Iter implements ProcedureIterator {
-222
-223    private final List<ProtoAndProc> procs;
-224
-225    private Iterator<ProtoAndProc> iter;
-226
-227    private ProtoAndProc current;
-228
-229    public Iter(List<ProtoAndProc> procs) {
-230      this.procs = procs;
-231      reset();
-232    }
-233
-234    @Override
-235    public void reset() {
-236      iter = procs.iterator();
-237      if (iter.hasNext()) {
-238        current = iter.next();
-239      } else {
-240        current = null;
-241      }
-242    }
-243
-244    @Override
-245    public boolean hasNext() {
-246      return current != null;
-247    }
-248
-249    private void checkNext() {
-250      if (!hasNext()) {
-251        throw new NoSuchElementException();
-252      }
-253    }
-254
-255    @Override
-256    public 

[03/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.Incrementer.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.Incrementer.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.Incrementer.html
index ed3db7a..156dabb 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.Incrementer.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.Incrementer.html
@@ -5542,785 +5542,825 @@
 5534  }
 5535
 5536  @Test
-5537  public void testWriteRequestsCounter() throws IOException {
-5538    byte[] fam = Bytes.toBytes("info");
-5539    byte[][] families = { fam };
-5540    this.region = initHRegion(tableName, method, CONF, families);
+5537  public void testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception {
+5538    byte[] cf1 = Bytes.toBytes("CF1");
+5539    byte[][] families = { cf1 };
+5540    byte[] col = Bytes.toBytes("C");
 5541
-5542    Assert.assertEquals(0L, region.getWriteRequestsCount());
-5543
-5544    Put put = new Put(row);
-5545    put.addColumn(fam, fam, fam);
-5546
-5547    Assert.assertEquals(0L, region.getWriteRequestsCount());
-5548    region.put(put);
-5549    Assert.assertEquals(1L, region.getWriteRequestsCount());
-5550    region.put(put);
-5551    Assert.assertEquals(2L, region.getWriteRequestsCount());
-5552    region.put(put);
-5553    Assert.assertEquals(3L, region.getWriteRequestsCount());
-5554
-5555    region.delete(new Delete(row));
-5556    Assert.assertEquals(4L, region.getWriteRequestsCount());
-5557  }
-5558
-5559  @Test
-5560  public void testOpenRegionWrittenToWAL() throws Exception {
-5561    final ServerName serverName = ServerName.valueOf(name.getMethodName(), 100, 42);
-5562    final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
-5563
-5564    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
-5565    htd.addFamily(new HColumnDescriptor(fam1));
-5566    htd.addFamily(new HColumnDescriptor(fam2));
-5567
-5568    HRegionInfo hri = new HRegionInfo(htd.getTableName(),
-5569      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
-5570
-5571    // open the region w/o rss and wal and flush some files
-5572    region =
-5573        HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL
-5574            .getConfiguration(), htd);
-5575    assertNotNull(region);
-5576
-5577    // create a file in fam1 for the region before opening in OpenRegionHandler
-5578    region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
-5579    region.flush(true);
-5580    HBaseTestingUtility.closeRegionAndWAL(region);
+5542    HBaseConfiguration conf = new HBaseConfiguration();
+5543    this.region = initHRegion(tableName, method, conf, families);
+5544
+5545    Put put = new Put(Bytes.toBytes("16"));
+5546    put.addColumn(cf1, col, Bytes.toBytes("val"));
+5547    region.put(put);
+5548    Put put2 = new Put(Bytes.toBytes("15"));
+5549    put2.addColumn(cf1, col, Bytes.toBytes("val"));
+5550    region.put(put2);
+5551
+5552    // Create a reverse scan
+5553    Scan scan = new Scan(Bytes.toBytes("16"));
+5554    scan.setReversed(true);
+5555    RegionScannerImpl scanner = region.getScanner(scan);
+5556
+5557    // Put a lot of cells that have sequenceIDs greater than the readPt of the reverse scan
+5558    for (int i = 10; i < 20; i++) {
+5559      Put p = new Put(Bytes.toBytes("" + i));
+5560      p.addColumn(cf1, col, Bytes.toBytes("" + i));
+5561      region.put(p);
+5562    }
+5563    List<Cell> currRow = new ArrayList<>();
+5564    boolean hasNext;
+5565    do {
+5566      hasNext = scanner.next(currRow);
+5567    } while (hasNext);
+5568
+5569    assertEquals(2, currRow.size());
+5570    assertEquals("16", Bytes.toString(currRow.get(0).getRowArray(),
+5571      currRow.get(0).getRowOffset(), currRow.get(0).getRowLength()));
+5572    assertEquals("15", Bytes.toString(currRow.get(1).getRowArray(),
+5573      currRow.get(1).getRowOffset(), currRow.get(1).getRowLength()));
+5574  }
+5575
+5576  @Test
+5577  public void testWriteRequestsCounter() throws IOException {
+5578    byte[] fam = Bytes.toBytes("info");
+5579    byte[][] families = { fam };
+5580    this.region = initHRegion(tableName, method, CONF, families);
 5581
-5582    ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
+5582    Assert.assertEquals(0L, region.getWriteRequestsCount());
 5583
-5584    // capture append() calls
-5585    WAL wal = mockWAL();
-5586    when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
-5587
-5588    region = HRegion.openHRegion(hri, htd, rss.getWAL(hri),
-5589      TEST_UTIL.getConfiguration(), 

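The new test above hinges on MVCC read points: a scanner only observes cells whose sequence id is at or below the read point taken when the scanner opened, so the ten puts issued after getScanner() stay invisible and only rows "16" and "15" come back. A self-contained sketch of that visibility rule (names are illustrative; the real logic lives in HBase's MultiVersionConcurrencyControl and the scanner stack):

import java.util.ArrayList;
import java.util.List;

final class ReadPointSketch {
  static final class CellStub {
    final String row;
    final long seqId;
    CellStub(String row, long seqId) { this.row = row; this.seqId = seqId; }
  }

  // Keep only cells that were already committed when the scanner opened.
  static List<CellStub> visibleTo(long readPt, List<CellStub> cells) {
    List<CellStub> visible = new ArrayList<>();
    for (CellStub c : cells) {
      if (c.seqId <= readPt) {
        visible.add(c);
      }
    }
    return visible;
  }

  public static void main(String[] args) {
    List<CellStub> store = new ArrayList<>();
    store.add(new CellStub("15", 1));
    store.add(new CellStub("16", 2));
    long readPt = 2;                      // scanner opened here
    store.add(new CellStub("17", 3));     // put after the scanner opened
    System.out.println(visibleTo(readPt, store).size()); // 2, like the test asserts
  }
}

The real scanner never materializes a filtered copy like this; the read point is consulted cell by cell as the scan advances, but the visibility rule is the same.
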
[03/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/class-use/Bytes.ConverterHolder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/class-use/Bytes.ConverterHolder.html b/devapidocs/org/apache/hadoop/hbase/util/class-use/Bytes.ConverterHolder.html
new file mode 100644
index 0000000..90b274f
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/util/class-use/Bytes.ConverterHolder.html
@@ -0,0 +1,125 @@
+Uses of Class org.apache.hadoop.hbase.util.Bytes.ConverterHolder (Apache HBase 3.0.0-SNAPSHOT API)
+
+No usage of org.apache.hadoop.hbase.util.Bytes.ConverterHolder
+
+Copyright 2007-2018 The Apache Software Foundation. All rights reserved.
+
+[The rest of the new file is standard Javadoc page scaffolding: DOCTYPE, skip/navigation links, and frame toggles, omitted here.]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/package-frame.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-frame.html b/devapidocs/org/apache/hadoop/hbase/util/package-frame.html
index b914b7c..c4fa8ff 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-frame.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-frame.html
@@ -65,9 +65,21 @@
 ByteBufferArray
 ByteBufferArray.BufferCreatorCallable
 ByteBufferUtils
+ByteBufferUtils.Comparer
+ByteBufferUtils.ComparerHolder
+ByteBufferUtils.ComparerHolder.PureJavaComparer
+ByteBufferUtils.ComparerHolder.UnsafeComparer
+ByteBufferUtils.Converter
+ByteBufferUtils.ConverterHolder
+ByteBufferUtils.ConverterHolder.PureJavaConverter
+ByteBufferUtils.ConverterHolder.UnsafeConverter
 ByteRangeUtils
 Bytes
 Bytes.ByteArrayComparator
+Bytes.Converter
+Bytes.ConverterHolder
+Bytes.ConverterHolder.PureJavaConverter
+Bytes.ConverterHolder.UnsafeConverter
 Bytes.LexicographicalComparerHolder
 Bytes.RowEndKeyComparator
 ByteStringer

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-summary.html b/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
index 82a1d0a..9c8a985 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-summary.html
@@ -377,6 +377,38 @@
 
 
 
+ByteBufferUtils.Comparer
+
+
+
+ByteBufferUtils.ComparerHolder
+
+
+
+ByteBufferUtils.ComparerHolder.PureJavaComparer
+
+
+
+ByteBufferUtils.ComparerHolder.UnsafeComparer
+
+
+
+ByteBufferUtils.Converter
+
+
+
+ByteBufferUtils.ConverterHolder
+
+
+
+ByteBufferUtils.ConverterHolder.PureJavaConverter
+
+
+
+ByteBufferUtils.ConverterHolder.UnsafeConverter
+
+
+
 ByteRangeUtils
 
 Utility methods for working with ByteRange.
@@ -397,6 +429,22 @@
 
 
 
+Bytes.Converter
+
+
+
+Bytes.ConverterHolder
+
+
+
+Bytes.ConverterHolder.PureJavaConverter
+
+
+
+Bytes.ConverterHolder.UnsafeConverter
+
+
+
 Bytes.LexicographicalComparerHolder
 
 Provides a lexicographical comparer implementation; either 
a Java

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
index 4ee911d..89dc634 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
@@ -169,6 +169,20 @@
 
 
 org.apache.hadoop.hbase.util.ByteBufferUtils

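The classes this message adds (Bytes.ConverterHolder, ByteBufferUtils.ComparerHolder, and their PureJava/Unsafe inner classes) follow the same holder idiom as the pre-existing Bytes.LexicographicalComparerHolder: select one implementation at class-load time and use it for every call afterwards. A rough, self-contained sketch of the idiom; the selection test and all names below are placeholders, since HBase actually probes for a usable sun.misc.Unsafe:

final class HolderIdiomSketch {
  interface Converter {
    long toLong(byte[] bytes, int offset);
  }

  // Chosen once when the holder class initializes; no per-call branching afterwards.
  static final Converter BEST_CONVERTER = pickBestConverter();

  private static Converter pickBestConverter() {
    boolean unsafeAvailable = false; // placeholder; HBase checks for Unsafe here
    return unsafeAvailable ? new PureJavaConverter() /* would be an UnsafeConverter */
                           : new PureJavaConverter();
  }

  static final class PureJavaConverter implements Converter {
    @Override
    public long toLong(byte[] bytes, int offset) {
      long l = 0;
      for (int i = offset; i < offset + Long.BYTES; i++) {
        l = (l << 8) | (bytes[i] & 0xFF); // big-endian, byte by byte
      }
      return l;
    }
  }

  public static void main(String[] args) {
    byte[] b = {0, 0, 0, 0, 0, 0, 0, 42};
    System.out.println(BEST_CONVERTER.toLong(b, 0)); // 42
  }
}

The payoff of the idiom is that the capability probe runs once, in static initialization, instead of on every conversion or comparison.
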
[03/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
index edb675e..eb90a1f 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
@@ -408,184 +408,224 @@
 400    }
 401  }
 402
-403  public static class TestProcedure extends NoopProcedure<Void> {
-404    private byte[] data = null;
-405
-406    public TestProcedure() {}
+403  public static class NoopStateMachineProcedure<TEnv, TState>
+404      extends StateMachineProcedure<TEnv, TState> {
+405    private TState initialState;
+406    private TEnv env;
 407
-408    public TestProcedure(long procId) {
-409      this(procId, 0);
-410    }
-411
-412    public TestProcedure(long procId, long parentId) {
-413      this(procId, parentId, null);
+408    public NoopStateMachineProcedure() {
+409    }
+410
+411    public NoopStateMachineProcedure(TEnv env, TState initialState) {
+412      this.env = env;
+413      this.initialState = initialState;
 414    }
 415
-416    public TestProcedure(long procId, long parentId, byte[] data) {
-417      this(procId, parentId, parentId, data);
-418    }
-419
-420    public TestProcedure(long procId, long parentId, long rootId, byte[] data) {
-421      setData(data);
-422      setProcId(procId);
-423      if (parentId > 0) {
-424        setParentProcId(parentId);
-425      }
-426      if (rootId > 0 || parentId > 0) {
-427        setRootProcId(rootId);
-428      }
-429    }
-430
-431    public void addStackId(final int index) {
-432      addStackIndex(index);
-433    }
-434
-435    public void setSuccessState() {
-436      setState(ProcedureState.SUCCESS);
-437    }
-438
-439    public void setData(final byte[] data) {
-440      this.data = data;
-441    }
+416    @Override
+417    protected Flow executeFromState(TEnv env, TState tState)
+418        throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
+419      return null;
+420    }
+421
+422    @Override
+423    protected void rollbackState(TEnv env, TState tState) throws IOException, InterruptedException {
+424
+425    }
+426
+427    @Override
+428    protected TState getState(int stateId) {
+429      return null;
+430    }
+431
+432    @Override
+433    protected int getStateId(TState tState) {
+434      return 0;
+435    }
+436
+437    @Override
+438    protected TState getInitialState() {
+439      return initialState;
+440    }
+441  }
 442
-443    @Override
-444    protected void serializeStateData(ProcedureStateSerializer serializer)
-445        throws IOException {
-446      ByteString dataString = ByteString.copyFrom((data == null) ? new byte[0] : data);
-447      BytesValue.Builder builder = BytesValue.newBuilder().setValue(dataString);
-448      serializer.serialize(builder.build());
-449    }
-450
-451    @Override
-452    protected void deserializeStateData(ProcedureStateSerializer serializer)
-453        throws IOException {
-454      BytesValue bytesValue = serializer.deserialize(BytesValue.class);
-455      ByteString dataString = bytesValue.getValue();
-456
-457      if (dataString.isEmpty()) {
-458        data = null;
-459      } else {
-460        data = dataString.toByteArray();
-461      }
-462    }
-463
-464    // Mark acquire/release lock functions public for test uses.
-465    @Override
-466    public LockState acquireLock(Void env) {
-467      return LockState.LOCK_ACQUIRED;
-468    }
-469
-470    @Override
-471    public void releaseLock(Void env) {
-472      // no-op
+443  public static class TestProcedure extends NoopProcedure<Void> {
+444    private byte[] data = null;
+445
+446    public TestProcedure() {}
+447
+448    public TestProcedure(long procId) {
+449      this(procId, 0);
+450    }
+451
+452    public TestProcedure(long procId, long parentId) {
+453      this(procId, parentId, null);
+454    }
+455
+456    public TestProcedure(long procId, long parentId, byte[] data) {
+457      this(procId, parentId, parentId, data);
+458    }
+459
+460    public TestProcedure(long procId, long parentId, long rootId, byte[] data) {
+461      setData(data);
+462      setProcId(procId);
+463      if (parentId > 0) {
+464        setParentProcId(parentId);
+465      }
+466      if (rootId > 0 || parentId > 0) {
+467        setRootProcId(rootId);
+468      }
+469    }
+470
+471    public void addStackId(final int index) {
+472      addStackIndex(index);
 473    }
-474  }
-475
-476  public static class LoadCounter implements 
[03/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/testdevapidocs/org/apache/hadoop/hbase/chaos/actions/Action.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/chaos/actions/Action.html b/testdevapidocs/org/apache/hadoop/hbase/chaos/actions/Action.html
index 3cec086..4796b38 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/chaos/actions/Action.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/chaos/actions/Action.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class Action
+public class Action
 extends Object
 A (possibly mischievous) action that the ChaosMonkey can perform.
 
@@ -438,7 +438,7 @@ extends Object
 
 
 KILL_MASTER_TIMEOUT_KEY
-public static final String KILL_MASTER_TIMEOUT_KEY
+public static final String KILL_MASTER_TIMEOUT_KEY
 
 See Also:
 Constant Field Values
@@ -451,7 +451,7 @@ extends Object
 
 
 START_MASTER_TIMEOUT_KEY
-public static final String START_MASTER_TIMEOUT_KEY
+public static final String START_MASTER_TIMEOUT_KEY
 
 See Also:
 Constant Field Values
@@ -464,7 +464,7 @@ extends Object
 
 
 KILL_RS_TIMEOUT_KEY
-public static final String KILL_RS_TIMEOUT_KEY
+public static final String KILL_RS_TIMEOUT_KEY
 
 See Also:
 Constant Field Values
@@ -477,7 +477,7 @@ extends Object
 
 
 START_RS_TIMEOUT_KEY
-public static final String START_RS_TIMEOUT_KEY
+public static final String START_RS_TIMEOUT_KEY
 
 See Also:
 Constant Field Values
@@ -490,7 +490,7 @@ extends Object
 
 
 KILL_ZK_NODE_TIMEOUT_KEY
-public static final String KILL_ZK_NODE_TIMEOUT_KEY
+public static final String KILL_ZK_NODE_TIMEOUT_KEY
 
 See Also:
 Constant Field Values
@@ -503,7 +503,7 @@ extends Object
 
 
 START_ZK_NODE_TIMEOUT_KEY
-public static final String START_ZK_NODE_TIMEOUT_KEY
+public static final String START_ZK_NODE_TIMEOUT_KEY
 
 See Also:
 Constant Field Values
@@ -516,7 +516,7 @@ extends Object
 
 
 KILL_DATANODE_TIMEOUT_KEY
-public static final String KILL_DATANODE_TIMEOUT_KEY
+public static final String KILL_DATANODE_TIMEOUT_KEY
 
 See Also:
 Constant Field Values
@@ -529,7 +529,7 @@ extends Object
 
 
 START_DATANODE_TIMEOUT_KEY
-public static final String START_DATANODE_TIMEOUT_KEY
+public static final String START_DATANODE_TIMEOUT_KEY
 
 See Also:
 Constant Field Values
@@ -542,7 +542,7 @@ extends Object
 
 
 

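For orientation, the KILL_*/START_* timeout constants documented above are ordinary Configuration keys the chaos actions read at init time. A hedged sketch of picking one up; the 60s default here is an invented example value, not HBase's shipped default:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.chaos.actions.Action;

final class ChaosTimeoutSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Action.KILL_MASTER_TIMEOUT_KEY is the public constant shown in the page above.
    long killMasterTimeoutMs = conf.getLong(Action.KILL_MASTER_TIMEOUT_KEY, 60_000L);
    System.out.println("kill-master timeout ms: " + killMasterTimeoutMs);
  }
}
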
[03/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.AvailableMemoryMBResourceAnalyzer.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.AvailableMemoryMBResourceAnalyzer.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.AvailableMemoryMBResourceAnalyzer.html
index 47aac2c..cb4c472 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.AvailableMemoryMBResourceAnalyzer.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/ResourceCheckerJUnitListener.AvailableMemoryMBResourceAnalyzer.html
@@ -26,221 +26,179 @@
 018
 019package org.apache.hadoop.hbase;
 020
-021import java.lang.management.ManagementFactory;
-022import java.lang.management.MemoryUsage;
-023import java.util.ArrayList;
-024import java.util.HashSet;
-025import java.util.List;
-026import java.util.Map;
-027import java.util.Set;
-028import java.util.concurrent.ConcurrentHashMap;
-029import java.util.concurrent.TimeUnit;
-030import org.apache.hadoop.hbase.ResourceChecker.Phase;
-031import org.apache.hadoop.hbase.util.JVM;
-032import org.junit.runner.notification.RunListener;
-033
-034/**
-035 * Listen to the test progress and check the usage of:
-036 * <ul>
-037 * <li>threads</li>
-038 * <li>open file descriptor</li>
-039 * <li>max open file descriptor</li>
-040 * </ul>
-041 * <p>
-042 * When surefire forkMode=once/always/perthread, this code is executed on the forked process.
-043 */
-044public class ResourceCheckerJUnitListener extends RunListener {
-045  private Map<String, ResourceChecker> rcs = new ConcurrentHashMap<>();
-046
-047  static class ThreadResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-048    private static Set<String> initialThreadNames = new HashSet<>();
-049    private static List<String> stringsToLog = null;
-050
-051    @Override
-052    public int getVal(Phase phase) {
-053      Map<Thread, StackTraceElement[]> stackTraces = Thread.getAllStackTraces();
-054      if (phase == Phase.INITIAL) {
-055        stringsToLog = null;
-056        for (Thread t : stackTraces.keySet()) {
-057          initialThreadNames.add(t.getName());
-058        }
-059      } else if (phase == Phase.END) {
-060        if (stackTraces.size() > initialThreadNames.size()) {
-061          stringsToLog = new ArrayList<>();
-062          for (Thread t : stackTraces.keySet()) {
-063            if (!initialThreadNames.contains(t.getName())) {
-064              stringsToLog.add("\nPotentially hanging thread: " + t.getName() + "\n");
-065              StackTraceElement[] stackElements = stackTraces.get(t);
-066              for (StackTraceElement ele : stackElements) {
-067                stringsToLog.add("\t" + ele + "\n");
-068              }
-069            }
-070          }
-071        }
-072      }
-073      return stackTraces.size();
-074    }
-075
-076    @Override
-077    public int getMax() {
-078      return 500;
-079    }
-080
-081    @Override
-082    public List<String> getStringsToLog() {
-083      return stringsToLog;
-084    }
-085  }
-086
-087
-088  static class OpenFileDescriptorResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-089    @Override
-090    public int getVal(Phase phase) {
-091      if (!JVM.isUnix()) {
-092        return 0;
-093      }
-094      JVM jvm = new JVM();
-095      return (int) jvm.getOpenFileDescriptorCount();
-096    }
-097
-098    @Override
-099    public int getMax() {
-100      return 1024;
-101    }
-102  }
-103
-104  static class MaxFileDescriptorResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-105    @Override
-106    public int getVal(Phase phase) {
-107      if (!JVM.isUnix()) {
-108        return 0;
-109      }
-110      JVM jvm = new JVM();
-111      return (int) jvm.getMaxFileDescriptorCount();
-112    }
-113  }
-114
-115  static class SystemLoadAverageResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-116    @Override
-117    public int getVal(Phase phase) {
-118      if (!JVM.isUnix()) {
-119        return 0;
-120      }
-121      return (int) (new JVM().getSystemLoadAverage() * 100);
-122    }
-123  }
-124
-125  static class ProcessCountResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-126    @Override
-127    public int getVal(Phase phase) {
-128      if (!JVM.isUnix()) {
-129        return 0;
-130      }
-131      return new JVM().getNumberOfRunningProcess();
-132    }
-133  }
-134
-135  static class AvailableMemoryMBResourceAnalyzer extends ResourceChecker.ResourceAnalyzer {
-136    @Override
-137    public int getVal(Phase phase) {
-138      if (!JVM.isUnix()) {
-139        return 0;
-140      }
-141      return (int) (new JVM().getFreeMemory() / (1024L * 1024L));
-142    }
-143  }
-144
-145  static class MaxHeapMemoryMBResourceAnalyzer extends 

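The ThreadResourceAnalyzer above is a neat snapshot-diff trick: record every thread name at Phase.INITIAL, and at Phase.END flag any thread whose name was absent from the initial set as potentially hanging. Stripped of the JUnit listener plumbing, the core of it looks like this (self-contained sketch, illustrative names):

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

final class HangingThreadSketch {
  private final Set<String> initialThreadNames = new HashSet<>();

  void snapshotBefore() {
    for (Thread t : Thread.getAllStackTraces().keySet()) {
      initialThreadNames.add(t.getName());
    }
  }

  // Threads present now but absent from the initial snapshot.
  Set<String> leakedAfter() {
    Set<String> leaked = new HashSet<>();
    for (Map.Entry<Thread, StackTraceElement[]> e : Thread.getAllStackTraces().entrySet()) {
      if (!initialThreadNames.contains(e.getKey().getName())) {
        leaked.add(e.getKey().getName());
      }
    }
    return leaked;
  }

  public static void main(String[] args) throws InterruptedException {
    HangingThreadSketch sketch = new HangingThreadSketch();
    sketch.snapshotBefore();
    Thread t = new Thread(() -> {
      try { Thread.sleep(1000); } catch (InterruptedException ignored) { }
    }, "demo-leaked-thread");
    t.start();
    Thread.sleep(50); // let the thread show up in getAllStackTraces()
    System.out.println(sketch.leakedAfter()); // contains "demo-leaked-thread"
  }
}

As in the real analyzer, matching by name means a new thread that reuses an old thread's name goes unnoticed; that is the accepted trade-off for a cheap check.
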
[03/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 566f410..da040ad 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -341,8361 +341,8425 @@
 333  private final int rowLockWaitDuration;
 334  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
 335
-336  // The internal wait duration to acquire a lock before read/update
-337  // from the region. It is not per row. The purpose of this wait time
-338  // is to avoid waiting a long time while the region is busy, so that
-339  // we can release the IPC handler soon enough to improve the
-340  // availability of the region server. It can be adjusted by
-341  // tuning configuration "hbase.busy.wait.duration".
-342  final long busyWaitDuration;
-343  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-344
-345  // If updating multiple rows in one call, wait longer,
-346  // i.e. waiting for busyWaitDuration * # of rows. However,
-347  // we can limit the max multiplier.
-348  final int maxBusyWaitMultiplier;
-349
-350  // Max busy wait duration. There is no point to wait longer than the RPC
-351  // purge timeout, when a RPC call will be terminated by the RPC engine.
-352  final long maxBusyWaitDuration;
-353
-354  // Max cell size. If nonzero, the maximum allowed size for any given cell
-355  // in bytes
-356  final long maxCellSize;
-357
-358  // Number of mutations for minibatch processing.
-359  private final int miniBatchSize;
+336  private Path regionDir;
+337  private FileSystem walFS;
+338
+339  // The internal wait duration to acquire a lock before read/update
+340  // from the region. It is not per row. The purpose of this wait time
+341  // is to avoid waiting a long time while the region is busy, so that
+342  // we can release the IPC handler soon enough to improve the
+343  // availability of the region server. It can be adjusted by
+344  // tuning configuration "hbase.busy.wait.duration".
+345  final long busyWaitDuration;
+346  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+347
+348  // If updating multiple rows in one call, wait longer,
+349  // i.e. waiting for busyWaitDuration * # of rows. However,
+350  // we can limit the max multiplier.
+351  final int maxBusyWaitMultiplier;
+352
+353  // Max busy wait duration. There is no point to wait longer than the RPC
+354  // purge timeout, when a RPC call will be terminated by the RPC engine.
+355  final long maxBusyWaitDuration;
+356
+357  // Max cell size. If nonzero, the maximum allowed size for any given cell
+358  // in bytes
+359  final long maxCellSize;
 360
-361  // negative number indicates infinite timeout
-362  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-363  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
-364
-365  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
-366
-367  /**
-368   * The sequence ID that was enLongAddered when this region was opened.
-369   */
-370  private long openSeqNum = HConstants.NO_SEQNUM;
-371
-372  /**
-373   * The default setting for whether to enable on-demand CF loading for
-374   * scan requests to this region. Requests can override it.
-375   */
-376  private boolean isLoadingCfsOnDemandDefault = false;
-377
-378  private final AtomicInteger majorInProgress = new AtomicInteger(0);
-379  private final AtomicInteger minorInProgress = new AtomicInteger(0);
+361  // Number of mutations for minibatch processing.
+362  private final int miniBatchSize;
+363
+364  // negative number indicates infinite timeout
+365  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+366  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
+367
+368  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+369
+370  /**
+371   * The sequence ID that was enLongAddered when this region was opened.
+372   */
+373  private long openSeqNum = HConstants.NO_SEQNUM;
+374
+375  /**
+376   * The default setting for whether to enable on-demand CF loading for
+377   * scan requests to this region. Requests can override it.
+378   */
+379  private boolean isLoadingCfsOnDemandDefault = false;
 380
-381  //
-382  // Context: During replay we want to ensure that we do not lose any data. So, we
-383  // have to be conservative in how we replay wals. For each store, we calculate
-384  // the maxSeqId up to which the store was flushed. And, 

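The comment block above names the one tunable in this group that operators actually touch: hbase.busy.wait.duration, the time a request may wait on a busy region before the IPC handler is released. A minimal sketch of setting it; the 30-second value is only an example, since DEFAULT_BUSY_WAIT_DURATION otherwise falls back to the RPC timeout:

import org.apache.hadoop.conf.Configuration;

final class BusyWaitConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // How long a request may wait on a busy region before giving the handler back.
    conf.setLong("hbase.busy.wait.duration", 30_000L);
    System.out.println(conf.getLong("hbase.busy.wait.duration", -1L));
  }
}

Raising it trades region-server handler availability for fewer client retries on briefly busy regions, which is exactly the tension the comment describes.
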
[03/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.html
index 4a11f27..7c7966d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.html
@@ -49,287 +49,290 @@
 041 * Handles opening of a region on a region server.
 042 * <p>
 043 * This is executed after receiving an OPEN RPC from the master or client.
-044 */
-045@InterfaceAudience.Private
-046public class OpenRegionHandler extends EventHandler {
-047  private static final Logger LOG = LoggerFactory.getLogger(OpenRegionHandler.class);
-048
-049  protected final RegionServerServices rsServices;
-050
-051  private final RegionInfo regionInfo;
-052  private final TableDescriptor htd;
-053  private final long masterSystemTime;
-054
-055  public OpenRegionHandler(final Server server,
-056      final RegionServerServices rsServices, RegionInfo regionInfo,
-057      TableDescriptor htd, long masterSystemTime) {
-058    this(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_REGION);
-059  }
-060
-061  protected OpenRegionHandler(final Server server,
-062      final RegionServerServices rsServices, final RegionInfo regionInfo,
-063      final TableDescriptor htd, long masterSystemTime, EventType eventType) {
-064    super(server, eventType);
-065    this.rsServices = rsServices;
-066    this.regionInfo = regionInfo;
-067    this.htd = htd;
-068    this.masterSystemTime = masterSystemTime;
-069  }
-070
-071  public RegionInfo getRegionInfo() {
-072    return regionInfo;
-073  }
-074
-075  @Override
-076  public void process() throws IOException {
-077    boolean openSuccessful = false;
-078    final String regionName = regionInfo.getRegionNameAsString();
-079    HRegion region = null;
-080
-081    try {
-082      if (this.server.isStopped() || this.rsServices.isStopping()) {
-083        return;
-084      }
-085      final String encodedName = regionInfo.getEncodedName();
-086
-087      // 2 different difficult situations can occur
-088      // 1) The opening was cancelled. This is an expected situation
-089      // 2) The region is now marked as online while we're supposed to open. This would be a bug.
-090
-091      // Check that this region is not already online
-092      if (this.rsServices.getRegion(encodedName) != null) {
-093        LOG.error("Region " + encodedName +
-094            " was already online when we started processing the opening. " +
-095            "Marking this new attempt as failed");
-096        return;
-097      }
-098
-099      // Check that we're still supposed to open the region.
-100      // If fails, just return.  Someone stole the region from under us.
-101      if (!isRegionStillOpening()) {
-102        LOG.error("Region " + encodedName + " opening cancelled");
-103        return;
-104      }
-105
-106      // Open region.  After a successful open, failures in subsequent
-107      // processing need to do a close as part of cleanup.
-108      region = openRegion();
-109      if (region == null) {
-110        return;
-111      }
-112
-113      if (!updateMeta(region, masterSystemTime) || this.server.isStopped() ||
-114          this.rsServices.isStopping()) {
-115        return;
-116      }
-117
-118      if (!isRegionStillOpening()) {
-119        return;
-120      }
-121
-122      // Successful region open, and add it to MutableOnlineRegions
-123      this.rsServices.addRegion(region);
-124      openSuccessful = true;
-125
-126      // Done!  Successful region open
-127      LOG.debug("Opened " + regionName + " on " + this.server.getServerName());
-128    } finally {
-129      // Do all clean up here
-130      if (!openSuccessful) {
-131        doCleanUpOnFailedOpen(region);
-132      }
-133      final Boolean current = this.rsServices.getRegionsInTransitionInRS().
-134          remove(this.regionInfo.getEncodedNameAsBytes());
-135
-136      // Let's check if we have met a race condition on open cancellation
-137      // A better solution would be to not have any race condition.
-138      // this.rsServices.getRegionsInTransitionInRS().remove(
-139      //  this.regionInfo.getEncodedNameAsBytes(), Boolean.TRUE);
-140      // would help.
-141      if (openSuccessful) {
-142        if (current == null) { // Should NEVER happen, but let's be paranoid.
-143          LOG.error("Bad state: we've just opened a region that was NOT in transition. Region="
-144              + regionName);
-145        } else if 

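The control flow in process() above is a check-then-act pattern with a single cleanup funnel: every early return is an expected cancellation, and any failure after openRegion() succeeds is handled by the finally block. A hypothetical skeleton that keeps just that shape (all method names below are simplified stand-ins, not the real OpenRegionHandler API):

import java.io.IOException;

abstract class OpenFlowSketch {
  private boolean openSuccessful = false;

  void process() throws IOException {
    Object region = null;
    try {
      if (alreadyOnline() || !stillOpening()) {
        return;                      // expected cancellation paths, not errors
      }
      region = openRegion();
      if (region == null || !updateMeta(region)) {
        return;                      // the finally block cleans up
      }
      addOnline(region);
      openSuccessful = true;
    } finally {
      if (!openSuccessful) {
        cleanUpFailedOpen(region);   // close whatever was half-opened
      }
    }
  }

  abstract boolean alreadyOnline();
  abstract boolean stillOpening();
  abstract Object openRegion() throws IOException;
  abstract boolean updateMeta(Object region);
  abstract void addOnline(Object region);
  abstract void cleanUpFailedOpen(Object region);
}

The one wrinkle worth keeping from the original is the ordering: openSuccessful flips to true only after the region is registered, so every earlier failure funnels into the same cleanup path.
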
[03/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/security/HBasePolicyProvider.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/security/HBasePolicyProvider.html b/devapidocs/src-html/org/apache/hadoop/hbase/security/HBasePolicyProvider.html
index a6adec4..ae951b0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/security/HBasePolicyProvider.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/security/HBasePolicyProvider.html
@@ -25,7 +25,7 @@
 017 */
 018package org.apache.hadoop.hbase.security;
 019
-020import org.apache.yetus.audience.InterfaceAudience;
+020import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 021import org.apache.hadoop.conf.Configuration;
 022import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 023import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
@@ -35,34 +35,38 @@
 027import org.apache.hadoop.security.authorize.ProxyUsers;
 028import org.apache.hadoop.security.authorize.Service;
 029import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-030
-031/**
-032 * Implementation of secure Hadoop policy provider for mapping
-033 * protocol interfaces to hbase-policy.xml entries.
-034 */
-035@InterfaceAudience.Private
-036public class HBasePolicyProvider extends PolicyProvider {
-037  protected final static Service[] services = {
-038      new Service("security.client.protocol.acl", ClientService.BlockingInterface.class),
-039      new Service("security.client.protocol.acl", AdminService.BlockingInterface.class),
-040      new Service("security.admin.protocol.acl", MasterService.BlockingInterface.class),
-041      new Service("security.masterregion.protocol.acl", RegionServerStatusService.BlockingInterface.class)
-042  };
-043
-044  @Override
-045  public Service[] getServices() {
-046    return services;
-047  }
-048
-049  public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
-050    // set service-level authorization security policy
-051    System.setProperty("hadoop.policy.file", "hbase-policy.xml");
-052    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
-053      authManager.refresh(conf, new HBasePolicyProvider());
-054      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
-055    }
-056  }
-057}
+030import org.apache.yetus.audience.InterfaceAudience;
+031
+032/**
+033 * Implementation of secure Hadoop policy provider for mapping
+034 * protocol interfaces to hbase-policy.xml entries.
+035 */
+036@InterfaceAudience.Private
+037public class HBasePolicyProvider extends PolicyProvider {
+038  protected final static Service[] services = {
+039    new Service("security.client.protocol.acl", ClientService.BlockingInterface.class),
+040    new Service("security.client.protocol.acl", AdminService.BlockingInterface.class),
+041    new Service("security.client.protocol.acl",
+042      MasterProtos.HbckService.BlockingInterface.class),
+043    new Service("security.admin.protocol.acl", MasterService.BlockingInterface.class),
+044    new Service("security.masterregion.protocol.acl",
+045      RegionServerStatusService.BlockingInterface.class)
+046  };
+047
+048  @Override
+049  public Service[] getServices() {
+050    return services;
+051  }
+052
+053  public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
+054    // set service-level authorization security policy
+055    System.setProperty("hadoop.policy.file", "hbase-policy.xml");
+056    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
+057      authManager.refresh(conf, new HBasePolicyProvider());
+058      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+059    }
+060  }
+061}
 
 
 

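Worth noting from init() above: the provider only takes effect when service-level authorization is switched on; otherwise refresh() is never called. A short sketch of the toggle, using the same ServiceAuthorizationManager constant the code reads (which resolves to "hadoop.security.authorization"):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

final class AuthToggleSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Without this flag, HBasePolicyProvider.init() is effectively a no-op.
    conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
    System.out.println(conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false));
  }
}
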
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/security/SecurityInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/security/SecurityInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/security/SecurityInfo.html
index fb5e78e..54f6933 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/security/SecurityInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/security/SecurityInfo.html
@@ -28,63 +28,70 @@
 020import java.util.concurrent.ConcurrentHashMap;
 021import java.util.concurrent.ConcurrentMap;
 022
-023import org.apache.yetus.audience.InterfaceAudience;
-024import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-025import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind;
+023import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind;
+024import 

[03/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Monitor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Monitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Monitor.html
index 1b52048..ce887a2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Monitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Monitor.html
@@ -594,1003 +594,1033 @@
 586  private boolean failOnError = true;
 587  private boolean regionServerMode = false;
 588  private boolean zookeeperMode = false;
-589  private boolean regionServerAllRegions = false;
-590  private boolean writeSniffing = false;
-591  private long configuredWriteTableTimeout = DEFAULT_TIMEOUT;
-592  private boolean treatFailureAsError = false;
-593  private TableName writeTableName = DEFAULT_WRITE_TABLE_NAME;
-594  private HashMap<String, Long> configuredReadTableTimeouts = new HashMap<>();
-595
-596  private ExecutorService executor; // threads to retrieve data from regionservers
-597
-598  public Canary() {
-599    this(new ScheduledThreadPoolExecutor(1), new RegionServerStdOutSink());
-600  }
-601
-602  public Canary(ExecutorService executor, Sink sink) {
-603    this.executor = executor;
-604    this.sink = sink;
-605  }
-606
-607  @Override
-608  public Configuration getConf() {
-609    return conf;
-610  }
-611
-612  @Override
-613  public void setConf(Configuration conf) {
-614    this.conf = conf;
-615  }
-616
-617  private int parseArgs(String[] args) {
-618    int index = -1;
-619    // Process command line args
-620    for (int i = 0; i < args.length; i++) {
-621      String cmd = args[i];
-622
-623      if (cmd.startsWith("-")) {
-624        if (index >= 0) {
-625          // command line args must be in the form: [opts] [table 1 [table 2 ...]]
-626          System.err.println("Invalid command line options");
-627          printUsageAndExit();
-628        }
-629
-630        if (cmd.equals("-help")) {
-631          // user asked for help, print the help and quit.
-632          printUsageAndExit();
-633        } else if (cmd.equals("-daemon") && interval == 0) {
-634          // user asked for daemon mode, set a default interval between checks
-635          interval = DEFAULT_INTERVAL;
-636        } else if (cmd.equals("-interval")) {
-637          // user has specified an interval for canary breaths (-interval N)
-638          i++;
-639
-640          if (i == args.length) {
-641            System.err.println("-interval needs a numeric value argument.");
-642            printUsageAndExit();
-643          }
-644
-645          try {
-646            interval = Long.parseLong(args[i]) * 1000;
-647          } catch (NumberFormatException e) {
-648            System.err.println("-interval needs a numeric value argument.");
-649            printUsageAndExit();
-650          }
-651        } else if (cmd.equals("-zookeeper")) {
-652          this.zookeeperMode = true;
-653        } else if (cmd.equals("-regionserver")) {
-654          this.regionServerMode = true;
-655        } else if (cmd.equals("-allRegions")) {
-656          this.regionServerAllRegions = true;
-657        } else if (cmd.equals("-writeSniffing")) {
-658          this.writeSniffing = true;
-659        } else if (cmd.equals("-treatFailureAsError")) {
-660          this.treatFailureAsError = true;
-661        } else if (cmd.equals("-e")) {
-662          this.useRegExp = true;
-663        } else if (cmd.equals("-t")) {
-664          i++;
-665
-666          if (i == args.length) {
-667            System.err.println("-t needs a numeric value argument.");
-668            printUsageAndExit();
-669          }
-670
-671          try {
-672            this.timeout = Long.parseLong(args[i]);
-673          } catch (NumberFormatException e) {
-674            System.err.println("-t needs a numeric value argument.");
-675            printUsageAndExit();
-676          }
-677        } else if (cmd.equals("-writeTableTimeout")) {
-678          i++;
-679
-680          if (i == args.length) {
-681            System.err.println("-writeTableTimeout needs a numeric value argument.");
-682            printUsageAndExit();
-683          }
-684
-685          try {
-686            this.configuredWriteTableTimeout = Long.parseLong(args[i]);
-687          } catch (NumberFormatException e) {
-688            System.err.println("-writeTableTimeout needs a numeric value argument.");
-689            printUsageAndExit();
-690          }
-691        } else if (cmd.equals("-writeTable")) {
-692          i++;
-693
-694          if (i == args.length) {
-695            System.err.println("-writeTable needs a string value argument.");
-696            printUsageAndExit();
-697          }
-698          this.writeTableName = TableName.valueOf(args[i]);
-699        } else if 

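The flags parsed above map directly onto the Canary command line. A hedged launch sketch, assuming (as in HBase 2.x) that Canary implements org.apache.hadoop.util.Tool and can be driven through ToolRunner; the flag spellings are taken from parseArgs() itself:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.tool.Canary;
import org.apache.hadoop.util.ToolRunner;

final class CanaryLaunchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Daemon mode, 30s between canary runs, write sniffing enabled.
    String[] canaryArgs = { "-daemon", "-interval", "30", "-writeSniffing" };
    int exitCode = ToolRunner.run(conf, new Canary(), canaryArgs);
    System.exit(exitCode);
  }
}

Table names, if any, would follow the options, matching the [opts] [table 1 [table 2 ...]] shape parseArgs() enforces.
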
[03/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
index a8cb7c4..8ec6dad 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
@@ -2831,5851 +2831,5852 @@
 2823    status.setStatus(msg);
 2824
 2825    if (rsServices != null && rsServices.getMetrics() != null) {
-2826      rsServices.getMetrics().updateFlush(time - startTime,
-2827          mss.getDataSize(), flushedOutputFileSize);
-2828    }
-2829
-2830    return new FlushResultImpl(compactionRequested ?
-2831        FlushResult.Result.FLUSHED_COMPACTION_NEEDED :
-2832          FlushResult.Result.FLUSHED_NO_COMPACTION_NEEDED, flushOpSeqId);
-2833  }
-2834
-2835  /**
-2836   * Method to safely get the next sequence number.
-2837   * @return Next sequence number unassociated with any actual edit.
-2838   * @throws IOException
-2839   */
-2840  @VisibleForTesting
-2841  protected long getNextSequenceId(final WAL wal) throws IOException {
-2842    WriteEntry we = mvcc.begin();
-2843    mvcc.completeAndWait(we);
-2844    return we.getWriteNumber();
-2845  }
-2846
-2847  //
-2848  // get() methods for client use.
-2849  //
-2850
-2851  @Override
-2852  public RegionScannerImpl getScanner(Scan scan) throws IOException {
-2853    return getScanner(scan, null);
-2854  }
-2855
-2856  @Override
-2857  public RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additionalScanners)
-2858      throws IOException {
-2859    return getScanner(scan, additionalScanners, HConstants.NO_NONCE, HConstants.NO_NONCE);
-2860  }
-2861
-2862  private RegionScannerImpl getScanner(Scan scan, List<KeyValueScanner> additionalScanners,
-2863      long nonceGroup, long nonce) throws IOException {
-2864    startRegionOperation(Operation.SCAN);
-2865    try {
-2866      // Verify families are all valid
-2867      if (!scan.hasFamilies()) {
-2868        // Adding all families to scanner
-2869        for (byte[] family : this.htableDescriptor.getColumnFamilyNames()) {
-2870          scan.addFamily(family);
-2871        }
-2872      } else {
-2873        for (byte[] family : scan.getFamilyMap().keySet()) {
-2874          checkFamily(family);
-2875        }
-2876      }
-2877      return instantiateRegionScanner(scan, additionalScanners, nonceGroup, nonce);
-2878    } finally {
-2879      closeRegionOperation(Operation.SCAN);
-2880    }
-2881  }
-2882
-2883  protected RegionScanner instantiateRegionScanner(Scan scan,
-2884      List<KeyValueScanner> additionalScanners) throws IOException {
-2885    return instantiateRegionScanner(scan, additionalScanners, HConstants.NO_NONCE,
-2886      HConstants.NO_NONCE);
-2887  }
-2888
-2889  protected RegionScannerImpl instantiateRegionScanner(Scan scan,
-2890      List<KeyValueScanner> additionalScanners, long nonceGroup, long nonce) throws IOException {
-2891    if (scan.isReversed()) {
-2892      if (scan.getFilter() != null) {
-2893        scan.getFilter().setReversed(true);
-2894      }
-2895      return new ReversedRegionScannerImpl(scan, additionalScanners, this);
-2896    }
-2897    return new RegionScannerImpl(scan, additionalScanners, this, nonceGroup, nonce);
-2898  }
-2899
-2900  /**
-2901   * Prepare a delete for a row mutation processor
-2902   * @param delete The passed delete is modified by this method. WARNING!
-2903   * @throws IOException
-2904   */
-2905  public void prepareDelete(Delete delete) throws IOException {
-2906    // Check to see if this is a deleteRow insert
-2907    if (delete.getFamilyCellMap().isEmpty()) {
-2908      for (byte[] family : this.htableDescriptor.getColumnFamilyNames()) {
-2909        // Don't eat the timestamp
-2910        delete.addFamily(family, delete.getTimestamp());
-2911      }
-2912    } else {
-2913      for (byte[] family : delete.getFamilyCellMap().keySet()) {
-2914        if (family == null) {
-2915          throw new NoSuchColumnFamilyException("Empty family is invalid");
-2916        }
-2917        checkFamily(family);
-2918      }
-2919    }
-2920  }
-2921
-2922  @Override
-2923  public void delete(Delete delete) throws IOException {
-2924    checkReadOnly();
-2925    checkResources();
-2926    startRegionOperation(Operation.DELETE);
-2927    try {
-2928      // All edits for the given row (across all column families) must happen atomically.
-2929      doBatchMutate(delete);
-2930      

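One behavior in prepareDelete() above deserves a call-out: a Delete whose family map is empty is treated as a whole-row delete and silently expanded to every column family at the delete's timestamp. Client-side, that looks like the sketch below (standard HBase client API; the table and row names are invented):

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class WholeRowDeleteSketch {
  static void deleteWholeRow(Table table) throws java.io.IOException {
    // No addFamily()/addColumns() calls: the server expands this to all families.
    Delete wholeRow = new Delete(Bytes.toBytes("row-0001"));
    table.delete(wholeRow);
  }
}
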
[03/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.RandomKVGeneratingMapper.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.RandomKVGeneratingMapper.html b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.RandomKVGeneratingMapper.html
index 54db65b..7e879e2 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.RandomKVGeneratingMapper.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.RandomKVGeneratingMapper.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class TestHFileOutputFormat2.RandomKVGeneratingMapper
+static class TestHFileOutputFormat2.RandomKVGeneratingMapper
 extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable, org.apache.hadoop.io.NullWritable, org.apache.hadoop.hbase.io.ImmutableBytesWritable, org.apache.hadoop.hbase.Cell>
 Simple mapper that makes KeyValue output.
 
@@ -267,7 +267,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 keyLength
-private int keyLength
+private int keyLength
 
 
 
@@ -276,7 +276,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 KEYLEN_DEFAULT
-private static final int KEYLEN_DEFAULT
+private static final int KEYLEN_DEFAULT
 
 See Also:
 Constant Field Values
@@ -289,7 +289,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 KEYLEN_CONF
-private static final String KEYLEN_CONF
+private static final String KEYLEN_CONF
 
 See Also:
 Constant Field Values
@@ -302,7 +302,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 valLength
-private int valLength
+private int valLength
 
 
 
@@ -311,7 +311,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 VALLEN_DEFAULT
-private static final int VALLEN_DEFAULT
+private static final int VALLEN_DEFAULT
 
 See Also:
 Constant Field Values
@@ -324,7 +324,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 VALLEN_CONF
-private static final String VALLEN_CONF
+private static final String VALLEN_CONF
 
 See Also:
 Constant Field Values
@@ -337,7 +337,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 QUALIFIER
-private static final byte[] QUALIFIER
+private static final byte[] QUALIFIER
 
 
 
@@ -346,7 +346,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 multiTableMapper
-private boolean multiTableMapper
+private boolean multiTableMapper
 
 
 
@@ -355,7 +355,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 tables
-private org.apache.hadoop.hbase.TableName[] tables
+private org.apache.hadoop.hbase.TableName[] tables
 
 
 
@@ -372,7 +372,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 RandomKVGeneratingMapper
-RandomKVGeneratingMapper()
+RandomKVGeneratingMapper()
 
 
 
@@ -389,7 +389,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 setup
-protected void setup(org.apache.hadoop.mapreduce.Mapper.Context context)
+protected void setup(org.apache.hadoop.mapreduce.Mapper.Context context)
               throws IOException,
                      InterruptedException
 
@@ -407,7 +407,7 @@ extends org.apache.hadoop.mapreduce.Mapper<org.apache.hadoop.io.NullWritable,
 
 
 map
-protected void map(org.apache.hadoop.io.NullWritable n1,
+protected void map(org.apache.hadoop.io.NullWritable n1,
                    org.apache.hadoop.io.NullWritable n2,
                    org.apache.hadoop.mapreduce.Mapper.Context context)
             throws IOException,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.RandomPutGeneratingMapper.html

[03/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html b/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
index ad7c82a..1dfa7b8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
@@ -29,307 +29,322 @@
 021
 022import java.io.IOException;
 023import java.util.ArrayList;
-024
-025import org.apache.hadoop.hbase.Cell;
-026import org.apache.hadoop.hbase.CompareOperator;
-027import org.apache.hadoop.hbase.PrivateCellUtil;
-028import org.apache.hadoop.hbase.util.Bytes;
-029import org.apache.yetus.audience.InterfaceAudience;
-030
-031import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-032
-033import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-034import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-035import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-036import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType;
-037
-038/**
-039 * This is a generic filter to be used to filter by comparison.  It takes an
-040 * operator (equal, greater, not equal, etc) and a byte [] comparator.
-041 * <p>
-042 * To filter by row key, use {@link RowFilter}.
-043 * <p>
-044 * To filter by column family, use {@link FamilyFilter}.
-045 * <p>
-046 * To filter by column qualifier, use {@link QualifierFilter}.
-047 * <p>
-048 * To filter by value, use {@link ValueFilter}.
-049 * <p>
-050 * These filters can be wrapped with {@link SkipFilter} and {@link WhileMatchFilter}
-051 * to add more control.
-052 * <p>
-053 * Multiple filters can be combined using {@link FilterList}.
-054 */
-055@InterfaceAudience.Public
-056public abstract class CompareFilter extends FilterBase {
-057  /**
-058   * Comparison operators. For filters only!
-059   * Use {@link CompareOperator} otherwise.
-060   * It (intentionally) has at least the below enums with same names.
-061   * @deprecated  since 2.0.0. Will be removed in 3.0.0. Use {@link CompareOperator} instead.
-062   */
-063  @Deprecated
-064  @InterfaceAudience.Public
-065  public enum CompareOp {
-066    /** less than */
-067    LESS,
-068    /** less than or equal to */
-069    LESS_OR_EQUAL,
-070    /** equals */
-071    EQUAL,
-072    /** not equal */
-073    NOT_EQUAL,
-074    /** greater than or equal to */
-075    GREATER_OR_EQUAL,
-076    /** greater than */
-077    GREATER,
-078    /** no operation */
-079    NO_OP,
-080  }
-081
-082  protected CompareOperator op;
-083  protected ByteArrayComparable comparator;
-084
-085  /**
-086   * Constructor.
-087   * @param compareOp the compare op for row matching
-088   * @param comparator the comparator for row matching
-089   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use other constructor.
-090   */
-091  @Deprecated
-092  public CompareFilter(final CompareOp compareOp,
-093      final ByteArrayComparable comparator) {
-094    this(CompareOperator.valueOf(compareOp.name()), comparator);
-095  }
-096
-097  /**
-098   * Constructor.
-099   * @param op the compare op for row matching
-100   * @param comparator the comparator for row matching
-101   */
-102  public CompareFilter(final CompareOperator op,
-103                       final ByteArrayComparable comparator) {
-104    this.op = op;
-105    this.comparator = comparator;
-106  }
-107
-108  /**
-109   * @return operator
-110   * @deprecated  since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} instead.
-111   */
-112  @Deprecated
-113  public CompareOp getOperator() {
-114    return CompareOp.valueOf(op.name());
-115  }
-116
-117  public CompareOperator getCompareOperator() {
-118    return op;
-119  }
-120
-121  /**
-122   * @return the comparator
-123   */
-124  public ByteArrayComparable getComparator() {
-125    return comparator;
-126  }
-127
-128  @Override
-129  public boolean filterRowKey(Cell cell) throws IOException {
-130    // Impl in FilterBase might do unnecessary copy for Off heap backed Cells.
-131    return false;
-132  }
-133
-134  /**
-135   * @deprecated Since 2.0.0. Will be removed in 3.0.0.
-136   * Use {@link #compareRow(CompareOperator, ByteArrayComparable, Cell)}
-137   */
-138  @Deprecated
-139  protected boolean compareRow(final CompareOp compareOp, final ByteArrayComparable comparator,
-140      final Cell cell) {
-141    if (compareOp == CompareOp.NO_OP) {
-142      return true;
-143    }
-144    int compareResult = PrivateCellUtil.compareRow(cell, comparator);
-145    return compare(compareOp, compareResult);
-146  }
-147
-148  protected boolean compareRow(final CompareOperator op, final 

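To make the comparison-filter family above concrete, here is a minimal client-side sketch (the table contents and compared value are illustrative, not from this commit):

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareFilterSketch {
  // Scan that keeps only cells whose value compares >= "100".
  // ValueFilter is one of the CompareFilter subclasses named in the
  // javadoc above; it takes a CompareOperator plus a ByteArrayComparable.
  static Scan greaterOrEqualScan() {
    Scan scan = new Scan();
    scan.setFilter(new ValueFilter(CompareOperator.GREATER_OR_EQUAL,
        new BinaryComparator(Bytes.toBytes("100"))));
    return scan;
  }
}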
[03/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.html
index cfc19fe..db6ff4d 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10};
+var methods = {"i0":10,"i1":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestHRegionWithInMemoryFlush
+public class TestHRegionWithInMemoryFlush
 extends TestHRegion
 A test similar to TestHRegion, but with in-memory flush 
families.
  Also checks wal truncation after in-memory compaction.
@@ -204,6 +204,12 @@ extends 
+void
+testFlushAndMemstoreSizeCounting()
+A test case of HBASE-21041
+
+
 
 
 
@@ -239,7 +245,7 @@ extends 
 
 CLASS_RULE
-public static finalHBaseClassTestRule CLASS_RULE
+public static finalHBaseClassTestRule CLASS_RULE
 
 
 
@@ -256,7 +262,7 @@ extends 
 
 TestHRegionWithInMemoryFlush
-publicTestHRegionWithInMemoryFlush()
+publicTestHRegionWithInMemoryFlush()
 
 
 
@@ -270,10 +276,10 @@ extends 
 
 
-
+
 
 initHRegion
-publicorg.apache.hadoop.hbase.regionserver.HRegioninitHRegion(org.apache.hadoop.hbase.TableNametableName,
+publicorg.apache.hadoop.hbase.regionserver.HRegioninitHRegion(org.apache.hadoop.hbase.TableNametableName,
 
byte[]startKey,
 
byte[]stopKey,
 
booleanisReadOnly,
@@ -292,6 +298,23 @@ extends 
+
+
+
+
+testFlushAndMemstoreSizeCounting
+public void testFlushAndMemstoreSizeCounting() throws Exception
+A test case of HBASE-21041
+
+Overrides:
+testFlushAndMemstoreSizeCounting in class TestHRegion
+Throws:
+Exception - Exception
+
+
+
 
 
 

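For reference, the in-memory flush/compaction mode this test exercises is switched on through configuration; a hedged sketch follows (the key and values are per CompactingMemStore, though the test's actual wiring may differ):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class InMemoryCompactionConfigSketch {
  static Configuration inMemoryCompactionConf() {
    Configuration conf = HBaseConfiguration.create();
    // Default memstore type for stores: NONE, BASIC or EAGER. BASIC keeps an
    // in-memory pipeline of segments; EAGER additionally eliminates duplicates.
    conf.set("hbase.hregion.compacting.memstore.type", "BASIC");
    return conf;
  }
}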
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStore.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStore.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStore.html
index 4ae96ed..ee5fdef 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStore.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStore.html
@@ -187,7 +187,7 @@ extends 
org.apache.hadoop.hbase.regionserver.CompactingMemStore
 
 
 Fields inherited from 
classorg.apache.hadoop.hbase.regionserver.AbstractMemStore
-FIXED_OVERHEAD, snapshot, snapshotId
+FIXED_OVERHEAD, regionServices, snapshot, snapshotId
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStoreWithCustomCompactor.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStoreWithCustomCompactor.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStoreWithCustomCompactor.html
index a19a39b..80b8746 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStoreWithCustomCompactor.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyCompactingMemStoreWithCustomCompactor.html
@@ -179,7 +179,7 @@ extends 
org.apache.hadoop.hbase.regionserver.CompactingMemStore
 
 
 Fields inherited from 
classorg.apache.hadoop.hbase.regionserver.AbstractMemStore
-FIXED_OVERHEAD, snapshot, snapshotId
+FIXED_OVERHEAD, regionServices, snapshot, snapshotId
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.RegionServerAccountingStub.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.RegionServerAccountingStub.html
 

[03/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 81f5178..7df71bd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -108,3669 +108,3727 @@
 100import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 101import 
org.apache.hadoop.hbase.log.HBaseMarkers;
 102import 
org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-103import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-104import 
org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-105import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-106import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-107import 
org.apache.hadoop.hbase.master.balancer.BalancerChore;
-108import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-109import 
org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-110import 
org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-111import 
org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-112import 
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-113import 
org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-114import 
org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-115import 
org.apache.hadoop.hbase.master.locking.LockManager;
-116import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-117import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-118import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-119import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-120import 
org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
-121import 
org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
-122import 
org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
-123import 
org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
-124import 
org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-125import 
org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
-126import 
org.apache.hadoop.hbase.master.procedure.InitMetaProcedure;
-127import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-128import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-129import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-130import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-131import 
org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
-132import 
org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-133import 
org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-134import 
org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
-135import 
org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure;
-136import 
org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
-137import 
org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
-138import 
org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
-139import 
org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
-140import 
org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
-141import 
org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager;
-142import 
org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure;
-143import 
org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
-144import 
org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-145import 
org.apache.hadoop.hbase.master.zksyncer.MasterAddressSyncer;
-146import 
org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
-147import 
org.apache.hadoop.hbase.mob.MobConstants;
-148import 
org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
-149import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-150import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-151import 
org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
-152import 
org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
-153import 
org.apache.hadoop.hbase.procedure2.LockedResource;
-154import 
org.apache.hadoop.hbase.procedure2.Procedure;
-155import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-156import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-157import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-158import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
-159import 

[03/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html 
b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
index 3cd501d..87020c8 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
@@ -244,22 +244,26 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 TEST_UTIL
 
 
+private https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+TMPDIRNAME
+
+
 private static byte[]
 VALUE
 
-
+
 private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 WAL_FILE_PREFIX
 
-
+
 private org.apache.hadoop.fs.Path
 WALDIR
 
-
+
 private 
org.apache.hadoop.hbase.wal.WALFactory
 wals
 
-
+
 private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 ZOMBIE
 
@@ -698,13 +702,22 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 privateorg.apache.hadoop.fs.Path TABLEDIR
 
 
+
+
+
+
+
+TMPDIRNAME
+private String TMPDIRNAME
+
+
 
 
 
 
 
 NUM_WRITERS
-private static finalint NUM_WRITERS
+private static finalint NUM_WRITERS
 
 See Also:
 Constant
 Field Values
@@ -717,7 +730,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 ENTRIES
-private static finalint ENTRIES
+private static finalint ENTRIES
 
 See Also:
 Constant
 Field Values
@@ -730,7 +743,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 FILENAME_BEING_SPLIT
-private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FILENAME_BEING_SPLIT
+private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String FILENAME_BEING_SPLIT
 
 See Also:
 Constant
 Field Values
@@ -743,7 +756,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 TABLE_NAME
-private static finalorg.apache.hadoop.hbase.TableName TABLE_NAME
+private static finalorg.apache.hadoop.hbase.TableName TABLE_NAME
 
 
 
@@ -752,7 +765,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 FAMILY
-private static finalbyte[] FAMILY
+private static finalbyte[] FAMILY
 
 
 
@@ -761,7 +774,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 QUALIFIER
-private static finalbyte[] QUALIFIER
+private static finalbyte[] QUALIFIER
 
 
 
@@ -770,7 +783,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 VALUE
-private static finalbyte[] VALUE
+private static finalbyte[] VALUE
 
 
 
@@ -779,7 +792,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 WAL_FILE_PREFIX
-private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WAL_FILE_PREFIX
+private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String WAL_FILE_PREFIX
 
 See Also:
 Constant
 Field Values
@@ -792,7 +805,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 REGIONS
-private statichttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String REGIONS
+private statichttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String REGIONS
 
 
 
@@ -801,7 +814,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 HBASE_SKIP_ERRORS
-private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBASE_SKIP_ERRORS
+private static finalhttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBASE_SKIP_ERRORS
 
 See Also:
 Constant
 Field Values
@@ -814,7 +827,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 ROBBER
-private 

[03/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/testdevapidocs/org/apache/hadoop/hbase/HBaseCluster.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/HBaseCluster.html 
b/testdevapidocs/org/apache/hadoop/hbase/HBaseCluster.html
index d51b5ba..e6811bb 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/HBaseCluster.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/HBaseCluster.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":10,"i5":10,"i6":6,"i7":10,"i8":6,"i9":10,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":10,"i16":10,"i17":10,"i18":6,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":10,"i28":6,"i29":10,"i30":6,"i31":6,"i32":6,"i33":10,"i34":10,"i35":6,"i36":6,"i37":6,"i38":6};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":10,"i5":10,"i6":6,"i7":10,"i8":6,"i9":10,"i10":6,"i11":6,"i12":6,"i13":6,"i14":6,"i15":6,"i16":10,"i17":10,"i18":10,"i19":6,"i20":6,"i21":6,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":10,"i31":6,"i32":10,"i33":6,"i34":6,"i35":6,"i36":10,"i37":6,"i38":6,"i39":10,"i40":6,"i41":6,"i42":6,"i43":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public abstract class HBaseCluster
+public abstract class HBaseCluster
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
 title="class or interface in java.io">Closeable, 
org.apache.hadoop.conf.Configurable
 This class defines methods that can help with managing 
HBase clusters
@@ -290,50 +290,57 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
 
 
 abstract void
+killNameNode(org.apache.hadoop.hbase.ServerNameserverName)
+Kills the namenode process if this is a distributed 
cluster, otherwise, this causes master to
+ exit doing basic clean up only.
+
+
+
+abstract void
 killRegionServer(org.apache.hadoop.hbase.ServerNameserverName)
 Kills the region server process if this is a distributed 
cluster, otherwise
  this causes the region server to exit doing basic clean up only.
 
 
-
+
 abstract void
 killZkNode(org.apache.hadoop.hbase.ServerNameserverName)
 Kills the zookeeper node process if this is a distributed 
cluster, otherwise,
  this causes master to exit doing basic clean up only.
 
 
-
+
 boolean
 restoreClusterMetrics(org.apache.hadoop.hbase.ClusterMetricsdesiredStatus)
 Restores the cluster to given state if this is a real 
cluster,
  otherwise does nothing.
 
 
-
+
 boolean
 restoreInitialStatus()
 Restores the cluster to it's initial state if this is a 
real cluster,
  otherwise does nothing.
 
 
-
+
 void
 setConf(org.apache.hadoop.conf.Configurationconf)
 
-
+
 abstract void
 shutdown()
 Shut down the HBase cluster
 
 
-
+
 abstract void
 startDataNode(org.apache.hadoop.hbase.ServerNameserverName)
 Starts a new datanode on the given hostname or if this is a 
mini/local cluster,
  silently logs warning message.
 
 
-
+
 abstract void
 startMaster(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname,
intport)
@@ -341,7 +348,14 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
  starts a master locally.
 
 
-
+
+abstract void
+startNameNode(org.apache.hadoop.hbase.ServerNameserverName)
+Starts a new namenode on the given hostname or if this is a 
mini/local cluster, silently logs
+ warning message.
+
+
+
 abstract void
 startRegionServer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname,
  intport)
@@ -349,7 +363,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
  starts a region server locally.
 
 
-
+
 abstract void
 startZkNode(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringhostname,
intport)
@@ -357,78 +371,98 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.
  silently logs warning message.
 
 
-
+
 abstract void
 stopDataNode(org.apache.hadoop.hbase.ServerNameserverName)
 Stops the datanode if this is a distributed cluster, 
otherwise
  silently logs warning message.
 
 
-
+
 abstract void
 stopMaster(org.apache.hadoop.hbase.ServerNameserverName)
 Stops the given master, by attempting a gradual stop.
 
 
-
+
+abstract void
+stopNameNode(org.apache.hadoop.hbase.ServerNameserverName)
+Stops the namenode if this is a distributed cluster, 

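A small sketch of how these lifecycle methods are typically combined in an integration test; the helper below is illustrative, with getServerHoldingMeta and the wait method assumed from the surrounding API summary:

import org.apache.hadoop.hbase.HBaseCluster;
import org.apache.hadoop.hbase.ServerName;

public class ClusterRestartSketch {
  // Kill and restart whichever region server is holding hbase:meta,
  // using the abstract lifecycle methods documented above.
  static void restartServerHoldingMeta(HBaseCluster cluster) throws Exception {
    ServerName sn = cluster.getServerHoldingMeta();
    cluster.killRegionServer(sn);
    cluster.waitForRegionServerToStop(sn, 60_000L);
    cluster.startRegionServer(sn.getHostname(), sn.getPort());
  }
}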
[03/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
index 02e56cf..8d2fe80 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.html
@@ -42,87 +42,87 @@
 034import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
 035import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
 036import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
-037import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-038import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
-039import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-040import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-041import 
org.apache.yetus.audience.InterfaceAudience;
-042import org.slf4j.Logger;
-043import org.slf4j.LoggerFactory;
-044
-045/**
-046 * Base class for the Assign and Unassign 
Procedure.
-047 *
-048 * Locking:
-049 * Takes exclusive lock on the region 
being assigned/unassigned. Thus, there can only be one
-050 * RegionTransitionProcedure per region 
running at a time (see MasterProcedureScheduler).
-051 *
-052 * pThis procedure is 
asynchronous and responds to external events.
-053 * The AssignmentManager will notify this 
procedure when the RS completes
-054 * the operation and reports the 
transitioned state
-055 * (see the Assign and Unassign class for 
more detail)./p
-056 *
-057 * pProcedures move from the 
REGION_TRANSITION_QUEUE state when they are
-058 * first submitted, to the 
REGION_TRANSITION_DISPATCH state when the request
-059 * to remote server is sent and the 
Procedure is suspended waiting on external
-060 * event to be woken again. Once the 
external event is triggered, Procedure
-061 * moves to the REGION_TRANSITION_FINISH 
state./p
-062 *
-063 * pNOTE: {@link AssignProcedure} 
and {@link UnassignProcedure} should not be thought of
-064 * as being asymmetric, at least 
currently.
-065 * ul
-066 * li{@link AssignProcedure} 
moves through all the above described states and implements methods
-067 * associated with each while {@link 
UnassignProcedure} starts at state
-068 * REGION_TRANSITION_DISPATCH and state 
REGION_TRANSITION_QUEUE is not supported./li
-069 *
-070 * liWhen any step in {@link 
AssignProcedure} fails, failure handler
-071 * 
AssignProcedure#handleFailure(MasterProcedureEnv, RegionStateNode) re-attempts 
the
-072 * assignment by setting the procedure 
state to REGION_TRANSITION_QUEUE and forces
-073 * assignment to a different target 
server by setting {@link AssignProcedure#forceNewPlan}. When
-074 * the number of attempts reaches 
threshold configuration 'hbase.assignment.maximum.attempts',
-075 * the procedure is aborted. For {@link 
UnassignProcedure}, similar re-attempts are
-076 * intentionally not implemented. It is a 
'one shot' procedure. See its class doc for how it
-077 * handles failure.
-078 * /li
-079 * liIf we find a region in an 
'unexpected' state, we'll complain and retry with backoff forever.
-080 * The 'unexpected' state needs to be 
fixed either by another running Procedure or by operator
-081 * intervention (Regions in 'unexpected' 
state indicates bug or unexpected transition type).
-082 * For this to work, subclasses need to 
persist the 'attempt' counter kept in this class when
-083 * they do serializeStateData and restore 
it inside their deserializeStateData, just as they do
-084 * for {@link #regionInfo}.
-085 * /li
-086 * /ul
-087 * /p
-088 *
-089 * pTODO: Considering it is a 
priority doing all we can to get make a region available as soon as
-090 * possible, re-attempting with any 
target makes sense if specified target fails in case of
-091 * {@link AssignProcedure}. For {@link 
UnassignProcedure}, our concern is preventing data loss
-092 * on failed unassign. See class doc for 
explanation.
-093 */
-094@InterfaceAudience.Private
-095public abstract class 
RegionTransitionProcedure
-096extends 
ProcedureMasterProcedureEnv
-097implements TableProcedureInterface,
-098  
RemoteProcedureMasterProcedureEnv, ServerName {
-099  private static final Logger LOG = 
LoggerFactory.getLogger(RegionTransitionProcedure.class);
-100
-101  protected final AtomicBoolean aborted = 
new AtomicBoolean(false);
+037import 
org.apache.yetus.audience.InterfaceAudience;
+038import org.slf4j.Logger;
+039import org.slf4j.LoggerFactory;
+040

[03/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.html
index 3d7093a..9917ee8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.html
@@ -39,594 +39,612 @@
 031import 
java.util.concurrent.ThreadPoolExecutor;
 032import java.util.concurrent.TimeUnit;
 033import 
java.util.concurrent.atomic.AtomicLong;
-034
-035import 
org.apache.hadoop.conf.Configuration;
-036import org.apache.hadoop.fs.Path;
-037import 
org.apache.hadoop.hbase.CellScanner;
-038import 
org.apache.hadoop.hbase.CellUtil;
-039import 
org.apache.hadoop.hbase.HBaseConfiguration;
-040import 
org.apache.hadoop.hbase.HBaseIOException;
-041import 
org.apache.hadoop.hbase.HConstants;
-042import 
org.apache.hadoop.hbase.HRegionLocation;
-043import 
org.apache.hadoop.hbase.RegionLocations;
-044import 
org.apache.hadoop.hbase.TableDescriptors;
-045import 
org.apache.hadoop.hbase.TableName;
-046import 
org.apache.hadoop.hbase.TableNotFoundException;
-047import 
org.apache.hadoop.hbase.client.ClusterConnection;
-048import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-049import 
org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
-050import 
org.apache.hadoop.hbase.client.RegionInfo;
-051import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-052import 
org.apache.hadoop.hbase.client.RetryingCallable;
-053import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
-054import 
org.apache.hadoop.hbase.client.TableDescriptor;
-055import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-056import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-057import 
org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
-058import 
org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
-059import 
org.apache.hadoop.hbase.replication.WALEntryFilter;
-060import 
org.apache.hadoop.hbase.util.Bytes;
-061import 
org.apache.hadoop.hbase.util.Pair;
-062import 
org.apache.hadoop.hbase.util.Threads;
-063import 
org.apache.hadoop.hbase.wal.WAL.Entry;
-064import 
org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
-065import 
org.apache.hadoop.hbase.wal.WALSplitter.OutputSink;
-066import 
org.apache.hadoop.hbase.wal.WALSplitter.PipelineController;
-067import 
org.apache.hadoop.hbase.wal.WALSplitter.RegionEntryBuffer;
-068import 
org.apache.hadoop.hbase.wal.WALSplitter.SinkWriter;
-069import 
org.apache.hadoop.util.StringUtils;
-070import 
org.apache.yetus.audience.InterfaceAudience;
-071import org.slf4j.Logger;
-072import org.slf4j.LoggerFactory;
+034import 
org.apache.hadoop.conf.Configuration;
+035import org.apache.hadoop.fs.Path;
+036import 
org.apache.hadoop.hbase.CellScanner;
+037import 
org.apache.hadoop.hbase.CellUtil;
+038import 
org.apache.hadoop.hbase.HBaseConfiguration;
+039import 
org.apache.hadoop.hbase.HBaseIOException;
+040import 
org.apache.hadoop.hbase.HConstants;
+041import 
org.apache.hadoop.hbase.HRegionLocation;
+042import 
org.apache.hadoop.hbase.RegionLocations;
+043import 
org.apache.hadoop.hbase.TableDescriptors;
+044import 
org.apache.hadoop.hbase.TableName;
+045import 
org.apache.hadoop.hbase.TableNotFoundException;
+046import 
org.apache.hadoop.hbase.client.ClusterConnection;
+047import 
org.apache.hadoop.hbase.client.ConnectionFactory;
+048import 
org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
+049import 
org.apache.hadoop.hbase.client.RegionInfo;
+050import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
+051import 
org.apache.hadoop.hbase.client.RetryingCallable;
+052import 
org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+053import 
org.apache.hadoop.hbase.client.TableDescriptor;
+054import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
+055import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+056import 
org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
+057import 
org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
+058import 
org.apache.hadoop.hbase.replication.WALEntryFilter;
+059import 
org.apache.hadoop.hbase.util.Bytes;
+060import 
org.apache.hadoop.hbase.util.Pair;
+061import 
org.apache.hadoop.hbase.util.Threads;
+062import 
org.apache.hadoop.hbase.wal.WAL.Entry;
+063import 
org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
+064import 
org.apache.hadoop.hbase.wal.WALSplitter.OutputSink;
+065import 

[03/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html 
b/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
index 8ea63cd..1957877 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/HRegionInfo.html
@@ -1,10 +1,10 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
-类 org.apache.hadoop.hbase.HRegionInfo的使用 (Apache HBase 
3.0.0-SNAPSHOT API)
+Uses of Class org.apache.hadoop.hbase.HRegionInfo (Apache HBase 
3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个
-下一个
+Prev
+Next
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 
 
-

类的使用
org.apache.hadoop.hbase.HRegionInfo

+

Uses of Class
org.apache.hadoop.hbase.HRegionInfo

@@ -89,17 +89,7 @@ / @@ -110,34 +110,34 @@ Table of Contents

Packages that use HRegionInfo (使用HRegionInfo的程序包):
  Package / Description (程序包 / 说明)
  org.apache.hadoop.hbase.client
  -Provides HBase Client - Table of Contents - Overview - Example API Usage - Overview - To administer HBase, create and drop tables, list and alter tables, use Admin.
  +Provides HBase Client

Uses of HRegionInfo in org.apache.hadoop.hbase (org.apache.hadoop.hbase中HRegionInfo的使用):
  Fields in org.apache.hadoop.hbase declared as HRegionInfo (声明为HRegionInfo的org.apache.hadoop.hbase中的字段)

    [03/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html 
    b/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
    index 9b34e4d..03beb7e 100644
    --- a/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
    +++ b/apidocs/org/apache/hadoop/hbase/class-use/HColumnDescriptor.html
    @@ -1,10 +1,10 @@
     http://www.w3.org/TR/html4/loose.dtd;>
     
    -
    +
     
     
     
    -Uses of Class org.apache.hadoop.hbase.HColumnDescriptor (Apache HBase 
    3.0.0-SNAPSHOT API)
    +类 org.apache.hadoop.hbase.HColumnDescriptor的使用 (Apache HBase 
    3.0.0-SNAPSHOT API)
     
     
     
    @@ -12,7 +12,7 @@
     
     
     
    -JavaScript is disabled on your browser.
    +您的浏览器已禁用 JavaScript。
     
     
     
     
     
    -Skip navigation links
    +跳过导航链接
     
     
     
    -
    -Overview
    -Package
    -Class
    -Use
    -Tree
    -Deprecated
    -Index
    -Help
    +
    +概览
    +程序包
+类
    +使用
    +树
    +已过时
    +索引
    +帮助
     
     
     
     
    -Prev
    -Next
    +上一个
    +下一个
     
     
    -Frames
    -NoFrames
    +框架
    +无框架
     
     
    -AllClasses
    +所有类
     
     
     
     
    -

    Uses of Class
    org.apache.hadoop.hbase.HColumnDescriptor

    +

    类的使用
    org.apache.hadoop.hbase.HColumnDescriptor

@@ -94,70 +94,70 @@

Packages that use HColumnDescriptor (使用HColumnDescriptor的程序包):
  Package / Description (Package/Description -> 程序包/说明)

Uses of HColumnDescriptor in org.apache.hadoop.hbase (org.apache.hadoop.hbase中HColumnDescriptor的使用):
  Methods in org.apache.hadoop.hbase that return HColumnDescriptor (返回HColumnDescriptor的org.apache.hadoop.hbase中的方法)
  Modifier and Type / Method and Description (限定符和类型 / 方法和说明)
  HColumnDescriptor[]

      [03/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
      index 67f4551..017124c 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.StatisticsThread.html
      @@ -387,817 +387,804 @@
       379}
       380
 381    LruCachedBlock cb = map.get(cacheKey);
-382    if (cb != null) {
-383      int comparison = BlockCacheUtil.validateBlockAddition(cb.getBuffer(), buf, cacheKey);
-384      if (comparison != 0) {
-385        if (comparison < 0) {
-386          LOG.warn("Cached block contents differ by nextBlockOnDiskSize. Keeping cached block.");
-387          return;
-388        } else {
-389          LOG.warn("Cached block contents differ by nextBlockOnDiskSize. Caching new block.");
-390        }
-391      } else {
-392        String msg = "Cached an already cached block: " + cacheKey + " cb:" + cb.getCacheKey();
-393        msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
-394        LOG.debug(msg);
-395        return;
-396      }
-397    }
-398    long currentSize = size.get();
-399    long currentAcceptableSize = acceptableSize();
-400    long hardLimitSize = (long) (hardCapacityLimitFactor * currentAcceptableSize);
-401    if (currentSize >= hardLimitSize) {
-402      stats.failInsert();
-403      if (LOG.isTraceEnabled()) {
-404        LOG.trace("LruBlockCache current size " + StringUtils.byteDesc(currentSize)
-405          + " has exceeded acceptable size " + StringUtils.byteDesc(currentAcceptableSize) + "."
-406          + " The hard limit size is " + StringUtils.byteDesc(hardLimitSize)
-407          + ", failed to put cacheKey:" + cacheKey + " into LruBlockCache.");
-408      }
-409      if (!evictionInProgress) {
-410        runEviction();
-411      }
-412      return;
-413    }
-414    cb = new LruCachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory);
-415    long newSize = updateSizeMetrics(cb, false);
-416    map.put(cacheKey, cb);
-417    long val = elements.incrementAndGet();
-418    if (buf.getBlockType().isData()) {
-419      dataBlockElements.increment();
-420    }
-421    if (LOG.isTraceEnabled()) {
-422      long size = map.size();
-423      assertCounterSanity(size, val);
-424    }
-425    if (newSize > currentAcceptableSize && !evictionInProgress) {
-426      runEviction();
-427    }
-428  }
-429
-430  /**
-431   * Sanity-checking for parity between actual block cache content and metrics.
-432   * Intended only for use with TRACE level logging and -ea JVM.
-433   */
-434  private static void assertCounterSanity(long mapSize, long counterVal) {
-435    if (counterVal < 0) {
-436      LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal +
-437        ", mapSize=" + mapSize);
-438      return;
-439    }
-440    if (mapSize < Integer.MAX_VALUE) {
-441      double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.);
-442      if (pct_diff > 0.05) {
-443        LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal +
-444          ", mapSize=" + mapSize);
-445      }
-446    }
-447  }
-448
-449  /**
-450   * Cache the block with the specified name and buffer.
-451   * <p>
-452   *
-453   * @param cacheKey block's cache key
-454   * @param buf  block buffer
-455   */
-456  @Override
-457  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-458    cacheBlock(cacheKey, buf, false);
-459  }
-460
-461  /**
-462   * Helper function that updates the local size counter and also updates any
-463   * per-cf or per-blocktype metrics it can discern from given
-464   * {@link LruCachedBlock}
-465   */
-466  private long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
-467    long heapsize = cb.heapSize();
-468    BlockType bt = cb.getBuffer().getBlockType();
-469    if (evict) {
-470      heapsize *= -1;
-471    }
-472    if (bt != null && bt.isData()) {
-473      dataBlockSize.add(heapsize);
-474    }
-475    return size.addAndGet(heapsize);
-476  }
-477
-478  /**
-479   * Get the buffer of the block with the specified name.
-480   *
-481   * @param cacheKey   block's cache key
-482   * @param caching    true if the caller caches blocks on cache misses
-483   * @param repeat     Whether this is a repeat lookup for the same block
-484   *                   (used to avoid double counting cache misses when doing double-check
-485   *                   locking)
-486   * @param updateCacheMetrics Whether to update cache metrics or not
-487   *
-488   * 

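A quick worked example of the hard-limit guard in cacheBlock above (the sizes are illustrative, and 1.2 is only an assumed hardCapacityLimitFactor, not necessarily the default):

public class HardLimitSketch {
  public static void main(String[] args) {
    long currentAcceptableSize = 950L * 1024 * 1024; // e.g. acceptableSize()
    float hardCapacityLimitFactor = 1.2f;            // assumed factor
    long hardLimitSize = (long) (hardCapacityLimitFactor * currentAcceptableSize); // 1140 MB
    long currentSize = 1_200L * 1024 * 1024;         // cache already holds 1200 MB
    // 1200 MB >= 1140 MB, so the insert fails and eviction is kicked off.
    System.out.println(currentSize >= hardLimitSize); // prints true
  }
}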
      [03/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
      index c10cfbf..a3e2f4a 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
      @@ -3371,7 +3371,7 @@
       3363private V result = null;
       3364
       3365private final HBaseAdmin admin;
      -3366private final Long procId;
      +3366protected final Long procId;
       3367
       3368public ProcedureFuture(final 
      HBaseAdmin admin, final Long procId) {
       3369  this.admin = admin;
      @@ -3653,653 +3653,651 @@
       3645 * @return a description of the 
      operation
       3646 */
       3647protected String getDescription() 
      {
      -3648  return "Operation: " + 
      getOperationType() + ", "
      -3649  + "Table Name: " + 
      tableName.getNameWithNamespaceInclAsString();
      -3650
      -3651}
      -3652
      -3653protected abstract class 
      TableWaitForStateCallable implements WaitForStateCallable {
      -3654  @Override
      -3655  public void 
      throwInterruptedException() throws InterruptedIOException {
      -3656throw new 
      InterruptedIOException("Interrupted while waiting for operation: "
      -3657+ getOperationType() + " on 
      table: " + tableName.getNameWithNamespaceInclAsString());
      -3658  }
      -3659
      -3660  @Override
      -3661  public void 
      throwTimeoutException(long elapsedTime) throws TimeoutException {
      -3662throw new TimeoutException("The 
      operation: " + getOperationType() + " on table: " +
      -3663tableName.getNameAsString() 
      + " has not completed after " + elapsedTime + "ms");
      -3664  }
      -3665}
      -3666
      -3667@Override
      -3668protected V 
      postOperationResult(final V result, final long deadlineTs)
      -3669throws IOException, 
      TimeoutException {
      -3670  LOG.info(getDescription() + " 
      completed");
      -3671  return 
      super.postOperationResult(result, deadlineTs);
      -3672}
      -3673
      -3674@Override
      -3675protected V 
      postOperationFailure(final IOException exception, final long deadlineTs)
      -3676throws IOException, 
      TimeoutException {
      -3677  LOG.info(getDescription() + " 
      failed with " + exception.getMessage());
      -3678  return 
      super.postOperationFailure(exception, deadlineTs);
      -3679}
      -3680
      -3681protected void 
      waitForTableEnabled(final long deadlineTs)
      -3682throws IOException, 
      TimeoutException {
      -3683  waitForState(deadlineTs, new 
      TableWaitForStateCallable() {
      -3684@Override
      -3685public boolean checkState(int 
      tries) throws IOException {
      -3686  try {
      -3687if 
      (getAdmin().isTableAvailable(tableName)) {
      -3688  return true;
      -3689}
      -3690  } catch 
      (TableNotFoundException tnfe) {
      -3691LOG.debug("Table " + 
      tableName.getNameWithNamespaceInclAsString()
      -3692+ " was not enabled, 
      sleeping. tries=" + tries);
      -3693  }
      -3694  return false;
      -3695}
      -3696  });
      -3697}
      -3698
      -3699protected void 
      waitForTableDisabled(final long deadlineTs)
      -3700throws IOException, 
      TimeoutException {
      -3701  waitForState(deadlineTs, new 
      TableWaitForStateCallable() {
      -3702@Override
      -3703public boolean checkState(int 
      tries) throws IOException {
      -3704  return 
      getAdmin().isTableDisabled(tableName);
      -3705}
      -3706  });
      -3707}
      -3708
      -3709protected void 
      waitTableNotFound(final long deadlineTs)
      -3710throws IOException, 
      TimeoutException {
      -3711  waitForState(deadlineTs, new 
      TableWaitForStateCallable() {
      -3712@Override
      -3713public boolean checkState(int 
      tries) throws IOException {
      -3714  return 
      !getAdmin().tableExists(tableName);
      -3715}
      -3716  });
      -3717}
      -3718
      -3719protected void 
      waitForSchemaUpdate(final long deadlineTs)
      -3720throws IOException, 
      TimeoutException {
      -3721  waitForState(deadlineTs, new 
      TableWaitForStateCallable() {
      -3722@Override
      -3723public boolean checkState(int 
      tries) throws IOException {
      -3724  return 
      getAdmin().getAlterStatus(tableName).getFirst() == 0;
      -3725}
      -3726  });
      -3727}
      -3728
      -3729protected void 
      waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
      -3730throws IOException, 
      TimeoutException {
      -3731  final TableDescriptor desc = 
      getTableDescriptor();
      -3732  final AtomicInteger actualRegCount 
      = new AtomicInteger(0);
      -3733  final MetaTableAccessor.Visitor 
      visitor = new MetaTableAccessor.Visitor() {
      -3734@Override
      -3735public boolean visit(Result 
      rowResult) throws 

      [03/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.
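All of the waitForTableEnabled/Disabled/NotFound helpers above funnel into the same poll-until-deadline shape; below is a simplified standalone sketch (names invented for illustration -- the real HBaseAdmin version also layers in retry backoff and interruption handling):

import java.io.IOException;
import java.util.concurrent.TimeoutException;

public class WaitForStateSketch {
  interface StateCheck {
    boolean check(int tries) throws IOException;
  }

  static void waitForState(long deadlineTs, StateCheck callable)
      throws IOException, TimeoutException, InterruptedException {
    for (int tries = 0; ; tries++) {
      if (callable.check(tries)) {
        return; // desired state reached
      }
      if (System.currentTimeMillis() > deadlineTs) {
        throw new TimeoutException("state not reached before deadline");
      }
      Thread.sleep(Math.min(1_000L, 100L * (tries + 1))); // crude backoff
    }
  }
}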

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.html
      new file mode 100644
      index 000..c449893
      --- /dev/null
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.html
      @@ -0,0 +1,700 @@
      +http://www.w3.org/TR/html4/loose.dtd;>
      +
      +
      +
      +
      +
      +SyncReplicationReplayWALManager (Apache HBase 3.0.0-SNAPSHOT 
      API)
      +
      +
      +
      +
      +
      +var methods = 
      {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10};
      +var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
      Methods"],8:["t4","Concrete Methods"]};
      +var altColor = "altColor";
      +var rowColor = "rowColor";
      +var tableTab = "tableTab";
      +var activeTableTab = "activeTableTab";
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +org.apache.hadoop.hbase.master.replication
      +Class 
      SyncReplicationReplayWALManager
      +
      +
      +
      +https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">java.lang.Object
      +
      +
      +org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager
      +
      +
      +
      +
      +
      +
      +
      +
      +@InterfaceAudience.Private
      +public class SyncReplicationReplayWALManager
      +extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">Object
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +Field Summary
      +
      +Fields
      +
      +Modifier and Type
      +Field and Description
      +
      +
      +private org.apache.hadoop.fs.FileSystem
      +fs
      +
      +
      +private static org.slf4j.Logger
      +LOG
      +
      +
      +private org.apache.hadoop.fs.Path
      +remoteWALDir
      +
      +
      +private MasterServices
      +services
      +
      +
      +private org.apache.hadoop.fs.Path
      +walRootDir
      +
      +
      +private https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">Object
      +workerLock
      +
      +
+private Map<String, Set<ServerName>>
+workers
      +
      +
      +private ZKSyncReplicationReplayWALWorkerStorage
      +workerStorage
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +Constructor Summary
      +
      +Constructors
      +
      +Constructor and Description
      +
      +
      +SyncReplicationReplayWALManager(MasterServicesservices)
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +Method Summary
      +
      +All MethodsInstance MethodsConcrete Methods
      +
      +Modifier and Type
      +Method and Description
      +
      +
      +private void
      +checkReplayingWALDir()
      +
      +
      +void
      +createPeerRemoteWALDir(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">StringpeerId)
      +
      +
      +private void
      +deleteDir(org.apache.hadoop.fs.Pathdir,
      + https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">StringpeerId)
      +
      +
      +void
      +finishReplayWAL(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">Stringwal)
      +
      +
      +ServerName
      +getPeerWorker(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">StringpeerId)
      +
      +
      +org.apache.hadoop.fs.Path
      +getRemoteWALDir()
      +
      +
      +https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
       title="class or interface in 
      java.util">Listorg.apache.hadoop.fs.Path
      +getReplayWALsAndCleanUpUnusedFiles(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">StringpeerId)
      +
      +
      +boolean
      +isReplayWALFinished(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">Stringwal)
      +
      +
      +void
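Reading the field list above (a workers map of peerId to ServerName set, guarded by workerLock), peer-worker hand-out is essentially a synchronized first-free lookup; the sketch below is a simplification under those assumptions, not the actual implementation:

import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.hbase.ServerName;

public class PeerWorkerSketch {
  private final Object workerLock = new Object();
  // peerId -> servers currently replaying WALs for that peer
  private final Map<String, Set<ServerName>> workers = new HashMap<>();

  ServerName getPeerWorker(String peerId, List<ServerName> liveServers) {
    synchronized (workerLock) {
      Set<ServerName> used = workers.computeIfAbsent(peerId, k -> new HashSet<>());
      for (ServerName sn : liveServers) {
        if (used.add(sn)) {
          return sn; // first live server not already assigned to this peer
        }
      }
      return null; // all candidates busy; caller retries later
    }
  }
}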
      

      [03/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.RegionMetricsImpl.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.RegionMetricsImpl.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.RegionMetricsImpl.html
      index 49e37f9..3d2c9cb 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.RegionMetricsImpl.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/RegionMetricsBuilder.RegionMetricsImpl.html
      @@ -65,392 +65,410 @@
       057
      .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs())
       058.setMemStoreSize(new 
      Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE))
       059
      .setReadRequestCount(regionLoadPB.getReadRequestsCount())
      -060
      .setWriteRequestCount(regionLoadPB.getWriteRequestsCount())
      -061.setStoreFileIndexSize(new 
      Size(regionLoadPB.getStorefileIndexSizeKB(),
      -062  Size.Unit.KILOBYTE))
      -063
      .setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(),
      -064  Size.Unit.KILOBYTE))
      -065
      .setStoreCount(regionLoadPB.getStores())
      -066
      .setStoreFileCount(regionLoadPB.getStorefiles())
      -067.setStoreFileSize(new 
      Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE))
      -068
      .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream()
      -069  .collect(Collectors.toMap(
      -070
      (ClusterStatusProtos.StoreSequenceId s) - 
      s.getFamilyName().toByteArray(),
      -071  
      ClusterStatusProtos.StoreSequenceId::getSequenceId)))
      -072.setUncompressedStoreFileSize(
      -073  new 
      Size(regionLoadPB.getStoreUncompressedSizeMB(),Size.Unit.MEGABYTE))
      -074.build();
      -075  }
      -076
      -077  private static 
      ListClusterStatusProtos.StoreSequenceId toStoreSequenceId(
      -078  Mapbyte[], Long ids) {
      -079return ids.entrySet().stream()
      -080.map(e - 
      ClusterStatusProtos.StoreSequenceId.newBuilder()
      -081  
      .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey()))
      -082  .setSequenceId(e.getValue())
      -083  .build())
      -084.collect(Collectors.toList());
      -085  }
      -086
      -087  public static 
      ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) {
      -088return 
      ClusterStatusProtos.RegionLoad.newBuilder()
      -089
      .setRegionSpecifier(HBaseProtos.RegionSpecifier
      -090  
      .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
      -091  
      .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName()))
      -092  .build())
      -093.setTotalStaticBloomSizeKB((int) 
      regionMetrics.getBloomFilterSize()
      -094  .get(Size.Unit.KILOBYTE))
      -095
      .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount())
      -096
      .setTotalCompactingKVs(regionMetrics.getCompactingCellCount())
      -097
      .setCompleteSequenceId(regionMetrics.getCompletedSequenceId())
      -098
      .setDataLocality(regionMetrics.getDataLocality())
      -099
      .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount())
      -100.setTotalStaticIndexSizeKB((int) 
      regionMetrics.getStoreFileUncompressedDataIndexSize()
      -101  .get(Size.Unit.KILOBYTE))
      -102
      .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp())
      -103.setMemStoreSizeMB((int) 
      regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE))
      -104
      .setReadRequestsCount(regionMetrics.getReadRequestCount())
      -105
      .setWriteRequestsCount(regionMetrics.getWriteRequestCount())
      -106.setStorefileIndexSizeKB((long) 
      regionMetrics.getStoreFileIndexSize()
      -107  .get(Size.Unit.KILOBYTE))
      -108.setRootIndexSizeKB((int) 
      regionMetrics.getStoreFileRootLevelIndexSize()
      +060
      .setCpRequestCount(regionLoadPB.getCpRequestsCount())
      +061
      .setWriteRequestCount(regionLoadPB.getWriteRequestsCount())
      +062.setStoreFileIndexSize(new 
      Size(regionLoadPB.getStorefileIndexSizeKB(),
      +063  Size.Unit.KILOBYTE))
      +064
      .setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(),
      +065  Size.Unit.KILOBYTE))
      +066
      .setStoreCount(regionLoadPB.getStores())
      +067
      .setStoreFileCount(regionLoadPB.getStorefiles())
      +068.setStoreFileSize(new 
      Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE))
      +069
      .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream()
      +070  .collect(Collectors.toMap(
      +071
      (ClusterStatusProtos.StoreSequenceId s) - 
      s.getFamilyName().toByteArray(),
      +072  
      ClusterStatusProtos.StoreSequenceId::getSequenceId)))
      +073.setUncompressedStoreFileSize(
      +074  new 
      Size(regionLoadPB.getStoreUncompressedSizeMB(),Size.Unit.MEGABYTE))
      +075.build();
      +076  }
      +077
      +078  
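The visible change in this hunk is the new cpRequestsCount plumbing between the protobuf RegionLoad and RegionMetrics; a hedged sketch of reading it back on the client (getCpRequestCount is assumed to be the accessor this builder feeds):

import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

public class CpRequestCountSketch {
  // Print per-region coprocessor request counts for one region server.
  static void printCpRequests(Admin admin, ServerName server) throws Exception {
    for (RegionMetrics rm : admin.getRegionMetrics(server)) {
      System.out.println(rm.getNameAsString() + " cpRequests=" + rm.getCpRequestCount());
    }
  }
}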

      [03/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
      index 541beed..1100e95 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
      @@ -42,1015 +42,1038 @@
       034import 
      java.util.concurrent.ConcurrentHashMap;
       035import 
      java.util.concurrent.ConcurrentSkipListMap;
       036import 
      java.util.concurrent.atomic.AtomicInteger;
      -037
      -038import 
      org.apache.hadoop.hbase.HConstants;
      -039import 
      org.apache.hadoop.hbase.ServerName;
      -040import 
      org.apache.hadoop.hbase.TableName;
      -041import 
      org.apache.hadoop.hbase.client.RegionInfo;
      -042import 
      org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
      -043import 
      org.apache.hadoop.hbase.master.RegionState;
      -044import 
      org.apache.hadoop.hbase.master.RegionState.State;
      -045import 
      org.apache.hadoop.hbase.procedure2.ProcedureEvent;
      -046import 
      org.apache.hadoop.hbase.util.Bytes;
      -047import 
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
      -048import 
      org.apache.yetus.audience.InterfaceAudience;
      -049import org.slf4j.Logger;
      -050import org.slf4j.LoggerFactory;
      -051import 
      org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
      -052
      -053/**
-054 * RegionStates contains a set of Maps that describe the in-memory state of the AM, with
-055 * the regions available in the system, the regions in transition, the offline regions and
      -056 * the servers holding regions.
      -057 */
      -058@InterfaceAudience.Private
      -059public class RegionStates {
      -060  private static final Logger LOG = 
      LoggerFactory.getLogger(RegionStates.class);
      -061
      -062  protected static final State[] 
      STATES_EXPECTED_ON_OPEN = new State[] {
      -063State.OPEN, // State may already be 
      OPEN if we died after receiving the OPEN from regionserver
      -064// but before complete 
      finish of AssignProcedure. HBASE-20100.
      -065State.OFFLINE, State.CLOSED,  // 
      disable/offline
      -066State.SPLITTING, State.SPLIT, // 
      ServerCrashProcedure
      -067State.OPENING, State.FAILED_OPEN, // 
      already in-progress (retrying)
      -068  };
      -069
      -070  protected static final State[] 
      STATES_EXPECTED_ON_CLOSE = new State[] {
      -071State.SPLITTING, State.SPLIT, 
      State.MERGING, // ServerCrashProcedure
      -072State.OPEN,   // 
      enabled/open
      -073State.CLOSING // 
      already in-progress (retrying)
      -074  };
      -075
-076  private static class AssignmentProcedureEvent extends ProcedureEvent<RegionInfo> {
      -077public AssignmentProcedureEvent(final 
      RegionInfo regionInfo) {
      -078  super(regionInfo);
      -079}
      -080  }
      -081
-082  private static class ServerReportEvent extends ProcedureEvent<ServerName> {
      -083public ServerReportEvent(final 
      ServerName serverName) {
      -084  super(serverName);
      -085}
      -086  }
      -087
      -088  /**
      -089   * Current Region State.
      -090   * In-memory only. Not persisted.
      -091   */
      -092  // Mutable/Immutable? Changes have to 
      be synchronized or not?
      -093  // Data members are volatile which 
      seems to say multi-threaded access is fine.
      -094  // In the below we do check and set but 
      the check state could change before
-095  // we do the set because no synchronization... which seems dodgy. Clear up
      -096  // understanding here... how many 
      threads accessing? Do locks make it so one
      -097  // thread at a time working on a single 
      Region's RegionStateNode? Lets presume
      -098  // so for now. Odd is that elsewhere in 
      this RegionStates, we synchronize on
      -099  // the RegionStateNode instance. 
      TODO.
-100  public static class RegionStateNode implements Comparable<RegionStateNode> {
      -101private final RegionInfo 
      regionInfo;
-102private final ProcedureEvent<?> event;
      -103
      -104private volatile 
      RegionTransitionProcedure procedure = null;
      -105private volatile ServerName 
      regionLocation = null;
      -106private volatile ServerName lastHost 
      = null;
      -107/**
      -108 * A Region-in-Transition (RIT) moves 
      through states.
      -109 * See {@link State} for complete 
      list. A Region that
-110 * is opened moves from OFFLINE => OPENING => OPENED.
      -111 */
      -112private volatile State state = 
      State.OFFLINE;
      -113
      -114/**
-115 * Updated whenever {@link #setRegionLocation(ServerName)}
-116 * or {@link #setState(State, State...)} is called.
      -117 */
      -118private volatile long lastUpdate = 
      0;
      -119
      -120private volatile long openSeqNum = 
      HConstants.NO_SEQNUM;
      -121
      -122public RegionStateNode(final 
      RegionInfo regionInfo) {
      -123  this.regionInfo = regionInfo;
      -124  this.event = new 
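The TODO in the comment above asks whether volatile data members plus an unsynchronized check-and-set are safe. For contrast, a toy sketch (not HBase code; all names illustrative) of the usual repair, matching the pattern the comment mentions of synchronizing on the RegionStateNode instance: keep reads as cheap volatile reads, but make the expected-state check and the transition one atomic step:

  import java.util.Arrays;

  class ToyRegionStateNode {
    enum State { OFFLINE, OPENING, OPEN, CLOSING, CLOSED }

    private volatile State state = State.OFFLINE;

    // Check-then-set is atomic here: both happen under the monitor.
    synchronized boolean setState(State update, State... expected) {
      if (!Arrays.asList(expected).contains(state)) {
        return false; // caller decides how to handle an unexpected current state
      }
      state = update;
      return true;
    }

    State getState() {
      return state; // plain volatile read; no lock needed
    }
  }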
      

      [03/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
      index 4b5d00c..96ecbf8 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
      @@ -6,7 +6,7 @@
       
       
       
      -001/*
      +001/**
       002 * Licensed to the Apache Software 
      Foundation (ASF) under one
       003 * or more contributor license 
      agreements.  See the NOTICE file
       004 * distributed with this work for 
      additional information
      @@ -23,1981 +23,1894 @@
       015 * See the License for the specific 
      language governing permissions and
       016 * limitations under the License.
       017 */
      -018
      -019package 
      org.apache.hadoop.hbase.master.assignment;
      -020
      -021import java.io.IOException;
      -022import java.util.ArrayList;
      -023import java.util.Arrays;
      -024import java.util.Collection;
      -025import java.util.Collections;
      -026import java.util.HashMap;
      -027import java.util.HashSet;
      -028import java.util.List;
      -029import java.util.Map;
      -030import java.util.Set;
      -031import 
      java.util.concurrent.CopyOnWriteArrayList;
      -032import java.util.concurrent.Future;
      -033import java.util.concurrent.TimeUnit;
      -034import 
      java.util.concurrent.atomic.AtomicBoolean;
      -035import 
      java.util.concurrent.locks.Condition;
      -036import 
      java.util.concurrent.locks.ReentrantLock;
      -037import java.util.stream.Collectors;
      -038import 
      org.apache.hadoop.conf.Configuration;
      -039import 
      org.apache.hadoop.hbase.HBaseIOException;
      -040import 
      org.apache.hadoop.hbase.HConstants;
      -041import 
      org.apache.hadoop.hbase.PleaseHoldException;
      -042import 
      org.apache.hadoop.hbase.RegionException;
      -043import 
      org.apache.hadoop.hbase.RegionStateListener;
      -044import 
      org.apache.hadoop.hbase.ServerName;
      -045import 
      org.apache.hadoop.hbase.TableName;
      -046import 
      org.apache.hadoop.hbase.YouAreDeadException;
      -047import 
      org.apache.hadoop.hbase.client.RegionInfo;
      -048import 
      org.apache.hadoop.hbase.client.RegionInfoBuilder;
      -049import 
      org.apache.hadoop.hbase.client.RegionReplicaUtil;
      -050import 
      org.apache.hadoop.hbase.client.Result;
      -051import 
      org.apache.hadoop.hbase.client.TableState;
      -052import 
      org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
      -053import 
      org.apache.hadoop.hbase.favored.FavoredNodesManager;
      -054import 
      org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
      -055import 
      org.apache.hadoop.hbase.master.AssignmentListener;
      -056import 
      org.apache.hadoop.hbase.master.LoadBalancer;
      -057import 
      org.apache.hadoop.hbase.master.MasterServices;
      -058import 
      org.apache.hadoop.hbase.master.MetricsAssignmentManager;
      -059import 
      org.apache.hadoop.hbase.master.NoSuchProcedureException;
      -060import 
      org.apache.hadoop.hbase.master.RegionPlan;
      -061import 
      org.apache.hadoop.hbase.master.RegionState;
      -062import 
      org.apache.hadoop.hbase.master.RegionState.State;
      -063import 
      org.apache.hadoop.hbase.master.ServerListener;
      -064import 
      org.apache.hadoop.hbase.master.TableStateManager;
      -065import 
      org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
      -066import 
      org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState;
      -067import 
      org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode;
      -068import 
      org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
      -069import 
      org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
      -070import 
      org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
      -071import 
      org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
      -072import 
      org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
      -073import 
      org.apache.hadoop.hbase.master.procedure.ServerCrashException;
      -074import 
      org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
      -075import 
      org.apache.hadoop.hbase.procedure2.Procedure;
      -076import 
      org.apache.hadoop.hbase.procedure2.ProcedureEvent;
      -077import 
      org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
      -078import 
      org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
      -079import 
      org.apache.hadoop.hbase.procedure2.util.StringUtils;
      -080import 
      org.apache.hadoop.hbase.regionserver.SequenceId;
      -081import 
      org.apache.hadoop.hbase.util.Bytes;
      -082import 
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
      -083import 
      org.apache.hadoop.hbase.util.HasThread;
      -084import 
      org.apache.hadoop.hbase.util.Pair;
      -085import 
      org.apache.hadoop.hbase.util.Threads;
      -086import 
      org.apache.hadoop.hbase.util.VersionInfo;
      -087import 
      org.apache.yetus.audience.InterfaceAudience;
      -088import org.slf4j.Logger;
      -089import org.slf4j.LoggerFactory;
      -090
      -091import 
      org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
      -092
      -093import 
      

      [03/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.html
      --
      diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.html 
      b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.html
      index e31f5c6..f4d1eb0 100644
      --- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.html
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.html
      @@ -31,277 +31,266 @@
       023import java.util.Set;
       024import 
      org.apache.hadoop.hbase.HConstants;
       025import 
      org.apache.hadoop.hbase.client.RegionInfo;
      -026import 
      org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
      -027import 
      org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
      -028import 
      org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
      -029import 
      org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
      -030import 
      org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider;
      -031import 
      org.apache.yetus.audience.InterfaceAudience;
      -032import 
      org.apache.yetus.audience.InterfaceStability;
      -033
      -034import 
      org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
      -035
      -036/**
-037 * A Write Ahead Log (WAL) provides service for reading and writing WALEdits. This interface
-038 * provides APIs for WAL users (such as RegionServer) to use the WAL (do append, sync, etc).
      -039 *
      -040 * Note that some internals, such as log 
      rolling and performance evaluation tools, will use
      -041 * WAL.equals to determine if they have 
      already seen a given WAL.
      -042 */
      -043@InterfaceAudience.Private
      -044@InterfaceStability.Evolving
      -045public interface WAL extends Closeable, 
      WALFileLengthProvider {
      -046
      -047  /**
      -048   * Registers WALActionsListener
      -049   */
      -050  void registerWALActionsListener(final 
      WALActionsListener listener);
      -051
      -052  /**
      -053   * Unregisters WALActionsListener
      -054   */
      -055  boolean 
      unregisterWALActionsListener(final WALActionsListener listener);
      -056
      -057  /**
      -058   * Roll the log writer. That is, start 
      writing log messages to a new file.
      -059   *
-060   * <p>
      -061   * The implementation is synchronized 
      in order to make sure there's one rollWriter
      -062   * running at any given time.
      -063   *
      -064   * @return If lots of logs, flush the 
      returned regions so next time through we
      -065   * can clean logs. Returns null 
      if nothing to flush. Names are actual
      -066   * region names as returned by 
      {@link RegionInfo#getEncodedName()}
      -067   */
      -068  byte[][] rollWriter() throws 
      FailedLogCloseException, IOException;
      -069
      -070  /**
      -071   * Roll the log writer. That is, start 
      writing log messages to a new file.
      -072   *
-073   * <p>
      -074   * The implementation is synchronized 
      in order to make sure there's one rollWriter
      -075   * running at any given time.
      -076   *
      -077   * @param force
      -078   *  If true, force creation of 
      a new writer even if no entries have
      -079   *  been written to the current 
      writer
      -080   * @return If lots of logs, flush the 
      returned regions so next time through we
      -081   * can clean logs. Returns null 
      if nothing to flush. Names are actual
      -082   * region names as returned by 
      {@link RegionInfo#getEncodedName()}
      -083   */
      -084  byte[][] rollWriter(boolean force) 
      throws FailedLogCloseException, IOException;
      -085
      -086  /**
      -087   * Stop accepting new writes. If we 
      have unsynced writes still in buffer, sync them.
      -088   * Extant edits are left in place in 
      backing storage to be replayed later.
      -089   */
      -090  void shutdown() throws IOException;
      -091
      -092  /**
      -093   * Caller no longer needs any edits 
      from this WAL. Implementers are free to reclaim
      -094   * underlying resources after this 
      call; i.e. filesystem based WALs can archive or
      -095   * delete files.
      -096   */
      -097  @Override
      -098  void close() throws IOException;
      -099
      -100  /**
      -101   * Append a set of edits to the WAL. 
      The WAL is not flushed/sync'd after this transaction
      -102   * completes BUT on return this edit 
      must have its region edit/sequence id assigned
      -103   * else it messes up our unification of 
mvcc and sequenceid.  On return <code>key</code> will
      -104   * have the region edit/sequence id 
      filled in.
      -105   * @param info the regioninfo 
      associated with append
      -106   * @param key Modified by this call; we 
      add to it this edits region edit/sequence id.
      -107   * @param edits Edits to append. MAY 
      CONTAIN NO EDITS for case where we want to get an edit
      -108   * sequence id that is after all 
      currently appended edits.
      -109   * @param inMemstore Always true except 
      for case where we are writing a compaction completion
      -110   * record into the WAL; in this case 
      the entry is just so we can finish an unfinished compaction
      -111   * -- it is not an edit for memstore.
      -112   * @return Returns a 'transaction id' 
and <code>key</code> will have the region edit/sequence id
      -113   * in it.
      -114   */
      -115  long append(RegionInfo info, WALKeyImpl 
      key, 
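Taken together, the append and sync contracts above imply a two-step write path: append assigns the region edit/sequence id into the key and returns a transaction id, and durability comes only from syncing that id. A sketch under those assumptions (HBase 2.x package names; WalAppendSketch is hypothetical, and the caller is presumed to have built the key and edit, as the RegionServer does):

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.RegionInfo;
  import org.apache.hadoop.hbase.wal.WAL;
  import org.apache.hadoop.hbase.wal.WALEdit;
  import org.apache.hadoop.hbase.wal.WALKeyImpl;

  final class WalAppendSketch {
    static long writeAndSync(WAL wal, RegionInfo info, WALKeyImpl key, WALEdit edit)
        throws IOException {
      // append() fills the region edit/sequence id into `key` but is not yet durable.
      long txid = wal.append(info, key, edit, true /* inMemstore */);
      // Durability is only guaranteed after syncing the returned transaction id.
      wal.sync(txid);
      return key.getSequenceId();
    }
  }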

      [03/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
      index 594ef24..17d5c40 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.TimeoutExecutorThread.html
      @@ -170,241 +170,242 @@
       162  }
       163
       164  /**
      -165   * Add a remote rpc. Be sure to check 
      result for successful add.
      +165   * Add a remote rpc.
       166   * @param key the node identifier
      -167   * @return True if we successfully 
      added the operation.
      -168   */
      -169  public boolean addOperationToNode(final 
      TRemote key, RemoteProcedure rp) {
      +167   */
      +168  public void addOperationToNode(final 
      TRemote key, RemoteProcedure rp)
      +169  throws 
      NullTargetServerDispatchException, NoServerDispatchException, 
      NoNodeDispatchException {
       170if (key == null) {
      -171  // Key is remote server name. Be 
      careful. It could have been nulled by a concurrent
      -172  // ServerCrashProcedure shutting 
      down outstanding RPC requests. See remoteCallFailed.
      -173  return false;
      -174}
      -175assert key != null : "found null key 
      for node";
      -176BufferNode node = nodeMap.get(key);
      -177if (node == null) {
      -178  return false;
      -179}
      -180node.add(rp);
      -181// Check our node still in the map; 
      could have been removed by #removeNode.
      -182return nodeMap.containsValue(node);
      -183  }
      -184
      -185  /**
      -186   * Remove a remote node
      -187   * @param key the node identifier
      -188   */
      -189  public boolean removeNode(final TRemote 
      key) {
      -190final BufferNode node = 
      nodeMap.remove(key);
      -191if (node == null) return false;
      -192node.abortOperationsInQueue();
      -193return true;
      -194  }
      -195
-196  // ==========================================================================
-197  //  Task Helpers
-198  // ==========================================================================
-199  protected Future<Void> submitTask(Callable<Void> task) {
      -200return threadPool.submit(task);
      -201  }
      -202
-203  protected Future<Void> submitTask(Callable<Void> task, long delay, TimeUnit unit) {
-204    final FutureTask<Void> futureTask = new FutureTask<>(task);
      -205timeoutExecutor.add(new 
      DelayedTask(futureTask, delay, unit));
      -206return futureTask;
      -207  }
      -208
-209  protected abstract void remoteDispatch(TRemote key, Set<RemoteProcedure> operations);
-210  protected abstract void abortPendingOperations(TRemote key, Set<RemoteProcedure> operations);
      -211
      -212  /**
      -213   * Data structure with reference to 
      remote operation.
      -214   */
      -215  public static abstract class 
      RemoteOperation {
      -216private final RemoteProcedure 
      remoteProcedure;
      -217
      -218protected RemoteOperation(final 
      RemoteProcedure remoteProcedure) {
      -219  this.remoteProcedure = 
      remoteProcedure;
      -220}
      -221
      -222public RemoteProcedure 
      getRemoteProcedure() {
      -223  return remoteProcedure;
      -224}
      -225  }
      -226
      -227  /**
      -228   * Remote procedure reference.
      -229   */
-230  public interface RemoteProcedure<TEnv, TRemote> {
      -231/**
      -232 * For building the remote 
      operation.
      -233 */
      -234RemoteOperation remoteCallBuild(TEnv 
      env, TRemote remote);
      -235
      -236/**
      -237 * Called when the executeProcedure 
      call is failed.
      -238 */
      -239void remoteCallFailed(TEnv env, 
      TRemote remote, IOException exception);
      -240
      -241/**
      -242 * Called when RS tells the remote 
      procedure is succeeded through the
      -243 * {@code reportProcedureDone} 
      method.
      -244 */
      -245void remoteOperationCompleted(TEnv 
      env);
      -246
      -247/**
      -248 * Called when RS tells the remote 
      procedure is failed through the {@code reportProcedureDone}
      -249 * method.
      -250 */
      -251void remoteOperationFailed(TEnv env, 
      RemoteProcedureException error);
      -252  }
      -253
      -254  /**
      -255   * Account of what procedures are 
      running on remote node.
-256   * @param <TEnv>
-257   * @param <TRemote>
      -258   */
-259  public interface RemoteNode<TEnv, TRemote> {
      -260TRemote getKey();
-261void add(RemoteProcedure<TEnv, TRemote> operation);
      -262void dispatch();
      -263  }
      -264
-265  protected ArrayListMultimap<Class<?>, RemoteOperation> buildAndGroupRequestByType(final TEnv env,
-266      final TRemote remote, final Set<RemoteProcedure> remoteProcedures) {
-267    final ArrayListMultimap<Class<?>, RemoteOperation> requestByType = ArrayListMultimap.create();
      -268for (RemoteProcedure proc: 
      remoteProcedures) {
      -269  RemoteOperation operation = 
      proc.remoteCallBuild(env, remote);
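The hunk above replaces addOperationToNode's boolean success result with three typed dispatch exceptions, so a failed add can no longer be silently ignored. A sketch of an adapted call site (fragment only; dispatcher, serverName and remoteProcedure are assumed to exist, the exception names come from the hunk, and the catch bodies are illustrative):

  try {
    dispatcher.addOperationToNode(serverName, remoteProcedure);
  } catch (NullTargetServerDispatchException e) {
    // key was null, e.g. nulled by a concurrent ServerCrashProcedure
  } catch (NoServerDispatchException e) {
    // no BufferNode is registered for this server in the node map
  } catch (NoNodeDispatchException e) {
    // the node was removed (see removeNode) between the add and the re-check
  }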
      

      [03/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
      index 3b08b86..80483ee 100644
      --- 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
      @@ -598,7 +598,7 @@
       590   * Start a minidfscluster.
       591   * @param servers How many DNs to 
      start.
       592   * @throws Exception
      -593   * @see {@link 
      #shutdownMiniDFSCluster()}
      +593   * @see #shutdownMiniDFSCluster()
       594   * @return The mini dfs cluster 
      created.
       595   */
       596  public MiniDFSCluster 
      startMiniDFSCluster(int servers) throws Exception {
      @@ -613,7 +613,7 @@
       605   * datanodes will have the same host 
      name.
       606   * @param hosts hostnames DNs to run 
      on.
       607   * @throws Exception
      -608   * @see {@link 
      #shutdownMiniDFSCluster()}
      +608   * @see #shutdownMiniDFSCluster()
       609   * @return The mini dfs cluster 
      created.
       610   */
       611  public MiniDFSCluster 
      startMiniDFSCluster(final String hosts[])
      @@ -631,7 +631,7 @@
       623   * @param servers How many DNs to 
      start.
       624   * @param hosts hostnames DNs to run 
      on.
       625   * @throws Exception
      -626   * @see {@link 
      #shutdownMiniDFSCluster()}
      +626   * @see #shutdownMiniDFSCluster()
       627   * @return The mini dfs cluster 
      created.
       628   */
       629  public MiniDFSCluster 
      startMiniDFSCluster(int servers, final String hosts[])
      @@ -775,7 +775,7 @@
       767   * Start up a minicluster of hbase, 
      dfs, and zookeeper.
       768   * @throws Exception
       769   * @return Mini hbase cluster instance 
      created.
      -770   * @see {@link 
      #shutdownMiniDFSCluster()}
      +770   * @see #shutdownMiniDFSCluster()
       771   */
       772  public MiniHBaseCluster 
      startMiniCluster() throws Exception {
       773return startMiniCluster(1, 1);
      @@ -785,7 +785,7 @@
       777   * Start up a minicluster of hbase, 
      dfs, and zookeeper where WAL's walDir is created separately.
       778   * @throws Exception
       779   * @return Mini hbase cluster instance 
      created.
      -780   * @see {@link 
      #shutdownMiniDFSCluster()}
      +780   * @see #shutdownMiniDFSCluster()
       781   */
       782  public MiniHBaseCluster 
      startMiniCluster(boolean withWALDir) throws Exception {
       783return startMiniCluster(1, 1, 1, 
      null, null, null, false, withWALDir);
      @@ -797,7 +797,7 @@
       789   * (will overwrite if dir already 
      exists)
       790   * @throws Exception
       791   * @return Mini hbase cluster instance 
      created.
      -792   * @see {@link 
      #shutdownMiniDFSCluster()}
      +792   * @see #shutdownMiniDFSCluster()
       793   */
       794  public MiniHBaseCluster 
      startMiniCluster(final int numSlaves, boolean create)
       795  throws Exception {
      @@ -814,7 +814,7 @@
       806   * hbase.regionserver.info.port is -1 
      (i.e. no ui per regionserver) otherwise
       807   * bind errors.
       808   * @throws Exception
      -809   * @see {@link 
      #shutdownMiniCluster()}
      +809   * @see #shutdownMiniCluster()
       810   * @return Mini hbase cluster instance 
      created.
       811   */
       812  public MiniHBaseCluster 
      startMiniCluster(final int numSlaves)
      @@ -831,7 +831,7 @@
       823   * Start minicluster. Whether to create 
      a new root or data dir path even if such a path
       824   * has been created earlier is decided 
      based on flag codecreate/code
       825   * @throws Exception
      -826   * @see {@link 
      #shutdownMiniCluster()}
      +826   * @see #shutdownMiniCluster()
       827   * @return Mini hbase cluster instance 
      created.
       828   */
       829  public MiniHBaseCluster 
      startMiniCluster(final int numMasters,
      @@ -843,7 +843,7 @@
       835  /**
       836   * start minicluster
       837   * @throws Exception
      -838   * @see {@link 
      #shutdownMiniCluster()}
      +838   * @see #shutdownMiniCluster()
       839   * @return Mini hbase cluster instance 
      created.
       840   */
       841  public MiniHBaseCluster 
      startMiniCluster(final int numMasters,
      @@ -880,7 +880,7 @@
       872   * If you start MiniDFSCluster without 
      host names,
       873   * all instances of the datanodes will 
      have the same host name.
       874   * @throws Exception
      -875   * @see {@link 
      #shutdownMiniCluster()}
      +875   * @see #shutdownMiniCluster()
       876   * @return Mini hbase cluster instance 
      created.
       877   */
       878  public MiniHBaseCluster 
      startMiniCluster(final int numMasters,
      @@ -922,7 +922,7 @@
       914   * @param regionserverClass The class 
      to use as HRegionServer, or null for
       915   * default
       916   * @throws Exception
      -917   * @see {@link 
      #shutdownMiniCluster()}
      +917   * @see #shutdownMiniCluster()
       918   * @return Mini hbase cluster instance 
      created.
       919   */
       920  public MiniHBaseCluster 
      startMiniCluster(final int numMasters,
      @@ -1011,7 +1011,7 @@
 1003   * @return Reference to the mini hbase cluster.
       1004   * 
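Every @see fix above points at the same pairing: a test that calls one of the startMiniCluster variants must tear the cluster down with the matching shutdown method. A minimal sketch of that pairing (ordinarily placed in JUnit @BeforeClass/@AfterClass hooks rather than main):

  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.MiniHBaseCluster;

  public class MiniClusterSketch {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtility util = new HBaseTestingUtility();
      MiniHBaseCluster cluster = util.startMiniCluster(); // hbase + dfs + zookeeper
      try {
        System.out.println("master: " + cluster.getMaster().getServerName());
      } finally {
        util.shutdownMiniCluster(); // always pair start with shutdown
      }
    }
  }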

      [03/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html 
      b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
      index fc0a9f1..458b775 100644
      --- 
      a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
      +++ 
      b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
      @@ -133,7 +133,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -static class PerformanceEvaluation.AppendTest
      +static class PerformanceEvaluation.AppendTest
       extends PerformanceEvaluation.CASTableTest
       
       
      @@ -202,8 +202,10 @@ extends Method and Description
       
       
-(package private) void
-testRow(int i)
+(package private) boolean
+testRow(int i)
+Test for individual row.
      +
       
       
       
      @@ -221,13 +223,6 @@ extends onStartup,
       onTakedown
       
       
      -
      -
      -
-Methods inherited from class org.apache.hadoop.hbase.PerformanceEvaluation.Test
      -closeConnection,
       createConnection
      -
      -
       
       
       
      @@ -261,7 +256,7 @@ extends 
       
       AppendTest
-AppendTest(org.apache.hadoop.hbase.client.Connection con,
+AppendTest(org.apache.hadoop.hbase.client.Connection con,
PerformanceEvaluation.TestOptions options,
PerformanceEvaluation.Status status)
       
      @@ -280,11 +275,19 @@ extends 
       
 testRow
-void testRow(int i)
-       throws IOException
+boolean testRow(int i)
+        throws IOException
+Description copied from class: PerformanceEvaluation.TestBase
+Test for individual row.
 
 Specified by:
 testRow in class PerformanceEvaluation.TestBase
+Parameters:
+i - Row index.
+Returns:
+true if the row was sent to the server and metrics need recording;
+ false if not (e.g. for multiGet and multiPut, rows are sent to the
+ server only once enough gets/puts have been gathered).
 Throws:
 IOException
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
       
      b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
      index a6d7d96..02a13a6 100644
      --- 
      a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
      +++ 
      b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
      @@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -static class PerformanceEvaluation.AsyncRandomReadTest
      +static class PerformanceEvaluation.AsyncRandomReadTest
       extends PerformanceEvaluation.AsyncTableTest
       
       
      @@ -228,8 +228,10 @@ extends runtime(https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
       title="class or interface in java.lang">Throwablee)
       
       
-(package private) void
-testRow(int i)
+(package private) boolean
+testRow(int i)
+Test for individual row.
      +
       
       
       protected void
      @@ -244,13 +246,6 @@ extends onStartup,
       onTakedown
       
       
      -
      -
      -
-Methods inherited from class org.apache.hadoop.hbase.PerformanceEvaluation.AsyncTest
      -closeConnection,
       createConnection
      -
      -
       
       
       
      @@ -284,7 +279,7 @@ extends 
       
       consistency
-private final org.apache.hadoop.hbase.client.Consistency consistency
+private final org.apache.hadoop.hbase.client.Consistency consistency
       
       
       
      @@ -293,7 +288,7 @@ extends 
       
       gets
-private java.util.ArrayList<org.apache.hadoop.hbase.client.Get> gets
+private java.util.ArrayList<org.apache.hadoop.hbase.client.Get> gets
       
       
       
      @@ -302,7 +297,7 @@ extends 
       
       rd
-private java.util.Random rd
+private java.util.Random rd
       
       
       
      @@ -319,7 +314,7 @@ extends 
       
       AsyncRandomReadTest
-AsyncRandomReadTest(org.apache.hadoop.hbase.client.AsyncConnection con,
+AsyncRandomReadTest(org.apache.hadoop.hbase.client.AsyncConnection con,
 PerformanceEvaluation.TestOptions options,
 PerformanceEvaluation.Status status)
       
      @@ -338,12 +333,20 @@ extends 
       
       testRow
-void testRow(int i)
      -  
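The testRow change documented above (void to boolean) lets buffering tests report whether a row actually reached the server. A sketch of a subclass honoring that contract; BufferedExampleTest, BATCH and flushGets are hypothetical, and only the true/false semantics come from the javadoc above:

  abstract class BufferedExampleTest /* extends PerformanceEvaluation.Test */ {
    private final java.util.List<org.apache.hadoop.hbase.client.Get> gets =
        new java.util.ArrayList<>();
    private static final int BATCH = 10;

    boolean testRow(int i) throws java.io.IOException {
      gets.add(new org.apache.hadoop.hbase.client.Get(
          org.apache.hadoop.hbase.util.Bytes.toBytes(i)));
      if (gets.size() < BATCH) {
        return false; // only buffered; no metrics recorded for this call
      }
      flushGets(gets); // hypothetical helper that issues the multi-get
      gets.clear();
      return true; // rows were sent to the server; record metrics
    }

    abstract void flushGets(java.util.List<org.apache.hadoop.hbase.client.Get> gets)
        throws java.io.IOException;
  }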

      [03/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
      index 3f8844b..cdb9398 100644
      --- 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
      @@ -140,2712 +140,2713 @@
       132public class PerformanceEvaluation 
      extends Configured implements Tool {
       133  static final String RANDOM_SEEK_SCAN = 
      "randomSeekScan";
       134  static final String RANDOM_READ = 
      "randomRead";
      -135  private static final Logger LOG = 
      LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
      -136  private static final ObjectMapper 
      MAPPER = new ObjectMapper();
      -137  static {
      -138
      MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
      -139  }
      -140
      -141  public static final String TABLE_NAME = 
      "TestTable";
      -142  public static final String 
      FAMILY_NAME_BASE = "info";
      -143  public static final byte[] FAMILY_ZERO 
      = Bytes.toBytes("info0");
      -144  public static final byte[] COLUMN_ZERO 
      = Bytes.toBytes("" + 0);
      -145  public static final int 
      DEFAULT_VALUE_LENGTH = 1000;
      -146  public static final int ROW_LENGTH = 
      26;
      -147
      -148  private static final int ONE_GB = 1024 
      * 1024 * 1000;
      -149  private static final int 
      DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
      -150  // TODO : should we make this 
      configurable
      -151  private static final int TAG_LENGTH = 
      256;
      -152  private static final DecimalFormat FMT 
      = new DecimalFormat("0.##");
      -153  private static final MathContext CXT = 
      MathContext.DECIMAL64;
      -154  private static final BigDecimal 
      MS_PER_SEC = BigDecimal.valueOf(1000);
      -155  private static final BigDecimal 
      BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
      -156  private static final TestOptions 
      DEFAULT_OPTS = new TestOptions();
      -157
-158  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
      -159  private static final Path PERF_EVAL_DIR 
      = new Path("performance_evaluation");
      -160
      -161  static {
      -162
      addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
      -163"Run async random read test");
      -164
      addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
      -165"Run async random write test");
      -166
      addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
      -167"Run async sequential read 
      test");
      -168
      addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
      -169"Run async sequential write 
      test");
      -170
      addCommandDescriptor(AsyncScanTest.class, "asyncScan",
      -171"Run async scan test (read every 
      row)");
      -172
      addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
      -173  "Run random read test");
      -174
      addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
      -175  "Run random seek and scan 100 
      test");
      -176
      addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
      -177  "Run random seek scan with both 
      start and stop row (max 10 rows)");
      -178
      addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
      -179  "Run random seek scan with both 
      start and stop row (max 100 rows)");
      -180
      addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
      -181  "Run random seek scan with both 
      start and stop row (max 1000 rows)");
      -182
      addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
      -183  "Run random seek scan with both 
      start and stop row (max 1 rows)");
      -184
      addCommandDescriptor(RandomWriteTest.class, "randomWrite",
      -185  "Run random write test");
      -186
      addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
      -187  "Run sequential read test");
      -188
      addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
      -189  "Run sequential write test");
      -190addCommandDescriptor(ScanTest.class, 
      "scan",
      -191  "Run scan test (read every 
      row)");
      -192
      addCommandDescriptor(FilteredScanTest.class, "filterScan",
      -193  "Run scan test using a filter to 
      find a specific row based on it's value " +
      -194  "(make sure to use --rows=20)");
      -195
      addCommandDescriptor(IncrementTest.class, "increment",
      -196  "Increment on each row; clients 
      overlap on keyspace so some concurrent operations");
      -197
      addCommandDescriptor(AppendTest.class, "append",
      -198  "Append on each row; clients 
      overlap on keyspace so some concurrent operations");
      -199
      addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
      -200  "CheckAndMutate on each row; 
      clients overlap on keyspace so some concurrent operations");
      -201
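The static block above is the command registry: addCommandDescriptor binds a CLI command name to a Test subclass plus a help string. A sketch of registering one more command (MyScanTest and the help text are hypothetical; the method shape is as shown above):

  static {
    addCommandDescriptor(MyScanTest.class, "myScan",
        "Run my custom scan variant (hypothetical example)");
  }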
      

      [03/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html 
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
      index 4d2c914..0b1cae9 100644
      --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/Segment.html
      @@ -53,7 +53,7 @@
       045 * segments from active set to snapshot 
      set in the default implementation.
       046 */
       047@InterfaceAudience.Private
      -048public abstract class Segment {
      +048public abstract class Segment implements 
      MemStoreSizing {
       049
       050  public final static long FIXED_OVERHEAD 
      = ClassSize.align(ClassSize.OBJECT
       051  + 5 * ClassSize.REFERENCE // 
      cellSet, comparator, memStoreLAB, memStoreSizing,
      @@ -67,9 +67,9 @@
       059  private final CellComparator 
      comparator;
       060  protected long minSequenceId;
       061  private MemStoreLAB memStoreLAB;
      -062  // Sum of sizes of all Cells added to 
      this Segment. Cell's heapSize is considered. This is not
      +062  // Sum of sizes of all Cells added to 
      this Segment. Cell's HeapSize is considered. This is not
       063  // including the heap overhead of this 
      class.
      -064  protected final MemStoreSizing 
      segmentSize;
      +064  protected final MemStoreSizing 
      memStoreSizing;
       065  protected final TimeRangeTracker 
      timeRangeTracker;
       066  protected volatile boolean 
      tagsPresent;
       067
      @@ -77,352 +77,348 @@
       069  // and there is no need in true 
      Segments state
       070  protected Segment(CellComparator 
      comparator, TimeRangeTracker trt) {
       071this.comparator = comparator;
      -072this.segmentSize = new 
      MemStoreSizing();
      -073this.timeRangeTracker = trt;
      -074  }
      -075
-076  protected Segment(CellComparator comparator, List<ImmutableSegment> segments,
-077      TimeRangeTracker trt) {
      -078long dataSize = 0;
      -079long heapSize = 0;
      -080long OffHeapSize = 0;
      -081for (Segment segment : segments) {
      -082  MemStoreSize memStoreSize = 
      segment.getMemStoreSize();
      -083  dataSize += 
      memStoreSize.getDataSize();
      -084  heapSize += 
      memStoreSize.getHeapSize();
      -085  OffHeapSize += 
      memStoreSize.getOffHeapSize();
      -086}
      -087this.comparator = comparator;
      -088this.segmentSize = new 
      MemStoreSizing(dataSize, heapSize, OffHeapSize);
      -089this.timeRangeTracker = trt;
      -090  }
      -091
      -092  // This constructor is used to create 
      empty Segments.
      -093  protected Segment(CellSet cellSet, 
      CellComparator comparator, MemStoreLAB memStoreLAB, TimeRangeTracker trt) {
      -094this.cellSet.set(cellSet);
      -095this.comparator = comparator;
      -096this.minSequenceId = 
      Long.MAX_VALUE;
      -097this.memStoreLAB = memStoreLAB;
      -098this.segmentSize = new 
      MemStoreSizing();
      -099this.tagsPresent = false;
      -100this.timeRangeTracker = trt;
      -101  }
      -102
      -103  protected Segment(Segment segment) {
      -104
      this.cellSet.set(segment.getCellSet());
      -105this.comparator = 
      segment.getComparator();
      -106this.minSequenceId = 
      segment.getMinSequenceId();
      -107this.memStoreLAB = 
      segment.getMemStoreLAB();
      -108this.segmentSize = new 
      MemStoreSizing(segment.getMemStoreSize());
      -109this.tagsPresent = 
      segment.isTagsPresent();
      -110this.timeRangeTracker = 
      segment.getTimeRangeTracker();
      -111  }
      -112
      -113  /**
      -114   * Creates the scanner for the given 
      read point
      -115   * @return a scanner for the given read 
      point
      -116   */
      -117  protected KeyValueScanner 
      getScanner(long readPoint) {
      -118return new SegmentScanner(this, 
      readPoint);
      -119  }
      -120
-121  public List<KeyValueScanner> getScanners(long readPoint) {
      -122return Collections.singletonList(new 
      SegmentScanner(this, readPoint));
      -123  }
      -124
      -125  /**
      -126   * @return whether the segment has any 
      cells
      -127   */
      -128  public boolean isEmpty() {
      -129return getCellSet().isEmpty();
      -130  }
      -131
      -132  /**
      -133   * @return number of cells in segment
      -134   */
      -135  public int getCellsCount() {
      -136return getCellSet().size();
      -137  }
      -138
      -139  /**
      -140   * Closing a segment before it is being 
      discarded
      -141   */
      -142  public void close() {
      -143if (this.memStoreLAB != null) {
      -144  this.memStoreLAB.close();
      -145}
      -146// do not set MSLab to null as 
      scanners may still be reading the data here and need to decrease
      -147// the counter when they finish
      -148  }
      -149
      -150  /**
      -151   * If the segment has a memory 
      allocator the cell is being cloned to this space, and returned;
      -152   * otherwise the given cell is 
      returned
      -153   *
      -154   * When a cell's size is too big 
      (bigger than maxAlloc), it is not allocated on MSLAB.
      -155   * Since the process of flattening to 
      CellChunkMap assumes that all cells
      -156   * are allocated on MSLAB, during this 
      process, the input parameter
      -157   * forceCloneOfBigCell is set to 'true' 
      and the cell is copied 

      [03/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
      index 2510283..418c60c 100644
      --- 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.SequentialWriteTest.html
      @@ -77,77 +77,77 @@
       069import 
      org.apache.hadoop.hbase.client.RowMutations;
       070import 
      org.apache.hadoop.hbase.client.Scan;
       071import 
      org.apache.hadoop.hbase.client.Table;
      -072import 
      org.apache.hadoop.hbase.filter.BinaryComparator;
      -073import 
      org.apache.hadoop.hbase.filter.Filter;
      -074import 
      org.apache.hadoop.hbase.filter.FilterAllFilter;
      -075import 
      org.apache.hadoop.hbase.filter.FilterList;
      -076import 
      org.apache.hadoop.hbase.filter.PageFilter;
      -077import 
      org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
      -078import 
      org.apache.hadoop.hbase.filter.WhileMatchFilter;
      -079import 
      org.apache.hadoop.hbase.io.compress.Compression;
      -080import 
      org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
      -081import 
      org.apache.hadoop.hbase.io.hfile.RandomDistribution;
      -082import 
      org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
      -083import 
      org.apache.hadoop.hbase.regionserver.BloomType;
      -084import 
      org.apache.hadoop.hbase.regionserver.CompactingMemStore;
      -085import 
      org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
      -086import 
      org.apache.hadoop.hbase.trace.SpanReceiverHost;
      -087import 
      org.apache.hadoop.hbase.trace.TraceUtil;
      -088import 
      org.apache.hadoop.hbase.util.ByteArrayHashKey;
      -089import 
      org.apache.hadoop.hbase.util.Bytes;
      -090import 
      org.apache.hadoop.hbase.util.Hash;
      -091import 
      org.apache.hadoop.hbase.util.MurmurHash;
      -092import 
      org.apache.hadoop.hbase.util.Pair;
      -093import 
      org.apache.hadoop.hbase.util.YammerHistogramUtils;
      -094import 
      org.apache.hadoop.io.LongWritable;
      -095import org.apache.hadoop.io.Text;
      -096import org.apache.hadoop.mapreduce.Job;
      -097import 
      org.apache.hadoop.mapreduce.Mapper;
      -098import 
      org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
      -099import 
      org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
      -100import 
      org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
      -101import org.apache.hadoop.util.Tool;
      -102import 
      org.apache.hadoop.util.ToolRunner;
      -103import 
      org.apache.htrace.core.ProbabilitySampler;
      -104import org.apache.htrace.core.Sampler;
      -105import 
      org.apache.htrace.core.TraceScope;
      -106import 
      org.apache.yetus.audience.InterfaceAudience;
      -107import org.slf4j.Logger;
      -108import org.slf4j.LoggerFactory;
      -109import 
      org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
      -110import 
      org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
      -111
      -112/**
-113 * Script used for evaluating HBase performance and scalability.  Runs an HBase
      -114 * client that steps through one of a set 
      of hardcoded tests or 'experiments'
      -115 * (e.g. a random reads test, a random 
      writes test, etc.). Pass on the
      -116 * command-line which test to run and how 
      many clients are participating in
      -117 * this experiment. Run {@code 
      PerformanceEvaluation --help} to obtain usage.
      -118 *
-119 * <p>This class sets up and runs
      the evaluation programs described in
      -120 * Section 7, iPerformance 
      Evaluation/i, of the a
      -121 * 
      href="http://labs.google.com/papers/bigtable.html"Bigtable/a;
      -122 * paper, pages 8-10.
      -123 *
-124 * <p>By default, runs as a
      mapreduce job where each mapper runs a single test
      -125 * client. Can also run as a 
      non-mapreduce, multithreaded application by
      -126 * specifying {@code --nomapred}. Each 
      client does about 1GB of data, unless
      -127 * specified otherwise.
      -128 */
      -129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
      -130public class PerformanceEvaluation 
      extends Configured implements Tool {
      -131  static final String RANDOM_SEEK_SCAN = 
      "randomSeekScan";
      -132  static final String RANDOM_READ = 
      "randomRead";
      -133  private static final Logger LOG = 
      LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
      -134  private static final ObjectMapper 
      MAPPER = new ObjectMapper();
      -135  static {
      -136
      MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
      -137  }
      -138
      -139  public static final String TABLE_NAME = 
      "TestTable";
      -140  public static final byte[] FAMILY_NAME 
      = Bytes.toBytes("info");
      -141  public static final byte [] COLUMN_ZERO 
      = Bytes.toBytes("" + 0);
      -142  public static final byte [] 
      QUALIFIER_NAME = COLUMN_ZERO;
      +072import 
      org.apache.hadoop.hbase.client.metrics.ScanMetrics;
      +073import 
      org.apache.hadoop.hbase.filter.BinaryComparator;
      +074import 
      org.apache.hadoop.hbase.filter.Filter;
      +075import 
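Per the class javadoc above, the tool runs either as a MapReduce job or, with --nomapred, as a multithreaded client. A sketch of driving it programmatically; PerformanceEvaluation is a Hadoop Tool per the source above, while the single-argument constructor and the sample argument list are assumptions:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.util.ToolRunner;

  public class RunPerfEval {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // --nomapred selects the multithreaded, non-MapReduce mode described above.
      int ret = ToolRunner.run(conf, new PerformanceEvaluation(conf),
          new String[] { "--nomapred", "--rows=100000", "sequentialWrite", "1" });
      System.exit(ret);
    }
  }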
      

      [03/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.DummyReplicator.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.DummyReplicator.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.DummyReplicator.html
      deleted file mode 100644
      index 7a938de..000
      --- 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.InterClusterReplicationEndpointForTest.DummyReplicator.html
      +++ /dev/null
      @@ -1,632 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
      -
      -
      -Source code
      -
      -
      -
      -
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.replication;
-
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
-import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource;
-import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
-import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource;
-import org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl;
-import org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.wal.WAL.Entry;
-import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.ClassRule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Tests ReplicationSource and ReplicationEndpoint interactions
- */
-@Category({ ReplicationTests.class, MediumTests.class })
-public class TestReplicationEndpoint extends TestReplicationBase {
-
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestReplicationEndpoint.class);
-
-  private static final Logger LOG = LoggerFactory.getLogger(TestReplicationEndpoint.class);
-
-  static int numRegionServers;
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    TestReplicationBase.setUpBeforeClass();
-    numRegionServers = utility1.getHBaseCluster().getRegionServerThreads().size();
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TestReplicationBase.tearDownAfterClass();
-    // check stop is called
-
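For illustration, a minimal endpoint of the kind this test exercises might look as follows. This is a sketch only, not code from this commit: it assumes the BaseReplicationEndpoint contract from the same package (doStart/doStop lifecycle hooks plus boolean replicate(ReplicateContext)), and the class and field names are hypothetical.

import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint;
import org.apache.hadoop.hbase.replication.ReplicationEndpoint.ReplicateContext;

public class CountingReplicationEndpoint extends BaseReplicationEndpoint {
  // Counts WAL entries handed to this endpoint; illustrative only.
  private final AtomicInteger entries = new AtomicInteger();

  @Override
  public UUID getPeerUUID() {
    // A real endpoint returns the remote cluster's id; a random UUID is test-only.
    return UUID.randomUUID();
  }

  @Override
  public boolean replicate(ReplicateContext context) {
    entries.addAndGet(context.getEntries().size()); // record the shipped batch
    return true; // report the batch as successfully replicated
  }

  @Override
  protected void doStart() {
    notifyStarted(); // lifecycle notification inherited from the base class
  }

  @Override
  protected void doStop() {
    notifyStopped();
  }
}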
      

      [03/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsRegionInfo.html
@@ -2113,3031 +2113,3033 @@
         errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
             tableName + " unable to delete dangling table state " + tableState);
       }
-    } else {
-      errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-          tableName + " has dangling table state " + tableState);
-    }
-  }
-}
-// check that all tables have states
-for (TableName tableName : tablesInfo.keySet()) {
-  if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-    if (fixMeta) {
-      MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED);
-      TableState newState = MetaTableAccessor.getTableState(connection, tableName);
-      if (newState == null) {
-        errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-            "Unable to change state for table " + tableName + " in meta ");
-      }
-    } else {
-      errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-          tableName + " has no state in meta ");
-    }
-  }
-}
-  }
-
-  private void preCheckPermission() throws IOException, AccessDeniedException {
-    if (shouldIgnorePreCheckPermission()) {
-      return;
-    }
-
-    Path hbaseDir = FSUtils.getRootDir(getConf());
-    FileSystem fs = hbaseDir.getFileSystem(getConf());
-    UserProvider userProvider = UserProvider.instantiate(getConf());
-    UserGroupInformation ugi = userProvider.getCurrent().getUGI();
-    FileStatus[] files = fs.listStatus(hbaseDir);
-    for (FileStatus file : files) {
-      try {
-        FSUtils.checkAccess(ugi, file, FsAction.WRITE);
-      } catch (AccessDeniedException ace) {
-        LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
-        errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
-            + " does not have write perms to " + file.getPath()
-            + ". Please rerun hbck as hdfs user " + file.getOwner());
-        throw ace;
-      }
-    }
-  }
-
-  /**
-   * Deletes region from meta table
-   */
-  private void deleteMetaRegion(HbckInfo hi) throws IOException {
-    deleteMetaRegion(hi.metaEntry.getRegionName());
-  }
-
-  /**
-   * Deletes region from meta table
-   */
-  private void deleteMetaRegion(byte[] metaKey) throws IOException {
-    Delete d = new Delete(metaKey);
-    meta.delete(d);
-    LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" );
-  }
-
-  /**
-   * Reset the split parent region info in meta table
-   */
-  private void resetSplitParent(HbckInfo hi) throws IOException {
-    RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
-    Delete d = new Delete(hi.metaEntry.getRegionName());
-    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-    mutations.add(d);
-
-    RegionInfo hri = RegionInfoBuilder.newBuilder(hi.metaEntry)
-        .setOffline(false)
-        .setSplit(false)
-        .build();
-    Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
-    mutations.add(p);
-
-    meta.mutateRow(mutations);
-    LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
-  }
-
-  /**
-   * This backwards-compatibility wrapper for permanently offlining a region
-   * that should not be alive.  If the region server does not support the
-   * "offline" method, it will use the closest unassign method instead.  This
-   * will basically work until one attempts to disable or delete the affected
-   * table.  The problem has to do with in-memory only master state, so
-   * restarting the HMaster or failing over to another should fix this.
-   */
-  private void offline(byte[] regionName) throws IOException {
-    String regionString = Bytes.toStringBinary(regionName);
-    if
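The offline() wrapper above exists because old servers lacked a true "offline" RPC. Purely as a hedged illustration (not part of this diff), the client-visible fallback it describes corresponds to a forced unassign through the public Admin API; the helper name and the already-open Connection are assumptions:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class ForceUnassign {
  // Hypothetical helper: force-unassign a region by its region name bytes.
  static void forceUnassign(Connection conn, byte[] regionName) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.unassign(regionName, true); // true = force, clears the current assignment
    }
  }
}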

      [03/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.RandomizedMatrix.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.RandomizedMatrix.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.RandomizedMatrix.html
index bcb65f1..a9d5986 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.RandomizedMatrix.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.RandomizedMatrix.html
@@ -39,1086 +39,1087 @@
 import java.util.Scanner;
 import java.util.Set;
 import java.util.TreeMap;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
-import org.apache.hadoop.hbase.favored.FavoredNodesPlan;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.MunkresAssignment;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
-
-/**
- * A tool that is used for manipulating and viewing favored nodes information
- * for regions. Run with -h to get a list of the options
- */
-@InterfaceAudience.Private
-// TODO: Remove? Unused. Partially implemented only.
-public class RegionPlacementMaintainer {
-  private static final Logger LOG = LoggerFactory.getLogger(RegionPlacementMaintainer.class
-      .getName());
-  // The cost of a placement that should never be assigned.
-  private static final float MAX_COST = Float.POSITIVE_INFINITY;
-
-  // The cost of a placement that is undesirable but acceptable.
-  private static final float AVOID_COST = 10f;
-
-  // The amount by which the cost of a placement is increased if it is the
-  // last slot of the server. This is done to more evenly distribute the slop
-  // amongst servers.
-  private static final float LAST_SLOT_COST_PENALTY = 0.5f;
-
-  // The amount by which the cost of a primary placement is penalized if it is
-  // not the host currently serving the region. This is done to minimize moves.
-  private static final float NOT_CURRENT_HOST_PENALTY = 0.1f;
-
-  private static boolean USE_MUNKRES_FOR_PLACING_SECONDARY_AND_TERTIARY = false;
-
-  private Configuration conf;
-  private final boolean enforceLocality;
-  private final boolean enforceMinAssignmentMove;
-  private RackManager rackManager;
-  private Set<TableName> targetTableSet;
-  private final Connection connection;
-
-  public RegionPlacementMaintainer(Configuration conf) {
-    this(conf, true, true);
-  }
-
-  public RegionPlacementMaintainer(Configuration conf, boolean enforceLocality,
-      boolean enforceMinAssignmentMove) {
-    this.conf = conf;
-    this.enforceLocality = enforceLocality;
-    this.enforceMinAssignmentMove = enforceMinAssignmentMove;
-    this.targetTableSet = new HashSet<>();
-    this.rackManager = new RackManager(conf);
-    try {
-      this.connection = ConnectionFactory.createConnection(this.conf);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
-  private static void printHelp(Options opt) {
-    new HelpFormatter().printHelp(
-        "RegionPlacement < -w | -u | -n | -v | -t | -h | -overwrite -r regionName -f favoredNodes "
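Taken together, the constants above describe a small additive cost model for candidate placements. A rough sketch of how they combine; this is illustrative only, and placementCost, baseCost, isLastSlot and isCurrentHost are hypothetical names, not members of this class:

public final class PlacementCostSketch {
  static final float LAST_SLOT_COST_PENALTY = 0.5f;   // spread slop across servers
  static final float NOT_CURRENT_HOST_PENALTY = 0.1f; // discourage region moves

  // Hypothetical: combine the penalties the constants above imply.
  static float placementCost(float baseCost, boolean isLastSlot, boolean isCurrentHost) {
    float cost = baseCost;
    if (isLastSlot) {
      cost += LAST_SLOT_COST_PENALTY;
    }
    if (!isCurrentHost) {
      cost += NOT_CURRENT_HOST_PENALTY;
    }
    return cost;
  }
}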

      [03/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/client/Scan.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Scan.html b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
      index c1f1d24..55bfe4f 100644
      --- a/devapidocs/org/apache/hadoop/hbase/client/Scan.html
      +++ b/devapidocs/org/apache/hadoop/hbase/client/Scan.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":42,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":42,"i58":42,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":42,"i71":42,"i72":42,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":42,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":42,"i58":42,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":42,"i71":42,"i72":42,"i73":10,"i74":10,"i75":42,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -151,7 +151,7 @@ extends setTimeRange.

- To only retrieve columns with a specific timestamp, call setTimestamp
+ To only retrieve columns with a specific timestamp, call setTimestamp
 .

 To limit the number of versions of each column to be returned, call setMaxVersions.
@@ -861,37 +861,46 @@

 Scan
-setTimeStamp(long timestamp)
+setTimestamp(long timestamp)
 Get versions of columns with the specified timestamp.


+Scan
+setTimeStamp(long timestamp)
+Deprecated.
+As of release 2.0.0, this will be removed in HBase 3.0.0.
+ Use setTimestamp(long) instead
+
+
+
+
 Map<String,Object>
 toMap(int maxCols)
 Compile the details beyond the scope of getFingerprint (row, columns,
  timestamps, etc.) into a Map along with the fingerprinted information.


-
+
 Scan
 withStartRow(byte[] startRow)
 Set the start row of the scan.


-
+
 Scan
 withStartRow(byte[] startRow, boolean inclusive)
 Set the start row of the scan.


-
+
 Scan
 withStopRow(byte[] stopRow)
 Set the stop row of the scan.


-
+
 Scan
 withStopRow(byte[] stopRow, boolean inclusive)
@@ -1436,8 +1445,11 @@ public

 setTimeStamp
-public Scan setTimeStamp(long timestamp)
-                  throws IOException
+@Deprecated
+public Scan setTimeStamp(long timestamp)
+                  throws IOException
+Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0.
+ Use setTimestamp(long) instead
 Get versions of columns with the specified timestamp. Note, default maximum
  versions to return is 1.  If your time range spans more than one version
  and you want all versions returned, up the number of versions beyond the
@@ -1455,13 +1467,35 @@ public
+
+
+
+setTimestamp
+public Scan setTimestamp(long timestamp)
+Get versions of columns with the specified timestamp. Note, default maximum
+ versions to return is 1.  If your time range spans more than one version
+ and you want all versions returned, up the number of versions beyond the
+ default.
+
+Parameters:
+timestamp - version timestamp
+Returns:
+this
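Acting on this deprecation is mechanical; a minimal migration sketch (the class and method names here are illustrative, not from the diff):

import org.apache.hadoop.hbase.client.Scan;

public class ScanTimestampMigration {
  public static Scan scanAt(long ts) {
    // Before (deprecated in 2.0.0, removed in 3.0.0): new Scan().setTimeStamp(ts)
    // After: identical behavior under the corrected camel-case name.
    return new Scan().setTimestamp(ts);
  }
}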
      

      [03/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/class-use/SplitLogManager.Task.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/class-use/SplitLogManager.Task.html b/devapidocs/org/apache/hadoop/hbase/master/class-use/SplitLogManager.Task.html
index d9798c9..5e23e90 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/class-use/SplitLogManager.Task.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/SplitLogManager.Task.html
@@ -147,17 +147,17 @@


 boolean
-SplitLogManagerCoordination.resubmitTask(String taskName,
+ZKSplitLogManagerCoordination.resubmitTask(String path,
  SplitLogManager.Task task,
-SplitLogManager.ResubmitDirective force)
-Resubmit the task in case if found unassigned or failed
-
+SplitLogManager.ResubmitDirective directive)


 boolean
-ZKSplitLogManagerCoordination.resubmitTask(String path,
+SplitLogManagerCoordination.resubmitTask(String taskName,
  SplitLogManager.Task task,
-SplitLogManager.ResubmitDirective directive)
+SplitLogManager.ResubmitDirective force)
+Resubmit the task in case if found unassigned or failed
+
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html b/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
      index a154e61..650fbb7 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/TableStateManager.html
      @@ -138,11 +138,11 @@
       
       
       TableStateManager
      -MasterServices.getTableStateManager()
      +HMaster.getTableStateManager()
       
       
       TableStateManager
      -HMaster.getTableStateManager()
      +MasterServices.getTableStateManager()
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html b/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
index b5b7703..a444123 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/locking/class-use/LockManager.html
      @@ -117,11 +117,11 @@
       
       
       LockManager
      -MasterServices.getLockManager()
      +HMaster.getLockManager()
       
       
       LockManager
      -HMaster.getLockManager()
      +MasterServices.getLockManager()
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
index e7cc074..029d065 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/NormalizationPlan.PlanType.html
      @@ -104,15 +104,15 @@
       
       
       NormalizationPlan.PlanType
      -NormalizationPlan.getType()
      +MergeNormalizationPlan.getType()
       
       
       NormalizationPlan.PlanType
      -SplitNormalizationPlan.getType()
      +NormalizationPlan.getType()
       
       
       NormalizationPlan.PlanType
      -MergeNormalizationPlan.getType()
      +SplitNormalizationPlan.getType()
       
       
       NormalizationPlan.PlanType
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
index d8fb2f6..ad4e9b4 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/normalizer/class-use/RegionNormalizer.html
      @@ -125,11 +125,11 @@
       
       
       

      [03/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
index d30ee5e..b58c054 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
@@ -166,27 +166,27 @@


 DataBlockEncoder.EncodedSeeker
-CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator,
+RowIndexCodecV1.createSeeker(CellComparator comparator,
  HFileBlockDecodingContext decodingCtx)


 DataBlockEncoder.EncodedSeeker
-PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator,
+CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator,
  HFileBlockDecodingContext decodingCtx)


 DataBlockEncoder.EncodedSeeker
-FastDiffDeltaEncoder.createSeeker(CellComparator comparator,
+DiffKeyDeltaEncoder.createSeeker(CellComparator comparator,
  HFileBlockDecodingContext decodingCtx)


 DataBlockEncoder.EncodedSeeker
-DiffKeyDeltaEncoder.createSeeker(CellComparator comparator,
+FastDiffDeltaEncoder.createSeeker(CellComparator comparator,
  HFileBlockDecodingContext decodingCtx)


 DataBlockEncoder.EncodedSeeker
-RowIndexCodecV1.createSeeker(CellComparator comparator,
+PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator,
  HFileBlockDecodingContext decodingCtx)


@@ -198,13 +198,13 @@


 ByteBuffer
-BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source,
-   HFileBlockDecodingContext blkDecodingCtx)
+RowIndexCodecV1.decodeKeyValues(DataInputStream source,
+   HFileBlockDecodingContext decodingCtx)


 ByteBuffer
-RowIndexCodecV1.decodeKeyValues(DataInputStream source,
-   HFileBlockDecodingContext decodingCtx)
+BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source,
+   HFileBlockDecodingContext blkDecodingCtx)


@@ -279,17 +279,17 @@


 HFileBlockDecodingContext
-NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)


 HFileBlockDecodingContext
-HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
-create a encoder specific decoding context for reading.
-
+NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)


 HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
+HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
+create a encoder specific decoding context for reading.
+
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
index cbdb3c8..468913a 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
@@ -116,36 +116,36 @@
  HFileBlockDefaultDecodingContext decodingCtx)


-protected abstract ByteBuffer
-BufferedDataBlockEncoder.internalDecodeKeyValues(DataInputStream source,
+protected ByteBuffer

      [03/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
      --
diff --git a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
      index f0d831a..81af282 100644
      --- a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
      +++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseClassTestRule.html
      @@ -3557,22 +3557,26 @@
       
       
       static HBaseClassTestRule
      -TestDeleteNamespaceProcedure.CLASS_RULE
      +TestRecoverMetaProcedure.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestServerCrashProcedure.CLASS_RULE
      +TestDeleteNamespaceProcedure.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestEnableTableProcedure.CLASS_RULE
      +TestServerCrashProcedure.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestFastFailOnProcedureNotRegistered.CLASS_RULE
      +TestEnableTableProcedure.CLASS_RULE
       
       
       static HBaseClassTestRule
      +TestFastFailOnProcedureNotRegistered.CLASS_RULE
      +
      +
      +static HBaseClassTestRule
       TestDeleteTableProcedure.CLASS_RULE
       
       
      @@ -4927,86 +4931,90 @@
       
       
       static HBaseClassTestRule
      -TestSecureWALReplay.CLASS_RULE
      +TestWALDurability.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestLogRollingNoCluster.CLASS_RULE
      +TestSecureWALReplay.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestSequenceIdAccounting.CLASS_RULE
      +TestLogRollingNoCluster.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestLogRollPeriod.CLASS_RULE
      +TestSequenceIdAccounting.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestAsyncLogRolling.CLASS_RULE
      +TestLogRollPeriod.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestWALCellCodecWithCompression.CLASS_RULE
      +TestAsyncLogRolling.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestWALActionsListener.CLASS_RULE
      +TestWALCellCodecWithCompression.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestAsyncWALReplay.CLASS_RULE
      +TestWALActionsListener.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestFSHLog.CLASS_RULE
      +TestAsyncWALReplay.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestProtobufLog.CLASS_RULE
      +TestFSHLog.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestWALReplay.CLASS_RULE
      +TestProtobufLog.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestWALReplayBoundedLogWriterCreation.CLASS_RULE
      +TestWALReplay.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestWALReplayCompressed.CLASS_RULE
      +TestWALReplayBoundedLogWriterCreation.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestAsyncFSWAL.CLASS_RULE
      +TestWALReplayCompressed.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestCompressor.CLASS_RULE
      +TestAsyncFSWAL.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestAsyncLogRollPeriod.CLASS_RULE
      +TestCompressor.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestLogRollAbort.CLASS_RULE
      +TestAsyncLogRollPeriod.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestCustomWALCellCodec.CLASS_RULE
      +TestLogRollAbort.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestDurability.CLASS_RULE
      +TestCustomWALCellCodec.CLASS_RULE
       
       
       static HBaseClassTestRule
      -TestLogRolling.CLASS_RULE
      +TestDurability.CLASS_RULE
       
       
       static HBaseClassTestRule
      +TestLogRolling.CLASS_RULE
      +
      +
      +static HBaseClassTestRule
       TestAsyncProtobufLog.CLASS_RULE
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
      --
diff --git a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
      index e9edcac..53a9af9 100644
      --- a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
      +++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
      @@ -2336,6 +2336,10 @@
       MasterProcedureSchedulerPerformanceEvaluation.UTIL
       
       
      +private static HBaseTestingUtility
      +TestRecoverMetaProcedure.UTIL
      +
      +
       protected static HBaseTestingUtility
       TestDeleteNamespaceProcedure.UTIL
       
      @@ -3119,37 +3123,41 @@
       
       
       private static HBaseTestingUtility
      -TestLogRollingNoCluster.TEST_UTIL
      +TestWALDurability.TEST_UTIL
       
       
      +private static HBaseTestingUtility
      +TestLogRollingNoCluster.TEST_UTIL
      +
      +
       (package private) static HBaseTestingUtility
       AbstractTestWALReplay.TEST_UTIL
       
      -
      +
       private static HBaseTestingUtility
       TestWALActionsListener.TEST_UTIL
       
      -
      +
       protected static HBaseTestingUtility
       AbstractTestProtobufLog.TEST_UTIL
       
      -
      +
       protected static HBaseTestingUtility
       AbstractTestLogRolling.TEST_UTIL
       
      -
      +
       protected static HBaseTestingUtility
       AbstractTestLogRollPeriod.TEST_UTIL
       
      -
      +
       protected static HBaseTestingUtility
       TestLogRollAbort.TEST_UTIL
       
      -
      +
       protected static HBaseTestingUtility
       AbstractTestFSWAL.TEST_UTIL
       
      -
      +
       private static HBaseTestingUtility
       TestDurability.TEST_UTIL
       
      
      

      [03/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
index 3bc66bb..97aa79c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -1435,459 +1435,460 @@
   */
  private void execProcedure(final RootProcedureState procStack,
      final Procedure<TEnvironment> procedure) {
-    Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-
-    // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException.
-    // The exception is caught below and then we hurry to the exit without disturbing state. The
-    // idea is that the processing of this procedure will be unsuspended later by an external event
-    // such the report of a region open. TODO: Currently, its possible for two worker threads
-    // to be working on the same procedure concurrently (locking in procedures is NOT about
-    // concurrency but about tying an entity to a procedure; i.e. a region to a particular
-    // procedure instance). This can make for issues if both threads are changing state.
-    // See env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-    // in RegionTransitionProcedure#reportTransition for example of Procedure putting
-    // itself back on the scheduler making it possible for two threads running against
-    // the one Procedure. Might be ok if they are both doing different, idempotent sections.
-    boolean suspended = false;
-
-    // Whether to 're-' -execute; run through the loop again.
-    boolean reExecute = false;
-
-    Procedure<TEnvironment>[] subprocs = null;
-    do {
-      reExecute = false;
-      try {
-        subprocs = procedure.doExecute(getEnvironment());
-        if (subprocs != null && subprocs.length == 0) {
-          subprocs = null;
-        }
-      } catch (ProcedureSuspendedException e) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Suspend " + procedure);
-        }
-        suspended = true;
-      } catch (ProcedureYieldException e) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Yield " + procedure + ": " + e.getMessage(), e);
-        }
-        scheduler.yield(procedure);
-        return;
-      } catch (InterruptedException e) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Yield interrupt " + procedure + ": " + e.getMessage(), e);
-        }
-        handleInterruptedException(procedure, e);
-        scheduler.yield(procedure);
-        return;
-      } catch (Throwable e) {
-        // Catch NullPointerExceptions or similar errors...
-        String msg = "CODE-BUG: Uncaught runtime exception: " + procedure;
-        LOG.error(msg, e);
-        procedure.setFailure(new RemoteProcedureException(msg, e));
-      }
-
-      if (!procedure.isFailed()) {
-        if (subprocs != null) {
-          if (subprocs.length == 1 && subprocs[0] == procedure) {
-            // Procedure returned itself. Quick-shortcut for a state machine-like procedure;
-            // i.e. we go around this loop again rather than go back out on the scheduler queue.
-            subprocs = null;
-            reExecute = true;
-            if (LOG.isTraceEnabled()) {
-              LOG.trace("Short-circuit to next step on pid=" + procedure.getProcId());
-            }
-          } else {
-            // Yield the current procedure, and make the subprocedure runnable
-            // subprocs may come back 'null'.
-            subprocs = initializeChildren(procStack, procedure, subprocs);
-            LOG.info("Initialized subprocedures=" +
-              (subprocs == null? null:
-                Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-                  collect(Collectors.toList()).toString()));
-          }
-        } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
-          if (LOG.isTraceEnabled()) {
-            LOG.trace("Added to timeoutExecutor " + procedure);
-          }
-          timeoutExecutor.add(procedure);
-        } else if (!suspended) {
-          // No subtask, so we are done
-          procedure.setState(ProcedureState.SUCCESS);
-        }
-      }
-
-
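The loop above gives Procedure implementations two exits: suspend by throwing, or re-execute by returning itself. A hedged sketch of the self-return short-circuit (MyEnv, TwoStepProcedure and the step field are illustrative; the rollback/abort/serialize boilerplate follows the 2.x procedure2 signatures as best recalled here, not this commit):

import java.io.IOException;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;

public class TwoStepProcedure extends Procedure<TwoStepProcedure.MyEnv> {
  public static final class MyEnv { } // hypothetical environment type

  private int step = 0;

  @Override
  protected Procedure<MyEnv>[] execute(MyEnv env)
      throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
    if (step++ == 0) {
      // Returning {this} makes execProcedure() set reExecute and run the next
      // step immediately instead of a round-trip through the scheduler queue.
      return new Procedure[] { this };
    }
    return null; // no subprocedures left: the executor marks us SUCCESS
  }

  @Override
  protected void rollback(MyEnv env) throws IOException, InterruptedException {
    // nothing to undo in this sketch
  }

  @Override
  protected boolean abort(MyEnv env) {
    return false; // not abortable
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
  }
}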

      [03/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
      --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
index 13d376b..249cd71 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
@@ -55,389 +55,388 @@
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.ClassRule;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-/**
- * Class to test asynchronous region admin operations.
- * @see TestAsyncRegionAdminApi2 This test and it used to be joined it was taking longer than our
- * ten minute timeout so they were split.
- */
-@RunWith(Parameterized.class)
-@Category({ LargeTests.class, ClientTests.class })
-public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
-  @ClassRule
-  public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestAsyncRegionAdminApi.class);
-
-  @Test
-  public void testAssignRegionAndUnassignRegion() throws Exception {
-    createTableWithDefaultConf(tableName);
-
-    // assign region.
-    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-    AssignmentManager am = master.getAssignmentManager();
-    RegionInfo hri = am.getRegionStates().getRegionsOfTable(tableName).get(0);
-
-    // assert region on server
-    RegionStates regionStates = am.getRegionStates();
-    ServerName serverName = regionStates.getRegionServerOfRegion(hri);
-    TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
-    assertTrue(regionStates.getRegionState(hri).isOpened());
-
-    // Region is assigned now. Let's assign it again.
-    // Master should not abort, and region should stay assigned.
-    admin.assign(hri.getRegionName()).get();
-    try {
-      am.waitForAssignment(hri);
-      fail("Expected NoSuchProcedureException");
-    } catch (NoSuchProcedureException e) {
-      // Expected
-    }
-    assertTrue(regionStates.getRegionState(hri).isOpened());
-
-    // unassign region
-    admin.unassign(hri.getRegionName(), true).get();
-    try {
-      am.waitForAssignment(hri);
-      fail("Expected NoSuchProcedureException");
-    } catch (NoSuchProcedureException e) {
-      // Expected
-    }
-    assertTrue(regionStates.getRegionState(hri).isClosed());
-  }
-
-  RegionInfo createTableAndGetOneRegion(final TableName tableName)
-      throws IOException, InterruptedException, ExecutionException {
-    TableDescriptor desc =
-        TableDescriptorBuilder.newBuilder(tableName)
-            .addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build();
-    admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5).get();
-
-    // wait till the table is assigned
-    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-    long timeoutTime = System.currentTimeMillis() + 3000;
-    while (true) {
-      List<RegionInfo> regions =
-          master.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName);
-      if (regions.size() > 3) {
-        return regions.get(2);
-      }
-      long now = System.currentTimeMillis();
-      if (now > timeoutTime) {
-        fail("Could not find an online region");
-      }
-      Thread.sleep(10);
-    }
-  }
-
-  @Test
-  public void testGetRegionByStateOfTable() throws Exception {
-    RegionInfo hri = createTableAndGetOneRegion(tableName);
-
-    RegionStates regionStates =
-        TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
-    assertTrue(regionStates.getRegionByStateOfTable(tableName).get(RegionState.State.OPEN)
-        .stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0));
-    assertFalse(regionStates.getRegionByStateOfTable(TableName.valueOf("I_am_the_phantom"))
-        .get(RegionState.State.OPEN).stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0));
-  }
-
-  @Test
-  public void testMoveRegion() throws Exception {
-    admin.balancerSwitch(false).join();
-
-    RegionInfo hri = createTableAndGetOneRegion(tableName);
-    RawAsyncHBaseAdmin rawAdmin = (RawAsyncHBaseAdmin) ASYNC_CONN.getAdmin();
-    ServerName serverName =

      [03/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
      index 8b1ac9b..dcd9fce 100644
      --- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
      +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
      @@ -704,20 +704,20 @@
       
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
       
      -org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
      -org.apache.hadoop.hbase.regionserver.ScanType
      -org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
      -org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
      -org.apache.hadoop.hbase.regionserver.FlushType
      +org.apache.hadoop.hbase.regionserver.BloomType
       org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
      -org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
       org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
      -org.apache.hadoop.hbase.regionserver.Region.Operation
      +org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
       org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
      -org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
      +org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
       org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
      -org.apache.hadoop.hbase.regionserver.BloomType
      +org.apache.hadoop.hbase.regionserver.ScanType
       org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
      +org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
      +org.apache.hadoop.hbase.regionserver.FlushType
      +org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
      +org.apache.hadoop.hbase.regionserver.Region.Operation
      +org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
      index a62f000..7208218 100644
      --- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
      +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-use.html
@@ -2116,6 +2116,11 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
       
       
       
      +Region
      +Region is a subset of HRegion with operations required for 
      the Coprocessors.
      +
      +
      +
       RegionServerServices
       A curated subset of services provided by HRegionServer.
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index 23060c2..2731576 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
      @@ -130,9 +130,9 @@
       
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
       
      -org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
      -org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
       org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
      +org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
      +org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.ActiveOperation.html
      --
      diff --git 
      

      [03/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
index 238fee7..262cf46 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
      @@ -152,27 +152,27 @@ the order they are declared.
       
       
       PeerProcedureInterface.PeerOperationType
      -RefreshPeerProcedure.getPeerOperationType()
      +DisablePeerProcedure.getPeerOperationType()
       
       
       PeerProcedureInterface.PeerOperationType
      -DisablePeerProcedure.getPeerOperationType()
      +RemovePeerProcedure.getPeerOperationType()
       
       
       PeerProcedureInterface.PeerOperationType
      -UpdatePeerConfigProcedure.getPeerOperationType()
      +EnablePeerProcedure.getPeerOperationType()
       
       
       PeerProcedureInterface.PeerOperationType
      -AddPeerProcedure.getPeerOperationType()
      +RefreshPeerProcedure.getPeerOperationType()
       
       
       PeerProcedureInterface.PeerOperationType
      -EnablePeerProcedure.getPeerOperationType()
      +AddPeerProcedure.getPeerOperationType()
       
       
       PeerProcedureInterface.PeerOperationType
      -RemovePeerProcedure.getPeerOperationType()
      +UpdatePeerConfigProcedure.getPeerOperationType()
       
       
       private static PeerProcedureInterface.PeerOperationType
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
index 52693ba..f5001f1 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
      @@ -185,7 +185,7 @@
       
       
       private ProcedurePrepareLatch
      -RecoverMetaProcedure.syncLatch
      +AbstractStateMachineNamespaceProcedure.syncLatch
       
       
       private ProcedurePrepareLatch
      @@ -193,7 +193,7 @@
       
       
       private ProcedurePrepareLatch
      -AbstractStateMachineNamespaceProcedure.syncLatch
      +RecoverMetaProcedure.syncLatch
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
index 280fbd6..5929a91 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
      @@ -104,14 +104,14 @@
       
       
       ServerProcedureInterface.ServerOperationType
      -ServerCrashProcedure.getServerOperationType()
      -
      -
      -ServerProcedureInterface.ServerOperationType
       ServerProcedureInterface.getServerOperationType()
       Given an operation type we can take decisions about what to 
      do with pending operations.
       
       
      +
      +ServerProcedureInterface.ServerOperationType
      +ServerCrashProcedure.getServerOperationType()
      +
       
       static ServerProcedureInterface.ServerOperationType
ServerProcedureInterface.ServerOperationType.valueOf(String name)
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
      index f9a40a6..9f856b9 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
      @@ -112,19 +112,19 @@
       
       
       TableProcedureInterface.TableOperationType
      -MoveRegionProcedure.getTableOperationType()
      +UnassignProcedure.getTableOperationType()
       
       
       TableProcedureInterface.TableOperationType
      

      [03/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
      index f8352b7..8b1c4bd 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
      @@ -247,14 +247,14 @@ extends 
       
       Methods inherited from classorg.apache.hadoop.hbase.master.HMaster
      -abort,
       abortProcedure,
       addColumn,
       addReplicationPeer,
       balance,
       balance,
       balanceSwitch,
       canCreateBaseZNode, canUpdateTableDescriptor,
       checkIfShouldMoveSystemRegionAsync,
       checkInitialized,
       checkServiceStarted,
       checkTableModifiable,
       configureInfoServer,
       constructMaster,
       createMetaBootstrap,
       createNamespace,
       createQuotaSnapshotNotifier,
       createRpcServices,
       createServerManager,
       createSystemTable,
       createTable, 
      decommissionRegionServers,
       decorateMasterConfiguration,
       deleteColumn,
       deleteNamespace,
       deleteTable,
       disableReplicationPeer,
       disableTable,
       enableReplicationPeer,
       enableTable,
       getAssignmentManager,
       getAverageLoad,
       getCatalogJanitor,
       getClientIdAuditPrefix,
       getClusterMetrics,
       getClusterMetrics,
       getClusterMetricsWithoutCoprocessor,
       getClusterMetricsWithoutCoprocessor,
       getClusterSchema,
       getDumpServlet,
       getFavoredNodesManager,
       getHFileCleaner,
       getInitializedEvent,
       getLastMajorCompactionTimestamp,
       getLastMajorCompactionTimestampForRegion,
       getLoadBalancer,
       getLoadBalancerClassName,
       getLoadedCoprocessors,
       getLockManager,
       getLocks,
       getLogCleaner,
       getMasterActiveTime,
       getMasterCoprocessorHost,
       getMasterCoprocessors,
       getMasterFileSystem,
       getMasterFinishedInitializationTime,
       getMasterMetrics,
       getMasterProcedureExecutor,
       getMasterProcedureManagerHost,
       getMasterQuotaManager,
       getMasterRpcServices,
       getMasterStartTime,
       getMasterWalManager,
       getMergePlanCount,
       getMetaTableObserver,
       getMobCompactionState,
       getNamespace,
       getNamespaces, getNumWALFiles,
       getProcedures,
       getProcessName,
       getQuotaObserverChore,
       getRegionNormalizer,
       getRegionNormalizerTracker,
       getRegionServerFatalLogBuffer,
       
       getRegionServerInfoPort, getRegionServerVersion,
       getRemoteInetAddress,
       getReplicationPeerConfig,
       getReplicationPeerManager,
       getServerCrashProcessingEnabledEvent,
       getServerManager,
       getServerName,
       getSnapshotManager, getSpaceQuotaSnapshotNotifier,
       getSplitOrMergeTracker,
       getSplitPlanCount,
       getTableDescriptors,
       getTableRegionForRow,
       getTableStateManager,
       getUseThisHostnameInstead,
       getWalProcedureStore,
       getZooKeeper,
       initClusterSchemaService,
       initializeZKBasedSystemTrackers,
       initQuotaManager,
       isActiveMaster,
       isBalancerOn,
       isCatalogJanitorEnabled,
       isCleanerChoreEnabled, isInitialized,
       isInMaintenanceMode,
       isNormalizerOn,
       isServerCrashProcessingEnabled,
       isSplitOrMergeEnabled,
       listDecommissionedRegionServers,
       listReplicationPeers,
       listTableDescriptors,
       listTableDescriptorsByNamespace,
       listTableNames,
       listTableNamesByNamespace,
       login,
       main,
       mergeRegions,
       modifyColumn,
       modifyNamespace,
       modifyTable,
       move,
       normalizeRegions,
       recommissionRegionServer,
       recoverMeta, registerService,
       remoteProcedureCompleted,
       remoteProcedureFailed,
       removeReplicationPeer,
       reportMobCompactionEnd,
       reportMobCompactionStart,
       requestMobCompaction,
       restoreSnapshot,
       setCatalogJanitorEnabled,
       setInitialized,
       setServerCrashProcessingEnabled,
       shutdown,
       splitRegion,
       stop, stopMaster,
       stopServiceThreads,
       truncateTable,
       updateConfigurationForSpaceQuotaObserver,
       updateReplicationPeerConfig,
       waitForMasterActive
      +abort,
       abortProcedure,
       addColumn,
       addReplicationPeer,
       balance,
       balance,
       balanceSwitch,
       canCreateBaseZNode, canUpdateTableDescriptor,
       checkIfShouldMoveSystemRegionAsync,
       checkInitialized,
       checkServiceStarted,
       checkTableModifiable,
       configureInfoServer,
       constructMaster,
       createMetaBootstrap,
       createNamespace,
       createQuotaSnapshotNotifier,
       createRpcServices,
       createServerManager,
       createSystemTable,
       createTable, 
      decommissionRegionServers,
       decorateMasterConfiguration,
       deleteColumn,
       deleteNamespace,
       deleteTable,
       disableReplicationPeer,
       disableTable,
       enableReplicationPeer,
       enableTable,
       getAssignmentManager,
       getAverageLoad,
       getCatalogJanitor,
       getClientIdAuditPrefix,
       getClusterMetrics,
       getClusterMetrics,
       getClusterMetricsWithoutCoprocessor,
       getClusterMetricsWithoutCoprocessor,
       getClusterSchema,
       getDumpServlet,
       getFavoredNodesManager,
       getHFileCleaner,
       getInitializedEvent,
       getLastMajorCompactionTimestamp,
       getLastMajorCompactionTimestampForRegion,
       getLoadBalancer,
       getLoadBalancerClassName,
       getLoadedCoprocessors,
       getLockManager,
       getLocks,
       getLogCleaner,
       getMasterActiveTime,
       getMasterCoprocessorHost,
       getMasterCoprocessors,
       

      [03/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
      index c751af0..7f98047 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
      @@ -7,48 +7,48 @@
       
       
       001/**
      -002 *
      -003 * Licensed to the Apache Software 
      Foundation (ASF) under one
      -004 * or more contributor license 
      agreements.  See the NOTICE file
      -005 * distributed with this work for 
      additional information
      -006 * regarding copyright ownership.  The 
      ASF licenses this file
      -007 * to you under the Apache License, 
      Version 2.0 (the
      -008 * "License"); you may not use this file 
      except in compliance
      -009 * with the License.  You may obtain a 
      copy of the License at
      -010 *
      -011 * 
      http://www.apache.org/licenses/LICENSE-2.0
      -012 *
      -013 * Unless required by applicable law or 
      agreed to in writing, software
      -014 * distributed under the License is 
      distributed on an "AS IS" BASIS,
      -015 * WITHOUT WARRANTIES OR CONDITIONS OF 
      ANY KIND, either express or implied.
      -016 * See the License for the specific 
      language governing permissions and
      -017 * limitations under the License.
      -018 */
      +002 * Licensed to the Apache Software 
      Foundation (ASF) under one
      +003 * or more contributor license 
      agreements.  See the NOTICE file
      +004 * distributed with this work for 
      additional information
      +005 * regarding copyright ownership.  The 
      ASF licenses this file
      +006 * to you under the Apache License, 
      Version 2.0 (the
      +007 * "License"); you may not use this file 
      except in compliance
      +008 * with the License.  You may obtain a 
      copy of the License at
      +009 *
      +010 * 
      http://www.apache.org/licenses/LICENSE-2.0
      +011 *
      +012 * Unless required by applicable law or 
      agreed to in writing, software
      +013 * distributed under the License is 
      distributed on an "AS IS" BASIS,
      +014 * WITHOUT WARRANTIES OR CONDITIONS OF 
      ANY KIND, either express or implied.
      +015 * See the License for the specific 
      language governing permissions and
      +016 * limitations under the License.
      +017 */
      +018package 
      org.apache.hadoop.hbase.master.assignment;
       019
      -020package 
      org.apache.hadoop.hbase.master.assignment;
      -021
      -022import java.io.IOException;
      -023import java.util.Collections;
      -024import java.util.List;
      -025import org.apache.hadoop.hbase.Cell;
      -026import 
      org.apache.hadoop.hbase.CellBuilderFactory;
      -027import 
      org.apache.hadoop.hbase.CellBuilderType;
      -028import 
      org.apache.hadoop.hbase.HConstants;
      -029import 
      org.apache.hadoop.hbase.HRegionLocation;
      -030import 
      org.apache.hadoop.hbase.MetaTableAccessor;
      -031import 
      org.apache.hadoop.hbase.RegionLocations;
      -032import 
      org.apache.hadoop.hbase.ServerName;
      -033import 
      org.apache.hadoop.hbase.TableName;
      -034import 
      org.apache.hadoop.hbase.client.Put;
      -035import 
      org.apache.hadoop.hbase.client.RegionInfo;
      -036import 
      org.apache.hadoop.hbase.client.Result;
      -037import 
      org.apache.hadoop.hbase.client.Table;
      -038import 
      org.apache.hadoop.hbase.client.TableDescriptor;
      -039import 
      org.apache.hadoop.hbase.master.MasterServices;
      -040import 
      org.apache.hadoop.hbase.master.RegionState.State;
      -041import 
      org.apache.hadoop.hbase.procedure2.util.StringUtils;
      -042import 
      org.apache.hadoop.hbase.util.Bytes;
      -043import 
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
      +020import java.io.IOException;
      +021import java.util.Collections;
      +022import java.util.List;
      +023import org.apache.hadoop.hbase.Cell;
      +024import 
      org.apache.hadoop.hbase.CellBuilderFactory;
      +025import 
      org.apache.hadoop.hbase.CellBuilderType;
      +026import 
      org.apache.hadoop.hbase.HConstants;
      +027import 
      org.apache.hadoop.hbase.HRegionLocation;
      +028import 
      org.apache.hadoop.hbase.MetaTableAccessor;
      +029import 
      org.apache.hadoop.hbase.RegionLocations;
      +030import 
      org.apache.hadoop.hbase.ServerName;
      +031import 
      org.apache.hadoop.hbase.TableName;
      +032import 
      org.apache.hadoop.hbase.client.Put;
      +033import 
      org.apache.hadoop.hbase.client.RegionInfo;
      +034import 
      org.apache.hadoop.hbase.client.Result;
      +035import 
      org.apache.hadoop.hbase.client.Table;
      +036import 
      org.apache.hadoop.hbase.client.TableDescriptor;
      +037import 
      org.apache.hadoop.hbase.master.MasterFileSystem;
      +038import 
      org.apache.hadoop.hbase.master.MasterServices;
      +039import 
      org.apache.hadoop.hbase.master.RegionState.State;
      +040import 
      org.apache.hadoop.hbase.procedure2.util.StringUtils;
      +041import 
      org.apache.hadoop.hbase.util.Bytes;
      +042import 
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
      +043import 
      org.apache.hadoop.hbase.wal.WALSplitter;
       044import 
      

      [03/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
      --
      diff --git a/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html 
      b/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
      index 04248c5..81c7c35 100644
      --- a/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
      +++ b/apidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
      @@ -97,7 +97,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">java.lang.Object
      +https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">java.lang.Object
       
       
       org.apache.hadoop.hbase.client.TableDescriptorBuilder
      @@ -110,8 +110,8 @@ var activeTableTab = "activeTableTab";
       
       
       @InterfaceAudience.Public
      -public class TableDescriptorBuilder
      -extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">Object
      +public class TableDescriptorBuilder
      +extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">Object
       
       Since:
       2.0.0
      @@ -198,18 +198,18 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       TableDescriptorBuilder
      -addCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">StringclassName)
      +addCoprocessor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">StringclassName)
       
       
       TableDescriptorBuilder
      -addCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">StringclassName,
      +addCoprocessor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">StringclassName,
         org.apache.hadoop.fs.PathjarFilePath,
         intpriority,
      -  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
       title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">Stringkvs)
      +  https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
       title="class or interface in java.util">Maphttps://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">String,https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">Stringkvs)
       
       
       TableDescriptorBuilder
      -addCoprocessorWithSpec(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">StringspecStr)
      +addCoprocessorWithSpec(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">StringspecStr)
       
       
       TableDescriptor
      @@ -250,7 +250,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       TableDescriptorBuilder
      -removeCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">StringclassName)
      +removeCoprocessor(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">StringclassName)
       
       
       TableDescriptorBuilder
      @@ -270,7 +270,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       TableDescriptorBuilder
      -setFlushPolicyClassName(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">Stringclazz)
      +setFlushPolicyClassName(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in 
      java.lang">Stringclazz)
       
       
       TableDescriptorBuilder
      @@ -292,7 +292,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       TableDescriptorBuilder
      -setOwnerString(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">StringownerString)
      +setOwnerString(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">StringownerString)
       Deprecated.
       
       
      @@ -314,7 +314,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       TableDescriptorBuilder
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
      index 7edb3ff..665071c 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
      @@ -1221,2378 +1221,2377 @@
       1213
      configurationManager.registerObserver(procEnv);
       1214
       1215int cpus = 
      Runtime.getRuntime().availableProcessors();
      -1216final int numThreads = 
      conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
-1217Math.max((cpus > 0 ? cpus/4 : 0),
      -1218
      MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
      -1219final boolean abortOnCorruption = 
      conf.getBoolean(
      -1220
      MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
      -1221
      MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
      -1222procedureStore.start(numThreads);
      -1223procedureExecutor.start(numThreads, 
      abortOnCorruption);
      -1224
      procEnv.getRemoteDispatcher().start();
      -1225  }
      -1226
      -1227  private void stopProcedureExecutor() 
      {
      -1228if (procedureExecutor != null) {
      -1229  
      configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
      -1230  
      procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
      -1231  procedureExecutor.stop();
      -1232  procedureExecutor.join();
      -1233  procedureExecutor = null;
      -1234}
      -1235
      -1236if (procedureStore != null) {
      -1237  
      procedureStore.stop(isAborted());
      -1238  procedureStore = null;
      -1239}
      -1240  }
      -1241
      -1242  private void stopChores() {
      -1243if (this.expiredMobFileCleanerChore 
      != null) {
      -1244  
      this.expiredMobFileCleanerChore.cancel(true);
      -1245}
      -1246if (this.mobCompactChore != null) 
      {
      -1247  
      this.mobCompactChore.cancel(true);
      -1248}
      -1249if (this.balancerChore != null) {
      -1250  this.balancerChore.cancel(true);
      -1251}
      -1252if (this.normalizerChore != null) 
      {
      -1253  
      this.normalizerChore.cancel(true);
      -1254}
      -1255if (this.clusterStatusChore != null) 
      {
      -1256  
      this.clusterStatusChore.cancel(true);
      -1257}
      -1258if (this.catalogJanitorChore != 
      null) {
      -1259  
      this.catalogJanitorChore.cancel(true);
      -1260}
      -1261if (this.clusterStatusPublisherChore 
      != null){
      -1262  
      clusterStatusPublisherChore.cancel(true);
      -1263}
      -1264if (this.mobCompactThread != null) 
      {
      -1265  this.mobCompactThread.close();
      -1266}
      -1267
      -1268if (this.quotaObserverChore != null) 
      {
      -1269  quotaObserverChore.cancel();
      -1270}
      -1271if (this.snapshotQuotaChore != null) 
      {
      -1272  snapshotQuotaChore.cancel();
      -1273}
      -1274  }
      -1275
      -1276  /**
      -1277   * @return Get remote side's 
      InetAddress
      -1278   */
      -1279  InetAddress getRemoteInetAddress(final 
      int port,
      -1280  final long serverStartCode) throws 
      UnknownHostException {
      -1281// Do it out here in its own little 
      method so can fake an address when
      -1282// mocking up in tests.
      -1283InetAddress ia = 
      RpcServer.getRemoteIp();
      -1284
      -1285// The call could be from the local 
      regionserver,
      -1286// in which case, there is no remote 
      address.
-1287if (ia == null && serverStartCode == startcode) {
      -1288  InetSocketAddress isa = 
      rpcServices.getSocketAddress();
-1289  if (isa != null && isa.getPort() == port) {
      -1290ia = isa.getAddress();
      -1291  }
      -1292}
      -1293return ia;
      -1294  }
      -1295
      -1296  /**
      -1297   * @return Maximum time we should run 
      balancer for
      -1298   */
      -1299  private int getMaxBalancingTime() {
      -1300int maxBalancingTime = 
      getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, -1);
      -1301if (maxBalancingTime == -1) {
-1302  // if max balancing time isn't set, default it to the period time
      -1303  maxBalancingTime = 
      getConfiguration().getInt(HConstants.HBASE_BALANCER_PERIOD,
      -1304
      HConstants.DEFAULT_HBASE_BALANCER_PERIOD);
      -1305}
      -1306return maxBalancingTime;
      -1307  }
      -1308
      -1309  /**
      -1310   * @return Maximum number of regions 
      in transition
      -1311   */
      -1312  private int 
      getMaxRegionsInTransition() {
      -1313int numRegions = 
      this.assignmentManager.getRegionStates().getRegionAssignments().size();
      -1314return Math.max((int) 
      Math.floor(numRegions * this.maxRitPercent), 1);
      -1315  }
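The two getters above reduce to small arithmetic. A self-contained restatement of the same math, with numRegions, maxRitPercent, configuredMax and balancerPeriod as stand-ins for the values HMaster reads from its AssignmentManager and Configuration:

    // Standalone restatement of the throttling math in the listing above.
    final class BalancerMath {
      // Mirrors getMaxRegionsInTransition(): allow a fraction of the current
      // region count to be in transition, but never fewer than one region.
      static int maxRegionsInTransition(int numRegions, double maxRitPercent) {
        return Math.max((int) Math.floor(numRegions * maxRitPercent), 1);
      }

      // Mirrors getMaxBalancingTime(): fall back to the balancer period when
      // the max-balancing setting is unset (-1).
      static int maxBalancingTime(int configuredMax, int balancerPeriod) {
        return configuredMax == -1 ? balancerPeriod : configuredMax;
      }
    }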
      -1316
      -1317  /**
      -1318   * It first sleep to the next balance 
      plan start time. Meanwhile, throttling by the max
      -1319   * number regions in transition to 
      protect availability.
      -1320   * @param nextBalanceStartTime The 
      next balance plan start time
      -1321   * @param maxRegionsInTransition max 
      number of regions in transition
      -1322   * @param 

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
      index 802b925..a3e80ab 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
      @@ -73,229 +73,229 @@
       065import 
      java.util.concurrent.TimeoutException;
       066import 
      java.util.concurrent.atomic.AtomicBoolean;
       067import 
      java.util.concurrent.atomic.AtomicInteger;
      -068import 
      java.util.concurrent.atomic.AtomicLong;
      -069import 
      java.util.concurrent.atomic.LongAdder;
      -070import java.util.concurrent.locks.Lock;
      -071import 
      java.util.concurrent.locks.ReadWriteLock;
      -072import 
      java.util.concurrent.locks.ReentrantReadWriteLock;
      -073import java.util.function.Function;
      -074import 
      org.apache.hadoop.conf.Configuration;
      -075import org.apache.hadoop.fs.FileStatus;
      -076import org.apache.hadoop.fs.FileSystem;
      -077import 
      org.apache.hadoop.fs.LocatedFileStatus;
      -078import org.apache.hadoop.fs.Path;
      -079import org.apache.hadoop.hbase.Cell;
      -080import 
      org.apache.hadoop.hbase.CellBuilderType;
      -081import 
      org.apache.hadoop.hbase.CellComparator;
      -082import 
      org.apache.hadoop.hbase.CellComparatorImpl;
      -083import 
      org.apache.hadoop.hbase.CellScanner;
      -084import 
      org.apache.hadoop.hbase.CellUtil;
      -085import 
      org.apache.hadoop.hbase.CompareOperator;
      -086import 
      org.apache.hadoop.hbase.CompoundConfiguration;
      -087import 
      org.apache.hadoop.hbase.DoNotRetryIOException;
      -088import 
      org.apache.hadoop.hbase.DroppedSnapshotException;
      -089import 
      org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
      -090import 
      org.apache.hadoop.hbase.HConstants;
      -091import 
      org.apache.hadoop.hbase.HConstants.OperationStatusCode;
      -092import 
      org.apache.hadoop.hbase.HDFSBlocksDistribution;
      -093import 
      org.apache.hadoop.hbase.HRegionInfo;
      -094import 
      org.apache.hadoop.hbase.KeyValue;
      -095import 
      org.apache.hadoop.hbase.KeyValueUtil;
      -096import 
      org.apache.hadoop.hbase.NamespaceDescriptor;
      -097import 
      org.apache.hadoop.hbase.NotServingRegionException;
      -098import 
      org.apache.hadoop.hbase.PrivateCellUtil;
      -099import 
      org.apache.hadoop.hbase.RegionTooBusyException;
      -100import 
      org.apache.hadoop.hbase.TableName;
      -101import org.apache.hadoop.hbase.Tag;
      -102import org.apache.hadoop.hbase.TagUtil;
      -103import 
      org.apache.hadoop.hbase.UnknownScannerException;
      -104import 
      org.apache.hadoop.hbase.client.Append;
      -105import 
      org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
      -106import 
      org.apache.hadoop.hbase.client.CompactionState;
      -107import 
      org.apache.hadoop.hbase.client.Delete;
      -108import 
      org.apache.hadoop.hbase.client.Durability;
      -109import 
      org.apache.hadoop.hbase.client.Get;
      -110import 
      org.apache.hadoop.hbase.client.Increment;
      -111import 
      org.apache.hadoop.hbase.client.IsolationLevel;
      -112import 
      org.apache.hadoop.hbase.client.Mutation;
      -113import 
      org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
      -114import 
      org.apache.hadoop.hbase.client.Put;
      -115import 
      org.apache.hadoop.hbase.client.RegionInfo;
      -116import 
      org.apache.hadoop.hbase.client.RegionReplicaUtil;
      -117import 
      org.apache.hadoop.hbase.client.Result;
      -118import 
      org.apache.hadoop.hbase.client.RowMutations;
      -119import 
      org.apache.hadoop.hbase.client.Scan;
      -120import 
      org.apache.hadoop.hbase.client.TableDescriptor;
      -121import 
      org.apache.hadoop.hbase.client.TableDescriptorBuilder;
      -122import 
      org.apache.hadoop.hbase.conf.ConfigurationManager;
      -123import 
      org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
      -124import 
      org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
      -125import 
      org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
      -126import 
      org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
      -127import 
      org.apache.hadoop.hbase.exceptions.TimeoutIOException;
      -128import 
      org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
      -129import 
      org.apache.hadoop.hbase.filter.ByteArrayComparable;
      -130import 
      org.apache.hadoop.hbase.filter.FilterWrapper;
      -131import 
      org.apache.hadoop.hbase.filter.IncompatibleFilterException;
      -132import 
      org.apache.hadoop.hbase.io.HFileLink;
      -133import 
      org.apache.hadoop.hbase.io.HeapSize;
      -134import 
      org.apache.hadoop.hbase.io.TimeRange;
      -135import 
      org.apache.hadoop.hbase.io.hfile.HFile;
      -136import 
      org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
      -137import 
      org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
      -138import 
      org.apache.hadoop.hbase.ipc.RpcCall;
      -139import 
      org.apache.hadoop.hbase.ipc.RpcServer;
      -140import 
      org.apache.hadoop.hbase.monitoring.MonitoredTask;
      -141import 
      org.apache.hadoop.hbase.monitoring.TaskMonitor;
      -142import 
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/testdevapidocs/index-all.html
      --
      diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
      index 24aa34d..f4d6287 100644
      --- a/testdevapidocs/index-all.html
      +++ b/testdevapidocs/index-all.html
      @@ -5809,6 +5809,8 @@
       
       CLASS_RULE
       - Static variable in class org.apache.hadoop.hbase.security.token.TestTokenAuthentication
       
      +CLASS_RULE
       - Static variable in class org.apache.hadoop.hbase.security.token.TestTokenUtil
      +
       CLASS_RULE
       - Static variable in class org.apache.hadoop.hbase.security.token.TestZKSecretWatcher
       
       CLASS_RULE
       - Static variable in class org.apache.hadoop.hbase.security.token.TestZKSecretWatcherRefreshKeys
      @@ -25261,8 +25263,6 @@
       
       MyAsyncProcess(ClusterConnection,
       Configuration, AtomicInteger) - Constructor for class 
      org.apache.hadoop.hbase.client.TestAsyncProcess.MyAsyncProcess
       
      -MyAsyncProcess(ClusterConnection,
       Configuration, boolean) - Constructor for class 
      org.apache.hadoop.hbase.client.TestAsyncProcess.MyAsyncProcess
      -
       MyAsyncProcess(ClusterConnection,
       Configuration) - Constructor for class 
      org.apache.hadoop.hbase.client.TestAsyncProcessWithRegionException.MyAsyncProcess
       
       MyAsyncProcessWithReplicas(ClusterConnection,
       Configuration) - Constructor for class 
      org.apache.hadoop.hbase.client.TestAsyncProcess.MyAsyncProcessWithReplicas
      @@ -47436,6 +47436,8 @@
       
       testErrorPropagation()
       - Method in class org.apache.hadoop.hbase.procedure.TestProcedure
       
      +testErrors()
       - Method in class org.apache.hadoop.hbase.client.TestAsyncProcess
      +
       testErrorsServers()
       - Method in class org.apache.hadoop.hbase.client.TestAsyncProcess
       
       testEscape()
       - Method in class org.apache.hadoop.hbase.util.TestJRubyFormat
      @@ -47662,6 +47664,8 @@
       
       testFailedPut()
       - Method in class org.apache.hadoop.hbase.client.TestAsyncProcessWithRegionException
       
      +testFailedPutAndNewPut()
       - Method in class org.apache.hadoop.hbase.client.TestAsyncProcess
      +
       testFailedPutWithoutActionException()
       - Method in class org.apache.hadoop.hbase.client.TestAsyncProcessWithRegionException
       
       testFailedServer()
       - Method in class org.apache.hadoop.hbase.ipc.TestHBaseClient
      @@ -49124,8 +49128,6 @@
       
       testGlobalAuthorizationForNewRegisteredRS()
       - Method in class org.apache.hadoop.hbase.security.access.TestAccessController
       
      -testGlobalErrors()
       - Method in class org.apache.hadoop.hbase.client.TestAsyncProcess
      -
       TestGlobalFilter - Class in org.apache.hadoop.hbase.http
       
       TestGlobalFilter()
       - Constructor for class org.apache.hadoop.hbase.http.TestGlobalFilter
      @@ -49906,8 +49908,6 @@
       
       testHTableExistsMethodSingleRegionSingleGet()
       - Method in class org.apache.hadoop.hbase.client.TestFromClientSide3
       
      -testHTableFailedPutAndNewPut()
       - Method in class org.apache.hadoop.hbase.client.TestAsyncProcess
      -
       testHTableInterfaceMethods()
       - Method in class org.apache.hadoop.hbase.coprocessor.TestAppendTimeRange
       
       testHTableInterfaceMethods()
       - Method in class org.apache.hadoop.hbase.coprocessor.TestIncrementTimeRange
      @@ -53690,6 +53690,8 @@
       
       testObserverAddedByDefault()
       - Method in class org.apache.hadoop.hbase.quotas.TestMasterSpaceQuotaObserver
       
      +testObtainToken()
       - Method in class org.apache.hadoop.hbase.security.token.TestTokenUtil
      +
       testOddSizedBlocks()
       - Method in class org.apache.hadoop.hbase.io.crypto.TestEncryption
       
       testOfferInStealJobQueueShouldUnblock()
       - Method in class org.apache.hadoop.hbase.util.TestStealJobQueue
      @@ -60680,6 +60682,10 @@
       
       testTokenCreation()
       - Method in class org.apache.hadoop.hbase.security.token.TestTokenAuthentication
       
      +TestTokenUtil - Class in org.apache.hadoop.hbase.security.token
      +
      +TestTokenUtil()
       - Constructor for class org.apache.hadoop.hbase.security.token.TestTokenUtil
      +
       testToLong()
       - Method in class org.apache.hadoop.hbase.util.TestBytes
       
       testTooBigEntry()
       - Method in class org.apache.hadoop.hbase.io.hfile.bucket.TestBucketWriterThread
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html 
      b/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
      index 55d0251..2dc7e47 100644
      --- 
      a/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
      +++ 
      b/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
      @@ -153,7 +153,7 @@ extends org.apache.hadoop.hbase.master.HMaster
       
       
       Nested classes/interfaces inherited from 
      classorg.apache.hadoop.hbase.master.HMaster
      -org.apache.hadoop.hbase.master.HMaster.RedirectServlet
      +org.apache.hadoop.hbase.master.HMaster.MasterStoppedException, 
      org.apache.hadoop.hbase.master.HMaster.RedirectServlet
       
       
       
      
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html 
      b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
      index 75077a6..5da3ef8 100644
      --- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
      +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
      @@ -521,33 +521,33 @@
       
       
       
      -org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
      -StripeStoreFileManager.clearCompactedFiles()
      -
      -
       http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
       title="class or interface in java.util">CollectionHStoreFile
       DefaultStoreFileManager.clearCompactedFiles()
       
      -
      +
       http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
       title="class or interface in java.util">CollectionHStoreFile
       StoreFileManager.clearCompactedFiles()
       Clears all the compacted files and returns them.
       
       
      -
      +
       org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
      -StripeStoreFileManager.clearFiles()
      +StripeStoreFileManager.clearCompactedFiles()
       
      -
      +
       org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
       DefaultStoreFileManager.clearFiles()
       
      -
      +
       org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
       StoreFileManager.clearFiles()
       Clears all the files currently in use and returns 
      them.
       
       
      +
      +org.apache.hbase.thirdparty.com.google.common.collect.ImmutableCollectionHStoreFile
      +StripeStoreFileManager.clearFiles()
      +
       
       http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
       title="class or interface in java.util">Mapbyte[],http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
       title="class or interface in java.util">ListHStoreFile
       HRegion.close()
      @@ -597,36 +597,36 @@
       
       
       http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
       title="class or interface in java.util">IteratorHStoreFile
      -StripeStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
      -See StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue)
- for details on this method.
      -
      +DefaultStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
       
       
       http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
       title="class or interface in java.util">IteratorHStoreFile
      -DefaultStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
      +StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
      +Gets initial, full list of candidate store files to check 
      for row-key-before.
      +
       
       
       http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true;
       title="class or interface in java.util">IteratorHStoreFile
      -StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
      -Gets initial, full list of candidate store files to check 
      for row-key-before.
      +StripeStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValuetargetKey)
      +See StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue)
+ for details on this method.
       
       
       
       http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
       title="class or interface in java.util">CollectionHStoreFile
      -StripeStoreFileManager.getCompactedfiles()
      -
      -
      -http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
       title="class or interface in java.util">CollectionHStoreFile
       DefaultStoreFileManager.getCompactedfiles()
       
      -
      +
       http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
       title="class or interface in java.util">CollectionHStoreFile
       StoreFileManager.getCompactedfiles()
List of compacted files inside this store that need to be 
excluded in reads
  because further new reads will use only the newly created files out of 
compaction.
       
       
      +
      +http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
       title="class or interface in java.util">CollectionHStoreFile
      +StripeStoreFileManager.getCompactedfiles()
      +
       
       http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
       title="class or interface in java.util">CollectionHStoreFile
       HStore.getCompactedFiles()
      @@ -637,26 +637,26 @@
       
       
       http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
       title="class or interface in java.util">CollectionHStoreFile
      -StripeStoreFileManager.getFilesForScan(byte[]startRow,
      +DefaultStoreFileManager.getFilesForScan(byte[]startRow,
      booleanincludeStartRow,
      byte[]stopRow,
      booleanincludeStopRow)
       
       
       http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
       title="class or interface in 
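To make the row-key-before contract above concrete: getCandidateFilesForRowKeyBefore(KeyValue) hands back an iterator over store files that might contain the closest row at or before the target key. A hedged sketch of a caller, taking only the call shape from this page (a real caller would open scanners on each candidate):

    import java.util.Iterator;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.regionserver.HStoreFile;
    import org.apache.hadoop.hbase.regionserver.StoreFileManager;

    // Sketch: count the candidate files a manager proposes for a
    // row-key-before lookup.
    final class RowKeyBeforeProbe {
      static int countCandidates(StoreFileManager manager, KeyValue target) {
        int candidates = 0;
        for (Iterator<HStoreFile> it = manager.getCandidateFilesForRowKeyBefore(target);
            it.hasNext(); it.next()) {
          candidates++;
        }
        return candidates;
      }
    }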

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html 
      b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
      index 09f9400..c864853 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMaster.html
      @@ -156,11 +156,11 @@
       
       
       MetricsMaster
      -HMaster.getMasterMetrics()
      +MasterServices.getMasterMetrics()
       
       
       MetricsMaster
      -MasterServices.getMasterMetrics()
      +HMaster.getMasterMetrics()
       
       
       
      @@ -212,17 +212,17 @@
       
       
       
      +void
      +MasterProcedureManagerHost.initialize(MasterServicesmaster,
      +  MetricsMastermetricsMaster)
      +
      +
       abstract void
       MasterProcedureManager.initialize(MasterServicesmaster,
         MetricsMastermetricsMaster)
       Initialize a globally barriered procedure for master.
       
       
      -
      -void
      -MasterProcedureManagerHost.initialize(MasterServicesmaster,
      -  MetricsMastermetricsMaster)
      -
       
       
       
      @@ -258,11 +258,11 @@
       
       
       private MetricsMaster
      -SnapshotQuotaObserverChore.metrics
      +QuotaObserverChore.metrics
       
       
       private MetricsMaster
      -QuotaObserverChore.metrics
      +SnapshotQuotaObserverChore.metrics
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html 
      b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
      index de9d1bc..814497c 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
      @@ -139,29 +139,29 @@
       
       
       
      -MetricsMasterProcSource
      -MetricsMasterProcSourceFactory.create(MetricsMasterWrappermasterWrapper)
      +MetricsMasterSource
      +MetricsMasterSourceFactory.create(MetricsMasterWrappermasterWrapper)
       
       
       MetricsMasterQuotaSource
       MetricsMasterQuotaSourceFactory.create(MetricsMasterWrappermasterWrapper)
       
       
      -MetricsMasterSource
      -MetricsMasterSourceFactory.create(MetricsMasterWrappermasterWrapper)
      +MetricsMasterProcSource
      +MetricsMasterProcSourceFactory.create(MetricsMasterWrappermasterWrapper)
       
       
       MetricsMasterQuotaSource
       MetricsMasterQuotaSourceFactoryImpl.create(MetricsMasterWrappermasterWrapper)
       
       
      -MetricsMasterSource
      -MetricsMasterSourceFactoryImpl.create(MetricsMasterWrappermasterWrapper)
      -
      -
       MetricsMasterProcSource
       MetricsMasterProcSourceFactoryImpl.create(MetricsMasterWrappermasterWrapper)
       
      +
      +MetricsMasterSource
      +MetricsMasterSourceFactoryImpl.create(MetricsMasterWrappermasterWrapper)
      +
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html 
      b/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
      index 5216a4d..c91791d 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
      @@ -116,11 +116,11 @@
       
       
       private RackManager
      -FavoredNodeLoadBalancer.rackManager
      +FavoredNodesManager.rackManager
       
       
       private RackManager
      -FavoredNodesManager.rackManager
      +FavoredNodeLoadBalancer.rackManager
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html 
      b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
      index 146b426..d2c9cca 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
      @@ -282,10 +282,7 @@
       
       
       http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
       title="class or interface in java.util">ListRegionPlan
      -SimpleLoadBalancer.balanceCluster(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
       title="class or interface in java.util">MapServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
       title="class or interface in java.util">ListRegionInfoclusterMap)
      -Generate a global load balancing plan according to the 
      specified map of
      - server information to the most loaded regions of each server.
      -
      +FavoredStochasticBalancer.balanceCluster(http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
       title="class or interface in 
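For orientation, the balanceCluster overload documented in this table takes the whole cluster as a server-to-regions map and returns a list of region move plans. A minimal sketch under that signature (building and initializing a real balancer takes more setup than shown; a null return conventionally means nothing to move):

    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.hbase.HBaseIOException;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.master.RegionPlan;
    import org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer;

    // Sketch: ask the balancer for a plan and report how many moves it wants.
    final class BalanceOnce {
      static int plannedMoves(SimpleLoadBalancer balancer,
          Map<ServerName, List<RegionInfo>> clusterMap) throws HBaseIOException {
        List<RegionPlan> plans = balancer.balanceCluster(clusterMap);
        return plans == null ? 0 : plans.size();
      }
    }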

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
       
      b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      index ed15d9b..3d03e17 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      @@ -248,7 +248,7 @@ the order they are declared.
       
       
       values
      -public staticWALProcedureStore.PushType[]values()
      +public staticWALProcedureStore.PushType[]values()
       Returns an array containing the constants of this enum 
      type, in
       the order they are declared.  This method may be used to iterate
       over the constants as follows:
      @@ -268,7 +268,7 @@ for (WALProcedureStore.PushType c : 
      WALProcedureStore.PushType.values())
       
       
       valueOf
      -public staticWALProcedureStore.PushTypevalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">Stringname)
      +public staticWALProcedureStore.PushTypevalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">Stringname)
       Returns the enum constant of this type with the specified 
      name.
       The string must match exactly an identifier used to declare an
       enum constant in this type.  (Extraneous whitespace characters are 
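The boilerplate truncated here is the standard Java enum contract: values() returns the constants in declaration order, and valueOf(String) does an exact-name lookup, throwing IllegalArgumentException on anything else. A tiny stand-in demo (WALProcedureStore.PushType itself is internal to HBase, so a local enum is used):

    // Stand-in enum; the real PushType is not public API.
    enum DemoPushType { INSERT, UPDATE, DELETE }

    final class EnumContractDemo {
      public static void main(String[] args) {
        for (DemoPushType t : DemoPushType.values()) { // declaration order
          System.out.println(t);
        }
        System.out.println(DemoPushType.valueOf("UPDATE")); // exact-name lookup
      }
    }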
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
       
      b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      index c6f6a46..5bd2115 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      @@ -141,11 +141,11 @@
       
       
       private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/DelayQueue.html?is-external=true;
       title="class or interface in java.util.concurrent">DelayQueueDelayedUtil.DelayedWithTimeout
      -ProcedureExecutor.TimeoutExecutorThread.queue
      +RemoteProcedureDispatcher.TimeoutExecutorThread.queue
       
       
       private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/DelayQueue.html?is-external=true;
       title="class or interface in java.util.concurrent">DelayQueueDelayedUtil.DelayedWithTimeout
      -RemoteProcedureDispatcher.TimeoutExecutorThread.queue
      +ProcedureExecutor.TimeoutExecutorThread.queue
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html 
      b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      index 934c2fa..dd6045b 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      @@ -125,11 +125,11 @@
       
       
       MasterQuotaManager
      -MasterServices.getMasterQuotaManager()
      +HMaster.getMasterQuotaManager()
       
       
       MasterQuotaManager
      -HMaster.getMasterQuotaManager()
      +MasterServices.getMasterQuotaManager()
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html 
      b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      index a495cd1..d81fa5e 100644
      --- a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      +++ b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      @@ -110,9 +110,7 @@
       
       
       http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
       title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
       title="class or interface in java.util">ListQuotaSettings
      -AsyncAdmin.getQuota(QuotaFilterfilter)
      -List the quotas based on the filter.
      -
      +AsyncHBaseAdmin.getQuota(QuotaFilterfilter)
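A usage note for the getQuota row above ("List the quotas based on the filter"): the async variant resolves to a CompletableFuture over the matching QuotaSettings. A hedged sketch, where the user name "alice" is purely illustrative and the admin is assumed to come from AsyncConnection.getAdmin():

    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.quotas.QuotaFilter;

    // Sketch: asynchronously list one user's quotas and print them.
    final class QuotaListing {
      static void printUserQuotas(AsyncAdmin admin) {
        QuotaFilter filter = new QuotaFilter().setUserFilter("alice"); // illustrative
        admin.getQuota(filter)
            .thenAccept(settings -> settings.forEach(System.out::println));
      }
    }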
       
       
       http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
       title="class or interface in java.util">ListQuotaSettings
      @@ -121,16 +119,18 @@
       
       
       
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html 
      b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
      index 399dc36..5436db5 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
      @@ -18,8 +18,8 @@
       catch(err) {
       }
       //-->
      -var methods = 
      {"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10};
      -var tabs = {65535:["t0","All Methods"],1:["t1","Static 
      Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
      +var methods = 
      {"i0":42,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":42,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10};
      +var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
      Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
       var altColor = "altColor";
       var rowColor = "rowColor";
       var tableTab = "tableTab";
      @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
       
       
       PrevClass
      -NextClass
      +NextClass
       
       
       Frames
      @@ -74,7 +74,7 @@ var activeTableTab = "activeTableTab";
       
       
       Summary:
      -Nested|
      +Nested|
       Field|
       Constr|
       Method
      @@ -107,13 +107,18 @@ var activeTableTab = "activeTableTab";
       
       
       
      +
      +Direct Known Subclasses:
      +MirroringTableStateManager
      +
       
       
       @InterfaceAudience.Private
      -public class TableStateManager
      +public class TableStateManager
       extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">Object
       This is a helper class used to manage table states.
      - States persisted in tableinfo and cached internally.
+ This class uses hbase:meta as its store for table state, so hbase:meta must be 
online before
+ start()
is called.
 TODO: Cache state. Cut down on meta lookups.
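Putting the state-manager surface described above to work, a hedged sketch of a guarded transition built only from call shapes listed further down this page (the manager instance would come from the active HMaster):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableState;
    import org.apache.hadoop.hbase.master.TableStateManager;

    // Sketch: flip a table to DISABLED only if it is currently ENABLED.
    final class TableStateGuard {
      static void disableIfEnabled(TableStateManager tsm, TableName tn) throws IOException {
        if (tsm.isTableState(tn, TableState.State.ENABLED)) {
          tsm.setTableState(tn, TableState.State.DISABLED);
        }
      }
    }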
       
       
      @@ -121,6 +126,25 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       
      +
      +
      +
      +
      +
      +Nested Class Summary
      +
      +Nested Classes
      +
      +Modifier and Type
      +Class and Description
      +
      +
      +static class
      +TableStateManager.TableStateNotFoundException
      +
      +
      +
      +
       
       
       
      @@ -134,7 +158,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       Field and Description
       
       
      -private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReadWriteLock.html?is-external=true;
       title="class or interface in 
      java.util.concurrent.locks">ReadWriteLock
      +(package private) http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReadWriteLock.html?is-external=true;
       title="class or interface in 
      java.util.concurrent.locks">ReadWriteLock
       lock
       
       
      @@ -142,9 +166,16 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       LOG
       
       
      -private MasterServices
      +(package private) MasterServices
       master
       
      +
      +(package private) static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">String
      +MIGRATE_TABLE_STATE_FROM_ZK_KEY
+Set this key to false in Configuration to disable migrating 
table state from zookeeper
 to the hbase:meta table.
      +
      +
       
       
       
      @@ -172,51 +203,73 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       Method Summary
       
      -All MethodsStatic MethodsInstance MethodsConcrete Methods
      +All MethodsInstance MethodsConcrete MethodsDeprecated Methods
       
       Modifier and Type
       Method and Description
       
       
      -static void
      +protected void
      +deleteZooKeeper(TableNametableName)
      +Deprecated.
      +Since 2.0.0. To be removed 
      in hbase-3.0.0.
      +
      +
      +
      +
      +protected void
      +fixTableState(TableStatetableState)
+For subclasses, in case they want to do fixup after
state has been read from hbase:meta.
      +
      +
      +
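For illustration only, a subclass hook might look like the sketch below; the class name and body are hypothetical (MirroringTableStateManager, listed above as the direct known subclass, is the in-tree example), and the constructor shape is assumed:

    class AuditingTableStateManager extends TableStateManager {  // hypothetical
      AuditingTableStateManager(MasterServices master) { super(master); }
      @Override
      protected void fixTableState(TableState tableState) {
        // repair or record the state here after it has been read from hbase:meta
      }
    }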
      +private void
       fixTableStates(TableDescriptorstableDescriptors,
         Connectionconnection)
       
      -
      +
       http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
       title="class or interface in java.util">SetTableName
       getTablesInStates(TableState.State...states)
       Return all tables in given states.
       
       
      -
      +
       TableState.State
       getTableState(TableNametableName)
       
      -
      +
       boolean
       isTablePresent(TableNametableName)
       
      -
      +
       boolean
       isTableState(TableNametableName,
       TableState.State...states)
       
      -
      +
      +private void
      +migrateZooKeeper()
      +Deprecated.
      +Since 2.0.0. Remove in 
      hbase-3.0.0.
      +
      +
      +
      +
       protected TableState
       readMetaState(TableNametableName)
       
      -
      +
       void
       setDeletedTable(TableNametableName)
       
      -
      +
       void
       setTableState(TableNametableName,
        TableState.StatenewState)
Set table state to the provided state.
       
       
      -
      +
       TableState.State
       setTableStateIfInStates(TableNametableName,
      TableState.StatenewState,
      @@ -225,7 +278,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
        Caller should lock table on write.
       
       
      -
      +
       boolean
       setTableStateIfNotInStates(TableNametableName,
       

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
       
      b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      index ed15d9b..3d03e17 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
      @@ -248,7 +248,7 @@ the order they are declared.
       
       
       values
      -public staticWALProcedureStore.PushType[]values()
      +public staticWALProcedureStore.PushType[]values()
       Returns an array containing the constants of this enum 
      type, in
       the order they are declared.  This method may be used to iterate
       over the constants as follows:
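(the sample loop that belongs here, visible in the hunk context below, is the standard enum idiom:)

    for (WALProcedureStore.PushType c : WALProcedureStore.PushType.values())
        System.out.println(c);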
      @@ -268,7 +268,7 @@ for (WALProcedureStore.PushType c : 
      WALProcedureStore.PushType.values())
       
       
       valueOf
      -public staticWALProcedureStore.PushTypevalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">Stringname)
      +public staticWALProcedureStore.PushTypevalueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">Stringname)
       Returns the enum constant of this type with the specified 
      name.
       The string must match exactly an identifier used to declare an
       enum constant in this type.  (Extraneous whitespace characters are 
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
       
      b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      index c6f6a46..5bd2115 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
      @@ -141,11 +141,11 @@
       
       
       private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/DelayQueue.html?is-external=true;
       title="class or interface in java.util.concurrent">DelayQueueDelayedUtil.DelayedWithTimeout
      -ProcedureExecutor.TimeoutExecutorThread.queue
      +RemoteProcedureDispatcher.TimeoutExecutorThread.queue
       
       
       private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/DelayQueue.html?is-external=true;
       title="class or interface in java.util.concurrent">DelayQueueDelayedUtil.DelayedWithTimeout
      -RemoteProcedureDispatcher.TimeoutExecutorThread.queue
      +ProcedureExecutor.TimeoutExecutorThread.queue
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html 
      b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      index 934c2fa..dd6045b 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
      @@ -125,11 +125,11 @@
       
       
       MasterQuotaManager
      -MasterServices.getMasterQuotaManager()
      +HMaster.getMasterQuotaManager()
       
       
       MasterQuotaManager
      -HMaster.getMasterQuotaManager()
      +MasterServices.getMasterQuotaManager()
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html 
      b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      index a495cd1..d81fa5e 100644
      --- a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      +++ b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
      @@ -110,9 +110,7 @@
       
       
       http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
       title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
       title="class or interface in java.util">ListQuotaSettings
      -AsyncAdmin.getQuota(QuotaFilterfilter)
      -List the quotas based on the filter.
      -
      +AsyncHBaseAdmin.getQuota(QuotaFilterfilter)
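As a sketch of this asynchronous call site (the admin handle and the fluent filter setter are assumed, not shown on this page):

    // Hypothetical async quota lookup; the result arrives on a CompletableFuture.
    CompletableFuture<List<QuotaSettings>> quotas =
        asyncAdmin.getQuota(new QuotaFilter().setTableFilter("my_table"));
    quotas.thenAccept(list -> list.forEach(System.out::println));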
       
       
       http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
       title="class or interface in java.util">ListQuotaSettings
      @@ -121,16 +119,18 @@
       
       
       
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
      b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
      index 42f8bc2..82c1efb 100644
      --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
      +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
      @@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
       
       
       @InterfaceAudience.Private
      -public class HStore
      +public class HStore
       extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">Object
       implements Store, HeapSize, StoreConfigInformation, PropagatingConfigurationObserver
A Store holds a column family in a Region.  It's a memstore
and a set of zero
      @@ -218,11 +218,11 @@ implements COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
       
       
      -private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
       title="class or interface in 
      java.util.concurrent.atomic">AtomicLong
      +private long
       compactedCellsCount
       
       
      -private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
       title="class or interface in 
      java.util.concurrent.atomic">AtomicLong
      +private long
       compactedCellsSize
       
       
      @@ -278,15 +278,15 @@ implements FIXED_OVERHEAD
       
       
      -private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
       title="class or interface in 
      java.util.concurrent.atomic">AtomicLong
      +private long
       flushedCellsCount
       
       
      -private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
       title="class or interface in 
      java.util.concurrent.atomic">AtomicLong
      +private long
       flushedCellsSize
       
       
      -private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
       title="class or interface in 
      java.util.concurrent.atomic">AtomicLong
      +private long
       flushedOutputFileSize
       
       
      @@ -316,11 +316,11 @@ implements LOG
       
       
      -private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
       title="class or interface in 
      java.util.concurrent.atomic">AtomicLong
      +private long
       majorCompactedCellsCount
       
       
      -private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
       title="class or interface in 
      java.util.concurrent.atomic">AtomicLong
      +private long
       majorCompactedCellsSize
       
       
      @@ -356,11 +356,11 @@ implements storeEngine
       
       
      -private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
       title="class or interface in 
      java.util.concurrent.atomic">AtomicLong
      +private long
       storeSize
       
       
      -private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
       title="class or interface in 
      java.util.concurrent.atomic">AtomicLong
      +private long
       totalUncompressedBytes
       
       
      @@ -1226,7 +1226,7 @@ implements 
       
       MEMSTORE_CLASS_NAME
      -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">String MEMSTORE_CLASS_NAME
      +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">String MEMSTORE_CLASS_NAME
       
       See Also:
       Constant
       Field Values
      @@ -1239,7 +1239,7 @@ implements 
       
       COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
      -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
      +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">String COMPACTCHECKER_INTERVAL_MULTIPLIER_KEY
       
       See Also:
       Constant
       Field Values
      @@ -1252,7 +1252,7 @@ implements 
       
       BLOCKING_STOREFILES_KEY
      -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">String BLOCKING_STOREFILES_KEY
      +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">String BLOCKING_STOREFILES_KEY
       
       See Also:
       Constant
       Field Values
      @@ -1265,7 +1265,7 @@ implements 
       
       BLOCK_STORAGE_POLICY_KEY
      -public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">String BLOCK_STORAGE_POLICY_KEY
      +public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
       title="class or interface in java.lang">String BLOCK_STORAGE_POLICY_KEY
       
       See Also:
       Constant
       Field Values
      @@ -1278,7 +1278,7 @@ implements 
       
       

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
      index 2939a56..681e263 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.TerminationStatus.html
      @@ -61,602 +61,608 @@
       053import 
      org.apache.hadoop.hbase.monitoring.TaskMonitor;
       054import 
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
       055import 
      org.apache.hadoop.hbase.util.FSUtils;
      -056import 
      org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
      -057import 
      org.apache.yetus.audience.InterfaceAudience;
      -058import org.slf4j.Logger;
      -059import org.slf4j.LoggerFactory;
      -060import 
      org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
      -061
      -062/**
      -063 * Distributes the task of log splitting 
      to the available region servers.
      -064 * Coordination happens via coordination 
      engine. For every log file that has to be split a
      -065 * task is created. SplitLogWorkers race 
      to grab a task.
      -066 *
      -067 * pSplitLogManager monitors the 
      tasks that it creates using the
      -068 * timeoutMonitor thread. If a task's 
      progress is slow then
      -069 * {@link 
      SplitLogManagerCoordination#checkTasks} will take away the
      -070 * task from the owner {@link 
      org.apache.hadoop.hbase.regionserver.SplitLogWorker}
      -071 * and the task will be up for grabs 
      again. When the task is done then it is
      -072 * deleted by SplitLogManager.
      -073 *
      -074 * pClients call {@link 
      #splitLogDistributed(Path)} to split a region server's
      -075 * log files. The caller thread waits in 
      this method until all the log files
      -076 * have been split.
      -077 *
      -078 * pAll the coordination calls 
      made by this class are asynchronous. This is mainly
      -079 * to help reduce response time seen by 
      the callers.
      -080 *
-081 * pThere is a race in this design between the SplitLogManager and the
      -082 * SplitLogWorker. SplitLogManager might 
      re-queue a task that has in reality
      -083 * already been completed by a 
      SplitLogWorker. We rely on the idempotency of
      -084 * the log splitting task for 
      correctness.
      -085 *
      -086 * pIt is also assumed that every 
      log splitting task is unique and once
-087 * completed (either with success or with error) it will not be submitted
      -088 * again. If a task is resubmitted then 
      there is a risk that old "delete task"
      -089 * can delete the re-submission.
      -090 */
      -091@InterfaceAudience.Private
      -092public class SplitLogManager {
      -093  private static final Logger LOG = 
      LoggerFactory.getLogger(SplitLogManager.class);
      -094
      -095  private final MasterServices server;
      -096
      -097  private final Configuration conf;
      -098  private final ChoreService 
      choreService;
      -099
      -100  public static final int 
      DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min
      -101
      -102  private long unassignedTimeout;
      -103  private long lastTaskCreateTime = 
      Long.MAX_VALUE;
      -104
      -105  @VisibleForTesting
      -106  final ConcurrentMapString, Task 
      tasks = new ConcurrentHashMap();
      -107  private TimeoutMonitor 
      timeoutMonitor;
      -108
      -109  private volatile SetServerName 
      deadWorkers = null;
      -110  private final Object deadWorkersLock = 
      new Object();
      -111
      -112  /**
-113   * It's OK to construct this object even when region-servers are not online. It does look up the
      -114   * orphan tasks in coordination engine 
      but it doesn't block waiting for them to be done.
      -115   * @param master the master services
      -116   * @param conf the HBase 
      configuration
      -117   * @throws IOException
      -118   */
      -119  public SplitLogManager(MasterServices 
      master, Configuration conf)
      -120  throws IOException {
      -121this.server = master;
      -122this.conf = conf;
      -123this.choreService = new 
      ChoreService(master.getServerName() + "_splitLogManager_");
      -124if 
      (server.getCoordinatedStateManager() != null) {
      -125  SplitLogManagerCoordination 
      coordination = getSplitLogManagerCoordination();
      -126  SetString failedDeletions = 
      Collections.synchronizedSet(new HashSetString());
      -127  SplitLogManagerDetails details = 
      new SplitLogManagerDetails(tasks, master, failedDeletions);
      -128  coordination.setDetails(details);
      -129  coordination.init();
      -130}
      -131this.unassignedTimeout =
      -132
      conf.getInt("hbase.splitlog.manager.unassigned.timeout", 
      DEFAULT_UNASSIGNED_TIMEOUT);
      -133this.timeoutMonitor =
      -134new 
      TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 
      1000),
      -135master);
      -136
      choreService.scheduleChore(timeoutMonitor);
      -137  }
      -138
      -139  private SplitLogManagerCoordination 
      getSplitLogManagerCoordination() {
      -140return 
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
      index b8e6dfa..7b512ba 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
      @@ -28,8473 +28,8472 @@
       020import static 
      org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL;
       021import static 
      org.apache.hadoop.hbase.regionserver.HStoreFile.MAJOR_COMPACTION_KEY;
       022import static 
      org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
      -023import java.io.EOFException;
      -024import java.io.FileNotFoundException;
      -025import java.io.IOException;
      -026import java.io.InterruptedIOException;
      -027import java.lang.reflect.Constructor;
      -028import java.nio.ByteBuffer;
      -029import 
      java.nio.charset.StandardCharsets;
      -030import java.text.ParseException;
      -031import java.util.AbstractList;
      -032import java.util.ArrayList;
      -033import java.util.Arrays;
      -034import java.util.Collection;
      -035import java.util.Collections;
      -036import java.util.HashMap;
      -037import java.util.HashSet;
      -038import java.util.Iterator;
      -039import java.util.List;
      -040import java.util.Map;
      -041import java.util.Map.Entry;
      -042import java.util.NavigableMap;
      -043import java.util.NavigableSet;
      -044import java.util.Optional;
      -045import java.util.RandomAccess;
      -046import java.util.Set;
      -047import java.util.TreeMap;
      -048import java.util.UUID;
      -049import java.util.concurrent.Callable;
      -050import 
      java.util.concurrent.CompletionService;
      -051import 
      java.util.concurrent.ConcurrentHashMap;
      -052import 
      java.util.concurrent.ConcurrentMap;
      -053import 
      java.util.concurrent.ConcurrentSkipListMap;
      -054import 
      java.util.concurrent.ExecutionException;
      -055import 
      java.util.concurrent.ExecutorCompletionService;
      -056import 
      java.util.concurrent.ExecutorService;
      -057import java.util.concurrent.Executors;
      -058import java.util.concurrent.Future;
      -059import java.util.concurrent.FutureTask;
      -060import 
      java.util.concurrent.ThreadFactory;
      -061import 
      java.util.concurrent.ThreadPoolExecutor;
      -062import java.util.concurrent.TimeUnit;
      -063import 
      java.util.concurrent.TimeoutException;
      -064import 
      java.util.concurrent.atomic.AtomicBoolean;
      -065import 
      java.util.concurrent.atomic.AtomicInteger;
      -066import 
      java.util.concurrent.atomic.AtomicLong;
      -067import 
      java.util.concurrent.atomic.LongAdder;
      -068import java.util.concurrent.locks.Lock;
      -069import 
      java.util.concurrent.locks.ReadWriteLock;
      -070import 
      java.util.concurrent.locks.ReentrantReadWriteLock;
      -071import java.util.function.Function;
      -072
      -073import 
      org.apache.hadoop.conf.Configuration;
      -074import org.apache.hadoop.fs.FileStatus;
      -075import org.apache.hadoop.fs.FileSystem;
      -076import 
      org.apache.hadoop.fs.LocatedFileStatus;
      -077import org.apache.hadoop.fs.Path;
      -078import org.apache.hadoop.hbase.Cell;
      -079import 
      org.apache.hadoop.hbase.CellBuilderType;
      -080import 
      org.apache.hadoop.hbase.CellComparator;
      -081import 
      org.apache.hadoop.hbase.CellComparatorImpl;
      -082import 
      org.apache.hadoop.hbase.CellScanner;
      -083import 
      org.apache.hadoop.hbase.CellUtil;
      -084import 
      org.apache.hadoop.hbase.CompareOperator;
      -085import 
      org.apache.hadoop.hbase.CompoundConfiguration;
      -086import 
      org.apache.hadoop.hbase.DoNotRetryIOException;
      -087import 
      org.apache.hadoop.hbase.DroppedSnapshotException;
      -088import 
      org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
      -089import 
      org.apache.hadoop.hbase.HConstants;
      -090import 
      org.apache.hadoop.hbase.HConstants.OperationStatusCode;
      -091import 
      org.apache.hadoop.hbase.HDFSBlocksDistribution;
      -092import 
      org.apache.hadoop.hbase.HRegionInfo;
      -093import 
      org.apache.hadoop.hbase.KeyValue;
      -094import 
      org.apache.hadoop.hbase.KeyValueUtil;
      -095import 
      org.apache.hadoop.hbase.NamespaceDescriptor;
      -096import 
      org.apache.hadoop.hbase.NotServingRegionException;
      -097import 
      org.apache.hadoop.hbase.PrivateCellUtil;
      -098import 
      org.apache.hadoop.hbase.RegionTooBusyException;
      -099import 
      org.apache.hadoop.hbase.TableName;
      -100import org.apache.hadoop.hbase.Tag;
      -101import org.apache.hadoop.hbase.TagUtil;
      -102import 
      org.apache.hadoop.hbase.UnknownScannerException;
      -103import 
      org.apache.hadoop.hbase.client.Append;
      -104import 
      org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
      -105import 
      org.apache.hadoop.hbase.client.CompactionState;
      -106import 
      org.apache.hadoop.hbase.client.Delete;
      -107import 
      org.apache.hadoop.hbase.client.Durability;
      -108import 
      org.apache.hadoop.hbase.client.Get;
      -109import 
      org.apache.hadoop.hbase.client.Increment;
      -110import 
      org.apache.hadoop.hbase.client.IsolationLevel;
      -111import 
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html 
      b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
      index 1981ca2..8b7b594 100644
      --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
      +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide.html
      @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -public class TestFromClientSide
      +public class TestFromClientSide
       extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">Object
       Run tests that use the HBase clients; Table.
        Sets up the HBase mini cluster once at start and runs through all client 
      tests.
      @@ -137,38 +137,42 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       Field and Description
       
       
      +static HBaseClassTestRule
      +CLASS_RULE
      +
      +
       private static byte[]
       FAMILY
       
      -
      +
       private static byte[]
       INVALID_FAMILY
       
      -
      +
       private static org.slf4j.Logger
       LOG
       
      -
      +
       org.junit.rules.TestName
       name
       
      -
      +
       private static byte[]
       QUALIFIER
       
      -
      +
       private static byte[]
       ROW
       
      -
      +
       protected static int
       SLAVES
       
      -
      +
       protected static HBaseTestingUtility
       TEST_UTIL
       
      -
      +
       private static byte[]
       VALUE
       
      @@ -1039,13 +1043,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       Field Detail
      +
      +
      +
      +
      +
      +CLASS_RULE
      +public static finalHBaseClassTestRule CLASS_RULE
      +
      +
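The new field follows the project's test-rule pattern; declared in source it would look roughly like this (the forClass factory is the usual shape in HBase test code):

    @ClassRule
    public static final HBaseClassTestRule CLASS_RULE =
        HBaseClassTestRule.forClass(TestFromClientSide.class);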
       
       
       
       
       
       LOG
      -private static finalorg.slf4j.Logger LOG
      +private static finalorg.slf4j.Logger LOG
       
       
       
      @@ -1054,7 +1067,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       TEST_UTIL
      -protected static finalHBaseTestingUtility TEST_UTIL
      +protected static finalHBaseTestingUtility TEST_UTIL
       
       
       
      @@ -1063,7 +1076,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       ROW
      -private staticbyte[] ROW
      +private staticbyte[] ROW
       
       
       
      @@ -1072,7 +1085,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       FAMILY
      -private staticbyte[] FAMILY
      +private staticbyte[] FAMILY
       
       
       
      @@ -1081,7 +1094,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       INVALID_FAMILY
      -private static finalbyte[] INVALID_FAMILY
      +private static finalbyte[] INVALID_FAMILY
       
       
       
      @@ -1090,7 +1103,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       QUALIFIER
      -private staticbyte[] QUALIFIER
      +private staticbyte[] QUALIFIER
       
       
       
      @@ -1099,7 +1112,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       VALUE
      -private staticbyte[] VALUE
      +private staticbyte[] VALUE
       
       
       
      @@ -1108,7 +1121,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       SLAVES
      -protected staticint SLAVES
      +protected staticint SLAVES
       
       
       
      @@ -1117,7 +1130,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       name
      -publicorg.junit.rules.TestName name
      +publicorg.junit.rules.TestName name
       
       
       
      @@ -1134,7 +1147,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       TestFromClientSide
      -publicTestFromClientSide()
      +publicTestFromClientSide()
       
       
       
      @@ -1151,7 +1164,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       setUpBeforeClass
      -public staticvoidsetUpBeforeClass()
      +public staticvoidsetUpBeforeClass()
        throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
       title="class or interface in java.lang">Exception
       
       Throws:
      @@ -1165,7 +1178,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       tearDownAfterClass
      -public staticvoidtearDownAfterClass()
      +public staticvoidtearDownAfterClass()
      throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
       title="class or interface in java.lang">Exception
       
       Throws:
      @@ -1179,7 +1192,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       testDuplicateAppend
      -publicvoidtestDuplicateAppend()
      +publicvoidtestDuplicateAppend()
        throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
       title="class or interface in java.lang">Exception
       Test append result when there are duplicate rpc 
requests.
       
      @@ -1194,7 +1207,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       testKeepDeletedCells
      -publicvoidtestKeepDeletedCells()
      +publicvoidtestKeepDeletedCells()
         throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
       title="class or interface in java.lang">Exception
       Basic client side validation of HBASE-4536
       
      @@ -1209,7 +1222,7 @@ extends 

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/RestoreTool.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/RestoreTool.html 
      b/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/RestoreTool.html
      index 4277d0a..36dbc3c 100644
      --- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/RestoreTool.html
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/util/RestoreTool.html
      @@ -44,36 +44,36 @@
       036import 
      org.apache.hadoop.hbase.backup.BackupRestoreFactory;
       037import 
      org.apache.hadoop.hbase.backup.HBackupFileSystem;
       038import 
      org.apache.hadoop.hbase.backup.RestoreJob;
      -039import 
      org.apache.yetus.audience.InterfaceAudience;
      -040import org.slf4j.Logger;
      -041import org.slf4j.LoggerFactory;
      -042import 
      org.apache.hadoop.hbase.client.Admin;
      -043import 
      org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
      -044import 
      org.apache.hadoop.hbase.client.Connection;
      -045import 
      org.apache.hadoop.hbase.client.TableDescriptor;
      -046import 
      org.apache.hadoop.hbase.client.TableDescriptorBuilder;
      -047import 
      org.apache.hadoop.hbase.io.HFileLink;
      -048import 
      org.apache.hadoop.hbase.io.hfile.HFile;
      +039import 
      org.apache.hadoop.hbase.client.Admin;
      +040import 
      org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
      +041import 
      org.apache.hadoop.hbase.client.Connection;
      +042import 
      org.apache.hadoop.hbase.client.TableDescriptor;
      +043import 
      org.apache.hadoop.hbase.client.TableDescriptorBuilder;
      +044import 
      org.apache.hadoop.hbase.io.HFileLink;
      +045import 
      org.apache.hadoop.hbase.io.hfile.HFile;
      +046import 
      org.apache.hadoop.hbase.regionserver.StoreFileInfo;
      +047import 
      org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
      +048import 
      org.apache.hadoop.hbase.snapshot.SnapshotManifest;
       049import 
      org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
      -050import 
      org.apache.hadoop.hbase.regionserver.StoreFileInfo;
      -051import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
      -052import 
      org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
      -053import 
      org.apache.hadoop.hbase.snapshot.SnapshotManifest;
      -054import 
      org.apache.hadoop.hbase.util.Bytes;
      -055import 
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
      -056import 
      org.apache.hadoop.hbase.util.FSTableDescriptors;
      -057
      -058/**
      -059 * A collection for methods used by 
      multiple classes to restore HBase tables.
      -060 */
      -061@InterfaceAudience.Private
      -062public class RestoreTool {
      -063
      +050import 
      org.apache.hadoop.hbase.util.Bytes;
      +051import 
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
      +052import 
      org.apache.hadoop.hbase.util.FSTableDescriptors;
      +053import 
      org.apache.yetus.audience.InterfaceAudience;
      +054import org.slf4j.Logger;
      +055import org.slf4j.LoggerFactory;
      +056
      +057import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
      +058
      +059/**
      +060 * A collection for methods used by 
      multiple classes to restore HBase tables.
      +061 */
      +062@InterfaceAudience.Private
      +063public class RestoreTool {
       064  public static final Logger LOG = 
      LoggerFactory.getLogger(BackupUtils.class);
       065  private final static long 
      TABLE_AVAILABILITY_WAIT_TIME = 18;
       066
       067  private final String[] ignoreDirs = { 
      HConstants.RECOVERED_EDITS_DIR };
      -068  protected Configuration conf = null;
      +068  protected Configuration conf;
       069  protected Path backupRootPath;
       070  protected String backupId;
       071  protected FileSystem fs;
      @@ -97,433 +97,426 @@
       089   * @throws IOException exception
       090   */
       091  Path getTableArchivePath(TableName 
      tableName) throws IOException {
      -092
      -093Path baseDir =
      -094new 
      Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, 
      backupId),
      -095
      HConstants.HFILE_ARCHIVE_DIRECTORY);
      -096Path dataDir = new Path(baseDir, 
      HConstants.BASE_NAMESPACE_DIR);
      -097Path archivePath = new Path(dataDir, 
      tableName.getNamespaceAsString());
      -098Path tableArchivePath = new 
      Path(archivePath, tableName.getQualifierAsString());
      -099if (!fs.exists(tableArchivePath) || 
      !fs.getFileStatus(tableArchivePath).isDirectory()) {
      -100  LOG.debug("Folder tableArchivePath: 
      " + tableArchivePath.toString() + " does not exists");
      -101  tableArchivePath = null; // empty 
      table has no archive
      -102}
      -103return tableArchivePath;
      -104  }
      -105
      -106  /**
      -107   * Gets region list
      -108   * @param tableName table name
      -109   * @return RegionList region list
      -110   * @throws FileNotFoundException 
      exception
      -111   * @throws IOException exception
      -112   */
      -113  ArrayListPath 
      getRegionList(TableName tableName) throws FileNotFoundException, IOException 
      {
      -114Path tableArchivePath = 
      getTableArchivePath(tableName);
      -115ArrayListPath regionDirList = 
      new ArrayListPath();
      -116FileStatus[] children = 
      fs.listStatus(tableArchivePath);
      -117for (FileStatus 

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
      index 281c243..1a84ee1 100644
      --- 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatKVHeap.html
      @@ -152,433 +152,461 @@
       144
       145  /**
       146   * Make puts to put the input value 
      into each combination of row, family, and qualifier
      -147   * @param rows
      -148   * @param families
      -149   * @param qualifiers
      -150   * @param value
      -151   * @return
      -152   * @throws IOException
      -153   */
      -154  static ArrayListPut 
      createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
      -155  byte[] value) throws IOException 
      {
      -156Put put;
      -157ArrayListPut puts = new 
      ArrayList();
      -158
      -159for (int row = 0; row  
      rows.length; row++) {
      -160  put = new Put(rows[row]);
      -161  for (int fam = 0; fam  
      families.length; fam++) {
      -162for (int qual = 0; qual  
      qualifiers.length; qual++) {
      -163  KeyValue kv = new 
      KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
      -164  put.add(kv);
      -165}
      -166  }
      -167  puts.add(put);
      -168}
      -169
      -170return puts;
      -171  }
      -172
      -173  @AfterClass
      -174  public static void tearDownAfterClass() 
      throws Exception {
      -175TEST_UTIL.shutdownMiniCluster();
      -176  }
      -177
      -178  @Before
      -179  public void setupBeforeTest() throws 
      Exception {
      -180disableSleeping();
      -181  }
      -182
      -183  @After
      -184  public void teardownAfterTest() throws 
      Exception {
      -185disableSleeping();
      -186  }
      -187
      -188  /**
      -189   * Run the test callable when 
      heartbeats are enabled/disabled. We expect all tests to only pass
      -190   * when heartbeat messages are enabled 
      (otherwise the test is pointless). When heartbeats are
      -191   * disabled, the test should throw an 
      exception.
      -192   * @param testCallable
      -193   * @throws InterruptedException
      -194   */
      -195  private void 
      testImportanceOfHeartbeats(CallableVoid testCallable) throws 
      InterruptedException {
      -196
      HeartbeatRPCServices.heartbeatsEnabled = true;
      -197
      +147   */
      +148  static ArrayListPut 
      createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
      +149  byte[] value) throws IOException 
      {
      +150Put put;
      +151ArrayListPut puts = new 
      ArrayList();
      +152
      +153for (int row = 0; row  
      rows.length; row++) {
      +154  put = new Put(rows[row]);
      +155  for (int fam = 0; fam  
      families.length; fam++) {
      +156for (int qual = 0; qual  
      qualifiers.length; qual++) {
      +157  KeyValue kv = new 
      KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
      +158  put.add(kv);
      +159}
      +160  }
      +161  puts.add(put);
      +162}
      +163
      +164return puts;
      +165  }
      +166
      +167  @AfterClass
      +168  public static void tearDownAfterClass() 
      throws Exception {
      +169TEST_UTIL.shutdownMiniCluster();
      +170  }
      +171
      +172  @Before
      +173  public void setupBeforeTest() throws 
      Exception {
      +174disableSleeping();
      +175  }
      +176
      +177  @After
      +178  public void teardownAfterTest() throws 
      Exception {
      +179disableSleeping();
      +180  }
      +181
      +182  /**
      +183   * Run the test callable when 
      heartbeats are enabled/disabled. We expect all tests to only pass
      +184   * when heartbeat messages are enabled 
      (otherwise the test is pointless). When heartbeats are
      +185   * disabled, the test should throw an 
      exception.
      +186   */
      +187  private void 
      testImportanceOfHeartbeats(CallableVoid testCallable) throws 
      InterruptedException {
      +188
      HeartbeatRPCServices.heartbeatsEnabled = true;
      +189
      +190try {
      +191  testCallable.call();
      +192} catch (Exception e) {
      +193  fail("Heartbeat messages are 
      enabled, exceptions should NOT be thrown. Exception trace:"
      +194  + 
      ExceptionUtils.getStackTrace(e));
      +195}
      +196
      +197
      HeartbeatRPCServices.heartbeatsEnabled = false;
       198try {
       199  testCallable.call();
       200} catch (Exception e) {
      -201  fail("Heartbeat messages are 
      enabled, exceptions should NOT be thrown. Exception trace:"
      -202  + 
      ExceptionUtils.getStackTrace(e));
      -203}
      -204
      -205
      HeartbeatRPCServices.heartbeatsEnabled = false;
      -206try {
      -207  testCallable.call();
      -208} catch (Exception e) {
      -209  return;
      -210} finally {
      -211  
      HeartbeatRPCServices.heartbeatsEnabled = true;
      -212}
      -213fail("Heartbeats messages are 
      disabled, an exception should be thrown. If an exception "
      -214+ " is not thrown, the test case 
      is not testing the importance of heartbeat 

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html 
      b/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html
      index 358b5e2..5788cf7 100644
      --- a/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html
      +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/InternalScanner.html
      @@ -227,7 +227,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
       
       
       close
      -voidclose()
      +voidclose()
       throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
       title="class or interface in java.io">IOException
       Closes the scanner and releases any resources it has 
      allocated
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
       
      b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
      index 4132807..5428e2f 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.KVScannerComparator.html
      @@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -protected static class KeyValueHeap.KVScannerComparator
      +protected static class KeyValueHeap.KVScannerComparator
       extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
       title="class or interface in java.lang">Object
       implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparator.html?is-external=true;
       title="class or interface in java.util">ComparatorKeyValueScanner
       
      @@ -231,7 +231,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
       
       
       kvComparator
      -protectedCellComparator kvComparator
      +protectedCellComparator kvComparator
       
       
       
      @@ -248,7 +248,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
       
       
       KVScannerComparator
      -publicKVScannerComparator(CellComparatorkvComparator)
      +publicKVScannerComparator(CellComparatorkvComparator)
       Constructor
       
       Parameters:
      @@ -270,7 +270,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
       
       
       compare
      -publicintcompare(KeyValueScannerleft,
      +publicintcompare(KeyValueScannerleft,
      KeyValueScannerright)
       
       Specified by:
      @@ -284,7 +284,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
       
       
       compare
      -publicintcompare(Cellleft,
      +publicintcompare(Cellleft,
      Cellright)
       Compares two KeyValue
       
      @@ -302,7 +302,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/util/Comparato
       
       
       getComparator
      -publicCellComparatorgetComparator()
      +publicCellComparatorgetComparator()
       
       Returns:
       KVComparator
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html 
      b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
      index 9195a40..67934f0 100644
      --- a/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
      +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/KeyValueHeap.html
      @@ -498,7 +498,7 @@ implements 
       
       peek
      -publicCellpeek()
      +publicCellpeek()
       Description copied from 
      interface:KeyValueScanner
       Look at the next Cell in this scanner, but do not iterate 
      scanner.
        NOTICE: The returned cell has not been passed into ScanQueryMatcher. So it 
      may not be what the
      @@ -517,7 +517,7 @@ implements 
       
       next
      -publicCellnext()
      +publicCellnext()
         throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
       title="class or interface in java.io">IOException
       Description copied from 
      interface:KeyValueScanner
       Return the next Cell in this scanner, iterating the 
      scanner
      @@ -537,7 +537,7 @@ implements 
       
       next
      -publicbooleannext(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
       title="class or interface in java.util">ListCellresult,
      +publicbooleannext(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
       title="class or interface in java.util">ListCellresult,
       ScannerContextscannerContext)
        throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
       title="class or interface in java.io">IOException
       Gets the next row of keys from the top-most scanner.
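A sketch of the calling convention for this method; construction of the heap and the ScannerContext is assumed:

    // Drain the heap row by row; next(...) returns false once exhausted.
    List<Cell> cells = new ArrayList<>();
    boolean moreRows;
    do {
      moreRows = heap.next(cells, scannerContext);
      // consume the cells gathered for this row
      cells.clear();
    } while (moreRows);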
      @@ -564,7 +564,7 @@ implements 
       
       close
      -publicvoidclose()
      +publicvoidclose()
       Description copied from 
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.RandRsExecutor.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.RandRsExecutor.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.RandRsExecutor.html
      index f1db5ca..d8515d7 100644
      --- 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.RandRsExecutor.html
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.RandRsExecutor.html
      @@ -32,813 +32,820 @@
       024import static org.junit.Assert.fail;
       025
       026import java.io.IOException;
      -027import java.net.SocketTimeoutException;
      -028import java.util.NavigableMap;
      -029import java.util.Random;
      -030import java.util.Set;
      -031import java.util.SortedSet;
      -032import 
      java.util.concurrent.ConcurrentSkipListMap;
      -033import 
      java.util.concurrent.ConcurrentSkipListSet;
      -034import 
      java.util.concurrent.ExecutionException;
      -035import java.util.concurrent.Executors;
      -036import java.util.concurrent.Future;
      -037import 
      java.util.concurrent.ScheduledExecutorService;
      -038import java.util.concurrent.TimeUnit;
      -039
      -040import 
      org.apache.hadoop.conf.Configuration;
      -041import 
      org.apache.hadoop.hbase.CategoryBasedTimeout;
      -042import 
      org.apache.hadoop.hbase.DoNotRetryIOException;
      -043import 
      org.apache.hadoop.hbase.HBaseTestingUtility;
      -044import 
      org.apache.hadoop.hbase.NotServingRegionException;
      -045import 
      org.apache.hadoop.hbase.ServerName;
      -046import 
      org.apache.hadoop.hbase.TableName;
      -047import 
      org.apache.hadoop.hbase.client.RegionInfo;
      -048import 
      org.apache.hadoop.hbase.client.RegionInfoBuilder;
      -049import 
      org.apache.hadoop.hbase.client.RetriesExhaustedException;
      -050import 
      org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
      -051import 
      org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
      -052import 
      org.apache.hadoop.hbase.master.MasterServices;
      -053import 
      org.apache.hadoop.hbase.master.RegionState.State;
      -054import 
      org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
      -055import 
      org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
      -056import 
      org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
      -057import 
      org.apache.hadoop.hbase.procedure2.Procedure;
      -058import 
      org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
      -059import 
      org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
      -060import 
      org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
      -061import 
      org.apache.hadoop.hbase.procedure2.util.StringUtils;
      -062import 
      org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
      -063import 
      org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
      -064import 
      org.apache.hadoop.hbase.testclassification.MasterTests;
      -065import 
      org.apache.hadoop.hbase.testclassification.MediumTests;
      -066import 
      org.apache.hadoop.hbase.util.Bytes;
      -067import 
      org.apache.hadoop.hbase.util.FSUtils;
      -068import 
      org.apache.hadoop.ipc.RemoteException;
      -069import org.junit.After;
      -070import org.junit.Before;
      -071import org.junit.Ignore;
      -072import org.junit.Rule;
      -073import org.junit.Test;
      -074import 
      org.junit.experimental.categories.Category;
      -075import 
      org.junit.rules.ExpectedException;
      -076import org.junit.rules.TestName;
      -077import org.junit.rules.TestRule;
      -078import org.slf4j.Logger;
      -079import org.slf4j.LoggerFactory;
      -080import 
      org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
      -081import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
      -082import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
      -083import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
      -084import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
      -085import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
      -086import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
      -087import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
      -088import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
      -089import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
      -090import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
      -091import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
      -092
      -093@Category({MasterTests.class, 
      MediumTests.class})
      -094public class TestAssignmentManager {
      -095  private static final Logger LOG = 
      LoggerFactory.getLogger(TestAssignmentManager.class);
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html
      new file mode 100644
      index 000..a1ce1bf
      --- /dev/null
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestConnectionImplementation.BlockingFilter.html
      @@ -0,0 +1,1116 @@
      +http://www.w3.org/TR/html4/loose.dtd;>
      +
      +
      +Source code
      +
      +
      +
      +
      +001/*
      +002 * Licensed to the Apache Software 
      Foundation (ASF) under one
      +003 * or more contributor license 
      agreements.  See the NOTICE file
      +004 * distributed with this work for 
      additional information
      +005 * regarding copyright ownership.  The 
      ASF licenses this file
      +006 * to you under the Apache License, 
      Version 2.0 (the
      +007 * "License"); you may not use this file 
      except in compliance
      +008 * with the License.  You may obtain a 
      copy of the License at
      +009 *
      +010 * 
      http://www.apache.org/licenses/LICENSE-2.0
      +011 *
      +012 * Unless required by applicable law or 
      agreed to in writing, software
      +013 * distributed under the License is 
      distributed on an "AS IS" BASIS,
      +014 * WITHOUT WARRANTIES OR CONDITIONS OF 
      ANY KIND, either express or implied.
      +015 * See the License for the specific 
      language governing permissions and
      +016 * limitations under the License.
      +017 */
+018package org.apache.hadoop.hbase.client;
+019
+020import static org.junit.Assert.assertEquals;
+021import static org.junit.Assert.assertFalse;
+022import static org.junit.Assert.assertNotNull;
+023import static org.junit.Assert.assertNull;
+024import static org.junit.Assert.assertTrue;
+025
+026import java.io.IOException;
+027import java.lang.reflect.Field;
+028import java.lang.reflect.Modifier;
+029import java.net.SocketTimeoutException;
+030import java.util.ArrayList;
+031import java.util.List;
+032import java.util.concurrent.ExecutorService;
+033import java.util.concurrent.SynchronousQueue;
+034import java.util.concurrent.ThreadLocalRandom;
+035import java.util.concurrent.ThreadPoolExecutor;
+036import java.util.concurrent.TimeUnit;
+037import java.util.concurrent.atomic.AtomicBoolean;
+038import java.util.concurrent.atomic.AtomicInteger;
+039import java.util.concurrent.atomic.AtomicReference;
+040import org.apache.hadoop.conf.Configuration;
+041import org.apache.hadoop.hbase.CategoryBasedTimeout;
+042import org.apache.hadoop.hbase.Cell;
+043import org.apache.hadoop.hbase.HBaseTestingUtility;
+044import org.apache.hadoop.hbase.HConstants;
+045import org.apache.hadoop.hbase.HRegionLocation;
+046import org.apache.hadoop.hbase.RegionLocations;
+047import org.apache.hadoop.hbase.ServerName;
+048import org.apache.hadoop.hbase.TableName;
+049import org.apache.hadoop.hbase.Waiter;
+050import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
+051import org.apache.hadoop.hbase.exceptions.DeserializationException;
+052import org.apache.hadoop.hbase.exceptions.RegionMovedException;
+053import org.apache.hadoop.hbase.filter.Filter;
+054import org.apache.hadoop.hbase.filter.FilterBase;
+055import org.apache.hadoop.hbase.ipc.RpcClient;
+056import org.apache.hadoop.hbase.master.HMaster;
+057import org.apache.hadoop.hbase.regionserver.HRegion;
+058import org.apache.hadoop.hbase.regionserver.HRegionServer;
+059import org.apache.hadoop.hbase.regionserver.Region;
+060import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+061import org.apache.hadoop.hbase.testclassification.LargeTests;
+062import org.apache.hadoop.hbase.util.Bytes;
+063import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+064import org.apache.hadoop.hbase.util.JVMClusterUtil;
+065import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
+066import org.apache.hadoop.hbase.util.Threads;
+067import org.junit.AfterClass;
+068import org.junit.Assert;
+069import org.junit.BeforeClass;
+070import org.junit.Ignore;
+071import org.junit.Rule;
+072import org.junit.Test;
+073import org.junit.experimental.categories.Category;
+074import org.junit.rules.TestName;
+075import org.junit.rules.TestRule;
+076import org.slf4j.Logger;
+077import org.slf4j.LoggerFactory;
+078
+079import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+080
+081/**
+082 * This class is for testing HBaseConnectionManager features
+083 */
+084@Category({LargeTests.class})
+085public class TestConnectionImplementation {
+086  @Rule
+087  public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass())
+088      .withLookingForStuckThread(true).build();
+089  private static final Logger LOG = LoggerFactory.getLogger(TestConnectionImplementation.class);
+090  private final static HBaseTestingUtility 
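
The preamble above is the standard HBase test-class pattern: a JUnit @Category annotation plus a CategoryBasedTimeout rule derived from that category. A minimal self-contained sketch of the same pattern (the class name ExampleLargeTest and the empty test body are hypothetical; the rest mirrors the listing):

    import org.apache.hadoop.hbase.CategoryBasedTimeout;
    import org.apache.hadoop.hbase.testclassification.LargeTests;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.experimental.categories.Category;
    import org.junit.rules.TestRule;

    @Category({ LargeTests.class })
    public class ExampleLargeTest {
      // Per-class timeout derived from the test category, as in the listing.
      @Rule
      public final TestRule timeout = CategoryBasedTimeout.builder()
          .withTimeout(this.getClass())
          .withLookingForStuckThread(true)
          .build();

      @Test
      public void testNothingYet() {
        // intentionally empty; the point is the preamble
      }
    }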

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
      --
diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
      index 20e11b1..4b5f191 100644
      --- a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
      +++ b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALReaderOnSecureWAL.html
      @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
       
       
       
-public class TestWALReaderOnSecureWAL
+public class TestWALReaderOnSecureWAL
 extends Object
       
       
      @@ -231,7 +231,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       TEST_UTIL
-static final HBaseTestingUtility TEST_UTIL
+static final HBaseTestingUtility TEST_UTIL
       
       
       
      @@ -240,7 +240,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       value
-final byte[] value
+final byte[] value
       
       
       
      @@ -249,7 +249,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       WAL_ENCRYPTION
-private static final String WAL_ENCRYPTION
+private static final String WAL_ENCRYPTION
       
       See Also:
       Constant
       Field Values
      @@ -262,7 +262,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       currentTest
-public org.junit.rules.TestName currentTest
+public org.junit.rules.TestName currentTest
       
       
       
      @@ -279,7 +279,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       TestWALReaderOnSecureWAL
-public TestWALReaderOnSecureWAL()
+public TestWALReaderOnSecureWAL()
       
       
       
      @@ -296,7 +296,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       setUpBeforeClass
-public static void setUpBeforeClass()
+public static void setUpBeforeClass()
                              throws Exception
       
       Throws:
      @@ -310,7 +310,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       writeWAL
-private org.apache.hadoop.fs.Path writeWAL(org.apache.hadoop.hbase.wal.WALFactory wals,
+private org.apache.hadoop.fs.Path writeWAL(org.apache.hadoop.hbase.wal.WALFactory wals,
                                            String tblName,
                                            boolean offheap)
                                     throws IOException
      @@ -326,7 +326,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       testWALReaderOnSecureWALWithKeyValues
-public void testWALReaderOnSecureWALWithKeyValues()
+public void testWALReaderOnSecureWALWithKeyValues()
                                            throws Exception
       
       Throws:
      @@ -340,7 +340,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       testWALReaderOnSecureWALWithOffheapKeyValues
-public void testWALReaderOnSecureWALWithOffheapKeyValues()
+public void testWALReaderOnSecureWALWithOffheapKeyValues()
                                            throws Exception
       
       Throws:
      @@ -354,7 +354,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       testSecureWALInternal
-private void testSecureWALInternal(boolean offheap)
+private void testSecureWALInternal(boolean offheap)
                             throws IOException,
                                    FileNotFoundException
       
      @@ -370,7 +370,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       testSecureWALReaderOnWAL
-public void testSecureWALReaderOnWAL()
+public void testSecureWALReaderOnWAL()
                               throws Exception
       
       Throws:
      
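
The hunk above only re-anchors the TestWALReaderOnSecureWAL javadoc, but the test itself exercises reading an encrypted WAL. A hedged sketch of how such a test is typically configured; the configuration key below is an assumption, since the diff only shows that a WAL_ENCRYPTION string constant exists:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SecureWalConfSketch {
      // Assumed configuration key; the diff does not show the constant's value.
      static Configuration secureWalConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.regionserver.wal.encryption", true);
        return conf;
      }
    }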

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ExecuteProceduresRemoteCall.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ExecuteProceduresRemoteCall.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ExecuteProceduresRemoteCall.html
index 63567ad..8841740 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ExecuteProceduresRemoteCall.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.ExecuteProceduresRemoteCall.html
      @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -protected class RSProcedureDispatcher.ExecuteProceduresRemoteCall
      +protected class RSProcedureDispatcher.ExecuteProceduresRemoteCall
       extends RSProcedureDispatcher.AbstractRSRemoteCall
       implements RSProcedureDispatcher.RemoteProcedureResolver
       
@@ -199,9 +199,9 @@ implements 
 List<RSProcedureDispatcher.RegionOpenOperation> operations)
 
 
-private void
-remoteCallCompleted(MasterProcedureEnv env,
-    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse response)
+void
+dispatchServerOperations(MasterProcedureEnv env,
+    List<RSProcedureDispatcher.ServerOperation> operations)
       
       
       private void
      @@ -248,7 +248,7 @@ implements 
       
       remoteProcedures
-private final Set<RemoteProcedureDispatcher.RemoteProcedure> remoteProcedures
+private final Set<RemoteProcedureDispatcher.RemoteProcedure> remoteProcedures
       
       
       
      @@ -257,7 +257,7 @@ implements 
       
       request
-private org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.Builder request
+private org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest.Builder request
       
       
       
      @@ -274,7 +274,7 @@ implements 
       
       ExecuteProceduresRemoteCall
-public ExecuteProceduresRemoteCall(ServerName serverName,
+public ExecuteProceduresRemoteCall(ServerName serverName,
     Set<RemoteProcedureDispatcher.RemoteProcedure> remoteProcedures)
       
       
      @@ -292,7 +292,7 @@ implements 
       
       call
-public Void call()
+public Void call()
 
 Specified by:
 call in interface Callable<Void>
      @@ -307,7 +307,7 @@ implements 
       
       dispatchOpenRequests
-public void dispatchOpenRequests(MasterProcedureEnv env,
+public void dispatchOpenRequests(MasterProcedureEnv env,
     List<RSProcedureDispatcher.RegionOpenOperation> operations)
       
       Specified by:
      @@ -321,7 +321,7 @@ implements 
       
       dispatchCloseRequests
-public void dispatchCloseRequests(MasterProcedureEnv env,
+public void dispatchCloseRequests(MasterProcedureEnv env,
     List<RSProcedureDispatcher.RegionCloseOperation> operations)
       
       Specified by:
      @@ -329,29 +329,33 @@ implements 
      +
       
       
       
       
-sendRequest
-protected org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse sendRequest(ServerName serverName,
-    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest request)
-    throws IOException
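
The substantive change in this hunk: ExecuteProceduresRemoteCall now acts as a RSProcedureDispatcher.RemoteProcedureResolver, so the private remoteCallCompleted(...) callback gives way to a public dispatchServerOperations(...) next to dispatchOpenRequests/dispatchCloseRequests, and the protected sendRequest(...) override is dropped. An illustrative sketch of the resolver idea, splitting a mixed batch of remote operations by type before dispatching each group (the Op types are hypothetical stand-ins for the RSProcedureDispatcher operation classes):

    import java.util.ArrayList;
    import java.util.List;

    class ResolverSketch {
      interface Op {}
      static class OpenOp implements Op {}
      static class CloseOp implements Op {}
      static class ServerOp implements Op {}

      void dispatch(List<Op> ops) {
        List<OpenOp> opens = new ArrayList<>();
        List<CloseOp> closes = new ArrayList<>();
        List<ServerOp> servers = new ArrayList<>();
        for (Op op : ops) {
          if (op instanceof OpenOp) {
            opens.add((OpenOp) op);
          } else if (op instanceof CloseOp) {
            closes.add((CloseOp) op);
          } else {
            servers.add((ServerOp) op);
          }
        }
        // Each batch then goes to its own dispatch method, mirroring
        // dispatchOpenRequests / dispatchCloseRequests / dispatchServerOperations.
      }
    }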

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
      index 3cef254..0f033c6 100644
      --- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
      +++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.OnlineEntry.html
      @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
       
       
       
-static class HBaseFsck.OnlineEntry
+static class HBaseFsck.OnlineEntry
 extends Object
 Stores the regioninfo retrieved from Online region servers.
       
      @@ -206,7 +206,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       hri
      -RegionInfo hri
      +RegionInfo hri
       
       
       
      @@ -215,7 +215,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       hsa
      -ServerName hsa
      +ServerName hsa
       
       
       
      @@ -232,7 +232,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       OnlineEntry
      -OnlineEntry()
      +OnlineEntry()
       
       
       
      @@ -249,7 +249,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       toString
-public String toString()
+public String toString()
 
 Overrides:
 toString in class Object
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
index c1666dc..f98492d 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/HBaseFsck.PrintingErrorReporter.html
      @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -static class HBaseFsck.PrintingErrorReporter
      +static class HBaseFsck.PrintingErrorReporter
 extends Object
       implements HBaseFsck.ErrorReporter
       
      @@ -301,7 +301,7 @@ implements 
       
       errorCount
-public int errorCount
+public int errorCount
       
       
       
      @@ -310,7 +310,7 @@ implements 
       
       showProgress
-private int showProgress
+private int showProgress
       
       
       
      @@ -319,7 +319,7 @@ implements 
       
       progressThreshold
-private static final int progressThreshold
+private static final int progressThreshold
       
       See Also:
       Constant
       Field Values
      @@ -332,7 +332,7 @@ implements 
       
       errorTables
-Set<HBaseFsck.TableInfo> errorTables
+Set<HBaseFsck.TableInfo> errorTables
       
       
       
      @@ -341,7 +341,7 @@ implements 
       
       errorList
-private ArrayList<HBaseFsck.ErrorReporter.ERROR_CODE> errorList
+private ArrayList<HBaseFsck.ErrorReporter.ERROR_CODE> errorList
       
       
       
      @@ -358,7 +358,7 @@ implements 
       
       PrintingErrorReporter
      -PrintingErrorReporter()
      +PrintingErrorReporter()
       
       
       
      @@ -375,7 +375,7 @@ implements 
       
       clear
-public void clear()
+public void clear()
 
 Specified by:
 clear in interface HBaseFsck.ErrorReporter
      @@ -388,7 +388,7 @@ implements 
       
       reportError
-public void reportError(HBaseFsck.ErrorReporter.ERROR_CODE errorCode,
+public void reportError(HBaseFsck.ErrorReporter.ERROR_CODE errorCode,
     String message)
       
       Specified by:
      @@ -402,7 +402,7 @@ implements 
       
       reportError
-public void reportError(HBaseFsck.ErrorReporter.ERROR_CODE errorCode,
+public void reportError(HBaseFsck.ErrorReporter.ERROR_CODE errorCode,
     String 
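
PrintingErrorReporter, per the fields above, accumulates an error count, the set of affected tables, and an ordered list of error codes. A minimal sketch of that collector pattern; the enum and the table parameter are simplifications, not the real HBaseFsck.ErrorReporter.ERROR_CODE and HBaseFsck.TableInfo signatures:

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class PrintingReporterSketch {
      enum ErrorCode { INCONSISTENT, ORPHANED }  // hypothetical codes

      private int errorCount;
      private final Set<String> errorTables = new HashSet<>();
      private final List<ErrorCode> errorList = new ArrayList<>();

      void reportError(ErrorCode code, String message, String table) {
        errorCount++;
        errorList.add(code);
        if (table != null) {
          errorTables.add(table);
        }
        System.err.println("ERROR [" + code + "] " + message);
      }

      void clear() {
        errorCount = 0;
        errorTables.clear();
        errorList.clear();
      }
    }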

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
index 1318b95..841130a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
      @@ -55,1647 +55,1615 @@
 047import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 048import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 049import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
-050import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-051import org.apache.hadoop.hbase.master.locking.LockProcedure;
-052import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-053import org.apache.hadoop.hbase.metrics.MetricRegistry;
-054import org.apache.hadoop.hbase.net.Address;
-055import org.apache.hadoop.hbase.procedure2.LockType;
-056import org.apache.hadoop.hbase.procedure2.LockedResource;
-057import org.apache.hadoop.hbase.procedure2.Procedure;
-058import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-059import org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
-060import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-061import org.apache.hadoop.hbase.security.User;
-062import org.apache.yetus.audience.InterfaceAudience;
-063import org.slf4j.Logger;
-064import org.slf4j.LoggerFactory;
-065
-066/**
-067 * Provides the coprocessor framework and environment for master oriented
-068 * operations.  {@link HMaster} interacts with the loaded coprocessors
-069 * through this class.
-070 */
-071@InterfaceAudience.Private
-072public class MasterCoprocessorHost
-073    extends CoprocessorHost<MasterCoprocessor, MasterCoprocessorEnvironment> {
-074
-075  private static final Logger LOG = LoggerFactory.getLogger(MasterCoprocessorHost.class);
-076
-077  /**
-078   * Coprocessor environment extension providing access to master related
-079   * services.
-080   */
-081  private static class MasterEnvironment extends BaseEnvironment<MasterCoprocessor>
-082      implements MasterCoprocessorEnvironment {
-083    private final boolean supportGroupCPs;
-084    private final MetricRegistry metricRegistry;
-085    private final MasterServices services;
-086
-087    public MasterEnvironment(final MasterCoprocessor impl, final int priority, final int seq,
-088        final Configuration conf, final MasterServices services) {
-089      super(impl, priority, seq, conf);
-090      this.services = services;
-091      supportGroupCPs = !useLegacyMethod(impl.getClass(),
-092          "preBalanceRSGroup", ObserverContext.class, String.class);
-093      this.metricRegistry =
-094          MetricsCoprocessor.createRegistryForMasterCoprocessor(impl.getClass().getName());
-095    }
-096
-097    @Override
-098    public ServerName getServerName() {
-099      return this.services.getServerName();
-100    }
-101
-102    @Override
-103    public Connection getConnection() {
-104      return new SharedConnection(this.services.getConnection());
-105    }
-106
-107    @Override
-108    public Connection createConnection(Configuration conf) throws IOException {
-109      return this.services.createConnection(conf);
-110    }
-111
-112    @Override
-113    public MetricRegistry getMetricRegistryForMaster() {
-114      return metricRegistry;
-115    }
-116
-117    @Override
-118    public void shutdown() {
-119      super.shutdown();
-120      MetricsCoprocessor.removeRegistry(this.metricRegistry);
-121    }
-122  }
-123
-124  /**
-125   * Special version of MasterEnvironment that exposes MasterServices for Core Coprocessors only.
-126   * Temporary hack until Core Coprocessors are integrated into Core.
-127   */
-128  private static class MasterEnvironmentForCoreCoprocessors extends MasterEnvironment
-129      implements HasMasterServices {
-130    private final MasterServices masterServices;
-131
-132    public MasterEnvironmentForCoreCoprocessors(final MasterCoprocessor impl, final int priority,
-133        final int seq, final Configuration conf, final MasterServices services) {
-134      super(impl, priority, seq, conf, services);
-135      this.masterServices = services;
-136    }
-137
-138    /**
-139     * @return An instance of MasterServices, an object NOT for general user-space Coprocessor
-140     * consumption.
-141     */
-142    public MasterServices getMasterServices() {
-143      return this.masterServices;
-144    }
-145  }
-146
-147  private MasterServices masterServices;
-148
-149  public 
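
The source above is the hosting side: MasterCoprocessorHost builds a MasterEnvironment per loaded coprocessor and routes master operations through it. For orientation, a hedged sketch of a coprocessor such a host would load; getMasterObserver() and preCreateTable(...) are MasterObserver hooks in HBase 2.x, but treat the exact signatures here as assumptions:

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    public class AuditingMasterObserver implements MasterCoprocessor, MasterObserver {
      @Override
      public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
      }

      @Override
      public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
          TableDescriptor desc, RegionInfo[] regions) throws IOException {
        // Log table creations before the master executes them.
        System.out.println("About to create table: " + desc.getTableName());
      }
    }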

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
index e650154..4f1a4d3 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.TimeoutMonitor.html
      @@ -334,6 +334,6 @@ extends 
       
       
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.html b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.html
      index edfdbac..9128281 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/SplitLogManager.html
      @@ -786,6 +786,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.SwitchStateTracker.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.SwitchStateTracker.html b/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.SwitchStateTracker.html
index f69e178..b10fbd0 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.SwitchStateTracker.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.SwitchStateTracker.html
      b/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.SwitchStateTracker.html
@@ -378,6 +378,6 @@ extends 
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.html b/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.html
      index 6965a51..e5e8408 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/SplitOrMergeTracker.html
      @@ -400,6 +400,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/TableNamespaceManager.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/TableNamespaceManager.html b/devapidocs/org/apache/hadoop/hbase/master/TableNamespaceManager.html
      index c49546e..9912127 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/TableNamespaceManager.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/TableNamespaceManager.html
      @@ -823,6 +823,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
      index cf82a3d..1de3398 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/TableStateManager.html
@@ -583,6 +583,6 @@ protected
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
       
       
      
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
      --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
index bbd91b8..4f76302 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
      @@ -56,1641 +56,1753 @@
 048import java.util.concurrent.atomic.AtomicBoolean;
 049import java.util.concurrent.atomic.AtomicInteger;
 050import java.util.concurrent.atomic.AtomicLong;
-051
-052import org.apache.hadoop.conf.Configuration;
-053import org.apache.hadoop.hbase.CallQueueTooBigException;
-054import org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import org.apache.hadoop.hbase.HConstants;
-057import org.apache.hadoop.hbase.HRegionInfo;
-058import org.apache.hadoop.hbase.HRegionLocation;
-059import org.apache.hadoop.hbase.RegionLocations;
-060import org.apache.hadoop.hbase.ServerName;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import org.apache.hadoop.hbase.testclassification.ClientTests;
-069import org.apache.hadoop.hbase.testclassification.MediumTests;
-070import org.apache.hadoop.hbase.util.Bytes;
-071import org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class, MediumTests.class})
-084public class TestAsyncProcess {
-085  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086      withLookingForStuckThread(true).build();
-087  private static final Logger LOG = LoggerFactory.getLogger(TestAsyncProcess.class);
-088  private static final TableName DUMMY_TABLE =
-089      TableName.valueOf("DUMMY_TABLE");
-090  private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091  private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092  private static final byte[] DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093  private static final byte[] FAILS = Bytes.toBytes("FAILS");
-094  private static final Configuration CONF = new Configuration();
-095  private static final ConnectionConfiguration CONNECTION_CONFIG =
-096      new ConnectionConfiguration(CONF);
-097  private static final ServerName sn = ServerName.valueOf("s1,1,1");
-098  private static final ServerName sn2 = ServerName.valueOf("s2,2,2");
-099  private static final ServerName sn3 = ServerName.valueOf("s3,3,3");
-100  private static final HRegionInfo hri1 =
-101      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102  private static final HRegionInfo hri2 =
-103      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104  private static final HRegionInfo hri3 =
-105      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106  private static final HRegionLocation loc1 = new HRegionLocation(hri1, sn);
-107  private static final HRegionLocation loc2 = new HRegionLocation(hri2, sn);
-108  private static final HRegionLocation loc3 = new HRegionLocation(hri3, sn2);
-109
-110  // Replica stuff
-111  private static final RegionInfo hri1r1 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112  private static final RegionInfo hri1r2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113  private static final RegionInfo hri2r1 = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114  private static final RegionLocations hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115      new HRegionLocation(hri1r1, sn2), new HRegionLocation(hri1r2, sn3));
-116  private static final RegionLocations hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117      new HRegionLocation(hri2r1, sn3));
-118  private static final RegionLocations hrls3 =
-119      new RegionLocations(new HRegionLocation(hri3, sn3), null);
-120
-121  private static final 
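
The fixture block above wires a primary region to two read replicas via RegionReplicaUtil and groups them into RegionLocations. The same setup condensed into a standalone sketch (table and server names reuse the dummy values from the listing):

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.RegionLocations;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionReplicaUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    class ReplicaFixtureSketch {
      static RegionLocations replicatedLocations() {
        TableName table = TableName.valueOf("DUMMY_TABLE");
        HRegionInfo primary = new HRegionInfo(table,
            Bytes.toBytes("a"), Bytes.toBytes("z"), false, 1);
        // Derive replica ids 1 and 2 from the primary region.
        RegionInfo r1 = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
        RegionInfo r2 = RegionReplicaUtil.getRegionInfoForReplica(primary, 2);
        ServerName s1 = ServerName.valueOf("s1,1,1");
        ServerName s2 = ServerName.valueOf("s2,2,2");
        ServerName s3 = ServerName.valueOf("s3,3,3");
        // One RegionLocations entry covering the primary plus both replicas.
        return new RegionLocations(new HRegionLocation(primary, s1),
            new HRegionLocation(r1, s2), new HRegionLocation(r2, s3));
      }
    }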

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
      index b209f49..6fcf813 100644
      --- a/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
      +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/Segment.html
      @@ -361,9 +361,12 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
 Cell
-maybeCloneWithAllocator(Cell cell)
+maybeCloneWithAllocator(Cell cell,
+    boolean forceCloneOfBigCell)
 If the segment has a memory allocator the cell is being cloned to this space, and returned;
- otherwise the given cell is returned
+ otherwise the given cell is returned
+
+ When a cell's size is too big (bigger than maxAlloc), it is not allocated on MSLAB.
       
       
       
      @@ -649,15 +652,21 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       Closing a segment before it is being discarded
       
       
      -
      +
       
       
       
       
       maybeCloneWithAllocator
-public Cell maybeCloneWithAllocator(Cell cell)
+public Cell maybeCloneWithAllocator(Cell cell,
+    boolean forceCloneOfBigCell)
 If the segment has a memory allocator the cell is being cloned to this space, and returned;
- otherwise the given cell is returned
+ otherwise the given cell is returned
+
+ When a cell's size is too big (bigger than maxAlloc), it is not allocated on MSLAB.
+ Since the process of flattening to CellChunkMap assumes that all cells
+ are allocated on MSLAB, during this process, the input parameter
+ forceCloneOfBigCell is set to 'true' and the cell is copied into MSLAB.
       
       Returns:
       either the given cell or its clone
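
The paragraph above states the rule the new forceCloneOfBigCell parameter implements. A sketch of that decision logic only, not the real Segment code; the MemStoreLAB.copyCellInto call and the explicit length parameter are assumptions used to keep the sketch self-contained:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.regionserver.MemStoreLAB;

    class MaybeCloneSketch {
      static Cell maybeClone(Cell cell, boolean forceCloneOfBigCell,
          MemStoreLAB lab, int maxAlloc, int cellLen) {
        if (lab == null) {
          return cell;                     // no allocator: hand back the cell
        }
        if (cellLen > maxAlloc && !forceCloneOfBigCell) {
          return cell;                     // big cell stays off the MSLAB
        }
        return lab.copyCellInto(cell);     // otherwise clone into an MSLAB chunk
      }
    }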
      @@ -670,7 +679,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       getCellLength
-static int getCellLength(Cell cell)
+static int getCellLength(Cell cell)
       Get cell length after serialized in KeyValue
       
       
      @@ -680,7 +689,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       shouldSeek
-public boolean shouldSeek(TimeRange tr,
+public boolean shouldSeek(TimeRange tr,
     long oldestUnexpiredTS)
       
       
      @@ -690,7 +699,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       isTagsPresent
-public boolean isTagsPresent()
+public boolean isTagsPresent()
       
       
       
      @@ -699,7 +708,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       incScannerCount
-public void incScannerCount()
+public void incScannerCount()
       
       
       
      @@ -708,7 +717,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       decScannerCount
-public void decScannerCount()
+public void decScannerCount()
       
       
       
      @@ -717,7 +726,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       setCellSet
-protected Segment setCellSet(CellSet cellSetOld,
+protected Segment setCellSet(CellSet cellSetOld,
     CellSet cellSetNew)
       Setting the CellSet of the segment - used only for flat 
      immutable segment for setting
        immutable CellSet after its creation in immutable segment constructor
      @@ -733,7 +742,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       keySize
-public long keySize()
+public long keySize()
       
       Returns:
       Sum of all cell's size.
      @@ -746,7 +755,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       heapSize
-public long heapSize()
+public long heapSize()
       
       Returns:
       The heap size of this segment.
      @@ -759,7 +768,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       incSize
-protected void incSize(long delta,
+protected void incSize(long delta,
     long heapOverhead)
       Updates the size counters of the segment by the given 
      delta
       
      @@ -770,7 +779,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       getMinSequenceId
-public long getMinSequenceId()
+public long getMinSequenceId()
       
       
       
      @@ -779,7 +788,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       getTimeRangeTracker
-public TimeRangeTracker getTimeRangeTracker()
+public TimeRangeTracker getTimeRangeTracker()
       
       
       
      @@ -788,7 +797,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       last
-public Cell last()
+public Cell last()
       
       
       
      @@ -797,7 +806,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       iterator
-public Iterator<Cell> iterator()
+public Iterator<Cell> iterator()
       
       
       
      @@ -806,7 +815,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       headSet
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.html b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.html
      index 6fac503..cf9ce85 100644
      --- a/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.html
      +++ b/devapidocs/org/apache/hadoop/hbase/master/cleaner/CleanerChore.html
      @@ -647,7 +647,7 @@ implements 
       
       updateChorePoolSize
-private void updateChorePoolSize(int updatedSize)
+private void updateChorePoolSize(int updatedSize)
       
       
       
      @@ -656,7 +656,7 @@ implements 
       
       newFileCleaner
-private T newFileCleaner(String className,
+private T newFileCleaner(String className,
     org.apache.hadoop.conf.Configuration conf)
 A utility method to create new instances of LogCleanerDelegate based on the class name of the
 LogCleanerDelegate.
      @@ -675,7 +675,7 @@ implements 
       
       chore
-protected void chore()
+protected void chore()
 Description copied from class: ScheduledChore
 The task to execute on each scheduled execution of the Chore
       
      @@ -690,7 +690,7 @@ implements 
       
       preRunCleaner
-private void preRunCleaner()
+private void preRunCleaner()
       
       
       
      @@ -699,7 +699,7 @@ implements 
       
       runCleaner
-public Boolean runCleaner()
+public Boolean runCleaner()
       
       
       
      @@ -708,7 +708,7 @@ implements 
       
       sortByConsumedSpace
-private void sortByConsumedSpace(List<org.apache.hadoop.fs.FileStatus> dirs)
+private void sortByConsumedSpace(List<org.apache.hadoop.fs.FileStatus> dirs)
 Sort the given list in (descending) order of the space each element takes
       
       Parameters:
      @@ -722,7 +722,7 @@ implements 
       
       checkAndDeleteFiles
-private boolean checkAndDeleteFiles(List<org.apache.hadoop.fs.FileStatus> files)
+private boolean checkAndDeleteFiles(List<org.apache.hadoop.fs.FileStatus> files)
 Run the given files through each of the cleaners to see if it should be deleted, deleting it if
 necessary.
       
      @@ -739,7 +739,7 @@ implements 
       
       deleteFiles
-protected int deleteFiles(Iterable<org.apache.hadoop.fs.FileStatus> filesToDelete)
+protected int deleteFiles(Iterable<org.apache.hadoop.fs.FileStatus> filesToDelete)
 Delete the given files
       
       Parameters:
      @@ -755,7 +755,7 @@ implements 
       
       cleanup
-public void cleanup()
+public void cleanup()
 Description copied from class: ScheduledChore
 Override to run cleanup tasks when the Chore encounters an error and must stop running
       
      @@ -770,7 +770,7 @@ implements 
       
       getChorePoolSize
-int getChorePoolSize()
+int getChorePoolSize()
       
       
       
      @@ -779,7 +779,7 @@ implements 
       
       setEnabled
-public boolean setEnabled(boolean enabled)
+public boolean setEnabled(boolean enabled)
       
       Parameters:
       enabled - 
      @@ -792,7 +792,7 @@ implements 
       
       getEnabled
-public boolean getEnabled()
+public boolean getEnabled()
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/CleanerChore.CleanerTask.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/CleanerChore.CleanerTask.html b/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/CleanerChore.CleanerTask.html
index 39a043b..794142f 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/CleanerChore.CleanerTask.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/CleanerChore.CleanerTask.html
      @@ -104,7 +104,7 @@
       
       
       private boolean
-CleanerChore.CleanerTask.getCleanRusult(List<CleanerChore.CleanerTask> tasks)
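
checkAndDeleteFiles, described above, runs every candidate file through each configured cleaner and deletes only what the whole chain still allows. An illustrative sketch of that all-delegates-must-agree filter; the Delegate interface is a hypothetical stand-in for the real FileCleanerDelegate:

    import java.util.List;
    import org.apache.hadoop.fs.FileStatus;

    class CleanerChainSketch {
      interface Delegate {
        Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files);
      }

      // A file survives unless every delegate still lists it as deletable.
      static Iterable<FileStatus> deletable(List<Delegate> delegates,
          Iterable<FileStatus> candidates) {
        Iterable<FileStatus> remaining = candidates;
        for (Delegate d : delegates) {
          remaining = d.getDeletableFiles(remaining);
        }
        return remaining;
      }
    }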
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
index 3400507..2baa140 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.LastOnRowByteBufferCell.html
      @@ -28,3034 +28,2926 @@
 020import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
 021import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
 022
-023import com.google.common.annotations.VisibleForTesting;
-024
-025import java.io.DataOutput;
-026import java.io.DataOutputStream;
-027import java.io.IOException;
-028import java.io.OutputStream;
-029import java.math.BigDecimal;
-030import java.nio.ByteBuffer;
-031import java.util.ArrayList;
-032import java.util.Iterator;
-033import java.util.List;
-034import java.util.Optional;
-035
-036import org.apache.hadoop.hbase.KeyValue.Type;
-037import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-038import org.apache.hadoop.hbase.io.HeapSize;
-039import org.apache.hadoop.hbase.io.TagCompressionContext;
-040import org.apache.hadoop.hbase.io.util.Dictionary;
-041import org.apache.hadoop.hbase.io.util.StreamUtils;
-042import org.apache.hadoop.hbase.util.ByteBufferUtils;
-043import org.apache.hadoop.hbase.util.ByteRange;
-044import org.apache.hadoop.hbase.util.Bytes;
-045import org.apache.hadoop.hbase.util.ClassSize;
-046import org.apache.yetus.audience.InterfaceAudience;
-047
-048
-049/**
-050 * Utility methods helpful slinging {@link Cell} instances. It has more powerful and
-051 * rich set of APIs than those in {@link CellUtil} for internal usage.
-052 */
-053@InterfaceAudience.Private
-054public final class PrivateCellUtil {
-055
-056  /**
-057   * Private constructor to keep this class from being instantiated.
-058   */
-059  private PrivateCellUtil() {
-060  }
+023import java.io.DataOutput;
+024import java.io.DataOutputStream;
+025import java.io.IOException;
+026import java.io.OutputStream;
+027import java.math.BigDecimal;
+028import java.nio.ByteBuffer;
+029import java.util.ArrayList;
+030import java.util.Iterator;
+031import java.util.List;
+032import java.util.Optional;
+033import org.apache.hadoop.hbase.KeyValue.Type;
+034import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+035import org.apache.hadoop.hbase.io.HeapSize;
+036import org.apache.hadoop.hbase.io.TagCompressionContext;
+037import org.apache.hadoop.hbase.io.util.Dictionary;
+038import org.apache.hadoop.hbase.io.util.StreamUtils;
+039import org.apache.hadoop.hbase.util.ByteBufferUtils;
+040import org.apache.hadoop.hbase.util.ByteRange;
+041import org.apache.hadoop.hbase.util.Bytes;
+042import org.apache.hadoop.hbase.util.ClassSize;
+043import org.apache.yetus.audience.InterfaceAudience;
+044
+045import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+046
+047/**
+048 * Utility methods helpful slinging {@link Cell} instances. It has more powerful and
+049 * rich set of APIs than those in {@link CellUtil} for internal usage.
+050 */
+051@InterfaceAudience.Private
+052public final class PrivateCellUtil {
+053
+054  /**
+055   * Private constructor to keep this class from being instantiated.
+056   */
+057  private PrivateCellUtil() {
+058  }
+059
+060  /*** ByteRange ***/
 061
-062  /*** ByteRange ***/
-063
-064  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
-065    return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-066  }
-067
-068  public static ByteRange fillFamilyRange(Cell cell, ByteRange range) {
-069    return range.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
-070  }
-071
-072  public static ByteRange fillQualifierRange(Cell cell, ByteRange range) {
-073    return range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
-074      cell.getQualifierLength());
-075  }
-076
-077  public static ByteRange fillValueRange(Cell cell, ByteRange range) {
-078    return range.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
-079  }
-080
-081  public static ByteRange fillTagRange(Cell cell, ByteRange range) {
-082    return range.set(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
-083  }
+062  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
+063    return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
+064  }
+065
+066  public static ByteRange fillFamilyRange(Cell cell, ByteRange range) {
+067    return 
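
The fill*Range helpers above point a reusable ByteRange at a cell's backing arrays without copying. A small usage sketch; SimpleMutableByteRange is assumed here as the concrete ByteRange implementation:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.PrivateCellUtil;
    import org.apache.hadoop.hbase.util.ByteRange;
    import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

    class FillRangeSketch {
      static ByteRange rowView(Cell cell) {
        ByteRange range = new SimpleMutableByteRange();
        // Points the range at the cell's row array/offset/length; no copy.
        return PrivateCellUtil.fillRowRange(cell, range);
      }
    }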

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
index ce8c56c..1ebcb9e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.html
      @@ -29,535 +29,532 @@
       021import java.io.IOException;
       022import java.util.ArrayList;
       023import java.util.Collection;
      -024import java.util.HashMap;
      -025import java.util.List;
      -026import java.util.Map;
      -027import java.util.Set;
      -028import java.util.TreeMap;
      -029import 
      java.util.concurrent.ConcurrentHashMap;
      -030import 
      java.util.concurrent.ConcurrentMap;
      -031
      -032import 
      org.apache.hadoop.conf.Configuration;
      -033import 
      org.apache.hadoop.hbase.Abortable;
      -034import 
      org.apache.hadoop.hbase.CompoundConfiguration;
      -035import 
      org.apache.hadoop.hbase.HBaseConfiguration;
      -036import 
      org.apache.hadoop.hbase.TableName;
      -037import 
      org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
      -038import 
      org.apache.hadoop.hbase.exceptions.DeserializationException;
      -039import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
      -040import 
      org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
      -041import 
      org.apache.hadoop.hbase.util.Bytes;
      -042import 
      org.apache.hadoop.hbase.util.Pair;
      -043import 
      org.apache.hadoop.hbase.zookeeper.ZKConfig;
      -044import 
      org.apache.hadoop.hbase.zookeeper.ZKUtil;
      -045import 
      org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
      -046import 
      org.apache.hadoop.hbase.zookeeper.ZKWatcher;
      -047import 
      org.apache.hadoop.hbase.zookeeper.ZNodePaths;
      -048import 
      org.apache.yetus.audience.InterfaceAudience;
      -049import 
      org.apache.zookeeper.KeeperException;
      -050import org.slf4j.Logger;
      -051import org.slf4j.LoggerFactory;
      -052
      -053/**
      -054 * This class provides an implementation 
      of the ReplicationPeers interface using ZooKeeper. The
      -055 * peers znode contains a list of all 
      peer replication clusters and the current replication state of
      -056 * those clusters. It has one child peer 
      znode for each peer cluster. The peer znode is named with
      -057 * the cluster id provided by the user in 
      the HBase shell. The value of the peer znode contains the
      -058 * peers cluster key provided by the user 
      in the HBase Shell. The cluster key contains a list of
      -059 * zookeeper quorum peers, the client 
      port for the zookeeper quorum, and the base znode for HBase.
      -060 * For example:
      -061 *
      -062 *  /hbase/replication/peers/1 [Value: 
      zk1.host.com,zk2.host.com,zk3.host.com:2181:/hbase]
      -063 *  /hbase/replication/peers/2 [Value: 
      zk5.host.com,zk6.host.com,zk7.host.com:2181:/hbase]
      -064 *
      -065 * Each of these peer znodes has a child 
      znode that indicates whether or not replication is enabled
      -066 * on that peer cluster. These peer-state 
      znodes do not have child znodes and simply contain a
      -067 * boolean value (i.e. ENABLED or 
      DISABLED). This value is read/maintained by the
      -068 * ReplicationPeer.PeerStateTracker 
      class. For example:
      +024import java.util.List;
      +025import java.util.Map;
      +026import java.util.Set;
      +027import java.util.TreeMap;
      +028import 
      java.util.concurrent.ConcurrentHashMap;
      +029import 
      java.util.concurrent.ConcurrentMap;
      +030
      +031import 
      org.apache.hadoop.conf.Configuration;
      +032import 
      org.apache.hadoop.hbase.Abortable;
      +033import 
      org.apache.hadoop.hbase.CompoundConfiguration;
      +034import 
      org.apache.hadoop.hbase.HBaseConfiguration;
      +035import 
      org.apache.hadoop.hbase.TableName;
      +036import 
      org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
      +037import 
      org.apache.hadoop.hbase.exceptions.DeserializationException;
      +038import 
      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
      +039import 
      org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState;
      +040import 
      org.apache.hadoop.hbase.util.Pair;
      +041import 
      org.apache.hadoop.hbase.zookeeper.ZKConfig;
      +042import 
      org.apache.hadoop.hbase.zookeeper.ZKUtil;
      +043import 
      org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
      +044import 
      org.apache.hadoop.hbase.zookeeper.ZKWatcher;
      +045import 
      org.apache.hadoop.hbase.zookeeper.ZNodePaths;
      +046import 
      org.apache.yetus.audience.InterfaceAudience;
      +047import 
      org.apache.zookeeper.KeeperException;
      +048import org.slf4j.Logger;
      +049import org.slf4j.LoggerFactory;
      +050
      +051/**
      +052 * This class provides an implementation 
      of the ReplicationPeers interface using ZooKeeper. The
      +053 * peers znode contains a list of all 
      peer replication clusters and the current replication state of
      +054 * those clusters. It has one child peer 
      znode for each peer cluster. The peer znode is named with
      +055 * the cluster id provided by the user in 
      the HBase shell. 
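
The znode layout described in that javadoc can be exercised directly with a plain ZooKeeper client. A minimal sketch, assuming a reachable quorum and the default /hbase/replication/peers layout shown above; the quorum string, the peer id "1", and the child name "peer-state" are illustrative assumptions, not taken from this commit:

    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.ZooKeeper;

    // Illustrative sketch: read the cluster key stored in a peer znode and
    // the serialized state from its child znode. Paths and names are the
    // defaults described in the javadoc above, not verified against a cluster.
    public class PeerZnodeReader {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("zk1.host.com:2181", 30000, event -> { });
        try {
          // The value of the peer znode is the peer's cluster key, e.g.
          // "zk5.host.com,zk6.host.com,zk7.host.com:2181:/hbase".
          byte[] clusterKey = zk.getData("/hbase/replication/peers/1", false, null);
          System.out.println(new String(clusterKey, StandardCharsets.UTF_8));
          // The child znode carries the ENABLED/DISABLED flag (serialized).
          byte[] state = zk.getData("/hbase/replication/peers/1/peer-state", false, null);
          System.out.println(state.length + " byte(s) of serialized peer state");
        } finally {
          zk.close();
        }
      }
    }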

      [03/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
index f7fbfbf..88ebcbc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
@@ -34,1583 +34,1583 @@
 026import java.io.IOException;
 027import java.util.ArrayList;
 028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.Collections;
-031import java.util.EnumSet;
-032import java.util.HashMap;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.Optional;
-036import java.util.Set;
-037import java.util.concurrent.CompletableFuture;
-038import java.util.concurrent.TimeUnit;
-039import java.util.concurrent.atomic.AtomicReference;
-040import java.util.function.BiConsumer;
-041import java.util.function.Function;
-042import java.util.regex.Pattern;
-043import java.util.stream.Collectors;
-044import java.util.stream.Stream;
-045import org.apache.commons.io.IOUtils;
-046import org.apache.hadoop.conf.Configuration;
-047import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-048import org.apache.hadoop.hbase.ClusterMetrics.Option;
-049import org.apache.hadoop.hbase.ClusterStatus;
-050import org.apache.hadoop.hbase.HConstants;
-051import org.apache.hadoop.hbase.HRegionLocation;
-052import org.apache.hadoop.hbase.MetaTableAccessor;
-053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-054import org.apache.hadoop.hbase.NamespaceDescriptor;
-055import org.apache.hadoop.hbase.RegionLoad;
-056import org.apache.hadoop.hbase.RegionLocations;
-057import org.apache.hadoop.hbase.ServerName;
-058import org.apache.hadoop.hbase.TableExistsException;
-059import org.apache.hadoop.hbase.TableName;
-060import org.apache.hadoop.hbase.TableNotDisabledException;
-061import org.apache.hadoop.hbase.TableNotEnabledException;
-062import org.apache.hadoop.hbase.TableNotFoundException;
-063import org.apache.hadoop.hbase.UnknownRegionException;
-064import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-065import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-066import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-067import org.apache.hadoop.hbase.client.Scan.ReadType;
-068import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-069import org.apache.hadoop.hbase.client.replication.TableCFs;
-070import org.apache.hadoop.hbase.client.security.SecurityCapability;
-071import org.apache.hadoop.hbase.exceptions.DeserializationException;
-072import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-073import org.apache.hadoop.hbase.quotas.QuotaFilter;
-074import org.apache.hadoop.hbase.quotas.QuotaSettings;
-075import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import org.apache.hadoop.hbase.replication.ReplicationException;
-077import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-078import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-079import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-080import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-081import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-082import org.apache.hadoop.hbase.util.Bytes;
-083import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-084import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-085import org.apache.yetus.audience.InterfaceAudience;
-086import org.slf4j.Logger;
-087import org.slf4j.LoggerFactory;
-088
-089import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-090import org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer;
-091import org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
-092import org.apache.hadoop.hbase.shaded.io.netty.util.TimerTask;
-093import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-094import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
      

      [03/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/org/apache/hadoop/hbase/RegionMetrics.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/RegionMetrics.html b/devapidocs/org/apache/hadoop/hbase/RegionMetrics.html
new file mode 100644
index 000..92ca29d
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/RegionMetrics.html
@@ -0,0 +1,582 @@
+RegionMetrics (Apache HBase 3.0.0-SNAPSHOT API)
+org.apache.hadoop.hbase
+Interface RegionMetrics
+
+All Known Implementing Classes:
+RegionLoad, RegionMetricsBuilder.RegionMetricsImpl
+
+@InterfaceAudience.Public
+public interface RegionMetrics
+Encapsulates per-region load metrics.
+
+Method Summary
+All Methods | Instance Methods | Abstract Methods | Default Methods
+
+Modifier and Type    Method and Description
+Size                 getBloomFilterSize()
+long                 getCompactedCellCount()
+long                 getCompactingCellCount()
+long                 getCompletedSequenceId()
+                     This does not really belong inside RegionLoad but it's being done in the name of expediency.
+float                getDataLocality()
+long                 getFilteredReadRequestCount()
+long                 getLastMajorCompactionTimestamp()
+Size                 getMemStoreSize()
+default String       getNameAsString()
+long                 getReadRequestCount()
+byte[]               getRegionName()
+default long         getRequestCount()
+int                  getStoreCount()
+int                  getStoreFileCount()
+Size                 getStoreFileIndexSize()
+                     TODO: why we pass the same value to different counters? Currently, the value from
+                     getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize();
+                     see HRegionServer#createRegionLoad.
+Size                 getStoreFileRootLevelIndexSize()
+Size                 getStoreFileSize()
+Size                 getStoreFileUncompressedDataIndexSize()
+Map<byte[],Long>     getStoreSequenceId()
+Size                 getUncompressedStoreFileSize()
+long                 getWriteRequestCount()
+
+Method Detail
+
+getRegionName
+byte[] getRegionName()
+Returns: the region name
+
+getStoreCount
+int getStoreCount()
+Returns: the number of stores
+
+getStoreFileCount
+int getStoreFileCount()
+Returns: the number of storefiles
+
+getStoreFileSize
+Size getStoreFileSize()
+Returns: the total size of the storefiles
+
+getMemStoreSize
+Size getMemStoreSize()
+Returns: the memstore size
+
+getReadRequestCount
+long getReadRequestCount()
+Returns: the number of read requests made to region
+
+getWriteRequestCount
+long getWriteRequestCount()
+Returns: the number of write requests made to region
+
+getRequestCount
+default long getRequestCount()
+Returns: the number of write requests and read requests made to region
+
+getNameAsString
+default String getNameAsString()
+Returns: the region name as a string
+
+getFilteredReadRequestCount
+long getFilteredReadRequestCount()
+Returns: the number of filtered read requests made to region
+
+getStoreFileIndexSize
+Size getStoreFileIndexSize()
+TODO: why we pass the same value to different counters? Currently, the value from
+ getStoreFileIndexSize() is same with 
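
A small illustration of how the accessors above compose. getRequestCount() is a default method documented as "the number of write requests and read requests made to region", i.e. read plus write; the aggregation helper below is illustrative only and not part of the HBase API:

    import org.apache.hadoop.hbase.RegionMetrics;

    // Hypothetical helper (not HBase code): sums per-region request counts
    // using only the accessors listed in the summary above.
    final class RegionMetricsSummary {
      static long totalRequests(Iterable<RegionMetrics> regions) {
        long total = 0;
        for (RegionMetrics rm : regions) {
          // Equivalent to rm.getReadRequestCount() + rm.getWriteRequestCount()
          total += rm.getRequestCount();
        }
        return total;
      }
    }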

      [03/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.FlushWorker.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.FlushWorker.html b/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.FlushWorker.html
index a631eea..569ac21 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.FlushWorker.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.FlushWorker.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
-static class HTableMultiplexer.FlushWorker
+static class HTableMultiplexer.FlushWorker
 extends Object
 implements Runnable
@@ -317,7 +317,7 @@
 addr
-private final HRegionLocation addr
+private final HRegionLocation addr
@@ -326,7 +326,7 @@
 queue
-private final LinkedBlockingQueue<HTableMultiplexer.PutStatus> queue
+private final LinkedBlockingQueue<HTableMultiplexer.PutStatus> queue
@@ -335,7 +335,7 @@
 multiplexer
-private final HTableMultiplexer multiplexer
+private final HTableMultiplexer multiplexer
@@ -344,7 +344,7 @@
 totalFailedPutCount
-private final AtomicLong totalFailedPutCount
+private final AtomicLong totalFailedPutCount
@@ -353,7 +353,7 @@
 currentProcessingCount
-private final AtomicInteger currentProcessingCount
+private final AtomicInteger currentProcessingCount
@@ -362,7 +362,7 @@
 averageLatency
-private final HTableMultiplexer.AtomicAverageCounter averageLatency
+private final HTableMultiplexer.AtomicAverageCounter averageLatency
@@ -371,7 +371,7 @@
 maxLatency
-private final AtomicLong maxLatency
+private final AtomicLong maxLatency
@@ -380,7 +380,7 @@
 ap
-private final AsyncProcess ap
+private final AsyncProcess ap
@@ -389,7 +389,7 @@
 processingList
-private final List<HTableMultiplexer.PutStatus> processingList
+private final List<HTableMultiplexer.PutStatus> processingList
@@ -398,7 +398,7 @@
 executor
-private final ScheduledExecutorService executor
+private final ScheduledExecutorService executor
       
       
       
      

      [03/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html b/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
index 78834c4..51bbeaa 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/util/OrderedBytes.html
@@ -511,17 +511,17 @@
 503    x = src.get();
 504    a1 = ord.apply(x) & 0xff;
 505    if (-1 == unsignedCmp(a0, 249)) {
-506      return (a0 - 241) * 256 + a1 + 240;
+506      return (a0 - 241L) * 256 + a1 + 240;
 507    }
 508    x = src.get();
 509    a2 = ord.apply(x) & 0xff;
 510    if (a0 == 249) {
-511      return 2288 + 256 * a1 + a2;
+511      return 2288L + 256 * a1 + a2;
 512    }
 513    x = src.get();
 514    a3 = ord.apply(x) & 0xff;
 515    if (a0 == 250) {
-516      return (a1 << 16) | (a2 << 8) | a3;
+516      return ((long) a1 << 16L) | (a2 << 8) | a3;
 517    }
 518    x = src.get();
 519    a4 = ord.apply(x) & 0xff;
@@ -671,1099 +671,1101 @@
 663      dst.put((byte) ((2 * d + 1) & 0xff));
 664      abs = abs.subtract(BigDecimal.valueOf(d));
 665    }
-666    a[offset + dst.getPosition() - 1] &= 0xfe; // terminal digit should be 2x
-667    if (isNeg) {
-668      // negative values encoded as ~M
-669      DESCENDING.apply(a, offset + startM, dst.getPosition() - startM);
-670    }
-671    return dst.getPosition() - start;
-672  }
-673
-674  /**
-675   * Encode the large magnitude floating point number {@code val} using
-676   * the key encoding. The caller guarantees that {@code val} will be
-677   * finite and abs(val) >= 1.0.
-678   * <p>
-679   * A floating point value is encoded as an integer exponent {@code E}
-680   * and a mantissa {@code M}. The original value is equal to
-681   * {@code (M * 100^E)}. {@code E} is set to the smallest value
-682   * possible without making {@code M} greater than or equal to 1.0.
-683   * </p>
-684   * <p>
-685   * Each centimal digit of the mantissa is stored in a byte. If the value of
-686   * the centimal digit is {@code X} (hence {@code X>=0} and
-687   * {@code X<=99}) then the byte value will be {@code 2*X+1} for
-688   * every byte of the mantissa, except for the last byte which will be
-689   * {@code 2*X+0}. The mantissa must be the minimum number of bytes
-690   * necessary to represent the value; trailing {@code X==0} digits are
-691   * omitted. This means that the mantissa will never contain a byte with the
-692   * value {@code 0x00}.
-693   * </p>
-694   * <p>
-695   * If {@code E > 10}, then this routine writes out {@code E} as a
-696   * varint followed by the mantissa as described above. Otherwise, if
-697   * {@code E <= 10}, this routine only writes the mantissa and leaves
-698   * the {@code E} value to be encoded as part of the opening byte of the
-699   * field by the calling function.
-700   *
-701   * <pre>
-702   *   Encoding:  M   (if E <= 10)
-703   *              E M (if E > 10)
-704   * </pre>
-705   * </p>
-706   * @param dst The destination to which encoded digits are written.
-707   * @param val The value to encode.
-708   * @return the number of bytes written.
-709   */
-710  private static int encodeNumericLarge(PositionedByteRange dst, BigDecimal val) {
-711    // TODO: this can be done faster
-712    BigDecimal abs = val.abs();
-713    byte[] a = dst.getBytes();
-714    boolean isNeg = val.signum() == -1;
-715    final int start = dst.getPosition(), offset = dst.getOffset();
-716    int e = 0, d, startM;
-717
-718    if (isNeg) { /* Large negative number: 0x08, ~E, ~M */
-719      dst.put(NEG_LARGE);
-720    } else { /* Large positive number: 0x22, E, M */
-721      dst.put(POS_LARGE);
-722    }
-723
-724    // normalize abs(val) to determine E
-725    while (abs.compareTo(E32) >= 0 && e <= 350) { abs = abs.movePointLeft(32); e += 16; }
-726    while (abs.compareTo(E8) >= 0 && e <= 350) { abs = abs.movePointLeft(8); e += 4; }
-727    while (abs.compareTo(BigDecimal.ONE) >= 0 && e <= 350) { abs = abs.movePointLeft(2); e++; }
-728
-729    // encode appropriate header byte and/or E value.
-730    if (e > 10) { /* large number, write out {~,}E */
-731      putVaruint64(dst, e, isNeg);
-732    } else {
-733      if (isNeg) { /* Medium negative number: 0x13-E, ~M */
-734        dst.put(start, (byte) (NEG_MED_MAX - e));
-735      } else { /* Medium positive number: 0x17+E, M */
-736        dst.put(start, (byte) (POS_MED_MIN + e));
-737      }
-738    }
-739
-740    // encode M by peeling off centimal digits, encoding x as 2x+1
-741    startM = dst.getPosition();
-742    // TODO: 18 is an arbitrary encoding limit. Reevaluate once we have a better handling of
-743    // numeric scale.
-744    for (int i = 0; i < 18 && abs.compareTo(BigDecimal.ZERO) != 0; i++) {
-745      abs = abs.movePointRight(2);
-746      d = 
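
A worked sketch of the mantissa rule described in that javadoc: each base-100 (centimal) digit X of the normalized mantissa is written as 2*X+1, and the low bit of the final byte is cleared so the terminal digit reads 2*X. This is an illustration of the rule only, using plain BigDecimal and no HBase types:

    import java.math.BigDecimal;
    import java.util.ArrayList;
    import java.util.List;

    // Illustrative only: encodes the centimal digits of a mantissa in [0, 1)
    // per the javadoc above. Interior digits become 2*X+1; the terminal
    // byte's low bit is cleared, turning 2*X+1 into 2*X.
    public class CentimalMantissa {
      static List<Integer> encode(BigDecimal mantissaLessThanOne) {
        List<Integer> out = new ArrayList<>();
        BigDecimal abs = mantissaLessThanOne;
        for (int i = 0; i < 18 && abs.compareTo(BigDecimal.ZERO) != 0; i++) {
          abs = abs.movePointRight(2);         // expose the next base-100 digit
          int d = abs.intValue();              // 0 <= d <= 99
          out.add(2 * d + 1);                  // interior digits: 2*X+1
          abs = abs.subtract(BigDecimal.valueOf(d));
        }
        if (!out.isEmpty()) {
          int last = out.size() - 1;
          out.set(last, out.get(last) & 0xfe); // terminal digit: 2*X
        }
        return out;
      }

      public static void main(String[] args) {
        // 0.0512 has centimal digits 05, 12 -> bytes [11, 24]
        System.out.println(encode(new BigDecimal("0.0512")));
      }
    }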

      [03/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
index 67e6eae..a83310a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/SimpleRpcServer.ConnectionManager.html
@@ -51,10 +51,10 @@
 043
 044import org.apache.hadoop.conf.Configuration;
 045import org.apache.hadoop.hbase.CellScanner;
-046import org.apache.hadoop.hbase.HConstants;
-047import org.apache.hadoop.hbase.Server;
-048import org.apache.yetus.audience.InterfaceAudience;
-049import org.apache.yetus.audience.InterfaceStability;
+046import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+047import org.apache.hadoop.hbase.HConstants;
+048import org.apache.hadoop.hbase.Server;
+049import org.apache.yetus.audience.InterfaceAudience;
 050import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
 051import org.apache.hadoop.hbase.security.HBasePolicyProvider;
 052import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
@@ -89,624 +89,623 @@
 081 *
 082 * @see BlockingRpcClient
 083 */
-084@InterfaceAudience.Private
-085@InterfaceStability.Evolving
-086public class SimpleRpcServer extends RpcServer {
-087
-088  protected int port;                  // port we listen on
-089  protected InetSocketAddress address; // inet address we listen on
-090  private int readThreads;             // number of read threads
-091
-092  protected int socketSendBufferSize;
-093  protected final long purgeTimeout;   // in milliseconds
-094
-095  // maintains the set of client connections and handles idle timeouts
-096  private ConnectionManager connectionManager;
-097  private Listener listener = null;
-098  protected SimpleRpcServerResponder responder = null;
-099
-100  /** Listens on the socket. Creates jobs for the handler threads*/
-101  private class Listener extends Thread {
-102
-103    private ServerSocketChannel acceptChannel = null; // the accept channel
-104    private Selector selector = null; // the selector that we use for the server
-105    private Reader[] readers = null;
-106    private int currentReader = 0;
-107    private final int readerPendingConnectionQueueLength;
-108
-109    private ExecutorService readPool;
-110
-111    public Listener(final String name) throws IOException {
-112      super(name);
-113      // The backlog of requests that we will have the serversocket carry.
-114      int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
-115      readerPendingConnectionQueueLength =
-116          conf.getInt("hbase.ipc.server.read.connection-queue.size", 100);
-117      // Create a new server socket and set to non blocking mode
-118      acceptChannel = ServerSocketChannel.open();
-119      acceptChannel.configureBlocking(false);
-120
-121      // Bind the server socket to the binding address (can be different from the default interface)
-122      bind(acceptChannel.socket(), bindAddress, backlogLength);
-123      port = acceptChannel.socket().getLocalPort(); // Could be an ephemeral port
-124      address = (InetSocketAddress) acceptChannel.socket().getLocalSocketAddress();
-125      // create a selector;
-126      selector = Selector.open();
-127
-128      readers = new Reader[readThreads];
-129      // Why this executor thing? Why not like hadoop just start up all the threads? I suppose it
-130      // has an advantage in that it is easy to shutdown the pool.
-131      readPool = Executors.newFixedThreadPool(readThreads,
-132        new ThreadFactoryBuilder().setNameFormat(
-133          "Reader=%d,bindAddress=" + bindAddress.getHostName() +
-134          ",port=" + port).setDaemon(true)
-135          .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
-136      for (int i = 0; i < readThreads; ++i) {
-137        Reader reader = new Reader();
-138        readers[i] = reader;
-139        readPool.execute(reader);
-140      }
-141      LOG.info(getName() + ": started " + readThreads + " reader(s) listening on port=" + port);
-142
-143      // Register accepts on the server socket with the selector.
-144      acceptChannel.register(selector, SelectionKey.OP_ACCEPT);
-145      this.setName("Listener,port=" + port);
-146      this.setDaemon(true);
-147    }
+084@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.CONFIG})
+085public class SimpleRpcServer extends RpcServer {
+086
+087  protected int port;                  // port we listen on
+088  protected InetSocketAddress address; // inet address we listen on
+089  
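
The Listener constructor above wires a fixed pool of named daemon reader threads, chosen, per its own comment, because a pool is easy to shut down as a unit. A minimal standalone sketch of that pattern, assuming Guava's ThreadFactoryBuilder; the thread count and name format are illustrative:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import com.google.common.util.concurrent.ThreadFactoryBuilder;

    // Sketch of the reader-pool setup reduced to its essentials: a fixed-size
    // pool of named daemon threads that can be shut down in one call.
    public class ReaderPoolSketch {
      public static void main(String[] args) {
        int readThreads = 3; // illustrative; the server reads this from configuration
        ExecutorService readPool = Executors.newFixedThreadPool(readThreads,
            new ThreadFactoryBuilder()
                .setNameFormat("Reader=%d,port=16020") // %d is filled in per thread
                .setDaemon(true)
                .build());
        for (int i = 0; i < readThreads; ++i) {
          readPool.execute(() -> System.out.println(Thread.currentThread().getName()));
        }
        readPool.shutdown(); // one call tears down all readers
      }
    }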

      [03/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
index 219283e..2b5d70b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
@@ -435,1198 +435,1203 @@
 427
 428    if (backingMap.containsKey(cacheKey)) {
 429      Cacheable existingBlock = getBlock(cacheKey, false, false, false);
-430      if (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-431        throw new RuntimeException("Cached block contents differ, which should not have happened."
-432            + "cacheKey:" + cacheKey);
-433      }
-434      String msg = "Caching an already cached block: " + cacheKey;
-435      msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
-436      LOG.warn(msg);
-437      return;
-438    }
-439
-440    /*
-441     * Stuff the entry into the RAM cache so it can get drained to the persistent store
-442     */
-443    RAMQueueEntry re =
-444        new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory);
-445    if (ramCache.putIfAbsent(cacheKey, re) != null) {
-446      return;
-447    }
-448    int queueNum = (cacheKey.hashCode() & 0x7FFF) % writerQueues.size();
-449    BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
-450    boolean successfulAddition = false;
-451    if (wait) {
-452      try {
-453        successfulAddition = bq.offer(re, DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-454      } catch (InterruptedException e) {
-455        Thread.currentThread().interrupt();
-456      }
-457    } else {
-458      successfulAddition = bq.offer(re);
-459    }
-460    if (!successfulAddition) {
-461      ramCache.remove(cacheKey);
-462      cacheStats.failInsert();
-463    } else {
-464      this.blockNumber.increment();
-465      this.heapSize.add(cachedItem.heapSize());
-466      blocksByHFile.add(cacheKey);
-467    }
-468  }
-469
-470  /**
-471   * Get the buffer of the block with the specified key.
-472   * @param key block's cache key
-473   * @param caching true if the caller caches blocks on cache misses
-474   * @param repeat Whether this is a repeat lookup for the same block
-475   * @param updateCacheMetrics Whether we should update cache metrics or not
-476   * @return buffer of specified cache key, or null if not in cache
-477   */
-478  @Override
-479  public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
-480      boolean updateCacheMetrics) {
-481    if (!cacheEnabled) {
-482      return null;
-483    }
-484    RAMQueueEntry re = ramCache.get(key);
-485    if (re != null) {
-486      if (updateCacheMetrics) {
-487        cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
-488      }
-489      re.access(accessCount.incrementAndGet());
-490      return re.getData();
-491    }
-492    BucketEntry bucketEntry = backingMap.get(key);
-493    if (bucketEntry != null) {
-494      long start = System.nanoTime();
-495      ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
-496      try {
-497        lock.readLock().lock();
-498        // We can not read here even if backingMap does contain the given key because its offset
-499        // maybe changed. If we lock BlockCacheKey instead of offset, then we can only check
-500        // existence here.
-501        if (bucketEntry.equals(backingMap.get(key))) {
-502          // TODO : change this area - should be removed after server cells and
-503          // 12295 are available
-504          int len = bucketEntry.getLength();
-505          if (LOG.isTraceEnabled()) {
-506            LOG.trace("Read offset=" + bucketEntry.offset() + ", len=" + len);
-507          }
-508          Cacheable cachedBlock = ioEngine.read(bucketEntry.offset(), len,
-509              bucketEntry.deserializerReference(this.deserialiserMap));
-510          long timeTaken = System.nanoTime() - start;
-511          if (updateCacheMetrics) {
-512            cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
-513            cacheStats.ioHit(timeTaken);
-514          }
-515          if (cachedBlock.getMemoryType() == MemoryType.SHARED) {
-516            bucketEntry.refCount.incrementAndGet();
-517          }
-518          bucketEntry.access(accessCount.incrementAndGet());
-519          if (this.ioErrorStartTime > 0) {
-520            ioErrorStartTime = -1;
-521          }
-522          return cachedBlock;
-523        }
-524      } catch (IOException ioex) {
-525
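
The cacheBlock path above hinges on a bounded hand-off: putIfAbsent into the RAM cache, then offer the entry to a writer queue (optionally waiting), and roll back the RAM-cache insert if the queue refuses. A minimal generic sketch of that pattern, with illustrative names and no HBase classes:

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.TimeUnit;

    // Sketch of the hand-off shown above: on a failed offer, undo the map
    // insert so the in-memory index and the write queue stay consistent.
    public class BoundedHandOff<K, V> {
      private final ConcurrentMap<K, V> ramCache = new ConcurrentHashMap<>();
      private final BlockingQueue<V> writerQueue = new ArrayBlockingQueue<>(64);

      boolean cache(K key, V value, boolean wait) throws InterruptedException {
        if (ramCache.putIfAbsent(key, value) != null) {
          return false; // already queued by another caller
        }
        boolean added = wait
            ? writerQueue.offer(value, 50, TimeUnit.MILLISECONDS)
            : writerQueue.offer(value);
        if (!added) {
          ramCache.remove(key); // roll back, mirroring ramCache.remove(cacheKey)
        }
        return added;
      }
    }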

      [03/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
index 7cece5c..6361a24 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
@@ -248,379 +248,383 @@
 240     */
 241    CheckAndMutateBuilder ifNotExists();
 242
-243    default CheckAndMutateBuilder ifEquals(byte[] value) {
-244      return ifMatches(CompareOperator.EQUAL, value);
-245    }
-246
-247    /**
-248     * @param compareOp comparison operator to use
-249     * @param value the expected value
-250     */
-251    CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value);
-252
-253    /**
-254     * @param put data to put if check succeeds
-255     * @return {@code true} if the new put was executed, {@code false} otherwise. The return value
-256     *         will be wrapped by a {@link CompletableFuture}.
-257     */
-258    CompletableFuture<Boolean> thenPut(Put put);
-259
-260    /**
-261     * @param delete data to delete if check succeeds
-262     * @return {@code true} if the new delete was executed, {@code false} otherwise. The return
-263     *         value will be wrapped by a {@link CompletableFuture}.
-264     */
-265    CompletableFuture<Boolean> thenDelete(Delete delete);
-266
-267    /**
-268     * @param mutation mutations to perform if check succeeds
-269     * @return true if the new mutation was executed, false otherwise. The return value will be
-270     *         wrapped by a {@link CompletableFuture}.
-271     */
-272    CompletableFuture<Boolean> thenMutate(RowMutations mutation);
-273  }
-274
-275  /**
-276   * Performs multiple mutations atomically on a single row. Currently {@link Put} and
-277   * {@link Delete} are supported.
-278   * @param mutation object that specifies the set of mutations to perform atomically
-279   * @return A {@link CompletableFuture} that always returns null when complete normally.
-280   */
-281  CompletableFuture<Void> mutateRow(RowMutations mutation);
-282
-283  /**
-284   * The scan API uses the observer pattern.
-285   * @param scan A configured {@link Scan} object.
-286   * @param consumer the consumer used to receive results.
-287   * @see ScanResultConsumer
-288   * @see AdvancedScanResultConsumer
-289   */
-290  void scan(Scan scan, C consumer);
-291
-292  /**
-293   * Gets a scanner on the current table for the given family.
-294   * @param family The column family to scan.
-295   * @return A scanner.
-296   */
-297  default ResultScanner getScanner(byte[] family) {
-298    return getScanner(new Scan().addFamily(family));
-299  }
-300
-301  /**
-302   * Gets a scanner on the current table for the given family and qualifier.
-303   * @param family The column family to scan.
-304   * @param qualifier The column qualifier to scan.
-305   * @return A scanner.
-306   */
-307  default ResultScanner getScanner(byte[] family, byte[] qualifier) {
-308    return getScanner(new Scan().addColumn(family, qualifier));
-309  }
-310
-311  /**
-312   * Returns a scanner on the current table as specified by the {@link Scan} object.
-313   * @param scan A configured {@link Scan} object.
-314   * @return A scanner.
-315   */
-316  ResultScanner getScanner(Scan scan);
-317
-318  /**
-319   * Return all the results that match the given scan object.
-320   * <p>
-321   * Notice that usually you should use this method with a {@link Scan} object that has limit set.
-322   * For example, if you want to get the closest row after a given row, you could do this:
-323   * <p>
-324   *
-325   * <pre>
-326   * <code>
-327   * table.scanAll(new Scan().withStartRow(row, false).setLimit(1)).thenAccept(results -> {
-328   *   if (results.isEmpty()) {
-329   *     System.out.println("No row after " + Bytes.toStringBinary(row));
-330   *   } else {
-331   *     System.out.println("The closest row after " + Bytes.toStringBinary(row) + " is "
-332   *         + Bytes.toStringBinary(results.stream().findFirst().get().getRow()));
-333   *   }
-334   * });
-335   * </code>
-336   * </pre>
-337   * <p>
-338   * If your result set is very large, you should use other scan method to get a scanner or use
-339   * callback to process the results. They will do chunking to prevent OOM. The scanAll method will
-340   * fetch all the results and store them in a List and then return the list to you.
+243    /**
+244     * Check for equality.
+245     * @param value the expected value
+246     */
+247    default CheckAndMutateBuilder ifEquals(byte[] value) {
+248      return 
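
The builder above reads naturally at the call site. A hedged usage sketch, assuming `table` is an org.apache.hadoop.hbase.client.AsyncTable obtained elsewhere and that row/family/qualifier/expected/newValue are byte[] values defined by the caller; error handling omitted:

    // Illustrative use of CheckAndMutateBuilder as documented above: apply a
    // Put only if the cell's current value equals an expected value.
    table.checkAndMutate(row, family)
        .qualifier(qualifier)
        .ifEquals(expected)
        .thenPut(new Put(row).addColumn(family, qualifier, newValue))
        .thenAccept(succeeded ->
            System.out.println(succeeded ? "put applied" : "value did not match"));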

      [03/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
index 7ce259d..2b126d3 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.html
@@ -738,7 +738,7 @@
 createChecksumCreater28
-private static FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater createChecksumCreater28(Class<?> confClass)
+private static FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater createChecksumCreater28(Class<?> confClass)
     throws NoSuchMethodException
 Throws:
@@ -752,7 +752,7 @@
 createChecksumCreater27
-private static FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater createChecksumCreater27(Class<?> confClass)
+private static FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater createChecksumCreater27(Class<?> confClass)
     throws NoSuchMethodException
 Throws:
@@ -766,7 +766,7 @@
 createChecksumCreater
-private static FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater createChecksumCreater()
+private static FanOutOneBlockAsyncDFSOutputHelper.ChecksumCreater createChecksumCreater()
     throws NoSuchMethodException, ClassNotFoundException
@@ -782,7 +782,7 @@
 createFileCreator3
-private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator createFileCreator3()
+private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator createFileCreator3()
     throws NoSuchMethodException
 Throws:
@@ -796,7 +796,7 @@
 createFileCreator2
-private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator createFileCreator2()
+private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator createFileCreator2()
     throws NoSuchMethodException
 Throws:
@@ -810,7 +810,7 @@
 createFileCreator
-private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator createFileCreator()
+private static FanOutOneBlockAsyncDFSOutputHelper.FileCreator createFileCreator()
     throws NoSuchMethodException
 Throws:
@@ -824,7 +824,7 @@
 beginFileLease
-static void beginFileLease(org.apache.hadoop.hdfs.DFSClient client,
+static void beginFileLease(org.apache.hadoop.hdfs.DFSClient client,
     long inodeId)
@@ -834,7 

      [03/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-spark/dependencies.html
--
diff --git a/hbase-build-configuration/hbase-spark/dependencies.html b/hbase-build-configuration/hbase-spark/dependencies.html
index be3032c..1997852 100644
--- a/hbase-build-configuration/hbase-spark/dependencies.html
+++ b/hbase-build-configuration/hbase-spark/dependencies.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase - Spark Project Dependencies
@@ -196,12 +196,18 @@
 jar
 Apache License, Version 2.0
+org.apache.hbase
+hbase-zookeeper (http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper)
+3.0.0-SNAPSHOT
+jar
+Apache License, Version 2.0
 org.apache.hbase.thirdparty
 hbase-shaded-miscellaneous (http://hbase.apache.org/hbase-shaded-miscellaneous)
 1.0.1
 jar
 Apache License, Version 2.0
 org.apache.yetus
 audience-annotations (https://yetus.apache.org/audience-annotations)
 0.5.0
@@ -282,20 +288,27 @@
 test-jar
 Apache License, Version 2.0
+org.apache.hbase
+hbase-zookeeper (http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper)
+3.0.0-SNAPSHOT
+tests
+test-jar
+Apache License, Version 2.0
 org.apache.spark
 spark-streaming_2.10 (http://spark.apache.org/)
 1.6.0
 tests
 test-jar
 Apache 2.0 License
 org.scalamock
 scalamock-scalatest-support_2.10 (http://scalamock.org/)
 3.1.4
 -
 jar
 BSD-style
 org.scalatest
 scalatest_2.10 (http://www.scalatest.org)
 2.2.4
@@ -800,294 +813,288 @@
 jar
 Apache License, Version 2.0
-org.apache.hbase
-hbase-zookeeper (http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper)
-3.0.0-SNAPSHOT
-jar
-Apache License, Version 2.0
 org.apache.hbase.thirdparty
 hbase-shaded-netty (http://hbase.apache.org/hbase-shaded-netty)
 1.0.1
 jar
 Apache License, Version 2.0
 org.apache.hbase.thirdparty
 hbase-shaded-protobuf (http://hbase.apache.org/hbase-shaded-protobuf)
 1.0.1
 jar
 Apache License, Version 2.0
 org.apache.htrace
 htrace-core (http://incubator.apache.org/projects/htrace.html)
 3.2.0-incubating
 jar
 The Apache Software License, Version 2.0
 org.apache.htrace
 htrace-core4 (http://incubator.apache.org/projects/htrace.html)
 4.2.0-incubating
 jar
 Apache License, Version 2.0
 org.apache.httpcomponents
 httpclient (http://hc.apache.org/httpcomponents-client)
 4.5.3
 jar
 Apache License, Version 2.0
 org.apache.httpcomponents
 httpcore (http://hc.apache.org/httpcomponents-core-ga)
 4.4.6
 jar
 Apache License, Version 2.0
 org.apache.zookeeper
 zookeeper
 3.4.10
 jar
 The Apache Software License, Version 2.0
 org.codehaus.jackson
 jackson-core-asl (http://jackson.codehaus.org)
 1.9.13
 jar
 The Apache Software License, Version 2.0
 org.codehaus.jackson
 jackson-jaxrs (http://jackson.codehaus.org)
 1.8.3
 jar
 The Apache Software License, Version 2.0 / GNU Lesser General Public License (LGPL), Version 2.1
 org.codehaus.jackson
 jackson-mapper-asl (http://jackson.codehaus.org)
 1.9.13
 jar
 The Apache Software License, Version 2.0
 org.codehaus.jackson
 jackson-xc (http://jackson.codehaus.org)
 1.8.3
 jar
 The Apache Software License, Version 2.0 / GNU Lesser General Public License (LGPL), Version 2.1
 org.codehaus.jettison
 jettison (https://github.com/jettison-json/jettison)
 1.3.8
 jar
 Apache License, Version 2.0
 org.eclipse.jetty
 jetty-http (http://www.eclipse.org/jetty)
 9.3.19.v20170502
 jar
 Apache Software License - Version 2.0 / Eclipse Public License - Version 1.0
 org.eclipse.jetty
 jetty-io (http://www.eclipse.org/jetty)
 9.3.19.v20170502
 jar
 Apache Software License - Version 

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseCommonTestingUtility.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseCommonTestingUtility.html
       
      b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseCommonTestingUtility.html
      index 892e00d..e1445dc 100644
      --- 
      a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseCommonTestingUtility.html
      +++ 
      b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseCommonTestingUtility.html
      @@ -114,6 +114,10 @@
       org.apache.hadoop.hbase.util
       
       
      +
      +org.apache.hadoop.hbase.zookeeper
      +
      +
       
       
       
      @@ -352,6 +356,24 @@
       
       
       
      +
      +
      +
      +Uses of HBaseCommonTestingUtility in org.apache.hadoop.hbase.zookeeper
      +
      +Fields in org.apache.hadoop.hbase.zookeeper
       declared as HBaseCommonTestingUtility
      +
      +Modifier and Type
      +Field and Description
      +
      +
      +
      +private static HBaseCommonTestingUtility
      +TestReadOnlyZKClient.UTIL
      +
      +
      +
      +
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/testdevapidocs/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.html 
      b/testdevapidocs/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.html
      index ddd8139..30f3d80 100644
      --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.html
      +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestZKAsyncRegistry.html
      @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -public class TestZKAsyncRegistry
      +public class TestZKAsyncRegistry
 extends java.lang.Object
       
       
      @@ -217,7 +217,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL
       
       
       
      @@ -226,7 +226,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       REGISTRY
-private static org.apache.hadoop.hbase.client.ZKAsyncRegistry REGISTRY
+private static org.apache.hadoop.hbase.client.ZKAsyncRegistry REGISTRY
       
       
       
      @@ -243,7 +243,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       TestZKAsyncRegistry
-public TestZKAsyncRegistry()
+public TestZKAsyncRegistry()
       
       
       
      @@ -260,7 +260,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
       
       
       waitUntilAllReplicasHavingRegionLocation
-static void waitUntilAllReplicasHavingRegionLocation(org.apache.hadoop.hbase.TableName tbl)
+static void waitUntilAllReplicasHavingRegionLocation(org.apache.hadoop.hbase.TableName tbl)
                                           throws java.io.IOException
       
       Throws:
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestImportExport.TableWALActionListener.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestImportExport.TableWALActionListener.html
       
      b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestImportExport.TableWALActionListener.html
      index 462c087..f8ca93a 100644
      --- 
      a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestImportExport.TableWALActionListener.html
      +++ 
      b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestImportExport.TableWALActionListener.html
      @@ -100,15 +100,10 @@ var activeTableTab = "activeTableTab";
 java.lang.Object
       
       
      -org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.Base
      -
      -
       
      org.apache.hadoop.hbase.mapreduce.TestImportExport.TableWALActionListener
       
       
       
      -
      -
       
       
       
      @@ -123,8 +118,9 @@ var activeTableTab = "activeTableTab";
       
       
 private static class TestImportExport.TableWALActionListener
-extends org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.Base
-This listens to the #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit) to
+extends java.lang.Object
+implements org.apache.hadoop.hbase.regionserver.wal.WALActionsListener
+This listens to the WALActionsListener.visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit) to
  identify that an entry is written to the Write Ahead Log for the given table.
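
A hedged sketch of what such a listener looks like after this change, now that
WALActionsListener.Base is gone and the interface carries no-op defaults. The
field names and the table-matching logic are illustrative guesses, not the
test's actual body, and WALKey#getTablename is assumed from the HBase 2.0 API:

    private static class TableWALActionListener implements WALActionsListener {
      private final TableName table;           // table whose WAL traffic we watch
      private volatile boolean walEditsSeen;   // flipped once a matching entry lands

      TableWALActionListener(TableName table) {
        this.table = table;
      }

      @Override
      public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) {
        // All other interface methods keep their no-op defaults; we only need
        // to observe that an entry for our table reached the Write Ahead Log.
        if (logKey.getTablename().equals(table)) {
          walEditsSeen = true;
        }
      }
    }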
       
       
      @@ -132,21 +128,6 @@ extends 
      org.apache.hadoop.hbase.regionserver.wal.WALActionsListener.Base
       
       
       
-Nested Class Summary
-Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.regionserver.wal.WALActionsListener
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
      new file mode 100644
      index 000..8a522f0
      --- /dev/null
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.html
      @@ -0,0 +1,129 @@
+Source code
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.asyncfs;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Used to predict the next send buffer size.
+ */
+@InterfaceAudience.Private
+class SendBufSizePredictor {
+
+  // LIMIT is 128MB
+  private static final int LIMIT = 128 * 1024 * 1024;
+
+  // buf's initial capacity - 4KB
+  private int capacity = 4 * 1024;
+
+  int initialSize() {
+    return capacity;
+  }
+
+  int guess(int bytesWritten) {
+    // if the bytesWritten is greater than the current capacity
+    // always increase the capacity in powers of 2.
+    if (bytesWritten > this.capacity) {
+      // Ensure we don't cross the LIMIT
+      if ((this.capacity << 1) <= LIMIT) {
+        // increase the capacity in the range of power of 2
+        this.capacity = this.capacity << 1;
+      }
+    } else {
+      // if we see that the bytesWritten is lesser we could again decrease
+      // the capacity by dividing it by 2 if the bytesWritten is satisfied by
+      // that reduction
+      if ((this.capacity >> 1) >= bytesWritten) {
+        this.capacity = this.capacity >> 1;
+      }
+    }
+    return this.capacity;
+  }
+}
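
For illustration, a minimal driver showing how the predictor resizes; the demo
class is hypothetical (not part of HBase) and would have to sit in the same
package, since SendBufSizePredictor is package-private:

    package org.apache.hadoop.hbase.io.asyncfs;

    // Walks the predictor through a growing and then shrinking write pattern
    // to show the powers-of-2 resizing between the 4KB start and 128MB LIMIT.
    public class SendBufSizePredictorDemo {
      public static void main(String[] args) {
        SendBufSizePredictor p = new SendBufSizePredictor();
        System.out.println(p.initialSize());  // 4096: the 4KB starting capacity
        System.out.println(p.guess(10_000));  // 8192: doubled, still below bytesWritten
        System.out.println(p.guess(10_000));  // 16384: doubled again, now >= 10000
        System.out.println(p.guess(10_000));  // 16384: stable, halving would undershoot
        System.out.println(p.guess(3_000));   // 8192: halved, since 16384 >> 1 >= 3000
      }
    }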
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html 
      b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
      new file mode 100644
      index 000..48e79b7
      --- /dev/null
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/NettyRpcFrameDecoder.html
      @@ -0,0 +1,309 @@
+Source code
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.client.VersionInfoUtil;
+import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
+import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
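
The listing breaks off after the imports. As a rough sketch of the pattern a
length-prefixed frame decoder like this follows (written against the plain
Netty 4 API rather than HBase's shaded copy; the class name and size limit are
invented, and this is not the actual NettyRpcFrameDecoder implementation):

    import java.util.List;
    import io.netty.buffer.ByteBuf;
    import io.netty.channel.ChannelHandlerContext;
    import io.netty.handler.codec.ByteToMessageDecoder;

    // Decodes 4-byte-length-prefixed frames and rejects oversized requests
    // up front, before buffering the whole frame.
    public class LengthPrefixedFrameDecoder extends ByteToMessageDecoder {
      private static final int MAX_FRAME_SIZE = 256 * 1024 * 1024; // assumed limit

      @Override
      protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) {
        if (in.readableBytes() < 4) {
          return; // length prefix not fully arrived yet
        }
        in.markReaderIndex();
        int frameLength = in.readInt();
        if (frameLength > MAX_FRAME_SIZE) {
          throw new IllegalArgumentException("request too big: " + frameLength);
        }
        if (in.readableBytes() < frameLength) {
          in.resetReaderIndex(); // wait until the whole frame is buffered
          return;
        }
        out.add(in.readRetainedSlice(frameLength)); // hand one full frame downstream
      }
    }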
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
      index d438f22..7c59e27 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
      @@ -1290,8 +1290,8 @@
       1282   CompactType 
      compactType) throws IOException {
       1283switch (compactType) {
       1284  case MOB:
-1285        compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), major,
-1286          columnFamily);
+1285        compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName),
+1286          major, columnFamily);
       1287break;
       1288  case NORMAL:
       1289checkTableExists(tableName);
      @@ -3248,7 +3248,7 @@
 3240      new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
       3241@Override
       3242public 
      AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243      RegionInfo info = getMobRegionInfo(tableName);
+3243      RegionInfo info = RegionInfo.createMobRegionInfo(tableName);
       3244  GetRegionInfoRequest 
      request =
       3245
      RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
       3246  GetRegionInfoResponse 
      response = masterAdmin.getRegionInfo(rpcController, request);
      @@ -3312,7 +3312,7 @@
       3304}
       3305break;
       3306  default:
-3307        throw new IllegalArgumentException("Unknowne compactType: " + compactType);
+3307        throw new IllegalArgumentException("Unknown compactType: " + compactType);
       3308}
       3309if (state != null) {
       3310  return 
      ProtobufUtil.createCompactionState(state);
      @@ -3847,325 +3847,320 @@
       3839});
       3840  }
       3841
-3842  private RegionInfo getMobRegionInfo(TableName tableName) {
-3843    return RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844        .build();
-3845  }
-3846
-3847  private RpcControllerFactory getRpcControllerFactory() {
-3848    return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853      throws IOException {
-3854    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3855      @Override
-3856      protected Void rpcCall() throws Exception {
-3857        master.addReplicationPeer(getRpcController(),
-3858          RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled));
-3859        return null;
-3860      }
-3861    });
-3862  }
-3863
-3864  @Override
-3865  public void removeReplicationPeer(String peerId) throws IOException {
-3866    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3867      @Override
-3868      protected Void rpcCall() throws Exception {
-3869        master.removeReplicationPeer(getRpcController(),
-3870          RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871        return null;
-3872      }
-3873    });
-3874  }
-3875
-3876  @Override
-3877  public void enableReplicationPeer(final String peerId) throws IOException {
-3878    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3879      @Override
-3880      protected Void rpcCall() throws Exception {
-3881        master.enableReplicationPeer(getRpcController(),
-3882          RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883        return null;
-3884      }
-3885    });
-3886  }
-3887
-3888  @Override
-3889  public void disableReplicationPeer(final String peerId) throws IOException {
-3890    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3891      @Override
-3892      protected Void rpcCall() throws Exception {
-3893        master.disableReplicationPeer(getRpcController(),
-3894          RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895        return null;
-3896      }
-3897    });
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException {
-3902    return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(),
-3903        getRpcControllerFactory()) {
-3904      @Override
-3905      protected ReplicationPeerConfig rpcCall() throws Exception {
-3906        GetReplicationPeerConfigResponse response = master.getReplicationPeerConfig(
-3907          getRpcController(),
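
Every peer-management method in this block is the same template: build a
protobuf request, issue a single master RPC inside rpcCall(), and let
executeCallable drive retries and cleanup. A hedged sketch of that shape,
where doSomething and buildDoSomethingRequest are hypothetical stand-ins for
the real stub call and request builder seen above:

    @Override
    public void doSomething(String peerId) throws IOException {  // hypothetical RPC
      executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
        @Override
        protected Void rpcCall() throws Exception {
          // One stub call per callable; the RPC controller scopes deadlines
          // and cancellation to this single attempt.
          master.doSomething(getRpcController(),
            RequestConverter.buildDoSomethingRequest(peerId));   // hypothetical builder
          return null;  // void RPC: nothing to hand back to the caller
        }
      });
    }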
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html 
      b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
      index 29ea7b3..6ed75c9 100644
      --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
      @@ -1313,7093 +1313,7082 @@
       1305
       1306  @Override
       1307  public boolean isSplittable() {
-1308    boolean result = isAvailable() && !hasReferences();
      -1309LOG.info("ASKED IF SPLITTABLE " + 
      result + " " + getRegionInfo().getShortNameToLog(),
      -1310  new Throwable("LOGGING: 
      REMOVE"));
      -1311// REMOVE BELOW
      -1312LOG.info("DEBUG LIST ALL FILES");
      -1313for (HStore store : 
      this.stores.values()) {
      -1314  LOG.info("store " + 
      store.getColumnFamilyName());
      -1315  for (HStoreFile sf : 
      store.getStorefiles()) {
      -1316
      LOG.info(sf.toStringDetailed());
      -1317  }
      -1318}
      -1319return result;
      -1320  }
      -1321
      -1322  @Override
      -1323  public boolean isMergeable() {
      -1324if (!isAvailable()) {
      -1325  LOG.debug("Region " + this
      -1326  + " is not mergeable because 
      it is closing or closed");
      -1327  return false;
      -1328}
      -1329if (hasReferences()) {
      -1330  LOG.debug("Region " + this
      -1331  + " is not mergeable because 
      it has references");
      -1332  return false;
      -1333}
      -1334
      -1335return true;
+1308    return isAvailable() && !hasReferences();
      +1309  }
      +1310
      +1311  @Override
      +1312  public boolean isMergeable() {
      +1313if (!isAvailable()) {
      +1314  LOG.debug("Region " + this
      +1315  + " is not mergeable because 
      it is closing or closed");
      +1316  return false;
      +1317}
      +1318if (hasReferences()) {
      +1319  LOG.debug("Region " + this
      +1320  + " is not mergeable because 
      it has references");
      +1321  return false;
      +1322}
      +1323
      +1324return true;
      +1325  }
      +1326
      +1327  public boolean areWritesEnabled() {
      +1328synchronized(this.writestate) {
      +1329  return 
      this.writestate.writesEnabled;
      +1330}
      +1331  }
      +1332
      +1333  @VisibleForTesting
      +1334  public MultiVersionConcurrencyControl 
      getMVCC() {
      +1335return mvcc;
       1336  }
       1337
      -1338  public boolean areWritesEnabled() {
      -1339synchronized(this.writestate) {
      -1340  return 
      this.writestate.writesEnabled;
      -1341}
      -1342  }
      -1343
      -1344  @VisibleForTesting
      -1345  public MultiVersionConcurrencyControl 
      getMVCC() {
      -1346return mvcc;
      -1347  }
      -1348
      -1349  @Override
      -1350  public long getMaxFlushedSeqId() {
      -1351return maxFlushedSeqId;
      +1338  @Override
      +1339  public long getMaxFlushedSeqId() {
      +1340return maxFlushedSeqId;
      +1341  }
      +1342
      +1343  /**
      +1344   * @return readpoint considering given 
      IsolationLevel. Pass {@code null} for default
      +1345   */
      +1346  public long 
      getReadPoint(IsolationLevel isolationLevel) {
+1347    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
      +1348  // This scan can read even 
      uncommitted transactions
      +1349  return Long.MAX_VALUE;
      +1350}
      +1351return mvcc.getReadPoint();
       1352  }
       1353
      -1354  /**
      -1355   * @return readpoint considering given 
      IsolationLevel. Pass {@code null} for default
      -1356   */
      -1357  public long 
      getReadPoint(IsolationLevel isolationLevel) {
-1358    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
      -1359  // This scan can read even 
      uncommitted transactions
      -1360  return Long.MAX_VALUE;
      -1361}
      -1362return mvcc.getReadPoint();
      -1363  }
      -1364
      -1365  public boolean 
      isLoadingCfsOnDemandDefault() {
      -1366return 
      this.isLoadingCfsOnDemandDefault;
      -1367  }
      -1368
      -1369  /**
      -1370   * Close down this HRegion.  Flush the 
      cache, shut down each HStore, don't
      -1371   * service any more calls.
      -1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
      -1374   * time-sensitive thread.
      -1375   *
      -1376   * @return Vector of all the storage 
      files that the HRegion's component
      -1377   * HStores make use of.  It's a list 
      of all StoreFile objects. Returns empty
      -1378   * vector if already closed and null 
      if judged that it should not close.
      -1379   *
      -1380   * @throws IOException e
      -1381   * @throws DroppedSnapshotException 
      Thrown when replay of wal is required
      -1382   * because a Snapshot was not properly 
      persisted. The region is put in closing mode, and the
      -1383   * caller MUST abort after this.
      -1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
      -1386return close(false);
      -1387  }
      -1388
      -1389  private final Object closeLock = new 
      Object();
      -1390
      -1391  /** Conf key for the periodic flush 
      interval */
      -1392  public static final String 
      MEMSTORE_PERIODIC_FLUSH_INTERVAL =
      -1393  
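
getReadPoint(IsolationLevel) above is the server-side hook that a
READ_UNCOMMITTED scan hits: the region then reads past the MVCC read point and
in-flight writes become visible. A hedged client-side sketch of opting in,
using the standard HBase client API (the commented-out line assumes `table` is
an open org.apache.hadoop.hbase.client.Table):

    import org.apache.hadoop.hbase.client.IsolationLevel;
    import org.apache.hadoop.hbase.client.Scan;

    // Opt this scan out of MVCC snapshot isolation; on the server,
    // HRegion.getReadPoint(...) returns Long.MAX_VALUE for it, so even
    // not-yet-committed cells are visible to the scanner.
    Scan scan = new Scan();
    scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    // ResultScanner scanner = table.getScanner(scan);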
      

      [03/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
      index 9098105..b05691f 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowCell.html
      @@ -37,1514 +37,1514 @@
       029import java.util.ArrayList;
       030import java.util.Iterator;
       031import java.util.List;
      -032
      -033import 
      org.apache.hadoop.hbase.KeyValue.Type;
      -034import 
      org.apache.hadoop.hbase.filter.ByteArrayComparable;
      -035import 
      org.apache.hadoop.hbase.io.HeapSize;
      -036import 
      org.apache.hadoop.hbase.io.TagCompressionContext;
      -037import 
      org.apache.hadoop.hbase.io.util.Dictionary;
      -038import 
      org.apache.hadoop.hbase.io.util.StreamUtils;
      -039import 
      org.apache.hadoop.hbase.util.ByteBufferUtils;
      -040import 
      org.apache.hadoop.hbase.util.ByteRange;
      -041import 
      org.apache.hadoop.hbase.util.Bytes;
      -042import 
      org.apache.hadoop.hbase.util.ClassSize;
      -043import 
      org.apache.yetus.audience.InterfaceAudience;
      -044
      -045import 
      com.google.common.annotations.VisibleForTesting;
      -046
      -047/**
      -048 * Utility methods helpful slinging 
      {@link Cell} instances. It has more powerful and
      -049 * rich set of APIs than those in {@link 
      CellUtil} for internal usage.
      -050 */
      -051@InterfaceAudience.Private
      -052// TODO : Make Tag IA.LimitedPrivate and 
      move some of the Util methods to CP exposed Util class
      -053public class PrivateCellUtil {
      +032import java.util.Optional;
      +033
      +034import 
      org.apache.hadoop.hbase.KeyValue.Type;
      +035import 
      org.apache.hadoop.hbase.filter.ByteArrayComparable;
      +036import 
      org.apache.hadoop.hbase.io.HeapSize;
      +037import 
      org.apache.hadoop.hbase.io.TagCompressionContext;
      +038import 
      org.apache.hadoop.hbase.io.util.Dictionary;
      +039import 
      org.apache.hadoop.hbase.io.util.StreamUtils;
      +040import 
      org.apache.hadoop.hbase.util.ByteBufferUtils;
      +041import 
      org.apache.hadoop.hbase.util.ByteRange;
      +042import 
      org.apache.hadoop.hbase.util.Bytes;
      +043import 
      org.apache.hadoop.hbase.util.ClassSize;
      +044import 
      org.apache.yetus.audience.InterfaceAudience;
      +045
      +046import 
      com.google.common.annotations.VisibleForTesting;
      +047
      +048/**
      +049 * Utility methods helpful slinging 
      {@link Cell} instances. It has more powerful and
      +050 * rich set of APIs than those in {@link 
      CellUtil} for internal usage.
      +051 */
      +052@InterfaceAudience.Private
      +053public final class PrivateCellUtil {
       054
       055  /**
       056   * Private constructor to keep this 
      class from being instantiated.
       057   */
       058  private PrivateCellUtil() {
      -059
      -060  }
      -061
      -062  /*** ByteRange 
      ***/
      -063
      -064  public static ByteRange 
      fillRowRange(Cell cell, ByteRange range) {
      -065return range.set(cell.getRowArray(), 
      cell.getRowOffset(), cell.getRowLength());
      -066  }
      -067
      -068  public static ByteRange 
      fillFamilyRange(Cell cell, ByteRange range) {
      -069return 
      range.set(cell.getFamilyArray(), cell.getFamilyOffset(), 
      cell.getFamilyLength());
      -070  }
      -071
      -072  public static ByteRange 
      fillQualifierRange(Cell cell, ByteRange range) {
      -073return 
      range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
      -074  cell.getQualifierLength());
      -075  }
      -076
      -077  public static ByteRange 
      fillValueRange(Cell cell, ByteRange range) {
      -078return 
      range.set(cell.getValueArray(), cell.getValueOffset(), 
      cell.getValueLength());
      -079  }
      -080
      -081  public static ByteRange 
      fillTagRange(Cell cell, ByteRange range) {
      -082return range.set(cell.getTagsArray(), 
      cell.getTagsOffset(), cell.getTagsLength());
      -083  }
      -084
      -085  /**
      -086   * Returns tag value in a new byte 
      array. If server-side, use {@link Tag#getValueArray()} with
      -087   * appropriate {@link 
      Tag#getValueOffset()} and {@link Tag#getValueLength()} instead to save on
      -088   * allocations.
      -089   * @param cell
      -090   * @return tag value in a new byte 
      array.
      -091   */
      -092  public static byte[] getTagsArray(Cell 
      cell) {
      -093byte[] output = new 
      byte[cell.getTagsLength()];
      -094copyTagsTo(cell, output, 0);
      -095return output;
      -096  }
      -097
      -098  public static byte[] cloneTags(Cell 
      cell) {
      -099byte[] output = new 
      byte[cell.getTagsLength()];
      -100copyTagsTo(cell, output, 0);
      -101return output;
      -102  }
      -103
      -104  /**
      -105   * Copies the tags info into the tag 
      portion of the cell
      -106   * @param cell
      -107   * @param destination
      -108   * @param destinationOffset
      -109   * @return position after tags
      +059  }
      +060
      +061  /*** ByteRange 
      ***/
      +062
      +063  public static ByteRange 
      fillRowRange(Cell cell, ByteRange range) {
      +064return range.set(cell.getRowArray(), 
      cell.getRowOffset(), 
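
The fill*Range helpers above re-point a caller-supplied ByteRange at a cell's
backing bytes without copying. A hedged usage sketch; SimpleMutableByteRange
is one concrete ByteRange from org.apache.hadoop.hbase.util, and `cell` is
assumed to be any Cell already in hand:

    // Reuse a single ByteRange across many cells: fillRowRange only re-points
    // the range at the cell's row bytes, it never copies them.
    ByteRange range = new SimpleMutableByteRange();
    PrivateCellUtil.fillRowRange(cell, range);
    byte[] rowCopy = range.deepCopyToNewArray();  // copy only if you must own it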
