[05/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftTable.Scanner.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftTable.Scanner.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftTable.Scanner.html
new file mode 100644
index 000..94888eb
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftTable.Scanner.html
@@ -0,0 +1,564 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift2.client;
+
+import static org.apache.hadoop.hbase.thrift.Constants.HBASE_THRIFT_CLIENT_SCANNER_CACHING;
+import static org.apache.hadoop.hbase.thrift.Constants.HBASE_THRIFT_CLIENT_SCANNER_CACHING_DEFAULT;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
+import org.apache.hadoop.hbase.thrift2.generated.TAppend;
+import org.apache.hadoop.hbase.thrift2.generated.TDelete;
+import org.apache.hadoop.hbase.thrift2.generated.TGet;
+import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
+import org.apache.hadoop.hbase.thrift2.generated.TIncrement;
+import org.apache.hadoop.hbase.thrift2.generated.TPut;
+import org.apache.hadoop.hbase.thrift2.generated.TResult;
+import org.apache.hadoop.hbase.thrift2.generated.TRowMutations;
+import org.apache.hadoop.hbase.thrift2.generated.TScan;
+import org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TTransport;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hbase.thirdparty.com.google.common.primitives.Booleans;
+
+@InterfaceAudience.Private
+public class ThriftTable implements Table {
+
+  private TableName tableName;
+  private Configuration conf;
+  private TTransport tTransport;
+  private THBaseService.Client client;
+  private ByteBuffer tableNameInBytes;
+  private int operationTimeout;
+
+  private final int scannerCaching;
+
+  public ThriftTable(TableName tableName, THBaseService.Client client, TTransport tTransport,
+      Configuration conf) {
+    this.tableName = tableName;
+    this.tableNameInBytes = ByteBuffer.wrap(tableName.toBytes());
+    this.conf = conf;
+    this.tTransport = tTransport;
+    this.client = client;
+    this.scannerCaching =

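Note: the archived diff cuts off mid-assignment above. Given the two static imports at the top of the file, the constructor presumably finishes by reading the scanner caching size from the configuration; a hedged reconstruction, not the verbatim archived line:

    // Assumption based on the HBASE_THRIFT_CLIENT_SCANNER_CACHING imports above;
    // the archived diff truncates before the right-hand side of this assignment.
    this.scannerCaching = conf.getInt(HBASE_THRIFT_CLIENT_SCANNER_CACHING,
        HBASE_THRIFT_CLIENT_SCANNER_CACHING_DEFAULT);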
[05/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 05e88b5..81f8710 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -480,6 +480,7 @@
 org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory (implements org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory)
 org.apache.hadoop.hbase.regionserver.SplitLogWorker (implements java.lang.Runnable)
 org.apache.hadoop.hbase.regionserver.SplitRequest (implements java.lang.Runnable)
+org.apache.hadoop.hbase.regionserver.SplitWALCallable (implements org.apache.hadoop.hbase.procedure2.RSProcedureCallable)
 org.apache.hadoop.hbase.regionserver.StoreEngine<SF,CP,C,SFM>
 
 org.apache.hadoop.hbase.regionserver.DateTieredStoreEngine
@@ -716,20 +717,20 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
-org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
 org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
 org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
-org.apache.hadoop.hbase.regionserver.FlushType
+org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
+org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
 org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
-org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
 org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
 org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
-org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
+org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
+org.apache.hadoop.hbase.regionserver.FlushType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
index 0a43c84..a2d3d45 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
@@ -247,9 +247,9 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.wal.RingBufferTruck.Type
 org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader.WALHdrResult
 org.apache.hadoop.hbase.regionserver.wal.CompressionContext.DictionaryIndex
+org.apache.hadoop.hbase.regionserver.wal.RingBufferTruck.Type
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.html b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/SourceFSConfigurationProvider.html
index effa3f9..79c73d2 100644
---

[05/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
index 736388b..197b99d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteTableProcedureBiConsumer.html
@@ -26,3624 +26,3599 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-
-import com.google.protobuf.Message;
-import com.google.protobuf.RpcChannel;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.BiConsumer;
-import java.util.function.Function;
-import java.util.function.Supplier;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-import org.apache.hadoop.hbase.CacheEvictionStats;
-import org.apache.hadoop.hbase.CacheEvictionStatsAggregator;
-import org.apache.hadoop.hbase.ClusterMetrics;
-import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.RegionMetrics;
-import org.apache.hadoop.hbase.RegionMetricsBuilder;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.TableNotEnabledException;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-import org.apache.hadoop.hbase.client.Scan.ReadType;
-import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-import org.apache.hadoop.hbase.client.replication.TableCFs;
-import org.apache.hadoop.hbase.client.security.SecurityCapability;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.quotas.QuotaFilter;
-import org.apache.hadoop.hbase.quotas.QuotaSettings;
-import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.replication.SyncReplicationState;
-import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
-import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-import

[05/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/testdevapidocs/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.html b/testdevapidocs/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.html
index 895ce40..49fd434 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/thrift/TestThriftServerCmdLine.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":9,"i1":10,"i2":9,"i3":10,"i4":10,"i5":10,"i6":9,"i7":10};
+var methods = {"i0":10,"i1":9,"i2":10,"i3":9,"i4":10,"i5":10,"i6":10,"i7":9,"i8":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -107,9 +107,13 @@ var activeTableTab = "activeTableTab";
 
 
 
+
+Direct Known Subclasses:
+TestThrift2ServerCmdLine
+
 
 
-public class TestThriftServerCmdLine
+public class TestThriftServerCmdLine
 extends java.lang.Object
 Start the HBase Thrift server on a random port through the command-line
  interface and talk to it from client side.
@@ -148,7 +152,7 @@ extends java.lang.Object
 cmdLineThread
 
 
-private org.apache.hadoop.hbase.thrift.ThriftServerRunner.ImplType
+protected org.apache.hadoop.hbase.thrift.ImplType
 implType
 
 
@@ -156,27 +160,27 @@ extends java.lang.Object
 LOG
 
 
-private int
+protected int
 port
 
 
-private boolean
+protected boolean
 specifyBindIP
 
 
-private boolean
+protected boolean
 specifyCompact
 
 
-private boolean
+protected boolean
 specifyFramed
 
 
-private static boolean
+protected static boolean
 tableCreated
 
 
-private static HBaseTestingUtility
+protected static HBaseTestingUtility
 TEST_UTIL
 
 
@@ -198,7 +202,7 @@ extends java.lang.Object
 Constructor and Description
 
 
-TestThriftServerCmdLine(org.apache.hadoop.hbase.thrift.ThriftServerRunner.ImplType implType,
+TestThriftServerCmdLine(org.apache.hadoop.hbase.thrift.ImplType implType,
 boolean specifyFramed,
 boolean specifyBindIP,
 boolean specifyCompact)
@@ -219,34 +223,38 @@ extends java.lang.Object
 Method and Description
 
 
+protected org.apache.hadoop.hbase.thrift.ThriftServer
+createThriftServer()
+
+
 static java.util.Collection<java.lang.Object[]>
 getParameters()
 
-
+
 private java.lang.String
 getParametersString()
 
-
+
 static void
 setUpBeforeClass()
 
-
+
 private void
 startCmdLineThread(java.lang.String[] args)
 
-
+
 private void
 stopCmdLineThread()
 
-
-private void
+
+protected void
 talkToThriftServer()
 
-
+
 static void
 tearDownAfterClass()
 
-
+
 void
 testRunThriftServer()
 
@@ -278,7 +286,7 @@ extends java.lang.Object
 
 
 CLASS_RULE
-public static final HBaseClassTestRule CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
 
 
 
@@ -287,7 +295,7 @@ extends java.lang.Object
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -296,7 +304,7 @@ extends java.lang.Object
 
 
 implType
-private final org.apache.hadoop.hbase.thrift.ThriftServerRunner.ImplType implType
+protected final org.apache.hadoop.hbase.thrift.ImplType implType
 
 
 
@@ -305,7 +313,7 @@ extends java.lang.Object
 
 
 specifyFramed
-private boolean specifyFramed
+protected boolean specifyFramed
 
 
 
@@ -314,7 +322,7 @@ extends java.lang.Object
 
 
 specifyBindIP
-private boolean specifyBindIP
+protected boolean specifyBindIP
 
 
 
@@ -323,7 +331,7 @@ extends java.lang.Object
 
 
 specifyCompact
-private boolean specifyCompact
+protected boolean specifyCompact
 
 
 
@@ -332,7 +340,7 @@ extends java.lang.Object
 
 
 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL

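Note: the private-to-protected changes and the new createThriftServer() hook in this diff exist so that TestThrift2ServerCmdLine (the new direct known subclass) can reuse the whole parameterized harness against the thrift2 server. A hedged sketch of such a subclass; the thrift2 ThriftServer constructor shown is an assumption, not taken from this diff:

    // Illustrative only: swap in the thrift2 server implementation while
    // inheriting the parameterized start/talk/stop harness.
    public class TestThrift2ServerCmdLine extends TestThriftServerCmdLine {
      @Override
      protected org.apache.hadoop.hbase.thrift.ThriftServer createThriftServer() {
        // assumed constructor; the real subclass may wire this differently
        return new org.apache.hadoop.hbase.thrift2.ThriftServer(TEST_UTIL.getConfiguration());
      }
    }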
[05/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.html b/devapidocs/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.html
index bfa0e39..074c3a9 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.html
@@ -345,6 +345,6 @@ extends
-Copyright © 2007-2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007-2019 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
index d29140e..9500016 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
@@ -505,6 +505,6 @@ implements
-Copyright © 2007-2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007-2019 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
index 1514ad9..0d62143 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironmentForCoreCoprocessors.html
@@ -367,6 +367,6 @@ implements
-Copyright © 2007-2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007-2019 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
index ec9fddc..2021eb9 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterObserverOperation.html
@@ -315,6 +315,6 @@ extends
-Copyright © 2007-2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007-2019 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
index 821a13f..b5bbaac 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
@@ -3424,6 +3424,6 @@ extends
-Copyright © 2007-2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007-2019 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/master/MasterDumpServlet.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterDumpServlet.html b/devapidocs/org/apache/hadoop/hbase/master/MasterDumpServlet.html
index 63973fc..6957345 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterDumpServlet.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterDumpServlet.html
@@ -408,6 +408,6 @@ extends
-Copyright © 2007-2018 The Apache Software Foundation. All rights

[05/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
index 0f5a095..50bf692 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
@@ -78,8712 +78,8714 @@
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.Function;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilderType;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.PrivateCellUtil;
-import org.apache.hadoop.hbase.RegionTooBusyException;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.TagUtil;
-import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.CompactionState;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.IsolationLevel;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.RowMutations;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.conf.ConfigurationManager;
-import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FilterWrapper;
-import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-import org.apache.hadoop.hbase.ipc.RpcCall;
-import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.mob.MobFileCache;
-import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-import

[05/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.NotSeekedException.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.NotSeekedException.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.NotSeekedException.html
index 79cb21b..d8d391b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.NotSeekedException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.NotSeekedException.html
@@ -378,1508 +378,1510 @@
 
 @Override
 public void returnBlock(HFileBlock block) {
-  BlockCache blockCache = this.cacheConf.getBlockCache();
-  if (blockCache != null && block != null) {
-    BlockCacheKey cacheKey = new BlockCacheKey(this.getFileContext().getHFileName(),
-        block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType());
-    blockCache.returnBlock(cacheKey, block);
-  }
-}
-/**
- * @return the first key in the file. May be null if file has no entries. Note
- * that this is not the first row key, but rather the byte form of the
- * first KeyValue.
- */
-@Override
-public Optional<Cell> getFirstKey() {
-  if (dataBlockIndexReader == null) {
-    throw new BlockIndexNotLoadedException();
-  }
-  return dataBlockIndexReader.isEmpty() ? Optional.empty()
-      : Optional.of(dataBlockIndexReader.getRootBlockKey(0));
-}
-
-/**
- * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's
- * patch goes in to eliminate {@link KeyValue} here.
- *
- * @return the first row key, or null if the file is empty.
- */
-@Override
-public Optional<byte[]> getFirstRowKey() {
-  // We have to copy the row part to form the row key alone
-  return getFirstKey().map(CellUtil::cloneRow);
-}
-
-/**
- * TODO left from {@link HFile} version 1: move this to StoreFile after
- * Ryan's patch goes in to eliminate {@link KeyValue} here.
- *
- * @return the last row key, or null if the file is empty.
- */
-@Override
-public Optional<byte[]> getLastRowKey() {
-  // We have to copy the row part to form the row key alone
-  return getLastKey().map(CellUtil::cloneRow);
-}
-
-/** @return number of KV entries in this HFile */
-@Override
-public long getEntries() {
-  return trailer.getEntryCount();
-}
-
-/** @return comparator */
-@Override
-public CellComparator getComparator() {
-  return comparator;
-}
-
-/** @return compression algorithm */
-@Override
-public Compression.Algorithm getCompressionAlgorithm() {
-  return compressAlgo;
-}
-
-/**
- * @return the total heap size of data and meta block indexes in bytes. Does
- * not take into account non-root blocks of a multilevel data index.
- */
-@Override
-public long indexSize() {
-  return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0)
-      + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize() : 0);
-}
-
-@Override
-public String getName() {
-  return name;
-}
-
-@Override
-public HFileBlockIndex.BlockIndexReader getDataBlockIndexReader() {
-  return dataBlockIndexReader;
-}
-
-@Override
-public FixedFileTrailer getTrailer() {
-  return trailer;
-}
-
-@Override
-public boolean isPrimaryReplicaReader() {
-  return primaryReplicaReader;
-}
-
-@Override
-public FileInfo loadFileInfo() throws IOException {
-  return fileInfo;
-}
-
-/**
- * An exception thrown when an operation requiring a scanner to be seeked
- * is invoked on a scanner that is not seeked.
- */
-@SuppressWarnings("serial")
-public static class NotSeekedException extends IllegalStateException {
-  public NotSeekedException() {
-    super("Not seeked to a key/value");
-  }
-}
-
-protected static class HFileScannerImpl implements HFileScanner {
-  private ByteBuff blockBuffer;
-  protected final boolean cacheBlocks;
-  protected final boolean pread;
-  protected final boolean isCompaction;
-  private int currKeyLen;
-  private int currValueLen;
-  private int currMemstoreTSLen;
-  private long currMemstoreTS;
-  // Updated but never read?
-  protected AtomicInteger blockFetches = new AtomicInteger(0);
-  protected final HFile.Reader reader;
-  private int currTagsLen;
-  // buffer backed keyonlyKV
-  private

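Note: the scanner's field list is truncated at the "buffer backed keyonlyKV" comment. For orientation only, that comment introduces the scanner's reusable buffer-backed key-only cell; a hedged reconstruction of the cut-off field (an assumption, not the archived line):

    // Assumed continuation: a reusable key-only cell backed by the block buffer,
    // repositioned by the scanner instead of being reallocated per cell.
    private ByteBufferKeyOnlyKeyValue bufBackedKeyOnlyKv = new ByteBufferKeyOnlyKeyValue();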
[05/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
@@ -142,5192 +142,5186 @@
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-
-/**
- * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
- * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
- * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
- * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
- *
- * <p>
- * Region consistency checks verify that hbase:meta, region deployment on region
- * servers and the state of data in HDFS (.regioninfo files) all are in
- * accordance.
- * <p>
- * Table integrity checks verify that all possible row keys resolve to exactly
- * one region of a table.  This means there are no individual degenerate
- * or backwards regions; no holes between regions; and that there are no
- * overlapping regions.
- * <p>
- * The general repair strategy works in two phases:
- * <ol>
- * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
- * <li> Repair Region Consistency with hbase:meta and assignments
- * </ol>
- * <p>
- * For table integrity repairs, the tables' region directories are scanned
- * for .regioninfo files.  Each table's integrity is then verified.  If there
- * are any orphan regions (regions with no .regioninfo files) or holes, new
- * regions are fabricated.  Backwards regions are sidelined as well as empty
- * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
- * a new region is created and all data is merged into the new region.
- * <p>
- * Table integrity repairs deal solely with HDFS and could potentially be done
- * offline -- the hbase region servers or master do not need to be running.
- * This phase can eventually be used to completely reconstruct the hbase:meta table in
- * an offline fashion.
- * <p>
- * Region consistency requires three conditions -- 1) valid .regioninfo file
- * present in an HDFS region dir,  2) valid row with .regioninfo data in META,
- * and 3) a region is deployed only at the regionserver that was assigned to
- * with proper state in the master.
- * <p>
- * Region consistency repairs require hbase to be online so that hbck can
- * contact the HBase master and region servers.  The hbck#connect() method must
- * first be called successfully.  Much of the region consistency information
- * is transient and less risky to repair.
- * <p>
- * If hbck is run from the command line, there are a handful of arguments that
- * can be used to limit the kinds of

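Note: the class javadoc is cut off mid-sentence above. Since the imports show HBaseFsck wired up with Hadoop's Tool/ToolRunner machinery, a read-only consistency report can be driven from its entry point along the lines of this sketch; the -details flag is the commonly documented hbase-1.x one and is an assumption here, and passing no repair flags keeps the run read-only:

    // Sketch only: equivalent to running `hbase hbck -details` from the shell;
    // no -fix* flags are passed, so nothing is modified.
    public final class HbckReport {
      public static void main(String[] args) throws Exception {
        org.apache.hadoop.hbase.util.HBaseFsck.main(new String[] { "-details" });
      }
    }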
[05/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index c7cfd62..da83514 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -18,9 +18,9 @@
   public static final String version = "3.0.0-SNAPSHOT";
   public static final String revision = "";
   public static final String user = "jenkins";
-  public static final String date = "Sat Nov 24 14:43:25 UTC 2018";
+  public static final String date = "Mon Nov 26 14:44:10 UTC 2018";
   public static final String url = "git://jenkins-websites1.apache.org/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-  public static final String srcChecksum = "2ddf86e8061a880c4fd8d153857342a6";
+  public static final String srcChecksum = "4d3ee036754273b7c20a302784193204";
 }
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/src-html/org/apache/hadoop/hbase/ZKNamespaceManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ZKNamespaceManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/ZKNamespaceManager.html
deleted file mode 100644
index 1d548a3..000
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ZKNamespaceManager.html
+++ /dev/null
@@ -1,287 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.NavigableMap;
-import java.util.NavigableSet;
-import java.util.concurrent.ConcurrentSkipListMap;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.ZKListener;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-
-/**
- * Class serves two purposes:
- *
- * 1. Broadcast NamespaceDescriptor information via ZK
- * (Done by the Master)
- * 2. Consume broadcasted NamespaceDescriptor changes
- * (Done by the RegionServers)
- *
- */
-@InterfaceAudience.Private
-public class ZKNamespaceManager extends ZKListener {
-  private static final Logger LOG = LoggerFactory.getLogger(ZKNamespaceManager.class);
-  private final String nsZNode;
-  private final NavigableMap<String, NamespaceDescriptor> cache;
-
-  public ZKNamespaceManager(ZKWatcher zkw) throws IOException {
-    super(zkw);
-    nsZNode = zkw.getZNodePaths().namespaceZNode;
-    cache = new ConcurrentSkipListMap<>();
-  }
-
-  public void start() throws IOException {
-    watcher.registerListener(this);
-    try {
-      if (ZKUtil.watchAndCheckExists(watcher, nsZNode)) {
-        List<ZKUtil.NodeAndData> existing =
-            ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode);
-        if (existing != null) {
-          refreshNodes(existing);
-        }
-      } else {
-        ZKUtil.createWithParents(watcher, nsZNode);
-      }
-    } catch (KeeperException e) {
-      throw new IOException("Failed to initialize ZKNamespaceManager", e);
-    }
-  }
-
-  public void stop() throws IOException {
-    this.watcher.unregisterListener(this);
-  }
-
-  public NamespaceDescriptor get(String

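Note: the deleted class above is a small ZooKeeper watch-and-cache pattern. Grounded in the constructor, start(), and get() visible in the diff, a hosting service used it roughly like this (the ZKWatcher wiring is elided and assumed to come from the Master):

    // Minimal usage sketch under those assumptions.
    ZKNamespaceManager nsManager = new ZKNamespaceManager(zkWatcher);
    nsManager.start();  // registers the ZK listener and seeds the local cache
    NamespaceDescriptor ns = nsManager.get("default");  // answered from the cache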
[05/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.AssignmentManagerForTest.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.AssignmentManagerForTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.AssignmentManagerForTest.html
new file mode 100644
index 000..8ba5775
--- /dev/null
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestRegionAssignedToMultipleRegionServers.AssignmentManagerForTest.html
@@ -0,0 +1,252 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.assignment;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.PleaseHoldException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.StartMiniClusterOption;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.zookeeper.KeeperException;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestRegionAssignedToMultipleRegionServers {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestRegionAssignedToMultipleRegionServers.class);
+
+  private static final List<ServerName> EXCLUDE_SERVERS = new ArrayList<>();
+
+  private static boolean HALT = false;
+
+  private static boolean KILL = false;
+
+  private static CountDownLatch ARRIVE;
+
+  private static final class ServerManagerForTest extends ServerManager {
+
+    public ServerManagerForTest(MasterServices master) {
+      super(master);
+    }
+
+    @Override
+    public List<ServerName> createDestinationServersList() {
+      return super.createDestinationServersList(EXCLUDE_SERVERS);
+    }
+  }
+
+  private static final class AssignmentManagerForTest extends AssignmentManager {
+
+    public AssignmentManagerForTest(MasterServices master) {
+      super(master);
+    }
+
+    @Override
+    public ReportRegionStateTransitionResponse reportRegionStateTransition(
+        ReportRegionStateTransitionRequest req) throws PleaseHoldException {
+      if (req.getTransition(0).getTransitionCode() == TransitionCode.OPENED) {
+        if (ARRIVE != null) {

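Note: the override is truncated here by the digest. A hedged sketch of how this interception pattern typically continues: the latch wakes the waiting test thread, and the OPENED report is held back or failed while the HALT/KILL switches are set (illustrative, not the verbatim archived body):

    @Override
    public ReportRegionStateTransitionResponse reportRegionStateTransition(
        ReportRegionStateTransitionRequest req) throws PleaseHoldException {
      if (req.getTransition(0).getTransitionCode() == TransitionCode.OPENED) {
        if (ARRIVE != null) {
          ARRIVE.countDown();   // wake the test thread waiting on the latch
          ARRIVE = null;
        }
        while (HALT) {          // hold the report back while halted
          if (KILL) {
            throw new PleaseHoldException("Inject error");
          }
          Threads.sleep(100);   // assumes org.apache.hadoop.hbase.util.Threads
        }
      }
      return super.reportRegionStateTransition(req);
    }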
[05/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
index 9b964f6..98ef11a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
@@ -105,7 +105,7 @@
  * will first be initialized to the oldest file's tracker (which is stored in the trailer), using the
  * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge it
  * with the tracker of every newer wal file, using the
- * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, boolean)}.
+ * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}.
  * If we find out
  * that all the modified procedures for the oldest wal file are modified or deleted in newer wal
  * files, then we can delete it. This is because, every time we call
@@ -1181,244 +1181,243 @@
 }
 
 // compute the holding tracker.
-//  - the first WAL is used for the 'updates'
-//  - the global tracker is passed in first to decide which procedures no longer
-//    exist, so we can mark them as deleted in holdingCleanupTracker.
-//    Only the global tracker has the whole picture here.
-//  - the other WALs are scanned to remove procs already updated in a newer wal.
-//    If it is updated in a newer wal, we can mark it as deleted in holdingCleanupTracker.
-//    But we can not delete it if it was shown deleted in the newer wal, as said above.
-// TODO: exit early if holdingCleanupTracker.isEmpty()
-holdingCleanupTracker.resetTo(logs.getFirst().getTracker(), true);
-// Passing in the global tracker, we can delete the procedures not in the global
-// tracker, because they are deleted in the later logs
-holdingCleanupTracker.setDeletedIfModifiedInBoth(storeTracker, true);
-for (int i = 1, size = logs.size() - 1; i < size; ++i) {
-  // Set deleteIfNotExists to false since a single log's tracker is passed in.
-  // Since a specific procedure may not show up in the log at all (not executed or
-  // updated during the time), we can not delete the procedure just because this log
-  // doesn't have the info of the procedure. We can delete the procedure only if
-  // this log's tracker clearly showed that the procedure is modified or deleted
-  // in the corresponding BitSetNode.
-  holdingCleanupTracker.setDeletedIfModifiedInBoth(logs.get(i).getTracker(), false);
-}
-}
-
-/**
- * Remove all logs with logId <= {@code lastLogId}.
- */
-private void removeAllLogs(long lastLogId, String why) {
-  if (logs.size() <= 1) {
-    return;
-  }
-
-  LOG.info("Remove all state logs with ID less than {}, since {}", lastLogId, why);
-
-  boolean removed = false;
-  while (logs.size() > 1) {
-    ProcedureWALFile log = logs.getFirst();
-    if (lastLogId < log.getLogId()) {
-      break;
-    }
-    removeLogFile(log, walArchiveDir);
-    removed = true;
-  }
-
-  if (removed) {
-    buildHoldingCleanupTracker();
-  }
-}
-
-private boolean removeLogFile(final ProcedureWALFile log, final Path walArchiveDir) {
-  try {
-    LOG.trace("Removing log={}", log);
-    log.removeFile(walArchiveDir);
-    logs.remove(log);
-    LOG.debug("Removed log={}, activeLogs={}", log, logs);
-    assert logs.size() > 0 : "expected at least one log";
-  } catch (IOException e) {
-    LOG.error("Unable to remove log: " + log, e);
-    return false;
-  }
-  return true;
-}
-
-// ==
-//  FileSystem Log Files helpers
-// ==
-public Path getWALDir() {
-  return this.walDir;
-}
-
-@VisibleForTesting
-Path getWalArchiveDir() {
-  return this.walArchiveDir;
-}
-
-public FileSystem getFileSystem() {
-  return this.fs;
-}
-
-protected Path getLogFilePath(final long logId) throws IOException {
-  return new Path(walDir, String.format(LOG_PREFIX + "%020d.log",

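Note: the format call above is cut off at the digest boundary; the only argument it can reasonably still take is the log id, so the method presumably ends as follows (hedged completion, not the verbatim archived line):

    protected Path getLogFilePath(final long logId) throws IOException {
      return new Path(walDir, String.format(LOG_PREFIX + "%020d.log", logId));
    }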
[05/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionWithSeqId.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionWithSeqId.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionWithSeqId.html
index ed3db7a..156dabb 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionWithSeqId.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionWithSeqId.html
@@ -5542,785 +5542,825 @@
 5534  }
 5535
 5536  @Test
-5537  public void testWriteRequestsCounter() 
throws IOException {
-5538byte[] fam = 
Bytes.toBytes("info");
-5539byte[][] families = { fam };
-5540this.region = initHRegion(tableName, 
method, CONF, families);
+5537  public void 
testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception {
+5538byte[] cf1 = Bytes.toBytes("CF1");
+5539byte[][] families = { cf1 };
+5540byte[] col = Bytes.toBytes("C");
 5541
-5542Assert.assertEquals(0L, 
region.getWriteRequestsCount());
-5543
-5544Put put = new Put(row);
-5545put.addColumn(fam, fam, fam);
-5546
-5547Assert.assertEquals(0L, 
region.getWriteRequestsCount());
-5548region.put(put);
-5549Assert.assertEquals(1L, 
region.getWriteRequestsCount());
-5550region.put(put);
-5551Assert.assertEquals(2L, 
region.getWriteRequestsCount());
-5552region.put(put);
-5553Assert.assertEquals(3L, 
region.getWriteRequestsCount());
-5554
-region.delete(new Delete(row));
-5556Assert.assertEquals(4L, 
region.getWriteRequestsCount());
-5557  }
-5558
-5559  @Test
-5560  public void 
testOpenRegionWrittenToWAL() throws Exception {
-5561final ServerName serverName = 
ServerName.valueOf(name.getMethodName(), 100, 42);
-5562final RegionServerServices rss = 
spy(TEST_UTIL.createMockRegionServerService(serverName));
-5563
-5564HTableDescriptor htd = new 
HTableDescriptor(TableName.valueOf(name.getMethodName()));
-5565htd.addFamily(new 
HColumnDescriptor(fam1));
-5566htd.addFamily(new 
HColumnDescriptor(fam2));
-5567
-5568HRegionInfo hri = new 
HRegionInfo(htd.getTableName(),
-5569  HConstants.EMPTY_BYTE_ARRAY, 
HConstants.EMPTY_BYTE_ARRAY);
-5570
-5571// open the region w/o rss and wal 
and flush some files
-5572region =
-5573 
HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), 
TEST_UTIL
-5574 .getConfiguration(), 
htd);
-5575assertNotNull(region);
-5576
-5577// create a file in fam1 for the 
region before opening in OpenRegionHandler
-5578region.put(new 
Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
-5579region.flush(true);
-5580
HBaseTestingUtility.closeRegionAndWAL(region);
+5542HBaseConfiguration conf = new 
HBaseConfiguration();
+5543this.region = initHRegion(tableName, 
method, conf, families);
+5544
+5545Put put = new 
Put(Bytes.toBytes("16"));
+5546put.addColumn(cf1, col, 
Bytes.toBytes("val"));
+5547region.put(put);
+5548Put put2 = new 
Put(Bytes.toBytes("15"));
+5549put2.addColumn(cf1, col, 
Bytes.toBytes("val"));
+5550region.put(put2);
+5551
+5552// Create a reverse scan
+5553Scan scan = new 
Scan(Bytes.toBytes("16"));
+5554scan.setReversed(true);
+5555RegionScannerImpl scanner = 
region.getScanner(scan);
+5556
+5557// Put a lot of cells that have 
sequenceIDs grater than the readPt of the reverse scan
+5558for (int i = 10; i  20; 
i++) {
+5559  Put p = new Put(Bytes.toBytes("" + 
i));
+5560  p.addColumn(cf1, col, 
Bytes.toBytes("" + i));
+5561  region.put(p);
+5562}
+5563List<Cell> currRow = new 
ArrayList<>();
+5564boolean hasNext;
+5565do {
+5566  hasNext = scanner.next(currRow);
+5567} while (hasNext);
+5568
+5569assertEquals(2, currRow.size());
+5570assertEquals("16", 
Bytes.toString(currRow.get(0).getRowArray(),
+5571  currRow.get(0).getRowOffset(), 
currRow.get(0).getRowLength()));
+5572assertEquals("15", 
Bytes.toString(currRow.get(1).getRowArray(),
+5573  currRow.get(1).getRowOffset(), 
currRow.get(1).getRowLength()));
+5574  }
+5575
+5576  @Test
+5577  public void testWriteRequestsCounter() 
throws IOException {
+5578byte[] fam = 
Bytes.toBytes("info");
+5579byte[][] families = { fam };
+5580this.region = initHRegion(tableName, 
method, CONF, families);
 5581
-5582ArgumentCaptor<WALEdit> 
editCaptor = ArgumentCaptor.forClass(WALEdit.class);
+5582Assert.assertEquals(0L, 
region.getWriteRequestsCount());
 5583
-5584// capture append() calls
-5585WAL wal = mockWAL();
-5586when(rss.getWAL((HRegionInfo) 
any())).thenReturn(wal);
-5587
-5588region = HRegion.openHRegion(hri, 
htd, rss.getWAL(hri),
-5589  

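The new test above hinges on scanner read points: cells written after a scanner is opened carry sequence ids above its readPt and stay invisible, even for reversed scans. A hedged client-side sketch of the same pattern (table and data setup assumed):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class ReverseScanSketch {
      // Walks rows from "16" downwards; puts issued after getScanner()
      // returned are not observed by this scanner.
      static void reverseScan(Table table) throws IOException {
        Scan scan = new Scan().withStartRow(Bytes.toBytes("16"));
        scan.setReversed(true);
        try (ResultScanner scanner = table.getScanner(scan)) {
          for (Result r : scanner) {
            System.out.println(Bytes.toString(r.getRow()));
          }
        }
      }
    }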
[05/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
index 18ada74..ab6bfd9 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
@@ -49,7 +49,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-PrevClass
+PrevClass
 NextClass
 
 
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class Bytes.LexicographicalComparerHolder
+static class Bytes.LexicographicalComparerHolder
extends Object
 Provides a lexicographical comparer implementation; either 
a Java
  implementation or a faster implementation based on Unsafe.
@@ -236,7 +236,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 UNSAFE_COMPARER_NAME
-static final String UNSAFE_COMPARER_NAME
+static final String UNSAFE_COMPARER_NAME
 
 
 
@@ -245,7 +245,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 BEST_COMPARER
-static final Bytes.Comparer<byte[]> BEST_COMPARER
+static final Bytes.Comparer<byte[]> BEST_COMPARER
 
 
 
@@ -262,7 +262,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 LexicographicalComparerHolder
-LexicographicalComparerHolder()
+LexicographicalComparerHolder()
 
 
 
@@ -279,7 +279,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getBestComparer
-static Bytes.Comparer<byte[]> getBestComparer()
+static Bytes.Comparer<byte[]> getBestComparer()
 Returns the Unsafe-using Comparer, or falls back to the 
pure-Java
  implementation if unable to do so.
 
@@ -312,7 +312,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
-PrevClass
+PrevClass
 NextClass
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/Bytes.html 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
index 9c965b8..6a226f0 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/Bytes.html
@@ -150,6 +150,14 @@ implements https://docs.oracle.com/javase/8/docs/api/java/lang/Comparab
 
 
 (package private) static class
+Bytes.Converter
+
+
+(package private) static class
+Bytes.ConverterHolder
+
+
+(package private) static class
 Bytes.LexicographicalComparerHolder
 Provides a lexicographical comparer implementation; either 
a Java
  implementation or a faster implementation based on Unsafe.
@@ -1527,7 +1535,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/lang/Comparab
 
 
 RNG
-private static final SecureRandom RNG
+private static final SecureRandom RNG
 
 
 
@@ -1536,7 +1544,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/lang/Comparab
 
 
 HEX_CHARS
-private static final char[] HEX_CHARS
+private static final char[] HEX_CHARS
 
 
 
@@ -2341,7 +2349,7 @@ publiccom.google.protobuf.ByteString
 
 explainWrongLengthOrOffset
-private static IllegalArgumentException explainWrongLengthOrOffset(byte[] bytes,
+private static IllegalArgumentException explainWrongLengthOrOffset(byte[] bytes,
                                                                    int offset,
                                                                    int length,
                                                                    int expectedLength)
@@ -2353,7 +2361,7 @@ publiccom.google.protobuf.ByteString
 
 putLong
-public static int putLong(byte[] bytes,
+public static int putLong(byte[] bytes,
                           int offset,
                           long val)
 Put a long value out to the specified byte array 

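putLong returns the offset just past the written value, so successive writes can be packed back to back. A small sketch of the round trip (Bytes.SIZEOF_LONG is the 8-byte width constant):

    import org.apache.hadoop.hbase.util.Bytes;

    class PutLongSketch {
      public static void main(String[] args) {
        byte[] buf = new byte[2 * Bytes.SIZEOF_LONG];
        // putLong returns offset + SIZEOF_LONG, so writes chain naturally.
        int off = Bytes.putLong(buf, 0, 1234L);
        Bytes.putLong(buf, off, 5678L);
        System.out.println(Bytes.toLong(buf, 0));                 // 1234
        System.out.println(Bytes.toLong(buf, Bytes.SIZEOF_LONG)); // 5678
      }
    }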
[05/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.CreateTableProcedureOnHDFSFailure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.CreateTableProcedureOnHDFSFailure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.CreateTableProcedureOnHDFSFailure.html
index f11fbc0..0f43b85 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.CreateTableProcedureOnHDFSFailure.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.CreateTableProcedureOnHDFSFailure.html
@@ -194,8 +194,8 @@
 186long procId = 
procExec.submitProcedure(
 187  new 
CreateTableProcedure(procExec.getEnvironment(), htd, regions));
 188
-189int numberOfSteps = 0; // failing at 
pre operation
-190
MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, 
numberOfSteps);
+189int lastStep = 2; // failing before 
CREATE_TABLE_WRITE_FS_LAYOUT
+190
MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, 
lastStep);
 191
 192TableName tableName = 
htd.getTableName();
 193
MasterProcedureTestingUtility.validateTableDeletion(getMaster(), tableName);
@@ -247,7 +247,7 @@
 239}
 240  }
 241
-242  @Test(timeout = 60000)
+242  @Test
 243  public void testOnHDFSFailure() throws 
Exception {
 244final TableName tableName = 
TableName.valueOf(name.getMethodName());
 245

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
index f11fbc0..0f43b85 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
@@ -194,8 +194,8 @@
 186long procId = 
procExec.submitProcedure(
 187  new 
CreateTableProcedure(procExec.getEnvironment(), htd, regions));
 188
-189int numberOfSteps = 0; // failing at 
pre operation
-190
MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, 
numberOfSteps);
+189int lastStep = 2; // failing before 
CREATE_TABLE_WRITE_FS_LAYOUT
+190
MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, 
lastStep);
 191
 192TableName tableName = 
htd.getTableName();
 193
MasterProcedureTestingUtility.validateTableDeletion(getMaster(), tableName);
@@ -247,7 +247,7 @@
 239}
 240  }
 241
-242  @Test(timeout = 60000)
+242  @Test
 243  public void testOnHDFSFailure() throws 
Exception {
 244final TableName tableName = 
TableName.valueOf(name.getMethodName());
 245

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.html
index e95bd8b..f1f69f6 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.html
@@ -33,10 +33,10 @@
 025import 
org.apache.hadoop.conf.Configuration;
 026import 
org.apache.hadoop.hbase.HBaseClassTestRule;
 027import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-028import 
org.apache.hadoop.hbase.HTableDescriptor;
-029import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-030import 
org.apache.hadoop.hbase.NamespaceNotFoundException;
-031import 
org.apache.hadoop.hbase.TableName;
+028import 
org.apache.hadoop.hbase.NamespaceDescriptor;
+029import 
org.apache.hadoop.hbase.NamespaceNotFoundException;
+030import 
org.apache.hadoop.hbase.TableName;
+031import 
org.apache.hadoop.hbase.client.TableDescriptor;
 032import 
org.apache.hadoop.hbase.constraint.ConstraintException;
 033import 
org.apache.hadoop.hbase.procedure2.Procedure;
 034import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
@@ -96,7 +96,7 @@
 088  @After
 089  public void tearDown() throws Exception 
{
 090
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(),
 false);
-091for (HTableDescriptor htd: 

[05/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
index d69bb8c..92967f2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
@@ -88,428 +88,404 @@
 080
 081  public static final String WAL_PROVIDER 
= "hbase.wal.provider";
 082  static final String 
DEFAULT_WAL_PROVIDER = Providers.defaultProvider.name();
-083  public static final String 
WAL_PROVIDER_CLASS = "hbase.wal.provider.class";
-084  static final Class<? extends 
WALProvider> DEFAULT_WAL_PROVIDER_CLASS = AsyncFSWALProvider.class;
+083
+084  public static final String 
META_WAL_PROVIDER = "hbase.wal.meta_provider";
 085
-086  public static final String 
META_WAL_PROVIDER = "hbase.wal.meta_provider";
-087  public static final String 
META_WAL_PROVIDER_CLASS = "hbase.wal.meta_provider.class";
-088
-089  final String factoryId;
-090  private final WALProvider provider;
-091  // The meta updates are written to a 
different wal. If this
-092  // regionserver holds meta regions, 
then this ref will be non-null.
-093  // lazily initialized; most 
RegionServers don't deal with META
-094  private final 
AtomicReference<WALProvider> metaProvider = new 
AtomicReference<>();
-095
-096  /**
-097   * Configuration-specified WAL Reader 
used when a custom reader is requested
-098   */
-099  private final Class<? extends 
AbstractFSWALProvider.Reader> logReaderClass;
-100
-101  /**
-102   * How long to attempt opening 
in-recovery wals
-103   */
-104  private final int timeoutMillis;
-105
-106  private final Configuration conf;
-107
-108  // Used for the singleton WALFactory, 
see below.
-109  private WALFactory(Configuration conf) 
{
-110// this code is duplicated here so we 
can keep our members final.
-111// until we've moved reader/writer 
construction down into providers, this initialization must
-112// happen prior to provider 
initialization, in case they need to instantiate a reader/writer.
-113timeoutMillis = 
conf.getInt("hbase.hlog.open.timeout", 30);
-114/* TODO Both of these are probably 
specific to the fs wal provider */
-115logReaderClass = 
conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
-116  
AbstractFSWALProvider.Reader.class);
-117this.conf = conf;
-118// end required early 
initialization
-119
-120// this instance can't create wals, 
just reader/writers.
-121provider = null;
-122factoryId = SINGLETON_ID;
-123  }
-124
-125  @VisibleForTesting
-126  Providers getDefaultProvider() {
-127return Providers.defaultProvider;
-128  }
-129
-130  @VisibleForTesting
-131  /*
-132   * @param clsKey config key for 
provider classname
-133   * @param key config key for provider 
enum
-134   * @param defaultValue default value 
for provider enum
-135   * @return Class which extends 
WALProvider
-136   */
-137  public Class<? extends 
WALProvider> getProviderClass(String clsKey, String key,
-138  String defaultValue) {
-139String clsName = conf.get(clsKey);
-140if (clsName == null || 
clsName.isEmpty()) {
-141  clsName = conf.get(key, 
defaultValue);
-142}
-143if (clsName != null && 
!clsName.isEmpty()) {
-144  try {
-145return (Class<? extends 
WALProvider>) Class.forName(clsName);
-146  } catch (ClassNotFoundException 
exception) {
-147// try with enum key next
-148  }
-149}
-150try {
-151  Providers provider = 
Providers.valueOf(conf.get(key, defaultValue));
-152
-153  // AsyncFSWALProvider is not 
guaranteed to work on all Hadoop versions, when it's chosen as
-154  // the default and we can't use it, 
we want to fall back to FSHLog which we know works on
-155  // all versions.
-156  if (provider == 
getDefaultProvider()  provider.clazz == AsyncFSWALProvider.class
-157   
!AsyncFSWALProvider.load()) {
-158// AsyncFSWAL has better 
performance in most cases, and also uses less resources, we will
-159// try to use it if possible. It 
deeply hacks into the internal of DFSClient so will be
-160// easily broken when upgrading 
hadoop.
-161LOG.warn("Failed to load 
AsyncFSWALProvider, falling back to FSHLogProvider");
-162return FSHLogProvider.class;
-163  }
-164
-165  // N.b. If the user specifically 
requested AsyncFSWALProvider but their environment doesn't
-166  // support using it (e.g. 
AsyncFSWALProvider.load() == false), we should let this fail and
-167  // not fall back to 
FSHLogProvider.
-168  return provider.clazz;
-169} catch (IllegalArgumentException 
exception) {
-170  // Fall back to them specifying a 
class name
-171  // Note that the 

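getProviderClass() above consults the class-name key before the enum key, and the surrounding code falls back from AsyncFSWALProvider to FSHLogProvider when the async provider cannot load. A hedged configuration sketch (key names taken from the constants in this diff; the enum value strings are assumptions):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class WalProviderConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Name a built-in provider by its Providers enum value...
        conf.set("hbase.wal.provider", "filesystem");
        // ...or point the class key straight at an implementation; this key
        // wins because getProviderClass(clsKey, ...) reads it first.
        conf.set("hbase.wal.provider.class",
            "org.apache.hadoop.hbase.wal.FSHLogProvider");
      }
    }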
[05/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.html
new file mode 100644
index 000..fc56f48
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.html
@@ -0,0 +1,463 @@
+TestProcedureSkipPersistence (Apache HBase 3.0.0-SNAPSHOT Test 
API)
+org.apache.hadoop.hbase.procedure2
+Class 
TestProcedureSkipPersistence
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.procedure2.TestProcedureSkipPersistence
+
+
+
+
+
+
+
+
+public class TestProcedureSkipPersistence
+extends Object
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+class
+TestProcedureSkipPersistence.ProcEnv
+
+
+static class
+TestProcedureSkipPersistence.TestProcedure
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static HBaseClassTestRule
+CLASS_RULE
+
+
+private org.apache.hadoop.fs.FileSystem
+fs
+
+
+private HBaseCommonTestingUtility
+htu
+
+
+private org.apache.hadoop.fs.Path
+logDir
+
+
+private 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor<TestProcedureSkipPersistence.ProcEnv>
+procExecutor
+
+
+private 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore
+procStore
+
+
+private static int
+STEP
+
+
+private org.apache.hadoop.fs.Path
+testDir
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+TestProcedureSkipPersistence()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+void
+setUp()
+
+
+void
+tearDown()
+
+
+void
+test()
+
+
+
+
+
+

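From the fields listed above (procStore, procExecutor, htu, logDir), the test wires a WAL-backed store to a ProcedureExecutor by hand. A hedged sketch of that harness; the utility method names are assumptions drawn from the procedure2 test helpers of this era:

    // Inside a set-up method; ProcEnv is the nested environment class above.
    HBaseCommonTestingUtility htu = new HBaseCommonTestingUtility();
    Path logDir = new Path(testDir, "proc-logs");
    ProcedureStore procStore =
        ProcedureTestingUtility.createWalStore(htu.getConfiguration(), logDir);
    ProcedureExecutor<ProcEnv> procExecutor =
        new ProcedureExecutor<>(htu.getConfiguration(), new ProcEnv(), procStore);
    procStore.start(1);  // a single store thread is enough for a unit test
    ProcedureTestingUtility.initAndStartWorkers(procExecutor, 1, true);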
[05/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 566f410..da040ad 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -341,8361 +341,8425 @@
 333  private final int 
rowLockWaitDuration;
 334  static final int 
DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
 335
-336  // The internal wait duration to 
acquire a lock before read/update
-337  // from the region. It is not per row. 
The purpose of this wait time
-338  // is to avoid waiting a long time 
while the region is busy, so that
-339  // we can release the IPC handler soon 
enough to improve the
-340  // availability of the region server. 
It can be adjusted by
-341  // tuning configuration 
"hbase.busy.wait.duration".
-342  final long busyWaitDuration;
-343  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-344
-345  // If updating multiple rows in one 
call, wait longer,
-346  // i.e. waiting for busyWaitDuration * 
# of rows. However,
-347  // we can limit the max multiplier.
-348  final int maxBusyWaitMultiplier;
-349
-350  // Max busy wait duration. There is no 
point to wait longer than the RPC
-351  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
-352  final long maxBusyWaitDuration;
-353
-354  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
-355  // in bytes
-356  final long maxCellSize;
-357
-358  // Number of mutations for minibatch 
processing.
-359  private final int miniBatchSize;
+336  private Path regionDir;
+337  private FileSystem walFS;
+338
+339  // The internal wait duration to 
acquire a lock before read/update
+340  // from the region. It is not per row. 
The purpose of this wait time
+341  // is to avoid waiting a long time 
while the region is busy, so that
+342  // we can release the IPC handler soon 
enough to improve the
+343  // availability of the region server. 
It can be adjusted by
+344  // tuning configuration 
"hbase.busy.wait.duration".
+345  final long busyWaitDuration;
+346  static final long 
DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+347
+348  // If updating multiple rows in one 
call, wait longer,
+349  // i.e. waiting for busyWaitDuration * 
# of rows. However,
+350  // we can limit the max multiplier.
+351  final int maxBusyWaitMultiplier;
+352
+353  // Max busy wait duration. There is no 
point to wait longer than the RPC
+354  // purge timeout, when a RPC call will 
be terminated by the RPC engine.
+355  final long maxBusyWaitDuration;
+356
+357  // Max cell size. If nonzero, the 
maximum allowed size for any given cell
+358  // in bytes
+359  final long maxCellSize;
 360
-361  // negative number indicates infinite 
timeout
-362  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-363  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
-364
-365  private final 
ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
-366
-367  /**
-368   * The sequence ID that was 
enLongAddered when this region was opened.
-369   */
-370  private long openSeqNum = 
HConstants.NO_SEQNUM;
-371
-372  /**
-373   * The default setting for whether to 
enable on-demand CF loading for
-374   * scan requests to this region. 
Requests can override it.
-375   */
-376  private boolean 
isLoadingCfsOnDemandDefault = false;
-377
-378  private final AtomicInteger 
majorInProgress = new AtomicInteger(0);
-379  private final AtomicInteger 
minorInProgress = new AtomicInteger(0);
+361  // Number of mutations for minibatch 
processing.
+362  private final int miniBatchSize;
+363
+364  // negative number indicates infinite 
timeout
+365  static final long 
DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+366  final ExecutorService 
rowProcessorExecutor = Executors.newCachedThreadPool();
+367
+368  private final 
ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+369
+370  /**
+371   * The sequence ID that was 
enLongAddered when this region was opened.
+372   */
+373  private long openSeqNum = 
HConstants.NO_SEQNUM;
+374
+375  /**
+376   * The default setting for whether to 
enable on-demand CF loading for
+377   * scan requests to this region. 
Requests can override it.
+378   */
+379  private boolean 
isLoadingCfsOnDemandDefault = false;
 380
-381  //
-382  // Context: During replay we want to 
ensure that we do not lose any data. So, we
-383  // have to be conservative in how we 
replay wals. For each store, we calculate
-384  // the maxSeqId 

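The moved comment block explains that busyWaitDuration bounds how long a read/update waits on a busy region before giving the IPC handler back, tunable via "hbase.busy.wait.duration". A hedged sketch of adjusting it (the value is illustrative, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    class BusyWaitConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Milliseconds to wait on a busy region before failing fast and
        // releasing the handler; the default tracks the RPC timeout.
        conf.setLong("hbase.busy.wait.duration", 30000L);
      }
    }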
[05/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.html
index acc491f..e6c6561 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.html
@@ -26,256 +26,255 @@
 018 */
 019package 
org.apache.hadoop.hbase.regionserver;
 020
-021import java.io.IOException;
-022import java.util.Collection;
-023import java.util.List;
-024import java.util.Map.Entry;
-025import 
java.util.concurrent.ConcurrentMap;
-026
+021import com.google.protobuf.Service;
+022import java.io.IOException;
+023import java.util.Collection;
+024import java.util.List;
+025import java.util.Map.Entry;
+026import 
java.util.concurrent.ConcurrentMap;
 027import 
org.apache.hadoop.hbase.Abortable;
 028import org.apache.hadoop.hbase.Server;
-029import 
org.apache.hadoop.hbase.TableName;
-030import 
org.apache.hadoop.hbase.client.RegionInfo;
-031import 
org.apache.hadoop.hbase.client.locking.EntityLock;
-032import 
org.apache.hadoop.hbase.executor.ExecutorService;
-033import 
org.apache.hadoop.hbase.ipc.RpcServerInterface;
-034import 
org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
-035import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-036import 
org.apache.hadoop.hbase.quotas.RegionSizeStore;
-037import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
-038import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-039import org.apache.hadoop.hbase.wal.WAL;
-040import 
org.apache.yetus.audience.InterfaceAudience;
-041import 
org.apache.zookeeper.KeeperException;
+029import 
org.apache.hadoop.hbase.TableDescriptors;
+030import 
org.apache.hadoop.hbase.TableName;
+031import 
org.apache.hadoop.hbase.client.RegionInfo;
+032import 
org.apache.hadoop.hbase.client.locking.EntityLock;
+033import 
org.apache.hadoop.hbase.executor.ExecutorService;
+034import 
org.apache.hadoop.hbase.ipc.RpcServerInterface;
+035import 
org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
+036import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
+037import 
org.apache.hadoop.hbase.quotas.RegionSizeStore;
+038import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
+039import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
+040import org.apache.hadoop.hbase.wal.WAL;
+041import 
org.apache.yetus.audience.InterfaceAudience;
 042
 043import 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 044
-045import com.google.protobuf.Service;
-046
-047/**
-048 * A curated subset of services provided 
by {@link HRegionServer}.
-049 * For use internally only. Passed to 
Managers, Services and Chores so can pass less-than-a
-050 * full-on HRegionServer at test-time. Be 
judicious adding API. Changes cause ripples through
-051 * the code base.
-052 */
-053@InterfaceAudience.Private
-054public interface RegionServerServices 
extends Server, MutableOnlineRegions, FavoredNodesForRegion {
-055
-056  /** @return the WAL for a particular 
region. Pass null for getting the
-057   * default (common) WAL */
-058  WAL getWAL(RegionInfo regionInfo) 
throws IOException;
-059
-060  /** @return the List of WALs that are 
used by this server
-061   *  Doesn't include the meta WAL
-062   */
-063  List<WAL> getWALs() throws 
IOException;
-064
-065  /**
-066   * @return Implementation of {@link 
FlushRequester} or null. Usually it will not be null unless
-067   * during initialization.
-068   */
-069  FlushRequester getFlushRequester();
-070
-071  /**
-072   * @return Implementation of {@link 
CompactionRequester} or null. Usually it will not be null
-073   * unless during 
intialization.
-074   */
-075  CompactionRequester 
getCompactionRequestor();
-076
-077  /**
-078   * @return the RegionServerAccounting 
for this Region Server
-079   */
-080  RegionServerAccounting 
getRegionServerAccounting();
-081
-082  /**
-083   * @return RegionServer's instance of 
{@link RegionServerRpcQuotaManager}
-084   */
-085  RegionServerRpcQuotaManager 
getRegionServerRpcQuotaManager();
-086
-087  /**
-088   * @return RegionServer's instance of 
{@link SecureBulkLoadManager}
-089   */
-090  SecureBulkLoadManager 
getSecureBulkLoadManager();
-091
-092  /**
-093   * @return RegionServer's instance of 
{@link RegionServerSpaceQuotaManager}
-094   */
-095  RegionServerSpaceQuotaManager 
getRegionServerSpaceQuotaManager();
-096
-097  /**
-098   * Context for postOpenDeployTasks().
-099   */
-100  class PostOpenDeployContext {
-101private final HRegion region;

[05/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
index 2c14c50..43c66a8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -46,2104 +46,2113 @@
 038import 
java.util.concurrent.atomic.AtomicLong;
 039import java.util.stream.Collectors;
 040import java.util.stream.Stream;
-041import 
org.apache.hadoop.conf.Configuration;
-042import 
org.apache.hadoop.hbase.HConstants;
-043import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-044import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-045import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-046import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-047import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-048import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-049import 
org.apache.hadoop.hbase.security.User;
-050import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-051import 
org.apache.hadoop.hbase.util.IdLock;
-052import 
org.apache.hadoop.hbase.util.NonceKey;
-053import 
org.apache.hadoop.hbase.util.Threads;
-054import 
org.apache.yetus.audience.InterfaceAudience;
-055import org.slf4j.Logger;
-056import org.slf4j.LoggerFactory;
-057
-058import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-059import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-060
-061import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-062
-063/**
-064 * Thread Pool that executes the 
submitted procedures.
-065 * The executor has a ProcedureStore 
associated.
-066 * Each operation is logged and on 
restart the pending procedures are resumed.
-067 *
-068 * Unless the Procedure code throws an 
error (e.g. invalid user input)
-069 * the procedure will complete (at some 
point in time). On restart the pending
-070 * procedures are resumed and the ones 
that failed will be rolled back.
-071 *
-072 * The user can add procedures to the 
executor via submitProcedure(proc)
-073 * check for the finished state via 
isFinished(procId)
-074 * and get the result via 
getResult(procId)
-075 */
-076@InterfaceAudience.Private
-077public class 
ProcedureExecutor<TEnvironment> {
-078  private static final Logger LOG = 
LoggerFactory.getLogger(ProcedureExecutor.class);
-079
-080  public static final String 
CHECK_OWNER_SET_CONF_KEY = "hbase.procedure.check.owner.set";
-081  private static final boolean 
DEFAULT_CHECK_OWNER_SET = false;
-082
-083  public static final String 
WORKER_KEEP_ALIVE_TIME_CONF_KEY =
-084  
"hbase.procedure.worker.keep.alive.time.msec";
-085  private static final long 
DEFAULT_WORKER_KEEP_ALIVE_TIME = TimeUnit.MINUTES.toMillis(1);
-086
-087  /**
-088   * {@link #testing} is non-null when 
ProcedureExecutor is being tested. Tests will try to
-089   * break PE having it fail at various 
junctures. When non-null, testing is set to an instance of
-090   * the below internal {@link Testing} 
class with flags set for the particular test.
-091   */
-092  Testing testing = null;
-093
-094  /**
-095   * Class with parameters describing how 
to fail/die when in testing-context.
-096   */
-097  public static class Testing {
-098protected boolean killIfHasParent = 
true;
-099protected boolean killIfSuspended = 
false;
-100
-101/**
-102 * Kill the PE BEFORE we store state 
to the WAL. Good for figuring out if a Procedure is
-103 * persisting all the state it needs 
to recover after a crash.
-104 */
-105protected boolean 
killBeforeStoreUpdate = false;
-106protected boolean 
toggleKillBeforeStoreUpdate = false;
-107
-108/**
-109 * Set when we want to fail AFTER 
state has been stored into the WAL. Rarely used. HBASE-20978
-110 * is about a case where memory-state 
was being set after store to WAL where a crash could
-111 * cause us to get stuck. This flag 
allows killing at what was a vulnerable time.
-112 */
-113protected boolean 
killAfterStoreUpdate = false;
-114protected boolean 
toggleKillAfterStoreUpdate = false;
-115
-116protected boolean 
shouldKillBeforeStoreUpdate() {
-117  final boolean kill = 
this.killBeforeStoreUpdate;
-118  if 
(this.toggleKillBeforeStoreUpdate) {
-119this.killBeforeStoreUpdate = 
!kill;
-120LOG.warn("Toggle KILL before 
store update to: " + this.killBeforeStoreUpdate);
-121  }
-122  return kill;
-123}
-124
-125protected boolean 
shouldKillBeforeStoreUpdate(boolean 

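The Testing flags above are normally flipped through the test utilities rather than set directly; setKillAndToggleBeforeStoreUpdate appears verbatim in the TestDeleteNamespaceProcedure teardown earlier in this digest. A hedged sketch of the kill-and-recover loop (procExec and proc are assumed to exist; restart(...) is the helper that simulates a crash):

    // Kill the executor before every store update, then prove that a
    // restarted executor can rebuild the procedure purely from the WAL.
    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
    long procId = procExec.submitProcedure(proc);  // dies before each persist
    ProcedureTestingUtility.restart(procExec);     // crash + recovery cycle
    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);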
[05/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
index c372545..af3b364 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.html
@@ -1279,322 +1279,339 @@
1271List<RegionInfo> 
lastFewRegions = new ArrayList<>();
 1272// assign the remaining by going 
through the list and try to assign to servers one-by-one
 1273int serverIdx = 
RANDOM.nextInt(numServers);
-1274for (RegionInfo region : 
unassignedRegions) {
+1274OUTER : for (RegionInfo region : 
unassignedRegions) {
 1275  boolean assigned = false;
-1276  for (int j = 0; j < numServers; 
j++) { // try all servers one by one
+1276  INNER : for (int j = 0; j < 
numServers; j++) { // try all servers one by one
 1277ServerName serverName = 
servers.get((j + serverIdx) % numServers);
 1278if 
(!cluster.wouldLowerAvailability(region, serverName)) {
1279  List<RegionInfo> 
serverRegions =
1280  
assignments.computeIfAbsent(serverName, k -> new ArrayList<>());
-1281  serverRegions.add(region);
-1282  cluster.doAssignRegion(region, 
serverName);
-1283  serverIdx = (j + serverIdx + 
1) % numServers; //remain from next server
-1284  assigned = true;
-1285  break;
-1286}
-1287  }
-1288  if (!assigned) {
-1289lastFewRegions.add(region);
-1290  }
-1291}
-1292// just sprinkle the rest of the 
regions on random regionservers. The balanceCluster will
-1293// make it optimal later. we can end 
up with this if numReplicas  numServers.
-1294for (RegionInfo region : 
lastFewRegions) {
-1295  int i = 
RANDOM.nextInt(numServers);
-1296  ServerName server = 
servers.get(i);
-1297  List<RegionInfo> 
serverRegions = assignments.computeIfAbsent(server, k -> new 
ArrayList<>());
-1298  serverRegions.add(region);
-1299  cluster.doAssignRegion(region, 
server);
-1300}
-1301return assignments;
-1302  }
-1303
-1304  protected Cluster 
createCluster(ListServerName servers, CollectionRegionInfo 
regions) {
-1305// Get the snapshot of the current 
assignments for the regions in question, and then create
-1306// a cluster out of it. Note that we 
might have replicas already assigned to some servers
-1307// earlier. So we want to get the 
snapshot to see those assignments, but this will only contain
-1308// replicas of the regions that are 
passed (for performance).
-1309Map<ServerName, 
List<RegionInfo>> clusterState = 
getRegionAssignmentsByServer(regions);
-1310
-1311for (ServerName server : servers) 
{
-1312  if 
(!clusterState.containsKey(server)) {
-1313clusterState.put(server, 
EMPTY_REGION_LIST);
-1314  }
-1315}
-1316return new Cluster(regions, 
clusterState, null, this.regionFinder,
-1317rackManager);
-1318  }
-1319
-1320  private List<ServerName> 
findIdleServers(List<ServerName> servers) {
-1321return 
this.services.getServerManager()
-1322
.getOnlineServersListWithPredicator(servers, IDLE_SERVER_PREDICATOR);
-1323  }
-1324
-1325  /**
-1326   * Used to assign a single region to a 
random server.
-1327   */
-1328  @Override
-1329  public ServerName 
randomAssignment(RegionInfo regionInfo, List<ServerName> servers)
-1330  throws HBaseIOException {
-1331
metricsBalancer.incrMiscInvocations();
-1332if (servers != null && 
servers.contains(masterServerName)) {
-1333  if (shouldBeOnMaster(regionInfo)) 
{
-1334return masterServerName;
-1335  }
-1336  if 
(!LoadBalancer.isTablesOnMaster(getConf())) {
-1337// Guarantee we do not put any 
regions on master
-1338servers = new 
ArrayList<>(servers);
-1339
servers.remove(masterServerName);
-1340  }
-1341}
-1342
-1343int numServers = servers == null ? 0 
: servers.size();
-1344if (numServers == 0) {
-1345  LOG.warn("Wanted to retain 
assignment but no servers to assign to");
-1346  return null;
-1347}
-1348if (numServers == 1) { // Only one 
server, nothing fancy we can do here
-1349  return servers.get(0);
-1350}
-1351List<ServerName> idleServers = 
findIdleServers(servers);
-1352if (idleServers.size() == 1) {
-1353  return idleServers.get(0);
-1354}
-1355final List<ServerName> 
finalServers = idleServers.isEmpty() ?
-1356servers : idleServers;
-1357List<RegionInfo> regions = 
Lists.newArrayList(regionInfo);
-1358Cluster cluster = 
createCluster(finalServers, regions);
-1359return randomAssignment(cluster, 
regionInfo, 

[05/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index d11176a..2c14c50 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -982,1050 +982,1168 @@
 974  }
 975
 976  /**
-977   * Add a new root-procedure to the 
executor.
-978   * @param proc the new procedure to 
execute.
-979   * @param nonceKey the registered 
unique identifier for this operation from the client or process.
-980   * @return the procedure id, that can 
be used to monitor the operation
-981   */
-982  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
-983  justification = "FindBugs is blind 
to the check-for-null")
-984  public long 
submitProcedure(Procedure<TEnvironment> proc, NonceKey nonceKey) {
-985
Preconditions.checkArgument(lastProcId.get() = 0);
-986
-987prepareProcedure(proc);
-988
-989final Long currentProcId;
-990if (nonceKey != null) {
-991  currentProcId = 
nonceKeysToProcIdsMap.get(nonceKey);
-992  
Preconditions.checkArgument(currentProcId != null,
-993"Expected nonceKey=" + nonceKey + 
" to be reserved, use registerNonce(); proc=" + proc);
-994} else {
-995  currentProcId = nextProcId();
-996}
-997
-998// Initialize the procedure
-999proc.setNonceKey(nonceKey);
-1000
proc.setProcId(currentProcId.longValue());
-1001
-1002// Commit the transaction
-1003store.insert(proc, null);
-1004LOG.debug("Stored {}", proc);
-1005
-1006// Add the procedure to the 
executor
-1007return pushProcedure(proc);
-1008  }
-1009
-1010  /**
-1011   * Add a set of new root-procedure to 
the executor.
-1012   * @param procs the new procedures to 
execute.
-1013   */
-1014  // TODO: Do we need to take nonces 
here?
-1015  public void 
submitProcedures(Procedure<TEnvironment>[] procs) {
-1016
Preconditions.checkArgument(lastProcId.get() >= 0);
-1017if (procs == null || procs.length 
<= 0) {
-1018  return;
-1019}
-1020
-1021// Prepare procedure
-1022for (int i = 0; i < procs.length; 
++i) {
-1023  
prepareProcedure(procs[i]).setProcId(nextProcId());
-1024}
-1025
-1026// Commit the transaction
-1027store.insert(procs);
-1028if (LOG.isDebugEnabled()) {
-1029  LOG.debug("Stored " + 
Arrays.toString(procs));
-1030}
-1031
-1032// Add the procedure to the 
executor
-1033for (int i = 0; i < procs.length; 
++i) {
-1034  pushProcedure(procs[i]);
-1035}
-1036  }
-1037
-1038  private Procedure<TEnvironment> 
prepareProcedure(Procedure<TEnvironment> proc) {
-1039
Preconditions.checkArgument(proc.getState() == ProcedureState.INITIALIZING);
-1040
Preconditions.checkArgument(!proc.hasParent(), "unexpected parent", proc);
-1041if (this.checkOwnerSet) {
-1042  
Preconditions.checkArgument(proc.hasOwner(), "missing owner");
-1043}
-1044return proc;
-1045  }
-1046
-1047  private long 
pushProcedure(Procedure<TEnvironment> proc) {
-1048final long currentProcId = 
proc.getProcId();
+977   * Bypass a procedure. If the procedure 
is set to bypass, all the logic in
+978   * execute/rollback will be ignored and 
it will return success, whatever.
+979   * It is used to recover buggy stuck 
procedures, releasing the lock resources
+980   * and letting other procedures run. 
Bypassing one procedure (and its ancestors will
+981   * be bypassed automatically) may leave 
the cluster in a middle state, e.g. region
+982   * not assigned, or some hdfs files 
left behind. After getting rid of those stuck procedures,
+983   * the operators may have to do some 
clean up on hdfs or schedule some assign procedures
+984   * to let region online. DO AT YOUR OWN 
RISK.
+985   * p
+986   * A procedure can be bypassed only 
if
+987   * 1. The procedure is in state of 
RUNNABLE, WAITING, WAITING_TIMEOUT
+988   * or it is a root procedure without 
any child.
+989   * 2. No other worker thread is 
executing it
+990   * 3. No child procedure has been 
submitted
+991   *
+992   * p
+993   * If all the requirements are met, 
the procedure and its ancestors will be
+994   * bypassed and persisted to WAL.
+995   *
+996   * p
+997   * If the procedure is in WAITING 
state, it will be set to RUNNABLE and added to the run queue.
+998   * TODO: What about WAITING_TIMEOUT?
+999   * @param id the procedure id
+1000   * @param lockWait time to wait lock
+1001   * @param force if force set to true, 
we will bypass the procedure even if it is executing.
+1002   *  This is for procedures 
which can't break out during executing(due to bug, mostly)
+1003   * 

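A hedged sketch of invoking the bypass described above, with the signature inferred from the @param tags (later releases take a List<Long> of ids plus a recursive flag); as the javadoc warns, using this on a live cluster is at your own risk:

    long stuckProcId = 42L;  // illustrative id of a stuck procedure
    boolean bypassed = procExecutor.bypassProcedure(
        stuckProcId,  // the procedure id
        30000,        // lockWait: ms to wait for the procedure's lock
        false);       // force: leave false unless no worker can be executing it
    if (!bypassed) {
      LOG.warn("Procedure {} did not meet the bypass requirements", stuckProcId);
    }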
[05/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testdevapidocs/org/apache/hadoop/hbase/client/TestHbck.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestHbck.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestHbck.html
new file mode 100644
index 000..cbe3b39
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestHbck.html
@@ -0,0 +1,466 @@
+TestHbck (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase.client
+Class TestHbck
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.client.TestHbck
+
+
+
+
+
+
+
+
+public class TestHbck
+extends Object
+Class to test HBaseHbck.
+ Spins up the minicluster once at test start and then takes it down afterward.
+ Add any testing of HBaseHbck functionality here.
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private 
org.apache.hadoop.hbase.client.Admin
+admin
+
+
+static HBaseClassTestRule
+CLASS_RULE
+
+
+private 
org.apache.hadoop.hbase.client.Hbck
+hbck
+
+
+private static org.slf4j.Logger
+LOG
+
+
+org.junit.rules.TestName
+name
+
+
+private static 
org.apache.hadoop.hbase.TableName
+tableName
+
+
+private static HBaseTestingUtility
+TEST_UTIL
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+TestHbck()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsStatic MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+void
+setUp()
+
+
+static void
+setUpBeforeClass()
+
+
+void
+tearDown()
+
+
+static void
+tearDownAfterClass()
+
+
+void
+testSetTableStateInMeta()
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Field Detail
+
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+
+
+
+
+
+
+LOG
+private static final org.slf4j.Logger LOG
+
+
+

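testSetTableStateInMeta above exercises the Hbck fixup API. A hedged usage sketch (Hbck comes from the cluster connection in this era; setTableStateInMeta returns the previous state so a repair tool can log what it overwrote):

    // connection and tableName are assumed to exist.
    try (Hbck hbck = connection.getHbck()) {
      TableState previous = hbck.setTableStateInMeta(
          new TableState(tableName, TableState.State.DISABLED));
      System.out.println("state in meta was: " + previous.getState());
    }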
[05/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.html
index d206019..9475950 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnPrefixFilter.html
@@ -29,147 +29,162 @@
 021
 022import java.io.IOException;
 023import java.util.ArrayList;
-024
-025import 
org.apache.hadoop.hbase.ByteBufferExtendedCell;
-026import org.apache.hadoop.hbase.Cell;
-027import 
org.apache.hadoop.hbase.PrivateCellUtil;
-028import 
org.apache.yetus.audience.InterfaceAudience;
-029import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-030import 
org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-031import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
-032import 
org.apache.hadoop.hbase.util.Bytes;
-033
-034import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-035import 
org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-036import 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-037
-038/**
-039 * This filter is used for selecting only 
those keys with columns that matches
-040 * a particular prefix. For example, if 
prefix is 'an', it will pass keys with
-041 * columns like 'and', 'anti' but not 
keys with columns like 'ball', 'act'.
-042 */
-043@InterfaceAudience.Public
-044public class ColumnPrefixFilter extends 
FilterBase {
-045  protected byte [] prefix = null;
-046
-047  public ColumnPrefixFilter(final byte [] 
prefix) {
-048this.prefix = prefix;
-049  }
-050
-051  public byte[] getPrefix() {
-052return prefix;
-053  }
-054
-055  @Override
-056  public boolean filterRowKey(Cell cell) 
throws IOException {
-057// Impl in FilterBase might do 
unnecessary copy for Off heap backed Cells.
-058return false;
-059  }
-060
-061  @Deprecated
-062  @Override
-063  public ReturnCode filterKeyValue(final 
Cell c) {
-064return filterCell(c);
-065  }
-066
-067  @Override
-068  public ReturnCode filterCell(final Cell 
cell) {
-069if (this.prefix == null) {
-070  return ReturnCode.INCLUDE;
-071} else {
-072  return filterColumn(cell);
-073}
-074  }
-075
-076  public ReturnCode filterColumn(Cell 
cell) {
-077int qualifierLength = 
cell.getQualifierLength();
-078if (qualifierLength < 
prefix.length) {
-079  int cmp = 
compareQualifierPart(cell, qualifierLength, this.prefix);
-080  if (cmp <= 0) {
-081return 
ReturnCode.SEEK_NEXT_USING_HINT;
-082  } else {
-083return ReturnCode.NEXT_ROW;
-084  }
-085} else {
-086  int cmp = 
compareQualifierPart(cell, this.prefix.length, this.prefix);
-087  if (cmp < 0) {
-088return 
ReturnCode.SEEK_NEXT_USING_HINT;
-089  } else if (cmp > 0) {
-090return ReturnCode.NEXT_ROW;
-091  } else {
-092return ReturnCode.INCLUDE;
-093  }
-094}
-095  }
-096
-097  private static int 
compareQualifierPart(Cell cell, int length, byte[] prefix) {
-098if (cell instanceof 
ByteBufferExtendedCell) {
-099  return 
ByteBufferUtils.compareTo(((ByteBufferExtendedCell) 
cell).getQualifierByteBuffer(),
-100  ((ByteBufferExtendedCell) 
cell).getQualifierPosition(), length, prefix, 0, length);
-101}
-102return 
Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), length, 
prefix, 0,
-103length);
-104  }
-105
-106  public static Filter 
createFilterFromArguments(ArrayList<byte []> filterArguments) {
-107
Preconditions.checkArgument(filterArguments.size() == 1,
-108"Expected 
1 but got: %s", filterArguments.size());
-109byte [] columnPrefix = 
ParseFilter.removeQuotesFromByteArray(filterArguments.get(0));
-110return new 
ColumnPrefixFilter(columnPrefix);
-111  }
-112
-113  /**
-114   * @return The filter serialized using 
pb
-115   */
-116  @Override
-117  public byte [] toByteArray() {
-118
FilterProtos.ColumnPrefixFilter.Builder builder =
-119  
FilterProtos.ColumnPrefixFilter.newBuilder();
-120if (this.prefix != null) 
builder.setPrefix(UnsafeByteOperations.unsafeWrap(this.prefix));
-121return 
builder.build().toByteArray();
-122  }
-123
-124  /**
-125   * @param pbBytes A pb serialized 
{@link ColumnPrefixFilter} instance
-126   * @return An instance of {@link 
ColumnPrefixFilter} made from <code>bytes</code>
-127   * @throws 
org.apache.hadoop.hbase.exceptions.DeserializationException
-128   * @see #toByteArray
-129   */
-130  public static ColumnPrefixFilter 
parseFrom(final byte [] pbBytes)
-131  throws DeserializationException {
-132FilterProtos.ColumnPrefixFilter 
proto;
-133try {

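A usage sketch for the filter above: only columns whose qualifier starts with the prefix survive, per the class javadoc ('an' passes 'and' and 'anti' but not 'ball' or 'act'). Table setup is assumed:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    class ColumnPrefixSketch {
      static void scanWithPrefix(Table table) throws IOException {
        Scan scan = new Scan();
        // SEEK_NEXT_USING_HINT in filterColumn() lets the scanner jump
        // straight to the first matching qualifier within each row.
        scan.setFilter(new ColumnPrefixFilter(Bytes.toBytes("an")));
        try (ResultScanner rs = table.getScanner(scan)) {
          for (Result r : rs) {
            System.out.println(r);
          }
        }
      }
    }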
[05/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html
index 6cf1758..c19b494 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected class TestHRegion.FlushThread
+protected class TestHRegion.FlushThread
extends Thread
 
 
@@ -255,7 +255,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html
 
 
 done
-private volatile boolean done
+private volatile boolean done
 
 
 
@@ -264,7 +264,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html
 
 
 error
-private Throwable error
+private Throwable error
 
 
 
@@ -281,7 +281,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html
 
 
 FlushThread
-FlushThread()
+FlushThread()
 
 
 
@@ -298,7 +298,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html
 
 
 done
-public void done()
+public void done()
 
 
 
@@ -307,7 +307,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html
 
 
 checkNoError
-publicvoidcheckNoError()
+publicvoidcheckNoError()
 
 
 
@@ -316,7 +316,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html
 
 
 run
-publicvoidrun()
+publicvoidrun()
 
 Specified by:
 https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true#run--;
 title="class or interface in java.lang">runin 
interfacehttps://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable
@@ -331,7 +331,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html
 
 
 flush
-publicvoidflush()
+publicvoidflush()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.GetTillDoneOrException.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.GetTillDoneOrException.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.GetTillDoneOrException.html
index dfcc2f4..ed2474e 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.GetTillDoneOrException.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.GetTillDoneOrException.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";

-class TestHRegion.GetTillDoneOrException
+class TestHRegion.GetTillDoneOrException
 extends java.lang.Thread

@@ -254,7 +254,7 @@ extends java.lang.Thread

 g
-private final org.apache.hadoop.hbase.client.Get g
+private final org.apache.hadoop.hbase.client.Get g

@@ -263,7 +263,7 @@ extends java.lang.Thread

 done
-private final java.util.concurrent.atomic.AtomicBoolean done
+private final java.util.concurrent.atomic.AtomicBoolean done

@@ -272,7 +272,7 @@ extends java.lang.Thread

 count
-private final java.util.concurrent.atomic.AtomicInteger count
+private final java.util.concurrent.atomic.AtomicInteger count

@@ -281,7 +281,7 @@ extends java.lang.Thread

 e
-private java.lang.Exception e

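Both test helpers above are variations on one pattern: a worker Thread with a done flag, a captured failure, and a checkNoError() the main test calls after joining. A generic, self-contained sketch of that pattern (hypothetical class, not the TestHRegion code itself):

import java.util.concurrent.atomic.AtomicBoolean;

public class LoopingTestThread extends Thread {
  private final AtomicBoolean done = new AtomicBoolean(false);
  private volatile Throwable error = null;

  @Override
  public void run() {
    try {
      while (!done.get()) {
        // ... one unit of test work per iteration (a flush, a get, ...) ...
      }
    } catch (Throwable t) {
      error = t; // remember the failure for the main test thread
    }
  }

  public void done() {
    done.set(true);
  }

  public void checkNoError() {
    if (error != null) {
      throw new AssertionError("worker thread failed", error);
    }
  }
}
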
[05/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index 81f5178..7df71bd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -108,3669 +108,3727 @@
 100import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 101import org.apache.hadoop.hbase.log.HBaseMarkers;
 102import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-103import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-104import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-105import org.apache.hadoop.hbase.master.assignment.RegionStates;
-106import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-107import org.apache.hadoop.hbase.master.balancer.BalancerChore;
-108import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-109import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-110import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-111import org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-112import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-113import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-114import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-115import org.apache.hadoop.hbase.master.locking.LockManager;
-116import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-117import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-118import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-119import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-120import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
-121import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
-122import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
-123import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
-124import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-125import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
-126import org.apache.hadoop.hbase.master.procedure.InitMetaProcedure;
-127import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-128import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-129import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-130import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-131import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
-132import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-133import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-134import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
-135import org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure;
-136import org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
-137import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
-138import org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
-139import org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
-140import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
-141import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager;
-142import org.apache.hadoop.hbase.master.replication.TransitPeerSyncReplicationStateProcedure;
-143import org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
-144import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-145import org.apache.hadoop.hbase.master.zksyncer.MasterAddressSyncer;
-146import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
-147import org.apache.hadoop.hbase.mob.MobConstants;
-148import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
-149import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-150import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-151import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
-152import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
-153import org.apache.hadoop.hbase.procedure2.LockedResource;
-154import org.apache.hadoop.hbase.procedure2.Procedure;
-155import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-156import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-157import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-158import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;

[05/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
index 63e4b46..514f830 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470      Configuration conf)
-471      throws IOException {
+463   * @param tmpDirName of the directory used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471      String tmpDirName, Configuration conf) throws IOException {
 472    FileSystem fs = FileSystem.get(conf);
 473    Path rootDir = FSUtils.getRootDir(conf);
 474    Path tableDir = FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483      return null;
 484    }
 485    if (fs.exists(dir) && fs.isFile(dir)) {
-486      Path tmp = new Path("/tmp");
+486      Path tmp = new Path(tmpDirName);
 487      if (!fs.exists(tmp)) {
 488        fs.mkdirs(tmp);
 489      }
@@ -1520,411 +1520,413 @@
 1512     * @return a path with a write for that path. caller should close.
 1513     */
 1514    WriterAndPath createWAP(byte[] region, Entry entry) throws IOException {
-1515      Path regionedits = getRegionSplitEditsPath(entry,
-1516          fileBeingSplit.getPath().getName(), conf);
-1517      if (regionedits == null) {
-1518        return null;
-1519      }
-1520      FileSystem rootFs = FileSystem.get(conf);
-1521      if (rootFs.exists(regionedits)) {
-1522        LOG.warn("Found old edits file. It could be the "
-1523            + "result of a previous failed split attempt. Deleting " + regionedits + ", length="
-1524            + rootFs.getFileStatus(regionedits).getLen());
-1525        if (!rootFs.delete(regionedits, false)) {
-1526          LOG.warn("Failed delete of old {}", regionedits);
-1527        }
-1528      }
-1529      Writer w = createWriter(regionedits);
-1530      LOG.debug("Creating writer path={}", regionedits);
-1531      return new WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532    }
-1533
-1534    void filterCellByStore(Entry logEntry) {
-1535      Map<byte[], Long> maxSeqIdInStores =
-1536          regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537      if (MapUtils.isEmpty(maxSeqIdInStores)) {
-1538        return;
-1539      }
-1540      // Create the array list for the cells that aren't filtered.
-1541      // We make the assumption that most cells will be kept.
-1542      ArrayList<Cell> keptCells = new ArrayList<>(logEntry.getEdit().getCells().size());
-1543      for (Cell cell : logEntry.getEdit().getCells()) {
-1544        if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545          keptCells.add(cell);
-1546        } else {
-1547          byte[] family = CellUtil.cloneFamily(cell);
-1548          Long maxSeqId = maxSeqIdInStores.get(family);
-1549          // Do not skip cell even if maxSeqId is null. Maybe we are in a rolling upgrade,
-1550          // or the master was crashed before and we can not get the information.
-1551          if (maxSeqId == null || maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552            keptCells.add(cell);
-1553          }
-1554        }
-1555      }
-1556
-1557      // Anything in the keptCells array list is still live.
-1558      // So rather than removing the cells from the array list
-1559      // which would be an O(n^2) operation, we just replace the list
-1560      logEntry.getEdit().setCells(keptCells);
-1561    }
-1562
-1563    @Override
-1564    public void append(RegionEntryBuffer buffer) throws IOException {
-1565      appendBuffer(buffer, true);
-1566    }
-1567
-1568    WriterAndPath appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException {
-1569      List<Entry> entries = buffer.entryBuffer;
-1570      if (entries.isEmpty()) {
-1571        LOG.warn("got an empty buffer, skipping");
-1572        return null;
-1573      }
-1574
-1575      WriterAndPath wap = null;
+1515

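filterCellByStore() above keeps a WAL cell only if the region may still need it: METAFAMILY cells always survive, and everything else is dropped once its sequence id is at or below the store's highest flushed sequence id. A standalone sketch of that per-cell decision, with a plain map standing in for regionMaxSeqIdInStores (HBase keys such maps with Bytes.BYTES_COMPARATOR, which does exist on org.apache.hadoop.hbase.util.Bytes):

import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;

public class SeqIdFilterSketch {
  // A cell is replayed unless its family's max flushed seqId proves it durable.
  static boolean keepCell(byte[] family, long cellSeqId, Map<byte[], Long> maxSeqIdInStores) {
    Long maxSeqId = maxSeqIdInStores.get(family);
    // Unknown family (rolling upgrade, crashed master): err on the side of replaying.
    if (maxSeqId == null) {
      return true;
    }
    // Skip only edits the store has already flushed, i.e. cellSeqId <= maxSeqId.
    return maxSeqId.longValue() < cellSeqId;
  }

  public static void main(String[] args) {
    // byte[] keys need a content comparator, hence the TreeMap.
    Map<byte[], Long> maxSeqIds = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    maxSeqIds.put(Bytes.toBytes("cf"), 100L);
    System.out.println(keepCell(Bytes.toBytes("cf"), 101L, maxSeqIds)); // true: newer than flushed data
    System.out.println(keepCell(Bytes.toBytes("cf"), 99L, maxSeqIds));  // false: already flushed
  }
}
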
[05/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/testapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
--
diff --git a/testapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html 
b/testapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
index af2e5b1..fe2a7c8 100644
--- a/testapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
+++ b/testapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
@@ -32,889 +32,915 @@
 024import java.util.HashSet;
 025import java.util.List;
 026import java.util.Set;
-027import org.apache.hadoop.conf.Configuration;
-028import org.apache.hadoop.fs.FileSystem;
-029import org.apache.hadoop.hbase.master.HMaster;
-030import org.apache.hadoop.hbase.regionserver.HRegion;
-031import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
-032import org.apache.hadoop.hbase.regionserver.HRegionServer;
-033import org.apache.hadoop.hbase.regionserver.Region;
-034import org.apache.hadoop.hbase.security.User;
-035import org.apache.hadoop.hbase.test.MetricsAssertHelper;
-036import org.apache.hadoop.hbase.util.JVMClusterUtil;
-037import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
-038import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-039import org.apache.hadoop.hbase.util.Threads;
-040import org.apache.yetus.audience.InterfaceAudience;
-041import org.slf4j.Logger;
-042import org.slf4j.LoggerFactory;
-043
-044import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-045import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-046import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
-047import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
-048
-049/**
-050 * This class creates a single process HBase cluster.
-051 * each server.  The master uses the 'default' FileSystem.  The RegionServers,
-052 * if we are running on DistributedFilesystem, create a FileSystem instance
-053 * each and will close down their instance on the way out.
-054 */
-055@InterfaceAudience.Public
-056public class MiniHBaseCluster extends HBaseCluster {
-057  private static final Logger LOG = LoggerFactory.getLogger(MiniHBaseCluster.class.getName());
-058  public LocalHBaseCluster hbaseCluster;
-059  private static int index;
-060
-061  /**
-062   * Start a MiniHBaseCluster.
-063   * @param conf Configuration to be used for cluster
-064   * @param numRegionServers initial number of region servers to start.
-065   * @throws IOException
-066   */
-067  public MiniHBaseCluster(Configuration conf, int numRegionServers)
-068      throws IOException, InterruptedException {
-069    this(conf, 1, numRegionServers);
-070  }
-071
-072  /**
-073   * Start a MiniHBaseCluster.
-074   * @param conf Configuration to be used for cluster
-075   * @param numMasters initial number of masters to start.
-076   * @param numRegionServers initial number of region servers to start.
-077   * @throws IOException
-078   */
-079  public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers)
-080      throws IOException, InterruptedException {
-081    this(conf, numMasters, numRegionServers, null, null);
-082  }
-083
-084  /**
-085   * Start a MiniHBaseCluster.
-086   * @param conf Configuration to be used for cluster
-087   * @param numMasters initial number of masters to start.
-088   * @param numRegionServers initial number of region servers to start.
-089   */
-090  public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers,
-091         Class<? extends HMaster> masterClass,
-092         Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
-093      throws IOException, InterruptedException {
-094    this(conf, numMasters, numRegionServers, null, masterClass, regionserverClass);
-095  }
-096
-097  /**
-098   * @param rsPorts Ports that RegionServer should use; pass ports if you want to test cluster
-099   *   restart where for sure the regionservers come up on same address+port (but
-100   *   just with different startcode); by default mini hbase clusters choose new
-101   *   arbitrary ports on each cluster start.
-102   * @throws IOException
-103   * @throws InterruptedException
-104   */
-105  public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers,
-106         List<Integer> rsPorts,
-107         Class<? extends HMaster> masterClass,
-108         Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
-109      throws IOException, InterruptedException {
-110    super(conf);
-111
-112    // Hadoop 2
-113    CompatibilityFactory.getInstance(MetricsAssertHelper.class).init();
-114
-115    init(numMasters, numRegionServers, rsPorts, masterClass, regionserverClass);
-116

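Taken together, the constructors above all bottom out in init(numMasters, numRegionServers, ...); the two-argument form defaults to a single master. A hedged sketch of direct use (illustrative only; most tests reach this class through HBaseTestingUtility instead):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MiniHBaseCluster;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // One master (the two-arg constructor delegates to (conf, 1, n)) and one region server.
    MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 1);
    try {
      // ... exercise client code against the in-process cluster ...
    } finally {
      cluster.shutdown(); // stop the master and region server threads
    }
  }
}
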
[05/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
index 9babac2..2e75659 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
@@ -1024,7 +1024,7 @@
 1016      if (regionNode.isInState(State.OPENING, State.OPEN)) {
 1017        if (!regionNode.getRegionLocation().equals(serverName)) {
 1018          throw new UnexpectedStateException(regionNode.toString() +
-1019            "reported OPEN on server=" + serverName +
+1019            " reported OPEN on server=" + serverName +
 1020            " but state has otherwise.");
 1021        } else if (regionNode.isInState(State.OPENING)) {
 1022          try {

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index 9babac2..2e75659 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -1024,7 +1024,7 @@
 1016      if (regionNode.isInState(State.OPENING, State.OPEN)) {
 1017        if (!regionNode.getRegionLocation().equals(serverName)) {
 1018          throw new UnexpectedStateException(regionNode.toString() +
-1019            "reported OPEN on server=" + serverName +
+1019            " reported OPEN on server=" + serverName +
 1020            " but state has otherwise.");
 1021        } else if (regionNode.isInState(State.OPENING)) {
 1022          try {

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index 9babac2..2e75659 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -1024,7 +1024,7 @@
 1016      if (regionNode.isInState(State.OPENING, State.OPEN)) {
 1017        if (!regionNode.getRegionLocation().equals(serverName)) {
 1018          throw new UnexpectedStateException(regionNode.toString() +
-1019            "reported OPEN on server=" + serverName +
+1019            " reported OPEN on server=" + serverName +
 1020            " but state has otherwise.");
 1021        } else if (regionNode.isInState(State.OPENING)) {
 1022          try {

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.html
index 622208f..0484c0c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.html
@@ -156,12 +156,7 @@
 148      serializer.deserialize(MasterProcedureProtos.GCRegionStateData.class);
 149    setRegion(ProtobufUtil.toRegionInfo(msg.getRegionInfo()));
 150  }
-151
-152  @Override
-153  protected org.apache.hadoop.hbase.procedure2.Procedure.LockState acquireLock(MasterProcedureEnv env) {
-154    return super.acquireLock(env);
-155  }
-156}
+151}
 
 
 



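The GCRegionProcedure hunk above deletes an acquireLock override whose body only forwarded to super.acquireLock(env). A self-contained illustration of why such an override is dead weight (generic names, not HBase code):

class Parent {
  protected String work() {
    return "parent";
  }
}

class Child extends Parent {
  @Override
  protected String work() {
    return super.work(); // pure pass-through: identical to not overriding at all
  }
}

public class RedundantOverrideDemo {
  public static void main(String[] args) {
    // Prints "parent" whether or not Child declares the override,
    // which is why the diff can drop the method without a behavior change.
    System.out.println(new Child().work());
  }
}
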
[05/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
index b341b0d..10ab3d1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.html
@@ -42,473 +42,534 @@
 034import org.apache.hadoop.hbase.TableNotFoundException;
 035import org.apache.hadoop.hbase.client.Connection;
 036import org.apache.hadoop.hbase.client.RegionInfo;
-037import org.apache.hadoop.hbase.client.TableDescriptor;
-038import org.apache.hadoop.hbase.errorhandling.ForeignException;
-039import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-040import org.apache.hadoop.hbase.master.MasterFileSystem;
-041import org.apache.hadoop.hbase.master.MetricsSnapshot;
-042import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-043import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-044import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-045import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-046import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
-047import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-048import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-049import org.apache.hadoop.hbase.util.Pair;
-050import org.apache.yetus.audience.InterfaceAudience;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-054import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-055import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-056import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState;
-057import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-058
-059@InterfaceAudience.Private
-060public class RestoreSnapshotProcedure
-061    extends AbstractStateMachineTableProcedure<RestoreSnapshotState> {
-062  private static final Logger LOG = LoggerFactory.getLogger(RestoreSnapshotProcedure.class);
-063
-064  private TableDescriptor modifiedTableDescriptor;
-065  private List<RegionInfo> regionsToRestore = null;
-066  private List<RegionInfo> regionsToRemove = null;
-067  private List<RegionInfo> regionsToAdd = null;
-068  private Map<String, Pair<String, String>> parentsToChildrenPairMap = new HashMap<>();
-069
-070  private SnapshotDescription snapshot;
-071  private boolean restoreAcl;
-072
-073  // Monitor
-074  private MonitoredTask monitorStatus = null;
-075
-076  private Boolean traceEnabled = null;
-077
-078  /**
-079   * Constructor (for failover)
-080   */
-081  public RestoreSnapshotProcedure() {
-082  }
-083
-084  public RestoreSnapshotProcedure(final MasterProcedureEnv env,
-085      final TableDescriptor tableDescriptor, final SnapshotDescription snapshot)
-086      throws HBaseIOException {
-087    this(env, tableDescriptor, snapshot, false);
-088  }
-089  /**
-090   * Constructor
-091   * @param env MasterProcedureEnv
-092   * @param tableDescriptor the table to operate on
-093   * @param snapshot snapshot to restore from
-094   * @throws IOException
-095   */
-096  public RestoreSnapshotProcedure(
-097      final MasterProcedureEnv env,
-098      final TableDescriptor tableDescriptor,
-099      final SnapshotDescription snapshot,
-100      final boolean restoreAcl)
-101      throws HBaseIOException {
-102    super(env);
-103    // This is the new schema we are going to write out as this modification.
-104    this.modifiedTableDescriptor = tableDescriptor;
-105    preflightChecks(env, null/*Table can be online when restore is called?*/);
-106    // Snapshot information
-107    this.snapshot = snapshot;
-108    this.restoreAcl = restoreAcl;
-109
-110    // Monitor
-111    getMonitorStatus();
-112  }
+037import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+038import org.apache.hadoop.hbase.client.TableDescriptor;
+039import org.apache.hadoop.hbase.errorhandling.ForeignException;
+040import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+041import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+042import org.apache.hadoop.hbase.master.MasterFileSystem;
+043import org.apache.hadoop.hbase.master.MetricsSnapshot;
+044import org.apache.hadoop.hbase.master.RegionState;
+045import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+046import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+047import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+048import

[05/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html 
b/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
index c8387a5..3a6e532 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/HBaseIOException.html
@@ -1,10 +1,10 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
-类 org.apache.hadoop.hbase.HBaseIOException的使用 (Apache HBase 3.0.0-SNAPSHOT API)
+Uses of Class org.apache.hadoop.hbase.HBaseIOException (Apache HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个
-下一个
+Prev
+Next
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 
 
-

类的使用
org.apache.hadoop.hbase.HBaseIOException

+

Uses of Class
org.apache.hadoop.hbase.HBaseIOException

-使用HBaseIOException的程序包
+Packages that use HBaseIOException
@@ -89,33 +89,13 @@
@@ -156,54 +136,54 @@ Coprocessors are code that runs in-process on each region server.

org.apache.hadoop.hbase中

    [05/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
    --
    diff --git 
    a/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html 
    b/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
    index 0a58d3f..a652985 100644
    --- a/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
    +++ b/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
    @@ -1,10 +1,10 @@
     http://www.w3.org/TR/html4/loose.dtd;>
     
    -
    +
     
     
     
-Uses of Class org.apache.hadoop.hbase.DoNotRetryIOException (Apache HBase 3.0.0-SNAPSHOT API)
+类 org.apache.hadoop.hbase.DoNotRetryIOException的使用 (Apache HBase 3.0.0-SNAPSHOT API)
     
     
     
    @@ -12,7 +12,7 @@
     
     
     
    -JavaScript is disabled on your browser.
    +您的浏览器已禁用 JavaScript。
     
     
     
     
     
    -Skip navigation links
    +跳过导航链接
     
     
     
    -
    -Overview
    -Package
    -Class
    -Use
    -Tree
    -Deprecated
    -Index
    -Help
    +
    +概览
    +程序包
+类
    +使用
    +树
    +已过时
    +索引
    +帮助
     
     
     
     
    -Prev
    -Next
    +上一个
    +下一个
     
     
    -Frames
    -NoFrames
    +框架
    +无框架
     
     
    -AllClasses
    +所有类
     
     
     
     
    -

    Uses of Class
    org.apache.hadoop.hbase.DoNotRetryIOException

    +

    类的使用
    org.apache.hadoop.hbase.DoNotRetryIOException

    • -

Packages that use HBaseIOException
程序包 / Package | 说明 / Description
org.apache.hadoop.hbase.client
-Provides HBase Client -Table of Contents -Overview -Example API Usage -Overview: To administer HBase, create and drop tables, list and alter tables, use Admin.
+Provides HBase Client
org.apache.hadoop.hbase.coprocessor
-Table of Contents -Overview -Coprocessor -RegionObserver -Endpoint -Coprocessor loading -Overview: Coprocessors are code that runs in-process on each region server.
+Table of Contents
Packages that use DoNotRetryIOException
@@ -89,13 +89,33 @@
@@ -132,78 +152,78 @@

[05/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.BlockBucket.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.BlockBucket.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.BlockBucket.html
    index 67f4551..017124c 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.BlockBucket.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/LruBlockCache.BlockBucket.html
@@ -387,817 +387,804 @@
 379    }
 380
 381    LruCachedBlock cb = map.get(cacheKey);
-382    if (cb != null) {
-383      int comparison = BlockCacheUtil.validateBlockAddition(cb.getBuffer(), buf, cacheKey);
-384      if (comparison != 0) {
-385        if (comparison < 0) {
-386          LOG.warn("Cached block contents differ by nextBlockOnDiskSize. Keeping cached block.");
-387          return;
-388        } else {
-389          LOG.warn("Cached block contents differ by nextBlockOnDiskSize. Caching new block.");
-390        }
-391      } else {
-392        String msg = "Cached an already cached block: " + cacheKey + " cb:" + cb.getCacheKey();
-393        msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
-394        LOG.debug(msg);
-395        return;
-396      }
-397    }
-398    long currentSize = size.get();
-399    long currentAcceptableSize = acceptableSize();
-400    long hardLimitSize = (long) (hardCapacityLimitFactor * currentAcceptableSize);
-401    if (currentSize >= hardLimitSize) {
-402      stats.failInsert();
-403      if (LOG.isTraceEnabled()) {
-404        LOG.trace("LruBlockCache current size " + StringUtils.byteDesc(currentSize)
-405          + " has exceeded acceptable size " + StringUtils.byteDesc(currentAcceptableSize) + "."
-406          + " The hard limit size is " + StringUtils.byteDesc(hardLimitSize)
-407          + ", failed to put cacheKey:" + cacheKey + " into LruBlockCache.");
-408      }
-409      if (!evictionInProgress) {
-410        runEviction();
-411      }
-412      return;
-413    }
-414    cb = new LruCachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory);
-415    long newSize = updateSizeMetrics(cb, false);
-416    map.put(cacheKey, cb);
-417    long val = elements.incrementAndGet();
-418    if (buf.getBlockType().isData()) {
-419      dataBlockElements.increment();
-420    }
-421    if (LOG.isTraceEnabled()) {
-422      long size = map.size();
-423      assertCounterSanity(size, val);
-424    }
-425    if (newSize > currentAcceptableSize && !evictionInProgress) {
-426      runEviction();
-427    }
-428  }
-429
-430  /**
-431   * Sanity-checking for parity between actual block cache content and metrics.
-432   * Intended only for use with TRACE level logging and -ea JVM.
-433   */
-434  private static void assertCounterSanity(long mapSize, long counterVal) {
-435    if (counterVal < 0) {
-436      LOG.trace("counterVal overflow. Assertions unreliable. counterVal=" + counterVal +
-437        ", mapSize=" + mapSize);
-438      return;
-439    }
-440    if (mapSize < Integer.MAX_VALUE) {
-441      double pct_diff = Math.abs((((double) counterVal) / ((double) mapSize)) - 1.);
-442      if (pct_diff > 0.05) {
-443        LOG.trace("delta between reported and actual size > 5%. counterVal=" + counterVal +
-444          ", mapSize=" + mapSize);
-445      }
-446    }
-447  }
-448
-449  /**
-450   * Cache the block with the specified name and buffer.
-451   * <p>
-452   *
-453   * @param cacheKey block's cache key
-454   * @param buf  block buffer
-455   */
-456  @Override
-457  public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
-458    cacheBlock(cacheKey, buf, false);
-459  }
-460
-461  /**
-462   * Helper function that updates the local size counter and also updates any
-463   * per-cf or per-blocktype metrics it can discern from given
-464   * {@link LruCachedBlock}
-465   */
-466  private long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
-467    long heapsize = cb.heapSize();
-468    BlockType bt = cb.getBuffer().getBlockType();
-469    if (evict) {
-470      heapsize *= -1;
-471    }
-472    if (bt != null && bt.isData()) {
-473      dataBlockSize.add(heapsize);
-474    }
-475    return size.addAndGet(heapsize);
-476  }
-477
-478  /**
-479   * Get the buffer of the block with the specified name.
-480   *
-481   * @param cacheKey   block's cache key
-482   * @param caching    true if the caller caches blocks on cache misses
-483   * @param repeat     Whether this is a repeat lookup for the same block
-484   *                   (used to avoid double counting cache misses when doing double-check
-485   *                   locking)
-486   * @param updateCacheMetrics Whether to update cache metrics or not
-487   *
-488   * @return buffer of

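Two guards dominate the cacheBlock() path above: duplicate detection via BlockCacheUtil.validateBlockAddition(), and a hard admission limit of hardCapacityLimitFactor * acceptableSize(), beyond which the insert is counted as a failure and an eviction is kicked off. The admission decision in isolation, as a small sketch whose field names mirror the excerpt but which is not the LruBlockCache implementation:

public class AdmissionSketch {
  private final long acceptableSize;           // soft target size in bytes
  private final float hardCapacityLimitFactor; // e.g. 1.2f

  AdmissionSketch(long acceptableSize, float hardCapacityLimitFactor) {
    this.acceptableSize = acceptableSize;
    this.hardCapacityLimitFactor = hardCapacityLimitFactor;
  }

  /** @return true if a new block may be inserted at the given current size. */
  boolean admit(long currentSize) {
    long hardLimitSize = (long) (hardCapacityLimitFactor * acceptableSize);
    // At or beyond the hard limit the excerpt calls stats.failInsert() and,
    // unless one is already running, runEviction(); the block is not cached.
    return currentSize < hardLimitSize;
  }

  public static void main(String[] args) {
    AdmissionSketch cache = new AdmissionSketch(1_000_000L, 1.2f);
    System.out.println(cache.admit(900_000L));   // true: under the hard limit
    System.out.println(cache.admit(1_300_000L)); // false: insert would be refused
  }
}
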
    [05/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
    index c10cfbf..a3e2f4a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
@@ -3371,7 +3371,7 @@
 3363    private V result = null;
 3364
 3365    private final HBaseAdmin admin;
-3366    private final Long procId;
+3366    protected final Long procId;
 3367
 3368    public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
 3369      this.admin = admin;
@@ -3653,653 +3653,651 @@
 3645     * @return a description of the operation
 3646     */
 3647    protected String getDescription() {
-3648      return "Operation: " + getOperationType() + ", "
-3649          + "Table Name: " + tableName.getNameWithNamespaceInclAsString();
-3650
-3651    }
-3652
-3653    protected abstract class TableWaitForStateCallable implements WaitForStateCallable {
-3654      @Override
-3655      public void throwInterruptedException() throws InterruptedIOException {
-3656        throw new InterruptedIOException("Interrupted while waiting for operation: "
-3657            + getOperationType() + " on table: " + tableName.getNameWithNamespaceInclAsString());
-3658      }
-3659
-3660      @Override
-3661      public void throwTimeoutException(long elapsedTime) throws TimeoutException {
-3662        throw new TimeoutException("The operation: " + getOperationType() + " on table: " +
-3663            tableName.getNameAsString() + " has not completed after " + elapsedTime + "ms");
-3664      }
-3665    }
-3666
-3667    @Override
-3668    protected V postOperationResult(final V result, final long deadlineTs)
-3669        throws IOException, TimeoutException {
-3670      LOG.info(getDescription() + " completed");
-3671      return super.postOperationResult(result, deadlineTs);
-3672    }
-3673
-3674    @Override
-3675    protected V postOperationFailure(final IOException exception, final long deadlineTs)
-3676        throws IOException, TimeoutException {
-3677      LOG.info(getDescription() + " failed with " + exception.getMessage());
-3678      return super.postOperationFailure(exception, deadlineTs);
-3679    }
-3680
-3681    protected void waitForTableEnabled(final long deadlineTs)
-3682        throws IOException, TimeoutException {
-3683      waitForState(deadlineTs, new TableWaitForStateCallable() {
-3684        @Override
-3685        public boolean checkState(int tries) throws IOException {
-3686          try {
-3687            if (getAdmin().isTableAvailable(tableName)) {
-3688              return true;
-3689            }
-3690          } catch (TableNotFoundException tnfe) {
-3691            LOG.debug("Table " + tableName.getNameWithNamespaceInclAsString()
-3692                + " was not enabled, sleeping. tries=" + tries);
-3693          }
-3694          return false;
-3695        }
-3696      });
-3697    }
-3698
-3699    protected void waitForTableDisabled(final long deadlineTs)
-3700        throws IOException, TimeoutException {
-3701      waitForState(deadlineTs, new TableWaitForStateCallable() {
-3702        @Override
-3703        public boolean checkState(int tries) throws IOException {
-3704          return getAdmin().isTableDisabled(tableName);
-3705        }
-3706      });
-3707    }
-3708
-3709    protected void waitTableNotFound(final long deadlineTs)
-3710        throws IOException, TimeoutException {
-3711      waitForState(deadlineTs, new TableWaitForStateCallable() {
-3712        @Override
-3713        public boolean checkState(int tries) throws IOException {
-3714          return !getAdmin().tableExists(tableName);
-3715        }
-3716      });
-3717    }
-3718
-3719    protected void waitForSchemaUpdate(final long deadlineTs)
-3720        throws IOException, TimeoutException {
-3721      waitForState(deadlineTs, new TableWaitForStateCallable() {
-3722        @Override
-3723        public boolean checkState(int tries) throws IOException {
-3724          return getAdmin().getAlterStatus(tableName).getFirst() == 0;
-3725        }
-3726      });
-3727    }
-3728
-3729    protected void waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
-3730        throws IOException, TimeoutException {
-3731      final TableDescriptor desc = getTableDescriptor();
-3732      final AtomicInteger actualRegCount = new AtomicInteger(0);
-3733      final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
-3734        @Override
-3735        public boolean visit(Result rowResult) throws IOException {
-3736

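Each waitFor* helper above wraps one boolean condition in an anonymous TableWaitForStateCallable and hands it to waitForState(deadline, callable), which polls checkState(tries) until it returns true or the deadline passes. The polling contract, reduced to a hedged standalone sketch (names are illustrative, not the HBaseAdmin API):

import java.util.concurrent.TimeoutException;

public class PollUntilSketch {
  interface StateCheck {
    boolean checkState(int tries) throws Exception;
  }

  // Mirrors the shape of waitForState(): poll, back off, give up at the deadline.
  static void waitForState(long deadlineMs, StateCheck check) throws Exception {
    for (int tries = 0; System.currentTimeMillis() < deadlineMs; tries++) {
      if (check.checkState(tries)) {
        return; // condition reached, e.g. table enabled/disabled/dropped
      }
      Thread.sleep(100); // pause between polls
    }
    throw new TimeoutException("condition not reached before deadline");
  }

  public static void main(String[] args) throws Exception {
    long deadline = System.currentTimeMillis() + 5_000;
    // Stands in for checkState() bodies like getAdmin().isTableDisabled(tableName).
    waitForState(deadline, tries -> tries >= 3);
  }
}
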
    [05/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/replication/RecoverStandbyProcedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/replication/RecoverStandbyProcedure.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/replication/RecoverStandbyProcedure.html
    new file mode 100644
    index 000..70e2384
    --- /dev/null
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/replication/RecoverStandbyProcedure.html
    @@ -0,0 +1,601 @@
    +http://www.w3.org/TR/html4/loose.dtd;>
    +
    +
    +
    +
    +
    +RecoverStandbyProcedure (Apache HBase 3.0.0-SNAPSHOT API)
    +
    +
    +
    +
    +
    +var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
    +var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],8:["t4","Concrete Methods"]};
    +var altColor = "altColor";
    +var rowColor = "rowColor";
    +var tableTab = "tableTab";
    +var activeTableTab = "activeTableTab";
    +
    +
    +JavaScript is disabled on your browser.
    +
    +
    +
    +
    +
    +Skip navigation links
    +
    +
    +
    +
    +Overview
    +Package
    +Class
    +Use
    +Tree
    +Deprecated
    +Index
    +Help
    +
    +
    +
    +
    +PrevClass
    +NextClass
    +
    +
    +Frames
    +NoFrames
    +
    +
    +AllClasses
    +
    +
    +
    +
    +
    +
    +
    +Summary:
    +Nested|
    +Field|
    +Constr|
    +Method
    +
    +
    +Detail:
    +Field|
    +Constr|
    +Method
    +
    +
    +
    +
    +
    +
    +
    +
    +org.apache.hadoop.hbase.master.replication
+Class RecoverStandbyProcedure
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.procedure2.Procedure<TEnvironment>
+
+
+org.apache.hadoop.hbase.procedure2.StateMachineProcedure<MasterProcedureEnv,TState>
+
+
+org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverStandbyState>
+
+
+org.apache.hadoop.hbase.master.replication.RecoverStandbyProcedure
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
+All Implemented Interfaces:
+Comparable<Procedure<MasterProcedureEnv>>, PeerProcedureInterface
+
+
+
+@InterfaceAudience.Private
+public class RecoverStandbyProcedure
+extends AbstractPeerProcedure<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverStandbyState>
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Nested Class Summary
    +
    +
    +
    +
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.StateMachineProcedure
+StateMachineProcedure.Flow
+
+
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+Procedure.LockState
+
+
+
+Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface
+PeerProcedureInterface.PeerOperationType
    +
    +
    +
    +
    +
    +
    +
    +
    +Field Summary
    +
    +Fields
    +
    +Modifier and Type
    +Field and Description
    +
    +
    +private static org.slf4j.Logger
    +LOG
    +
    +
    +private boolean
    +serial
    +
    +
    +
    +
    +
    +
+Fields inherited from class org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure
+latch, peerId
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+NO_PROC_ID, NO_TIMEOUT
    +
    +
    +
    +
    +
    +
    +
    +
    +Constructor Summary
    +
    +Constructors
    +
    +Constructor and Description
    +
    +
    +RecoverStandbyProcedure()
    +
    +
+RecoverStandbyProcedure(String peerId,
+   boolean serial)
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +Method Summary
    +
    +All MethodsInstance MethodsConcrete Methods
    +
    +Modifier and Type
    +Method and Description
    +
    +
+protected void
+deserializeStateData(ProcedureStateSerializer serializer)
+Called on store load to allow the user to decode the previously serialized state.
+
+
+private void
+dispathWals(SyncReplicationReplayWALManager syncReplicationReplayWALManager)
+
+
+protected StateMachineProcedure.Flow
+executeFromState(MasterProcedureEnv env,
+    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverStandbyState state)
+called to perform a single step of the specified 'state' of the procedure
+
+
+protected org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverStandbyState
+getInitialState()
+Return the initial state object that will be used for the first call to executeFromState().
+
+
+PeerProcedureInterface.PeerOperationType
+getPeerOperationType()
+
+
+protected 

    [05/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.html 
    b/devapidocs/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.html
    index 0adfff9..13b45ee 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";

 @InterfaceAudience.Private
-public class StorageClusterStatusModel
+public class StorageClusterStatusModel
 extends java.lang.Object
 implements java.io.Serializable, ProtobufMessageHandler
 Representation of the status of a storage cluster:
@@ -163,6 +163,7 @@ implements java.io.Serializable
    <attribute name="memstoreSizeMB" type="int"></attribute>
    <attribute name="storefileIndexSizeMB" type="int"></attribute>
    <attribute name="readRequestsCount" type="int"></attribute>
+   <attribute name="cpRequestsCount" type="int"></attribute>
    <attribute name="writeRequestsCount" type="int"></attribute>
    <attribute name="rootIndexSizeKB" type="int"></attribute>
    <attribute name="totalStaticIndexSizeKB" type="int"></attribute>
@@ -377,7 +378,7 @@ implements java.io.Serializable

 serialVersionUID
-private static final long serialVersionUID
+private static final long serialVersionUID

 See Also:
 Constant Field Values
@@ -390,7 +391,7 @@ implements java.io.Serializable

 liveNodes
-private java.util.List<StorageClusterStatusModel.Node> liveNodes
+private java.util.List<StorageClusterStatusModel.Node> liveNodes

@@ -399,7 +400,7 @@ implements java.io.Serializable

 deadNodes
-private java.util.List<java.lang.String> deadNodes
+private java.util.List<java.lang.String> deadNodes

@@ -408,7 +409,7 @@ implements java.io.Serializable

 regions
-private int regions
+private int regions

@@ -417,7 +418,7 @@ implements java.io.Serializable

 requests
-private long requests
+private long requests

@@ -426,7 +427,7 @@ implements java.io.Serializable

 averageLoad
-private double averageLoad
+private double averageLoad

@@ -443,7 +444,7 @@ implements java.io.Serializable

 StorageClusterStatusModel
-public StorageClusterStatusModel()
+public StorageClusterStatusModel()
 Default constructor

@@ -461,7 +462,7 @@ implements java.io.Serializable

 addLiveNode
-public StorageClusterStatusModel.Node addLiveNode(java.lang.String name,
+public StorageClusterStatusModel.Node addLiveNode(java.lang.String name,
   long startCode,
   int heapSizeMB,
   int maxHeapSizeMB)
@@ -481,7 +482,7 @@ implements java.io.Serializable

 getLiveNode
-public StorageClusterStatusModel.Node getLiveNode(int index)
+public StorageClusterStatusModel.Node getLiveNode(int index)

 Parameters:
 index - the index
@@ -496,7 +497,7 @@ implements java.io.Serializable

 addDeadNode
-public void addDeadNode(java.lang.String node)
+public void addDeadNode(java.lang.String

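The accessors above are how the REST gateway assembles its cluster-status document (note the new cpRequestsCount attribute in the schema hunk). A short population sketch using only the signatures shown; the host names and values are made up:

import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;

public class ClusterStatusModelSketch {
  public static void main(String[] args) {
    StorageClusterStatusModel status = new StorageClusterStatusModel();

    // addLiveNode(name, startCode, heapSizeMB, maxHeapSizeMB) returns the Node,
    // so per-region detail can be attached to the returned handle.
    StorageClusterStatusModel.Node node =
        status.addLiveNode("rs1.example.com:16020", 1546300800000L, 128, 1024);

    status.addDeadNode("rs2.example.com:16020");

    // getLiveNode(int) retrieves by insertion index.
    System.out.println(status.getLiveNode(0) == node); // true
  }
}
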
    [05/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
    index 541beed..1100e95 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
    @@ -42,1015 +42,1038 @@
     034import 
    java.util.concurrent.ConcurrentHashMap;
     035import 
    java.util.concurrent.ConcurrentSkipListMap;
     036import 
    java.util.concurrent.atomic.AtomicInteger;
    -037
    -038import 
    org.apache.hadoop.hbase.HConstants;
    -039import 
    org.apache.hadoop.hbase.ServerName;
    -040import 
    org.apache.hadoop.hbase.TableName;
    -041import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -042import 
    org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
    -043import 
    org.apache.hadoop.hbase.master.RegionState;
    -044import 
    org.apache.hadoop.hbase.master.RegionState.State;
    -045import 
    org.apache.hadoop.hbase.procedure2.ProcedureEvent;
    -046import 
    org.apache.hadoop.hbase.util.Bytes;
    -047import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -048import 
    org.apache.yetus.audience.InterfaceAudience;
    -049import org.slf4j.Logger;
    -050import org.slf4j.LoggerFactory;
    -051import 
    org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
    -052
    -053/**
    -054 * RegionStates contains a set of Maps 
    that describes the in-memory state of the AM, with
    -055 * the regions available in the system, 
    the region in transition, the offline regions and
    -056 * the servers holding regions.
    -057 */
    -058@InterfaceAudience.Private
    -059public class RegionStates {
    -060  private static final Logger LOG = 
    LoggerFactory.getLogger(RegionStates.class);
    -061
    -062  protected static final State[] 
    STATES_EXPECTED_ON_OPEN = new State[] {
    -063State.OPEN, // State may already be 
    OPEN if we died after receiving the OPEN from regionserver
    -064// but before complete 
    finish of AssignProcedure. HBASE-20100.
    -065State.OFFLINE, State.CLOSED,  // 
    disable/offline
    -066State.SPLITTING, State.SPLIT, // 
    ServerCrashProcedure
    -067State.OPENING, State.FAILED_OPEN, // 
    already in-progress (retrying)
    -068  };
    -069
    -070  protected static final State[] 
    STATES_EXPECTED_ON_CLOSE = new State[] {
    -071State.SPLITTING, State.SPLIT, 
    State.MERGING, // ServerCrashProcedure
    -072State.OPEN,   // 
    enabled/open
    -073State.CLOSING // 
    already in-progress (retrying)
    -074  };
    -075
    -076  private static class 
    AssignmentProcedureEvent extends ProcedureEventRegionInfo {
    -077public AssignmentProcedureEvent(final 
    RegionInfo regionInfo) {
    -078  super(regionInfo);
    -079}
    -080  }
    -081
    -082  private static class ServerReportEvent 
    extends ProcedureEventServerName {
    -083public ServerReportEvent(final 
    ServerName serverName) {
    -084  super(serverName);
    -085}
    -086  }
    -087
    -088  /**
    -089   * Current Region State.
    -090   * In-memory only. Not persisted.
    -091   */
    -092  // Mutable/Immutable? Changes have to 
    be synchronized or not?
    -093  // Data members are volatile which 
    seems to say multi-threaded access is fine.
    -094  // In the below we do check and set but 
    the check state could change before
    -095  // we do the set because no 
    synchronizationwhich seems dodgy. Clear up
    -096  // understanding here... how many 
    threads accessing? Do locks make it so one
    -097  // thread at a time working on a single 
    Region's RegionStateNode? Lets presume
    -098  // so for now. Odd is that elsewhere in 
    this RegionStates, we synchronize on
    -099  // the RegionStateNode instance. 
    TODO.
    -100  public static class RegionStateNode 
    implements ComparableRegionStateNode {
    -101private final RegionInfo 
    regionInfo;
    -102private final ProcedureEvent? 
    event;
    -103
    -104private volatile 
    RegionTransitionProcedure procedure = null;
    -105private volatile ServerName 
    regionLocation = null;
    -106private volatile ServerName lastHost 
    = null;
    -107/**
-108 * A Region-in-Transition (RIT) moves through states.
-109 * See {@link State} for the complete list. A Region that
-110 * is opened moves from OFFLINE => OPENING => OPENED.
    -111 */
    -112private volatile State state = 
    State.OFFLINE;
    -113
    -114/**
-115 * Updated whenever there is a call to {@link #setRegionLocation(ServerName)}
-116 * or to {@link #setState(State, State...)}.
    -117 */
    -118private volatile long lastUpdate = 
    0;
    -119
    -120private volatile long openSeqNum = 
    HConstants.NO_SEQNUM;
    -121
    -122public RegionStateNode(final 
    RegionInfo regionInfo) {
    -123  this.regionInfo = 
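The fragment above breaks off mid-constructor, but the concurrency worry in its comments is concrete enough to illustrate. Below is a minimal, self-contained sketch of the check-and-set guard, assuming a simplified State enum and a stand-in node class (not HBase's actual RegionStateNode); synchronizing setState is one way to get the atomicity the comment asks about, and the expected-states array mirrors STATES_EXPECTED_ON_OPEN:

import java.util.Arrays;

// Hypothetical stand-ins for RegionState.State and RegionStateNode, for illustration only.
public class StateNodeSketch {
  enum State { OFFLINE, OPENING, OPEN, CLOSING, CLOSED, SPLITTING, SPLIT, MERGING, FAILED_OPEN }

  // States we tolerate seeing when an OPEN report arrives (mirrors STATES_EXPECTED_ON_OPEN).
  static final State[] EXPECTED_ON_OPEN = {
    State.OPEN, State.OFFLINE, State.CLOSED, State.SPLITTING, State.SPLIT,
    State.OPENING, State.FAILED_OPEN
  };

  private volatile State state = State.OFFLINE;
  private volatile long lastUpdate = 0;

  // Synchronizing makes the check and the set one atomic step, which is the
  // "dodgy" gap the original comment worries about.
  synchronized boolean setState(State newState, State... expected) {
    if (expected != null && expected.length > 0
        && !Arrays.asList(expected).contains(this.state)) {
      return false; // current state is not one we expected; refuse the transition
    }
    this.state = newState;
    this.lastUpdate = System.currentTimeMillis();
    return true;
  }

  public static void main(String[] args) {
    StateNodeSketch node = new StateNodeSketch();
    System.out.println(node.setState(State.OPEN, EXPECTED_ON_OPEN)); // true: OFFLINE is expected
    System.out.println(node.setState(State.SPLIT, State.CLOSED));    // false: node is OPEN
  }
}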

    [05/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    index 4b5d00c..96ecbf8 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    @@ -6,7 +6,7 @@
     
     
     
    -001/*
    +001/**
     002 * Licensed to the Apache Software 
    Foundation (ASF) under one
     003 * or more contributor license 
    agreements.  See the NOTICE file
     004 * distributed with this work for 
    additional information
    @@ -23,1981 +23,1894 @@
     015 * See the License for the specific 
    language governing permissions and
     016 * limitations under the License.
     017 */
    -018
    -019package 
    org.apache.hadoop.hbase.master.assignment;
    -020
    -021import java.io.IOException;
    -022import java.util.ArrayList;
    -023import java.util.Arrays;
    -024import java.util.Collection;
    -025import java.util.Collections;
    -026import java.util.HashMap;
    -027import java.util.HashSet;
    -028import java.util.List;
    -029import java.util.Map;
    -030import java.util.Set;
    -031import 
    java.util.concurrent.CopyOnWriteArrayList;
    -032import java.util.concurrent.Future;
    -033import java.util.concurrent.TimeUnit;
    -034import 
    java.util.concurrent.atomic.AtomicBoolean;
    -035import 
    java.util.concurrent.locks.Condition;
    -036import 
    java.util.concurrent.locks.ReentrantLock;
    -037import java.util.stream.Collectors;
    -038import 
    org.apache.hadoop.conf.Configuration;
    -039import 
    org.apache.hadoop.hbase.HBaseIOException;
    -040import 
    org.apache.hadoop.hbase.HConstants;
    -041import 
    org.apache.hadoop.hbase.PleaseHoldException;
    -042import 
    org.apache.hadoop.hbase.RegionException;
    -043import 
    org.apache.hadoop.hbase.RegionStateListener;
    -044import 
    org.apache.hadoop.hbase.ServerName;
    -045import 
    org.apache.hadoop.hbase.TableName;
    -046import 
    org.apache.hadoop.hbase.YouAreDeadException;
    -047import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -048import 
    org.apache.hadoop.hbase.client.RegionInfoBuilder;
    -049import 
    org.apache.hadoop.hbase.client.RegionReplicaUtil;
    -050import 
    org.apache.hadoop.hbase.client.Result;
    -051import 
    org.apache.hadoop.hbase.client.TableState;
    -052import 
    org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
    -053import 
    org.apache.hadoop.hbase.favored.FavoredNodesManager;
    -054import 
    org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
    -055import 
    org.apache.hadoop.hbase.master.AssignmentListener;
    -056import 
    org.apache.hadoop.hbase.master.LoadBalancer;
    -057import 
    org.apache.hadoop.hbase.master.MasterServices;
    -058import 
    org.apache.hadoop.hbase.master.MetricsAssignmentManager;
    -059import 
    org.apache.hadoop.hbase.master.NoSuchProcedureException;
    -060import 
    org.apache.hadoop.hbase.master.RegionPlan;
    -061import 
    org.apache.hadoop.hbase.master.RegionState;
    -062import 
    org.apache.hadoop.hbase.master.RegionState.State;
    -063import 
    org.apache.hadoop.hbase.master.ServerListener;
    -064import 
    org.apache.hadoop.hbase.master.TableStateManager;
    -065import 
    org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
    -066import 
    org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState;
    -067import 
    org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode;
    -068import 
    org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
    -069import 
    org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
    -070import 
    org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
    -071import 
    org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
    -072import 
    org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
    -073import 
    org.apache.hadoop.hbase.master.procedure.ServerCrashException;
    -074import 
    org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
    -075import 
    org.apache.hadoop.hbase.procedure2.Procedure;
    -076import 
    org.apache.hadoop.hbase.procedure2.ProcedureEvent;
    -077import 
    org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
    -078import 
    org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
    -079import 
    org.apache.hadoop.hbase.procedure2.util.StringUtils;
    -080import 
    org.apache.hadoop.hbase.regionserver.SequenceId;
    -081import 
    org.apache.hadoop.hbase.util.Bytes;
    -082import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -083import 
    org.apache.hadoop.hbase.util.HasThread;
    -084import 
    org.apache.hadoop.hbase.util.Pair;
    -085import 
    org.apache.hadoop.hbase.util.Threads;
    -086import 
    org.apache.hadoop.hbase.util.VersionInfo;
    -087import 
    org.apache.yetus.audience.InterfaceAudience;
    -088import org.slf4j.Logger;
    -089import org.slf4j.LoggerFactory;
    -090
    

    [05/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Entry.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Entry.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Entry.html
    index e31f5c6..f4d1eb0 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Entry.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Entry.html
    @@ -31,277 +31,266 @@
     023import java.util.Set;
     024import 
    org.apache.hadoop.hbase.HConstants;
     025import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -026import 
    org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
    -027import 
    org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
    -028import 
    org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
    -029import 
    org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
    -030import 
    org.apache.hadoop.hbase.replication.regionserver.WALFileLengthProvider;
    -031import 
    org.apache.yetus.audience.InterfaceAudience;
    -032import 
    org.apache.yetus.audience.InterfaceStability;
    -033
    -034import 
    org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
    -035
    -036/**
-037 * A Write Ahead Log (WAL) provides a service for reading and writing WAL edits. This interface
-038 * provides APIs for WAL users (such as the RegionServer) to use the WAL (do append, sync, etc.).
    -039 *
    -040 * Note that some internals, such as log 
    rolling and performance evaluation tools, will use
    -041 * WAL.equals to determine if they have 
    already seen a given WAL.
    -042 */
    -043@InterfaceAudience.Private
    -044@InterfaceStability.Evolving
    -045public interface WAL extends Closeable, 
    WALFileLengthProvider {
    -046
    -047  /**
    -048   * Registers WALActionsListener
    -049   */
    -050  void registerWALActionsListener(final 
    WALActionsListener listener);
    -051
    -052  /**
    -053   * Unregisters WALActionsListener
    -054   */
    -055  boolean 
    unregisterWALActionsListener(final WALActionsListener listener);
    -056
    -057  /**
    -058   * Roll the log writer. That is, start 
    writing log messages to a new file.
    -059   *
-060   * <p>
    -061   * The implementation is synchronized 
    in order to make sure there's one rollWriter
    -062   * running at any given time.
    -063   *
    -064   * @return If lots of logs, flush the 
    returned regions so next time through we
    -065   * can clean logs. Returns null 
    if nothing to flush. Names are actual
    -066   * region names as returned by 
    {@link RegionInfo#getEncodedName()}
    -067   */
    -068  byte[][] rollWriter() throws 
    FailedLogCloseException, IOException;
    -069
    -070  /**
    -071   * Roll the log writer. That is, start 
    writing log messages to a new file.
    -072   *
-073   * <p>
    -074   * The implementation is synchronized 
    in order to make sure there's one rollWriter
    -075   * running at any given time.
    -076   *
    -077   * @param force
    -078   *  If true, force creation of 
    a new writer even if no entries have
    -079   *  been written to the current 
    writer
    -080   * @return If lots of logs, flush the 
    returned regions so next time through we
    -081   * can clean logs. Returns null 
    if nothing to flush. Names are actual
    -082   * region names as returned by 
    {@link RegionInfo#getEncodedName()}
    -083   */
    -084  byte[][] rollWriter(boolean force) 
    throws FailedLogCloseException, IOException;
    -085
    -086  /**
    -087   * Stop accepting new writes. If we 
    have unsynced writes still in buffer, sync them.
    -088   * Extant edits are left in place in 
    backing storage to be replayed later.
    -089   */
    -090  void shutdown() throws IOException;
    -091
    -092  /**
    -093   * Caller no longer needs any edits 
    from this WAL. Implementers are free to reclaim
    -094   * underlying resources after this 
    call; i.e. filesystem based WALs can archive or
    -095   * delete files.
    -096   */
    -097  @Override
    -098  void close() throws IOException;
    -099
    -100  /**
    -101   * Append a set of edits to the WAL. 
    The WAL is not flushed/sync'd after this transaction
    -102   * completes BUT on return this edit 
    must have its region edit/sequence id assigned
-103   * else it messes up our unification of mvcc and sequenceid.  On return <code>key</code> will
    -104   * have the region edit/sequence id 
    filled in.
    -105   * @param info the regioninfo 
    associated with append
    -106   * @param key Modified by this call; we 
    add to it this edits region edit/sequence id.
    -107   * @param edits Edits to append. MAY 
    CONTAIN NO EDITS for case where we want to get an edit
    -108   * sequence id that is after all 
    currently appended edits.
    -109   * @param inMemstore Always true except 
    for case where we are writing a compaction completion
    -110   * record into the WAL; in this case 
    the entry is just so we can finish an unfinished compaction
    -111   * -- it is not an edit for memstore.
-112   * @return Returns a 'transaction id' and <code>key</code> will have the region edit/sequence id
    -113   * in it.
    -114   */
    -115  long 
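The WAL javadoc above describes a fairly small contract: roll, append (which must assign the region edit/sequence id before returning), sync, shutdown, and close. A minimal sketch of that contract, with simplified types (long transaction ids, byte[] edits) and hypothetical names rather than HBase's real WAL/WALKey/WALEdit signatures; it is compile-only, meant to show the shape of the API:

import java.io.Closeable;
import java.io.IOException;

// Hedged stand-in for the WAL interface described above; not HBase's actual API.
interface SimpleWAL extends Closeable {
  // Start writing to a new file; returns encoded names of regions to flush, or null.
  byte[][] rollWriter(boolean force) throws IOException;

  // Append an edit. The returned transaction id doubles as the region edit/sequence id,
  // assigned before return even though the edit may not yet be synced to storage.
  long append(byte[] encodedRegionName, byte[] edit, boolean inMemstore) throws IOException;

  // Push every appended-but-unsynced edit up to txid out to the backing storage.
  void sync(long txid) throws IOException;

  // Stop accepting writes but leave extant edits in place for later replay.
  void shutdown() throws IOException;
}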

    [05/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteOperation.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteOperation.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteOperation.html
    index 594ef24..17d5c40 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteOperation.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteOperation.html
    @@ -170,241 +170,242 @@
     162  }
     163
     164  /**
    -165   * Add a remote rpc. Be sure to check 
    result for successful add.
    +165   * Add a remote rpc.
     166   * @param key the node identifier
    -167   * @return True if we successfully 
    added the operation.
    -168   */
    -169  public boolean addOperationToNode(final 
    TRemote key, RemoteProcedure rp) {
    +167   */
    +168  public void addOperationToNode(final 
    TRemote key, RemoteProcedure rp)
    +169  throws 
    NullTargetServerDispatchException, NoServerDispatchException, 
    NoNodeDispatchException {
     170if (key == null) {
    -171  // Key is remote server name. Be 
    careful. It could have been nulled by a concurrent
    -172  // ServerCrashProcedure shutting 
    down outstanding RPC requests. See remoteCallFailed.
    -173  return false;
    -174}
    -175assert key != null : "found null key 
    for node";
    -176BufferNode node = nodeMap.get(key);
    -177if (node == null) {
    -178  return false;
    -179}
    -180node.add(rp);
    -181// Check our node still in the map; 
    could have been removed by #removeNode.
    -182return nodeMap.containsValue(node);
    -183  }
    -184
    -185  /**
    -186   * Remove a remote node
    -187   * @param key the node identifier
    -188   */
    -189  public boolean removeNode(final TRemote 
    key) {
    -190final BufferNode node = 
    nodeMap.remove(key);
    -191if (node == null) return false;
    -192node.abortOperationsInQueue();
    -193return true;
    -194  }
    -195
-196  // ============================================================
-197  //  Task Helpers
-198  // ============================================================
-199  protected Future<Void> submitTask(Callable<Void> task) {
-200return threadPool.submit(task);
-201  }
-202
-203  protected Future<Void> submitTask(Callable<Void> task, long delay, TimeUnit unit) {
-204final FutureTask<Void> futureTask = new FutureTask<>(task);
    -205timeoutExecutor.add(new 
    DelayedTask(futureTask, delay, unit));
    -206return futureTask;
    -207  }
    -208
-209  protected abstract void remoteDispatch(TRemote key, Set<RemoteProcedure> operations);
-210  protected abstract void abortPendingOperations(TRemote key, Set<RemoteProcedure> operations);
    -211
    -212  /**
    -213   * Data structure with reference to 
    remote operation.
    -214   */
    -215  public static abstract class 
    RemoteOperation {
    -216private final RemoteProcedure 
    remoteProcedure;
    -217
    -218protected RemoteOperation(final 
    RemoteProcedure remoteProcedure) {
    -219  this.remoteProcedure = 
    remoteProcedure;
    -220}
    -221
    -222public RemoteProcedure 
    getRemoteProcedure() {
    -223  return remoteProcedure;
    -224}
    -225  }
    -226
    -227  /**
    -228   * Remote procedure reference.
    -229   */
-230  public interface RemoteProcedure<TEnv, TRemote> {
    -231/**
    -232 * For building the remote 
    operation.
    -233 */
    -234RemoteOperation remoteCallBuild(TEnv 
    env, TRemote remote);
    -235
    -236/**
    -237 * Called when the executeProcedure 
    call is failed.
    -238 */
    -239void remoteCallFailed(TEnv env, 
    TRemote remote, IOException exception);
    -240
    -241/**
-242 * Called when the RS reports that the remote procedure has succeeded, via the
-243 * {@code reportProcedureDone} method.
    -244 */
    -245void remoteOperationCompleted(TEnv 
    env);
    -246
    -247/**
-248 * Called when the RS reports that the remote procedure has failed, via the
-249 * {@code reportProcedureDone} method.
    -250 */
    -251void remoteOperationFailed(TEnv env, 
    RemoteProcedureException error);
    -252  }
    -253
    -254  /**
-255   * Account of what procedures are running on a remote node.
-256   * @param <TEnv>
-257   * @param <TRemote>
    -258   */
-259  public interface RemoteNode<TEnv, TRemote> {
-260TRemote getKey();
-261void add(RemoteProcedure<TEnv, TRemote> operation);
    -262void dispatch();
    -263  }
    -264
-265  protected ArrayListMultimap<Class<?>, RemoteOperation> buildAndGroupRequestByType(final TEnv env,
-266  final TRemote remote, final Set<RemoteProcedure> remoteProcedures) {
-267final ArrayListMultimap<Class<?>, RemoteOperation> requestByType = ArrayListMultimap.create();
    -268for (RemoteProcedure proc: 
    remoteProcedures) {
    -269  RemoteOperation operation = 
    proc.remoteCallBuild(env, remote);
    -270  
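The hunk above changes addOperationToNode from returning false on failure to throwing one of three typed exceptions (NullTargetServerDispatchException, NoServerDispatchException, NoNodeDispatchException), so callers can no longer silently ignore a dropped dispatch. A minimal sketch of the new contract, assuming simplified BufferNode and operation types; the real method also distinguishes the no-server case, which is omitted here:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hedged sketch of the fail-with-exception dispatch contract; names of the exceptions
// come from the diff above, everything else is a simplified stand-in.
public class DispatcherSketch<TRemote> {
  static class NullTargetServerDispatchException extends Exception {}
  static class NoNodeDispatchException extends Exception {}
  static class Op {}
  static class BufferNode {
    final List<Op> ops = new ArrayList<>();
    synchronized void add(Op op) { ops.add(op); }
  }

  private final ConcurrentMap<TRemote, BufferNode> nodeMap = new ConcurrentHashMap<>();

  // Each failure mode gets its own checked exception instead of a boolean return.
  public void addOperationToNode(TRemote key, Op op)
      throws NullTargetServerDispatchException, NoNodeDispatchException {
    if (key == null) {
      // Key could have been nulled by a concurrent ServerCrashProcedure.
      throw new NullTargetServerDispatchException();
    }
    BufferNode node = nodeMap.get(key);
    if (node == null) {
      throw new NoNodeDispatchException();
    }
    node.add(op);
  }
}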
    

    [05/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.html
    --
    diff --git a/testdevapidocs/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.html 
    b/testdevapidocs/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.html
    index 8a96d4e..a07240b 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/quotas/TestSpaceQuotas.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":9,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10};
    +var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":9,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10};
     var tabs = {65535:["t0","All Methods"],1:["t1","Static 
    Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -147,14 +147,18 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
     LOG
     
     
    +private 
    org.apache.hadoop.hbase.TableName
    +NON_EXISTENT_TABLE
    +
    +
     private static int
     NUM_RETRIES
     
    -
    +
     private static HBaseTestingUtility
     TEST_UTIL
     
    -
    +
     org.junit.rules.TestName
     testName
     
    @@ -195,34 +199,35 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
     getReportedSizesForTable(org.apache.hadoop.hbase.TableNametn)
     
     
    -private void
    -increaseQuotaLimit(org.apache.hadoop.hbase.TableNametn,
    -  
    org.apache.hadoop.hbase.quotas.SpaceViolationPolicypolicy)
    -
    -
     void
     removeAllQuotas()
     
    -
    +
     private void
     removeQuotaFromtable(org.apache.hadoop.hbase.TableNametn)
     
    -
    +
     private void
     setQuotaAndThenDropTable(org.apache.hadoop.hbase.quotas.SpaceViolationPolicypolicy)
     
    -
    +
     private void
     setQuotaAndThenIncreaseQuota(org.apache.hadoop.hbase.quotas.SpaceViolationPolicypolicy)
     
    -
    +
     private void
     setQuotaAndThenRemove(org.apache.hadoop.hbase.quotas.SpaceViolationPolicypolicy)
     
    -
    +
     void
     setQuotaAndThenRemoveInOneAmongTwoTables(org.apache.hadoop.hbase.quotas.SpaceViolationPolicypolicy)
     
    +
    +private void
    +setQuotaLimit(org.apache.hadoop.hbase.TableNametn,
    + org.apache.hadoop.hbase.quotas.SpaceViolationPolicypolicy,
    + longsizeInMBs)
    +
     
     static void
     setUp()
    @@ -285,77 +290,97 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
     
     
     void
    -testSetQuotaAndThenDropTableWithNoInserts()
    +testSetQuotaAndThenDropTableWithDisable()
     
     
     void
    -testSetQuotaAndThenDropTableWithNoWrite()
    +testSetQuotaAndThenDropTableWithNoInserts()
     
     
     void
    -testSetQuotaAndThenIncreaseQuotaWithNoInserts()
    +testSetQuotaAndThenDropTableWithNoWrite()
     
     
     void
    -testSetQuotaAndThenIncreaseQuotaWithNoWrite()
    +testSetQuotaAndThenIncreaseQuotaWithNoInserts()
     
     
     void
    -testSetQuotaAndThenIncreaseQuotaWithNoWritesCompactions()
    +testSetQuotaAndThenIncreaseQuotaWithNoWrite()
     
     
     void
    -testSetQuotaAndThenRemoveInOneWithDisable()
    +testSetQuotaAndThenIncreaseQuotaWithNoWritesCompactions()
     
     
     void
    -testSetQuotaAndThenRemoveInOneWithNoInserts()
    +testSetQuotaAndThenRemoveInOneWithDisable()
     
     
     void
    -testSetQuotaAndThenRemoveInOneWithNoWrite()
    +testSetQuotaAndThenRemoveInOneWithNoInserts()
     
     
     void
    -testSetQuotaAndThenRemoveInOneWithNoWritesCompaction()
    +testSetQuotaAndThenRemoveInOneWithNoWrite()
     
     
     void
    -testSetQuotaAndThenRemoveWithDisable()
    +testSetQuotaAndThenRemoveInOneWithNoWritesCompaction()
     
     
     void
    -testSetQuotaAndThenRemoveWithNoInserts()
    +testSetQuotaAndThenRemoveWithDisable()
     
     
     void
    -testSetQuotaAndThenRemoveWithNoWrite()
    +testSetQuotaAndThenRemoveWithNoInserts()
     
     
     void
    -testSetQuotaAndThenRemoveWithNoWritesCompactions()
    +testSetQuotaAndThenRemoveWithNoWrite()
     
     
     void
    -testTableQuotaOverridesNamespaceQuota()
    +testSetQuotaAndThenRemoveWithNoWritesCompactions()
     
     
    +void
    +testSetQuotaOnNonExistingTableWithDisable()
    +
    +
    +void
    +testSetQuotaOnNonExistingTableWithNoInserts()
    +
    +
    +void
    +testSetQuotaOnNonExistingTableWithNoWrites()
    +
    +
    +void
    +testSetQuotaOnNonExistingTableWithNoWritesCompaction()
    +
    +
    +void
    +testTableQuotaOverridesNamespaceQuota()
    +
    +
     private void
     verifyNoViolation(org.apache.hadoop.hbase.quotas.SpaceViolationPolicypolicyToViolate,
      org.apache.hadoop.hbase.TableNametn,
      
    org.apache.hadoop.hbase.client.Mutationm)
     
    -
    +
     private void
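The method table above shows increaseQuotaLimit being generalized to setQuotaLimit(tn, policy, sizeInMBs). As a rough illustration of what such a helper plausibly does, here is a hedged sketch using HBase's public quota API (QuotaSettingsFactory.limitTableSpace plus Admin.setQuota); the table name and size below are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

public class SpaceQuotaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("someTable"); // hypothetical table name
      long sizeInMBs = 2L;                           // hypothetical limit
      // Cap the table at sizeInMBs megabytes; once in violation, new writes are rejected.
      QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
          tn, sizeInMBs * 1024 * 1024, SpaceViolationPolicy.NO_WRITES);
      admin.setQuota(settings);
    }
  }
}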
     

    [05/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/license.html
    --
    diff --git a/license.html b/license.html
    index b1d1d42..6c82745 100644
    --- a/license.html
    +++ b/license.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase  Project Licenses
     
    @@ -276,209 +276,7 @@
     Project Licenses
     
     Apache License, Version 
    2.0
    -
    - Apache License
    -   Version 2.0, January 2004
    -http://www.apache.org/licenses/
    -
    -   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
    -
    -   1. Definitions.
    -
-  "License" shall mean the terms and conditions for use, reproduction,
    -  and distribution as defined by Sections 1 through 9 of this document.
    -
-  "Licensor" shall mean the copyright owner or entity authorized by
    -  the copyright owner that is granting the License.
    -
-  "Legal Entity" shall mean the union of the acting entity and all
    -  other entities that control, are controlled by, or are under common
    -  control with that entity. For the purposes of this definition,
-  "control" means (i) the power, direct or indirect, to cause the
    -  direction or management of such entity, whether by contract or
    -  otherwise, or (ii) ownership of fifty percent (50%) or more of the
    -  outstanding shares, or (iii) beneficial ownership of such entity.
    -
-  "You" (or "Your") shall mean an individual or Legal Entity
    -  exercising permissions granted by this License.
    -
-  "Source" form shall mean the preferred form for making modifications,
    -  including but not limited to software source code, documentation
    -  source, and configuration files.
    -
-  "Object" form shall mean any form resulting from mechanical
    -  transformation or translation of a Source form, including but
    -  not limited to compiled object code, generated documentation,
    -  and conversions to other media types.
    -
-  "Work" shall mean the work of authorship, whether in Source or
    -  Object form, made available under the License, as indicated by a
    -  copyright notice that is included in or attached to the work
    -  (an example is provided in the Appendix below).
    -
-  "Derivative Works" shall mean any work, whether in Source or Object
    -  form, that is based on (or derived from) the Work and for which the
    -  editorial revisions, annotations, elaborations, or other modifications
    -  represent, as a whole, an original work of authorship. For the purposes
    -  of this License, Derivative Works shall not include works that remain
    -  separable from, or merely link (or bind by name) to the interfaces of,
    -  the Work and Derivative Works thereof.
    -
-  "Contribution" shall mean any work of authorship, including
    -  the original version of the Work and any modifications or additions
    -  to that Work or Derivative Works thereof, that is intentionally
    -  submitted to Licensor for inclusion in the Work by the copyright owner
    -  or by an individual or Legal Entity authorized to submit on behalf of
-  the copyright owner. For the purposes of this definition, "submitted"
    -  means any form of electronic, verbal, or written communication sent
    -  to the Licensor or its representatives, including but not limited to
    -  communication on electronic mailing lists, source code control systems,
    -  and issue tracking systems that are managed by, or on behalf of, the
    -  Licensor for the purpose of discussing and improving the Work, but
    -  excluding communication that is conspicuously marked or otherwise
-  designated in writing by the copyright owner as "Not a Contribution."
    -
-  "Contributor" shall mean Licensor and any individual or Legal Entity
    -  on behalf of whom a Contribution has been received by Licensor and
    -  subsequently incorporated within the Work.
    -
    -   2. Grant of Copyright License. Subject to the terms and conditions of
    -  this License, each Contributor hereby grants to You a perpetual,
    -  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -  copyright license to reproduce, prepare Derivative Works of,
    -  publicly display, publicly perform, sublicense, and distribute the
    -  Work and such Derivative Works in Source or Object form.
    -
    -   3. Grant of Patent License. Subject to the terms and conditions of
    -  this License, each Contributor hereby grants to You a perpetual,
    -  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
    -  (except as stated in this section) patent license to make, have made,
    -  use, offer to sell, sell, import, and otherwise transfer the Work,
    -  where such license applies only to those patent claims licensable
    -  by such Contributor that are necessarily infringed by their
    -  Contribution(s) alone or by combination of their Contribution(s)
    -  with the Work to 

    [05/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
    index 3f8844b..cdb9398 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
    @@ -140,2712 +140,2713 @@
     132public class PerformanceEvaluation 
    extends Configured implements Tool {
     133  static final String RANDOM_SEEK_SCAN = 
    "randomSeekScan";
     134  static final String RANDOM_READ = 
    "randomRead";
    -135  private static final Logger LOG = 
    LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
    -136  private static final ObjectMapper 
    MAPPER = new ObjectMapper();
    -137  static {
    -138
    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
    -139  }
    -140
    -141  public static final String TABLE_NAME = 
    "TestTable";
    -142  public static final String 
    FAMILY_NAME_BASE = "info";
    -143  public static final byte[] FAMILY_ZERO 
    = Bytes.toBytes("info0");
    -144  public static final byte[] COLUMN_ZERO 
    = Bytes.toBytes("" + 0);
    -145  public static final int 
    DEFAULT_VALUE_LENGTH = 1000;
    -146  public static final int ROW_LENGTH = 
    26;
    -147
    -148  private static final int ONE_GB = 1024 
    * 1024 * 1000;
    -149  private static final int 
    DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
    -150  // TODO : should we make this 
    configurable
    -151  private static final int TAG_LENGTH = 
    256;
    -152  private static final DecimalFormat FMT 
    = new DecimalFormat("0.##");
    -153  private static final MathContext CXT = 
    MathContext.DECIMAL64;
    -154  private static final BigDecimal 
    MS_PER_SEC = BigDecimal.valueOf(1000);
    -155  private static final BigDecimal 
    BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
    -156  private static final TestOptions 
    DEFAULT_OPTS = new TestOptions();
    -157
-158  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
    -159  private static final Path PERF_EVAL_DIR 
    = new Path("performance_evaluation");
    -160
    -161  static {
    -162
    addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
    -163"Run async random read test");
    -164
    addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
    -165"Run async random write test");
    -166
    addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
    -167"Run async sequential read 
    test");
    -168
    addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
    -169"Run async sequential write 
    test");
    -170
    addCommandDescriptor(AsyncScanTest.class, "asyncScan",
    -171"Run async scan test (read every 
    row)");
    -172
    addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
    -173  "Run random read test");
    -174
    addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
    -175  "Run random seek and scan 100 
    test");
    -176
    addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
    -177  "Run random seek scan with both 
    start and stop row (max 10 rows)");
    -178
    addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
    -179  "Run random seek scan with both 
    start and stop row (max 100 rows)");
    -180
    addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
    -181  "Run random seek scan with both 
    start and stop row (max 1000 rows)");
    -182
    addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
    -183  "Run random seek scan with both 
    start and stop row (max 1 rows)");
    -184
    addCommandDescriptor(RandomWriteTest.class, "randomWrite",
    -185  "Run random write test");
    -186
    addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
    -187  "Run sequential read test");
    -188
    addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
    -189  "Run sequential write test");
    -190addCommandDescriptor(ScanTest.class, 
    "scan",
    -191  "Run scan test (read every 
    row)");
    -192
    addCommandDescriptor(FilteredScanTest.class, "filterScan",
    -193  "Run scan test using a filter to 
    find a specific row based on it's value " +
    -194  "(make sure to use --rows=20)");
    -195
    addCommandDescriptor(IncrementTest.class, "increment",
    -196  "Increment on each row; clients 
    overlap on keyspace so some concurrent operations");
    -197
    addCommandDescriptor(AppendTest.class, "append",
    -198  "Append on each row; clients 
    overlap on keyspace so some concurrent operations");
    -199
    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
    -200  "CheckAndMutate on each row; 
    clients overlap on keyspace so some concurrent operations");
    -201
    addCommandDescriptor(CheckAndPutTest.class, 
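The fragment above registers every test through addCommandDescriptor into a static TreeMap keyed by command name, so commands list alphabetically in the usage output. A self-contained sketch of that registry pattern, with a simplified CmdDescriptor (the real one also carries more metadata):

import java.util.Map;
import java.util.TreeMap;

// Sketch of the command-descriptor registry above; CmdDescriptor is simplified here.
public class CommandRegistrySketch {
  static final class CmdDescriptor {
    final Class<?> cmdClass;
    final String name;
    final String description;
    CmdDescriptor(Class<?> cmdClass, String name, String description) {
      this.cmdClass = cmdClass;
      this.name = name;
      this.description = description;
    }
  }

  // TreeMap keeps commands sorted by name for help/usage listings.
  private static final Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();

  static void addCommandDescriptor(Class<?> cmdClass, String name, String description) {
    COMMANDS.put(name, new CmdDescriptor(cmdClass, name, description));
  }

  public static void main(String[] args) {
    addCommandDescriptor(Object.class, "randomRead", "Run random read test");
    addCommandDescriptor(Object.class, "scan", "Run scan test (read every row)");
    COMMANDS.forEach((name, cmd) -> System.out.println(name + " : " + cmd.description));
  }
}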

    [05/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html
    index 03a0b2a..cabb570 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.NextState.html
    @@ -561,209 +561,206 @@
     553
     554/**
     555 * Set all fields together.
    -556 * @param batch
    -557 * @param sizeScope
    -558 * @param dataSize
    -559 */
    -560void setFields(int batch, LimitScope 
    sizeScope, long dataSize, long heapSize,
    -561LimitScope timeScope, long time) 
    {
    -562  setBatch(batch);
    -563  setSizeScope(sizeScope);
    -564  setDataSize(dataSize);
    -565  setHeapSize(heapSize);
    -566  setTimeScope(timeScope);
    -567  setTime(time);
    -568}
    -569
    -570int getBatch() {
    -571  return this.batch;
    -572}
    -573
    -574void setBatch(int batch) {
    -575  this.batch = batch;
    -576}
    -577
    -578/**
    -579 * @param checkerScope
    -580 * @return true when the limit can be 
    enforced from the scope of the checker
    -581 */
    -582boolean 
    canEnforceBatchLimitFromScope(LimitScope checkerScope) {
    -583  return 
    LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(checkerScope);
    -584}
    -585
    -586long getDataSize() {
    -587  return this.dataSize;
    -588}
    -589
    -590long getHeapSize() {
    -591  return this.heapSize;
    -592}
    -593
    -594void setDataSize(long dataSize) {
    -595  this.dataSize = dataSize;
    -596}
    -597
    -598void setHeapSize(long heapSize) {
    -599  this.heapSize = heapSize;
    -600}
    -601
    -602/**
    -603 * @return {@link LimitScope} 
    indicating scope in which the size limit is enforced
    -604 */
    -605LimitScope getSizeScope() {
    -606  return this.sizeScope;
    -607}
    -608
    -609/**
    -610 * Change the scope in which the size 
    limit is enforced
    -611 */
    -612void setSizeScope(LimitScope scope) 
    {
    -613  this.sizeScope = scope;
    -614}
    -615
    -616/**
    -617 * @param checkerScope
    -618 * @return true when the limit can be 
    enforced from the scope of the checker
    -619 */
    -620boolean 
    canEnforceSizeLimitFromScope(LimitScope checkerScope) {
    -621  return 
    this.sizeScope.canEnforceLimitFromScope(checkerScope);
    -622}
    -623
    -624long getTime() {
    -625  return this.time;
    -626}
    -627
    -628void setTime(long time) {
    -629  this.time = time;
    -630}
    -631
    -632/**
    -633 * @return {@link LimitScope} 
    indicating scope in which the time limit is enforced
    -634 */
    -635LimitScope getTimeScope() {
    -636  return this.timeScope;
    -637}
    -638
    -639/**
    -640 * Change the scope in which the time 
    limit is enforced
    -641 */
    -642void setTimeScope(LimitScope scope) 
    {
    -643  this.timeScope = scope;
    -644}
    -645
    -646/**
    -647 * @param checkerScope
    -648 * @return true when the limit can be 
    enforced from the scope of the checker
    -649 */
    -650boolean 
    canEnforceTimeLimitFromScope(LimitScope checkerScope) {
    -651  return 
    this.timeScope.canEnforceLimitFromScope(checkerScope);
    -652}
    -653
    -654@Override
    -655public String toString() {
    -656  StringBuilder sb = new 
    StringBuilder();
    -657  sb.append("{");
    +556 */
    +557void setFields(int batch, LimitScope 
    sizeScope, long dataSize, long heapSize,
    +558LimitScope timeScope, long time) 
    {
    +559  setBatch(batch);
    +560  setSizeScope(sizeScope);
    +561  setDataSize(dataSize);
    +562  setHeapSize(heapSize);
    +563  setTimeScope(timeScope);
    +564  setTime(time);
    +565}
    +566
    +567int getBatch() {
    +568  return this.batch;
    +569}
    +570
    +571void setBatch(int batch) {
    +572  this.batch = batch;
    +573}
    +574
    +575/**
    +576 * @param checkerScope
    +577 * @return true when the limit can be 
    enforced from the scope of the checker
    +578 */
    +579boolean 
    canEnforceBatchLimitFromScope(LimitScope checkerScope) {
    +580  return 
    LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(checkerScope);
    +581}
    +582
    +583long getDataSize() {
    +584  return this.dataSize;
    +585}
    +586
    +587long getHeapSize() {
    +588  return this.heapSize;
    +589}
    +590
    +591void setDataSize(long dataSize) {
    +592  this.dataSize = dataSize;
    +593}
    +594
    +595void setHeapSize(long heapSize) {
    +596  this.heapSize = heapSize;
    +597}
    +598
    +599/**
    +600 * @return {@link LimitScope} 
    indicating scope in which the size limit is enforced
    +601 */
    +602LimitScope getSizeScope() {
    +603  return this.sizeScope;
    +604}
    +605
    +606/**
    +607 * Change the scope in which the size 
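The canEnforce*LimitFromScope methods above all delegate to LimitScope.canEnforceLimitFromScope: a limit declared at a coarser scope can be enforced by a checker running at an equal or finer depth. One plausible depth-based reading of that check, as a self-contained sketch (the depth values are illustrative, not copied from HBase):

public class LimitScopeSketch {
  enum LimitScope {
    BETWEEN_ROWS(0), BETWEEN_CELLS(1);

    private final int depth;
    LimitScope(int depth) { this.depth = depth; }
    int depth() { return depth; }

    // A limit at this scope is enforceable when the checker runs at least as deep.
    boolean canEnforceLimitFromScope(LimitScope checkerScope) {
      return checkerScope != null && checkerScope.depth() >= this.depth;
    }
  }

  public static void main(String[] args) {
    // A between-rows limit can be enforced by a between-cells checker...
    System.out.println(LimitScope.BETWEEN_ROWS.canEnforceLimitFromScope(LimitScope.BETWEEN_CELLS)); // true
    // ...but not the other way around.
    System.out.println(LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(LimitScope.BETWEEN_ROWS)); // false
  }
}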
    

    [05/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
    index 2510283..418c60c 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.ScanTest.html
    @@ -77,77 +77,77 @@
     069import 
    org.apache.hadoop.hbase.client.RowMutations;
     070import 
    org.apache.hadoop.hbase.client.Scan;
     071import 
    org.apache.hadoop.hbase.client.Table;
    -072import 
    org.apache.hadoop.hbase.filter.BinaryComparator;
    -073import 
    org.apache.hadoop.hbase.filter.Filter;
    -074import 
    org.apache.hadoop.hbase.filter.FilterAllFilter;
    -075import 
    org.apache.hadoop.hbase.filter.FilterList;
    -076import 
    org.apache.hadoop.hbase.filter.PageFilter;
    -077import 
    org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    -078import 
    org.apache.hadoop.hbase.filter.WhileMatchFilter;
    -079import 
    org.apache.hadoop.hbase.io.compress.Compression;
    -080import 
    org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    -081import 
    org.apache.hadoop.hbase.io.hfile.RandomDistribution;
    -082import 
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    -083import 
    org.apache.hadoop.hbase.regionserver.BloomType;
    -084import 
    org.apache.hadoop.hbase.regionserver.CompactingMemStore;
    -085import 
    org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
    -086import 
    org.apache.hadoop.hbase.trace.SpanReceiverHost;
    -087import 
    org.apache.hadoop.hbase.trace.TraceUtil;
    -088import 
    org.apache.hadoop.hbase.util.ByteArrayHashKey;
    -089import 
    org.apache.hadoop.hbase.util.Bytes;
    -090import 
    org.apache.hadoop.hbase.util.Hash;
    -091import 
    org.apache.hadoop.hbase.util.MurmurHash;
    -092import 
    org.apache.hadoop.hbase.util.Pair;
    -093import 
    org.apache.hadoop.hbase.util.YammerHistogramUtils;
    -094import 
    org.apache.hadoop.io.LongWritable;
    -095import org.apache.hadoop.io.Text;
    -096import org.apache.hadoop.mapreduce.Job;
    -097import 
    org.apache.hadoop.mapreduce.Mapper;
    -098import 
    org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
    -099import 
    org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
    -100import 
    org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
    -101import org.apache.hadoop.util.Tool;
    -102import 
    org.apache.hadoop.util.ToolRunner;
    -103import 
    org.apache.htrace.core.ProbabilitySampler;
    -104import org.apache.htrace.core.Sampler;
    -105import 
    org.apache.htrace.core.TraceScope;
    -106import 
    org.apache.yetus.audience.InterfaceAudience;
    -107import org.slf4j.Logger;
    -108import org.slf4j.LoggerFactory;
    -109import 
    org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
    -110import 
    org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
    -111
    -112/**
-113 * Script used for evaluating HBase performance and scalability.  Runs an HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the
-121 * <a href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
    -128 */
    -129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
    -130public class PerformanceEvaluation 
    extends Configured implements Tool {
    -131  static final String RANDOM_SEEK_SCAN = 
    "randomSeekScan";
    -132  static final String RANDOM_READ = 
    "randomRead";
    -133  private static final Logger LOG = 
    LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
    -134  private static final ObjectMapper 
    MAPPER = new ObjectMapper();
    -135  static {
    -136
    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
    -137  }
    -138
    -139  public static final String TABLE_NAME = 
    "TestTable";
    -140  public static final byte[] FAMILY_NAME 
    = Bytes.toBytes("info");
    -141  public static final byte [] COLUMN_ZERO 
    = Bytes.toBytes("" + 0);
    -142  public static final byte [] 
    QUALIFIER_NAME = COLUMN_ZERO;
    +072import 
    org.apache.hadoop.hbase.client.metrics.ScanMetrics;
    +073import 
    org.apache.hadoop.hbase.filter.BinaryComparator;
    +074import 
    org.apache.hadoop.hbase.filter.Filter;
    +075import 
    org.apache.hadoop.hbase.filter.FilterAllFilter;
    +076import 
    

    [05/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilter.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilter.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilter.html
    index 7a938de..43a87b6 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilter.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.EverythingPassesWALEntryFilter.html
    @@ -33,539 +33,515 @@
     025import java.util.ArrayList;
     026import java.util.List;
     027import java.util.UUID;
    -028import 
    java.util.concurrent.atomic.AtomicBoolean;
    -029import 
    java.util.concurrent.atomic.AtomicInteger;
    -030import 
    java.util.concurrent.atomic.AtomicReference;
    -031import org.apache.hadoop.hbase.Cell;
    -032import 
    org.apache.hadoop.hbase.HBaseClassTestRule;
    -033import org.apache.hadoop.hbase.Waiter;
    -034import 
    org.apache.hadoop.hbase.client.Connection;
    -035import 
    org.apache.hadoop.hbase.client.ConnectionFactory;
    -036import 
    org.apache.hadoop.hbase.client.Put;
    -037import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -038import 
    org.apache.hadoop.hbase.client.Table;
    -039import 
    org.apache.hadoop.hbase.regionserver.HRegion;
    -040import 
    org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
    -041import 
    org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource;
    -042import 
    org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceImpl;
    -043import 
    org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSource;
    -044import 
    org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl;
    -045import 
    org.apache.hadoop.hbase.replication.regionserver.MetricsSource;
    -046import 
    org.apache.hadoop.hbase.testclassification.MediumTests;
    -047import 
    org.apache.hadoop.hbase.testclassification.ReplicationTests;
    -048import 
    org.apache.hadoop.hbase.util.Bytes;
    -049import 
    org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
    -050import 
    org.apache.hadoop.hbase.util.Threads;
    -051import 
    org.apache.hadoop.hbase.wal.WAL.Entry;
    -052import 
    org.apache.hadoop.hbase.zookeeper.ZKConfig;
    -053import 
    org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
    -054import org.junit.AfterClass;
    -055import org.junit.Assert;
    -056import org.junit.Before;
    -057import org.junit.BeforeClass;
    -058import org.junit.ClassRule;
    -059import org.junit.Test;
    -060import 
    org.junit.experimental.categories.Category;
    -061import org.slf4j.Logger;
    -062import org.slf4j.LoggerFactory;
    -063
    -064/**
    -065 * Tests ReplicationSource and 
    ReplicationEndpoint interactions
    -066 */
    -067@Category({ ReplicationTests.class, 
    MediumTests.class })
    -068public class TestReplicationEndpoint 
    extends TestReplicationBase {
    -069
    -070  @ClassRule
    -071  public static final HBaseClassTestRule 
    CLASS_RULE =
    -072  
    HBaseClassTestRule.forClass(TestReplicationEndpoint.class);
    -073
    -074  private static final Logger LOG = 
    LoggerFactory.getLogger(TestReplicationEndpoint.class);
    -075
    -076  static int numRegionServers;
    -077
    -078  @BeforeClass
    -079  public static void setUpBeforeClass() 
    throws Exception {
    -080
    TestReplicationBase.setUpBeforeClass();
    -081numRegionServers = 
    utility1.getHBaseCluster().getRegionServerThreads().size();
    -082  }
    -083
    -084  @AfterClass
    -085  public static void tearDownAfterClass() 
    throws Exception {
    -086
    TestReplicationBase.tearDownAfterClass();
    -087// check stop is called
    -088
Assert.assertTrue(ReplicationEndpointForTest.stoppedCount.get() > 0);
    -089  }
    -090
    -091  @Before
    -092  public void setup() throws Exception 
    {
    -093
    ReplicationEndpointForTest.contructedCount.set(0);
    -094
    ReplicationEndpointForTest.startedCount.set(0);
    -095
    ReplicationEndpointForTest.replicateCount.set(0);
    -096
    ReplicationEndpointReturningFalse.replicated.set(false);
    -097
    ReplicationEndpointForTest.lastEntries = null;
-098final List<RegionServerThread> rsThreads =
    -099
    utility1.getMiniHBaseCluster().getRegionServerThreads();
    -100for (RegionServerThread rs : 
    rsThreads) {
    -101  
    utility1.getAdmin().rollWALWriter(rs.getRegionServer().getServerName());
    -102}
-103// Wait for all log rolls to finish
-104utility1.waitFor(3000, new Waiter.ExplainingPredicate<Exception>() {
    -105  @Override
    -106  public boolean evaluate() throws 
    Exception {
    -107for (RegionServerThread rs : 
    rsThreads) {
    -108  if 
    (!rs.getRegionServer().walRollRequestFinished()) {
    -109return false;
    -110  }
    -111}
    -112return true;
    -113  }
    -114
    -115  @Override
    -116  public String 
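The setup above polls with utility1.waitFor and a Waiter.ExplainingPredicate until every regionserver finishes its WAL roll. A self-contained sketch of that wait idiom, with a hypothetical stand-in for org.apache.hadoop.hbase.Waiter rather than the real class (poll interval and timeout handling are assumptions):

public class WaiterSketch {
  interface ExplainingPredicate<E extends Exception> {
    boolean evaluate() throws E;
    String explainFailure() throws E;
  }

  // Poll the predicate until it holds or the timeout expires, keeping an
  // explanation around for the failure case.
  static <E extends Exception> void waitFor(long timeoutMs, ExplainingPredicate<E> p)
      throws E, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (p.evaluate()) {
        return;
      }
      Thread.sleep(100); // poll interval
    }
    throw new IllegalStateException("Timed out: " + p.explainFailure());
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    waitFor(3000, new ExplainingPredicate<RuntimeException>() {
      @Override public boolean evaluate() { return System.currentTimeMillis() - start > 500; }
      @Override public String explainFailure() { return "condition never became true"; }
    });
    System.out.println("condition met");
  }
}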

    [05/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
    index 8302e28..c370eb9 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.html
    @@ -2113,3031 +2113,3033 @@
     2105
    errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
     2106tableName + " unable to 
    delete dangling table state " + tableState);
     2107  }
    -2108} else {
    -2109  
    errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
    -2110  tableName + " has dangling 
    table state " + tableState);
    -2111}
    -2112  }
    -2113}
    -2114// check that all tables have 
    states
    -2115for (TableName tableName : 
    tablesInfo.keySet()) {
-2116  if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
    -2117if (fixMeta) {
    -2118  
    MetaTableAccessor.updateTableState(connection, tableName, 
    TableState.State.ENABLED);
    -2119  TableState newState = 
    MetaTableAccessor.getTableState(connection, tableName);
    -2120  if (newState == null) {
    -2121
    errors.reportError(ERROR_CODE.NO_TABLE_STATE,
    -2122"Unable to change state 
    for table " + tableName + " in meta ");
    -2123  }
    -2124} else {
    -2125  
    errors.reportError(ERROR_CODE.NO_TABLE_STATE,
    -2126  tableName + " has no state 
    in meta ");
    -2127}
    -2128  }
    -2129}
    -2130  }
    -2131
    -2132  private void preCheckPermission() 
    throws IOException, AccessDeniedException {
    -2133if 
    (shouldIgnorePreCheckPermission()) {
    -2134  return;
    -2135}
    -2136
    -2137Path hbaseDir = 
    FSUtils.getRootDir(getConf());
    -2138FileSystem fs = 
    hbaseDir.getFileSystem(getConf());
    -2139UserProvider userProvider = 
    UserProvider.instantiate(getConf());
    -2140UserGroupInformation ugi = 
    userProvider.getCurrent().getUGI();
    -2141FileStatus[] files = 
    fs.listStatus(hbaseDir);
    -2142for (FileStatus file : files) {
    -2143  try {
    -2144FSUtils.checkAccess(ugi, file, 
    FsAction.WRITE);
    -2145  } catch (AccessDeniedException 
    ace) {
    -2146LOG.warn("Got 
    AccessDeniedException when preCheckPermission ", ace);
    -2147
    errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + 
    ugi.getUserName()
    -2148  + " does not have write perms 
    to " + file.getPath()
    -2149  + ". Please rerun hbck as hdfs 
    user " + file.getOwner());
    -2150throw ace;
    -2151  }
    -2152}
    -2153  }
    -2154
    -2155  /**
    -2156   * Deletes region from meta table
    -2157   */
    -2158  private void deleteMetaRegion(HbckInfo 
    hi) throws IOException {
    -2159
    deleteMetaRegion(hi.metaEntry.getRegionName());
    -2160  }
    -2161
    -2162  /**
    -2163   * Deletes region from meta table
    -2164   */
    -2165  private void deleteMetaRegion(byte[] 
    metaKey) throws IOException {
    -2166Delete d = new Delete(metaKey);
    -2167meta.delete(d);
    -2168LOG.info("Deleted " + 
    Bytes.toString(metaKey) + " from META" );
    -2169  }
    -2170
    -2171  /**
    -2172   * Reset the split parent region info 
    in meta table
    -2173   */
    -2174  private void resetSplitParent(HbckInfo 
    hi) throws IOException {
    -2175RowMutations mutations = new 
    RowMutations(hi.metaEntry.getRegionName());
    -2176Delete d = new 
    Delete(hi.metaEntry.getRegionName());
    -2177
    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
    -2178
    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
    -2179mutations.add(d);
    -2180
    -2181RegionInfo hri = 
    RegionInfoBuilder.newBuilder(hi.metaEntry)
    -2182.setOffline(false)
    -2183.setSplit(false)
    -2184.build();
    -2185Put p = 
    MetaTableAccessor.makePutFromRegionInfo(hri, 
    EnvironmentEdgeManager.currentTime());
    -2186mutations.add(p);
    -2187
    -2188meta.mutateRow(mutations);
    -2189LOG.info("Reset split parent " + 
    hi.metaEntry.getRegionNameAsString() + " in META" );
    -2190  }
    -2191
    -2192  /**
-2193   * This is a backwards-compatibility wrapper for permanently offlining a region
    -2194   * that should not be alive.  If the 
    region server does not support the
    -2195   * "offline" method, it will use the 
    closest unassign method instead.  This
    -2196   * will basically work until one 
    attempts to disable or delete the affected
    -2197   * table.  The problem has to do with 
    in-memory only master state, so
    -2198   * restarting the HMaster or failing 
    over to another should fix this.
    -2199   */
    -2200  private void offline(byte[] 
    regionName) throws IOException {
    -2201String regionString = 
    Bytes.toStringBinary(regionName);
    -2202if (!rsSupportsOffline) {
    -2203  LOG.warn("Using unassign region " 
    + 
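resetSplitParent above batches a Delete of the SPLITA/SPLITB qualifiers and a Put of the rebuilt RegionInfo into one RowMutations, so both apply atomically via Table.mutateRow. A hedged sketch of the same idiom against HBase's public client API; the table, row, family, and qualifier names below are hypothetical, not the real hbase:meta layout:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowMutationsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("someTable"))) { // hypothetical table
      byte[] row = Bytes.toBytes("someRow");                            // hypothetical row
      byte[] family = Bytes.toBytes("info");

      RowMutations mutations = new RowMutations(row);

      Delete d = new Delete(row);
      d.addColumn(family, Bytes.toBytes("splitA")); // drop the stale qualifiers...
      d.addColumn(family, Bytes.toBytes("splitB"));
      mutations.add(d);

      Put p = new Put(row);
      p.addColumn(family, Bytes.toBytes("state"), Bytes.toBytes("OPEN")); // ...and rewrite state
      mutations.add(p);

      table.mutateRow(mutations); // both mutations apply atomically to the row
    }
  }
}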

    [05/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
    index 64b9ab5..30fe780 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
    @@ -30,308 +30,309 @@
 022import java.io.IOException;
 023import java.util.List;
 024
-025import org.apache.commons.cli.CommandLine;
-026import org.apache.commons.cli.GnuParser;
-027import org.apache.commons.cli.Options;
-028import org.apache.commons.cli.ParseException;
-029import org.apache.hadoop.conf.Configuration;
-030import org.apache.hadoop.hbase.HConstants;
-031import org.apache.hadoop.hbase.LocalHBaseCluster;
-032import org.apache.hadoop.hbase.MasterNotRunningException;
-033import org.apache.hadoop.hbase.ZNodeClearer;
-034import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-035import org.apache.hadoop.hbase.trace.TraceUtil;
-036import org.apache.yetus.audience.InterfaceAudience;
-037import org.apache.hadoop.hbase.client.Admin;
-038import org.apache.hadoop.hbase.client.Connection;
-039import org.apache.hadoop.hbase.client.ConnectionFactory;
-040import org.apache.hadoop.hbase.regionserver.HRegionServer;
-041import org.apache.hadoop.hbase.util.JVMClusterUtil;
-042import org.apache.hadoop.hbase.util.ServerCommandLine;
-043import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-044import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-045import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-046import org.apache.zookeeper.KeeperException;
-047import org.slf4j.Logger;
-048import org.slf4j.LoggerFactory;
-049
-050@InterfaceAudience.Private
-051public class HMasterCommandLine extends ServerCommandLine {
-052  private static final Logger LOG = LoggerFactory.getLogger(HMasterCommandLine.class);
-053
-054  private static final String USAGE =
-055    "Usage: Master [opts] start|stop|clear\n" +
-056    " start  Start Master. If local mode, start Master and RegionServer in same JVM\n" +
-057    " stop   Start cluster shutdown; Master signals RegionServer shutdown\n" +
-058    " clear  Delete the master znode in ZooKeeper after a master crashes\n " +
-059    " where [opts] are:\n" +
-060    "   --minRegionServers=<servers>   Minimum RegionServers needed to host user tables.\n" +
-061    "   --localRegionServers=<servers> " +
-062      "RegionServers to start in master process when in standalone mode.\n" +
-063    "   --masters=<servers>            Masters to start in this process.\n" +
-064    "   --backup                       Master should start in backup mode";
-065
-066  private final Class<? extends HMaster> masterClass;
-067
-068  public HMasterCommandLine(Class<? extends HMaster> masterClass) {
-069    this.masterClass = masterClass;
-070  }
-071
-072  @Override
-073  protected String getUsage() {
-074    return USAGE;
-075  }
-076
-077  @Override
-078  public int run(String args[]) throws Exception {
-079    Options opt = new Options();
-080    opt.addOption("localRegionServers", true,
-081      "RegionServers to start in master process when running standalone");
-082    opt.addOption("masters", true, "Masters to start in this process");
-083    opt.addOption("minRegionServers", true, "Minimum RegionServers needed to host user tables");
-084    opt.addOption("backup", false, "Do not try to become HMaster until the primary fails");
-085
-086    CommandLine cmd;
-087    try {
-088      cmd = new GnuParser().parse(opt, args);
-089    } catch (ParseException e) {
-090      LOG.error("Could not parse: ", e);
-091      usage(null);
-092      return 1;
-093    }
-094
+025import org.apache.hadoop.conf.Configuration;
+026import org.apache.hadoop.hbase.HConstants;
+027import org.apache.hadoop.hbase.LocalHBaseCluster;
+028import org.apache.hadoop.hbase.MasterNotRunningException;
+029import org.apache.hadoop.hbase.ZNodeClearer;
+030import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+031import org.apache.hadoop.hbase.trace.TraceUtil;
+032import org.apache.yetus.audience.InterfaceAudience;
+033import org.apache.hadoop.hbase.client.Admin;
+034import org.apache.hadoop.hbase.client.Connection;
+035import org.apache.hadoop.hbase.client.ConnectionFactory;
+036import org.apache.hadoop.hbase.regionserver.HRegionServer;
+037import org.apache.hadoop.hbase.util.JVMClusterUtil;
+038import org.apache.hadoop.hbase.util.ServerCommandLine;
+039import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+040import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+041import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
    +042import 
    

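For reference, the option handling being removed in the hunk above follows the stock commons-cli pattern: declare the options, parse with GnuParser, then interrogate the CommandLine. A standalone sketch of that pattern, using the same option names as the USAGE string (the MiniCli class itself is hypothetical):

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.GnuParser;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;

    // Sketch: parse the Master command-line options the way the removed code did.
    public class MiniCli {
      public static void main(String[] args) throws ParseException {
        Options opt = new Options();
        opt.addOption("localRegionServers", true, "RegionServers to start in master process");
        opt.addOption("masters", true, "Masters to start in this process");
        opt.addOption("minRegionServers", true, "Minimum RegionServers needed to host user tables");
        opt.addOption("backup", false, "Do not try to become HMaster until the primary fails");

        CommandLine cmd = new GnuParser().parse(opt, args); // GnuParser accepts --name=value
        if (cmd.hasOption("minRegionServers")) {
          int n = Integer.parseInt(cmd.getOptionValue("minRegionServers"));
          System.out.println("minRegionServers=" + n);
        }
        System.out.println("backup mode: " + cmd.hasOption("backup"));
      }
    }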
    [05/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
    index 3adb320..9f01018 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncTableImpl.CheckAndMutateBuilderImpl.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
    +var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
     var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -private final class RawAsyncTableImpl.CheckAndMutateBuilderImpl
    +private final class RawAsyncTableImpl.CheckAndMutateBuilderImpl
 extends Object
     implements AsyncTable.CheckAndMutateBuilder
     
    @@ -155,6 +155,10 @@ implements row
     
     
    +private TimeRange
    +timeRange
    +
    +
     private byte[]
     value
     
@@ -222,6 +226,10 @@ implements
 CompletableFuture<Boolean>
 thenPut(Put put)
 
+
+AsyncTable.CheckAndMutateBuilder
+timeRange(TimeRange timeRange)
+
     
     
     
    @@ -257,7 +265,7 @@ implements 
     
 row
-private final byte[] row
+private final byte[] row
 
 
@@ -266,7 +274,7 @@ implements
 
 family
-private final byte[] family
+private final byte[] family
 
 
@@ -275,7 +283,16 @@ implements
 
 qualifier
-private byte[] qualifier
+private byte[] qualifier
+
+
+timeRange
+private TimeRange timeRange
 
 
@@ -284,7 +301,7 @@ implements
 
 op
-private CompareOperator op
+private CompareOperator op
 
 
@@ -293,7 +310,7 @@ implements
 
 value
-private byte[] value
+private byte[] value
 
 
@@ -310,7 +327,7 @@ implements
 
 CheckAndMutateBuilderImpl
-public CheckAndMutateBuilderImpl(byte[] row,
+public CheckAndMutateBuilderImpl(byte[] row,
   byte[] family)
 
 
@@ -328,7 +345,7 @@ implements
 
 qualifier
-public AsyncTable.CheckAndMutateBuilder qualifier(byte[] qualifier)
+public AsyncTable.CheckAndMutateBuilder qualifier(byte[] qualifier)
 
 Specified by:
 qualifier in interface AsyncTable.CheckAndMutateBuilder
@@ -337,13 +354,28 @@ implements
+
+
+timeRange
+public AsyncTable.CheckAndMutateBuilder timeRange(TimeRange timeRange)
+
+Specified by:
+timeRange in interface AsyncTable.CheckAndMutateBuilder
+Parameters:
+timeRange - time range to check.
+
+
     
     
     
     
     
 ifNotExists
-public AsyncTable.CheckAndMutateBuilder ifNotExists()
+public AsyncTable.CheckAndMutateBuilder ifNotExists()
 Description copied from interface: AsyncTable.CheckAndMutateBuilder
 Check for lack of column.
 
@@ -358,7 +390,7 @@ implements
 
 ifMatches
-public AsyncTable.CheckAndMutateBuilder ifMatches(CompareOperator compareOp,
+public AsyncTable.CheckAndMutateBuilder ifMatches(CompareOperator compareOp,
   byte[] value)
 
 Specified by:
@@ -375,7 +407,7 @@ implements
 
 preCheck
-private void preCheck()
+private void preCheck()
 
 
@@ -384,7 +416,7 @@ implements
 
 thenPut
-public CompletableFuture<Boolean> thenPut(Put put)
+public CompletableFuture<Boolean> thenPut(Put put)
 
 Specified by:
 thenPut in interface AsyncTable.CheckAndMutateBuilder
@@ -402,7 +434,7 @@ implements
 
 thenDelete
-public CompletableFuture<Boolean> thenDelete(Delete delete)
     title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
     title="class or interface in java.lang">BooleanthenDelete(Deletedelete)
    

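The hunks above add a timeRange(TimeRange) step to the check-and-mutate builder, so the checked cell can be restricted to a time window before the compare runs. A hedged usage sketch, assuming an AsyncTable handle and illustrative row/family/qualifier values (the wrapper class and values are hypothetical):

    import java.util.concurrent.CompletableFuture;
    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.AsyncTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.io.TimeRange;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: the check only considers cells inside the given time window.
    public class TimeRangeCheckAndMutateSketch {
      static CompletableFuture<Boolean> swapIfOld(AsyncTable<?> table) {
        byte[] row = Bytes.toBytes("row1");
        byte[] family = Bytes.toBytes("cf");
        byte[] qualifier = Bytes.toBytes("q");
        Put put = new Put(row).addColumn(family, qualifier, Bytes.toBytes("new"));
        return table.checkAndMutate(row, family)
            .qualifier(qualifier)
            .timeRange(TimeRange.between(0L, 1_000_000L)) // window the check, per the new builder step
            .ifMatches(CompareOperator.EQUAL, Bytes.toBytes("old"))
            .thenPut(put);
      }
    }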
    [05/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html 
    b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    index 2253191..58f9a7c 100644
    --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    +++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    @@ -114,15 +114,15 @@
     
     
     private PriorityFunction
    -RpcExecutor.priority
    +SimpleRpcScheduler.priority
     
     
     private PriorityFunction
    -RpcExecutor.CallPriorityComparator.priority
    +RpcExecutor.priority
     
     
     private PriorityFunction
    -SimpleRpcScheduler.priority
    +RpcExecutor.CallPriorityComparator.priority
     
     
     
    @@ -319,7 +319,7 @@
     
     
     RpcScheduler
-FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority)
 Deprecated.
 
@@ -333,16 +333,18 @@
 
 
 RpcScheduler
-RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority)
 Deprecated.
 
 
 
 RpcScheduler
-FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority,
-  Abortable server)
+  Abortable server)
+Constructs a RpcScheduler.
+
 
 
 RpcScheduler
@@ -352,11 +354,9 @@
 
 
 RpcScheduler
-RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority,
-  Abortable server)
-Constructs a RpcScheduler.
-
+  Abortable server)
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html 
    b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    index e491ef8..c3d3cc2 100644
    --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    +++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    @@ -123,13 +123,13 @@
     
     
     void
-RpcCallContext.setCallBack(RpcCallback callback)
-Sets a callback which has to be executed at the end of this RPC call.
-
+ServerCall.setCallBack(RpcCallback callback)
 
 
 void
-ServerCall.setCallBack(RpcCallback callback)
+RpcCallContext.setCallBack(RpcCallback callback)
+Sets a callback which has to be executed at the end of this RPC call.
+
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html 
    b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    index c3eee11..f6ddc97 100644
    --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    +++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    @@ -131,24 +131,32 @@
     
     
     
    -protected RpcControllerFactory
    -RegionAdminServiceCallable.rpcControllerFactory
    -
    -
     private RpcControllerFactory
     ConnectionImplementation.rpcControllerFactory
     
    +
    +protected RpcControllerFactory
    +ClientScanner.rpcControllerFactory
    +
     
    +protected RpcControllerFactory
    +RegionAdminServiceCallable.rpcControllerFactory
    +
    +
     (package private) RpcControllerFactory
     AsyncConnectionImpl.rpcControllerFactory
     
    -
    +
     private RpcControllerFactory
     HTable.rpcControllerFactory
     
    +
    +private RpcControllerFactory
    +HBaseAdmin.rpcControllerFactory
    +
     
     private RpcControllerFactory
    -RpcRetryingCallerWithReadReplicas.rpcControllerFactory
    +SecureBulkLoadClient.rpcControllerFactory
     
     
     protected RpcControllerFactory
    @@ -156,15 +164,7 @@
     
     
     private RpcControllerFactory
    -HBaseAdmin.rpcControllerFactory
    -
    -
    -private RpcControllerFactory
    -SecureBulkLoadClient.rpcControllerFactory
    -
    -
    -protected RpcControllerFactory
    -ClientScanner.rpcControllerFactory
    +RpcRetryingCallerWithReadReplicas.rpcControllerFactory
     
     
     (package private) RpcControllerFactory
    @@ -181,11 +181,11 @@
     
     
     RpcControllerFactory
    -ClusterConnection.getRpcControllerFactory()
    +ConnectionImplementation.getRpcControllerFactory()
     
     
     RpcControllerFactory
    -ConnectionImplementation.getRpcControllerFactory()
    +ClusterConnection.getRpcControllerFactory()
     
     
     private RpcControllerFactory
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.Handler.html
    --
    diff --git 
    

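The class-use rows above document the RpcSchedulerFactory contract: create(Configuration, PriorityFunction) is deprecated in favor of create(Configuration, PriorityFunction, Abortable), which "Constructs a RpcScheduler." A hedged sketch of a custom factory that just builds a FIFO scheduler (a hypothetical plugin, not one of the shipped factories):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
    import org.apache.hadoop.hbase.ipc.PriorityFunction;
    import org.apache.hadoop.hbase.ipc.RpcScheduler;
    import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;

    // Sketch: wire up a FIFO scheduler; the priority function is deliberately ignored.
    public class FifoOnlySchedulerFactory implements RpcSchedulerFactory {
      @Override
      public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
        int handlers = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
            HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
        return new FifoRpcScheduler(conf, handlers);
      }

      @Override
      public RpcScheduler create(Configuration conf, PriorityFunction priority) {
        return create(conf, priority, null); // deprecated overload delegates to the new one
      }
    }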
    [05/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLogAppender.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLogAppender.html 
    b/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLogAppender.html
    index ee9b433..02c9bdd 100644
    --- a/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLogAppender.html
    +++ b/devapidocs/org/apache/hadoop/hbase/http/HttpRequestLogAppender.html
    @@ -118,7 +118,8 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public class HttpRequestLogAppender
    +@InterfaceAudience.Private
    +public class HttpRequestLogAppender
     extends org.apache.log4j.AppenderSkeleton
     Log4j Appender adapter for HttpRequestLog
     
    @@ -249,7 +250,7 @@ extends org.apache.log4j.AppenderSkeleton
     
     
     filename
-private String filename
+private String filename
     
     
     
    @@ -258,7 +259,7 @@ extends org.apache.log4j.AppenderSkeleton
     
     
     retainDays
-private int retainDays
+private int retainDays
     
     
     
    @@ -275,7 +276,7 @@ extends org.apache.log4j.AppenderSkeleton
     
     
     HttpRequestLogAppender
-public HttpRequestLogAppender()
+public HttpRequestLogAppender()
     
     
     
    @@ -292,7 +293,7 @@ extends org.apache.log4j.AppenderSkeleton
     
     
     setRetainDays
-public void setRetainDays(int retainDays)
+public void setRetainDays(int retainDays)
     
     
     
    @@ -301,7 +302,7 @@ extends org.apache.log4j.AppenderSkeleton
     
     
     getRetainDays
-public int getRetainDays()
+public int getRetainDays()
     
     
     
    @@ -310,7 +311,7 @@ extends org.apache.log4j.AppenderSkeleton
     
     
     setFilename
-public void setFilename(String filename)
+public void setFilename(String filename)
     
     
     
    @@ -319,7 +320,7 @@ extends org.apache.log4j.AppenderSkeleton
     
     
     getFilename
-public String getFilename()
+public String getFilename()
     
     
     
    @@ -328,7 +329,7 @@ extends org.apache.log4j.AppenderSkeleton
     
     
     append
-public void append(org.apache.log4j.spi.LoggingEvent event)
+public void append(org.apache.log4j.spi.LoggingEvent event)
     
     Specified by:
 append in class org.apache.log4j.AppenderSkeleton
    @@ -341,7 +342,7 @@ extends org.apache.log4j.AppenderSkeleton
     
     
     close
-public void close()
+public void close()
     
     
     
    @@ -350,7 +351,7 @@ extends org.apache.log4j.AppenderSkeleton
     
     
     requiresLayout
-public boolean requiresLayout()
+public boolean requiresLayout()
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/http/HttpServerUtil.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/http/HttpServerUtil.html 
    b/devapidocs/org/apache/hadoop/hbase/http/HttpServerUtil.html
    index 401d81c..2a9216a 100644
    --- a/devapidocs/org/apache/hadoop/hbase/http/HttpServerUtil.html
    +++ b/devapidocs/org/apache/hadoop/hbase/http/HttpServerUtil.html
    @@ -109,7 +109,8 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public class HttpServerUtil
    +@InterfaceAudience.Private
    +public final class HttpServerUtil
 extends Object
 HttpServer utility.
     
    @@ -127,10 +128,12 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
     
     Constructors
     
    -Constructor and Description
    +Modifier
    +Constructor and Description
     
     
    -HttpServerUtil()
    +private 
    +HttpServerUtil()
     
     
     
    @@ -181,7 +184,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
     
     
     HttpServerUtil
-public HttpServerUtil()
+private HttpServerUtil()
     
     
     
    @@ -198,7 +201,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
     
     
     constrainHttpMethods
-public static void constrainHttpMethods(org.eclipse.jetty.servlet.ServletContextHandler ctxHandler)
+public static void constrainHttpMethods(org.eclipse.jetty.servlet.ServletContextHandler ctxHandler)
 Add constraints to a Jetty Context to disallow undesirable Http methods.
     
     Parameters:
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.html 
    

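HttpRequestLogAppender above is a plain log4j 1.x AppenderSkeleton subclass with bean-style setters, which is how log4j properties files inject configuration. A minimal sketch of that shape (the ConsoleEchoAppender class is hypothetical, not the HBase class):

    import org.apache.log4j.AppenderSkeleton;
    import org.apache.log4j.spi.LoggingEvent;

    // Sketch: the same AppenderSkeleton contract the page above documents.
    public class ConsoleEchoAppender extends AppenderSkeleton {
      private String filename; // populated by log4j via the setter, like setFilename above
      private int retainDays;

      public void setFilename(String filename) { this.filename = filename; }
      public String getFilename() { return filename; }
      public void setRetainDays(int retainDays) { this.retainDays = retainDays; }
      public int getRetainDays() { return retainDays; }

      @Override
      protected void append(LoggingEvent event) {
        // Real appenders forward to a sink; here we just echo the rendered message.
        System.out.println(event.getRenderedMessage());
      }

      @Override
      public void close() { closed = true; } // `closed` is inherited from AppenderSkeleton

      @Override
      public boolean requiresLayout() { return false; }
    }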
    [05/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Entry.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Entry.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Entry.html
    index 0fa5a23..e31f5c6 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Entry.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WAL.Entry.html
    @@ -148,143 +148,160 @@
 140  void sync(long txid) throws IOException;
 141
 142  /**
-143   * WAL keeps track of the sequence numbers that are as yet not flushed in memstores
-144   * in order to be able to do accounting to figure which WALs can be let go. This method tells WAL
-145   * that some region is about to flush. The flush can be the whole region or for a column family
-146   * of the region only.
-147   *
-148   * <p>Currently, it is expected that the update lock is held for the region; i.e. no
-149   * concurrent appends while we set up cache flush.
-150   * @param families Families to flush. May be a subset of all families in the region.
-151   * @return Returns {@link HConstants#NO_SEQNUM} if we are flushing the whole region OR if
-152   * we are flushing a subset of all families but there are no edits in those families not
-153   * being flushed; in other words, this is effectively same as a flush of all of the region
-154   * though we were passed a subset of regions. Otherwise, it returns the sequence id of the
-155   * oldest/lowest outstanding edit.
-156   * @see #completeCacheFlush(byte[])
-157   * @see #abortCacheFlush(byte[])
-158   */
-159  Long startCacheFlush(final byte[] encodedRegionName, Set<byte[]> families);
-160
-161  Long startCacheFlush(final byte[] encodedRegionName, Map<byte[], Long> familyToSeq);
-162
-163  /**
-164   * Complete the cache flush.
-165   * @param encodedRegionName Encoded region name.
-166   * @see #startCacheFlush(byte[], Set)
-167   * @see #abortCacheFlush(byte[])
-168   */
-169  void completeCacheFlush(final byte[] encodedRegionName);
-170
-171  /**
-172   * Abort a cache flush. Call if the flush fails. Note that the only recovery
-173   * for an aborted flush currently is a restart of the regionserver so the
-174   * snapshot content dropped by the failure gets restored to the memstore.
-175   * @param encodedRegionName Encoded region name.
-176   */
-177  void abortCacheFlush(byte[] encodedRegionName);
-178
-179  /**
-180   * @return Coprocessor host.
-181   */
-182  WALCoprocessorHost getCoprocessorHost();
-183
-184  /**
-185   * Gets the earliest unflushed sequence id in the memstore for the region.
-186   * @param encodedRegionName The region to get the number for.
-187   * @return The earliest/lowest/oldest sequence id if present, HConstants.NO_SEQNUM if absent.
-188   * @deprecated Since version 1.2.0. Removing because not used and exposes subtle internal
-189   * workings. Use {@link #getEarliestMemStoreSeqNum(byte[], byte[])}
-190   */
-191  @VisibleForTesting
-192  @Deprecated
-193  long getEarliestMemStoreSeqNum(byte[] encodedRegionName);
-194
-195  /**
-196   * Gets the earliest unflushed sequence id in the memstore for the store.
-197   * @param encodedRegionName The region to get the number for.
-198   * @param familyName The family to get the number for.
-199   * @return The earliest/lowest/oldest sequence id if present, HConstants.NO_SEQNUM if absent.
-200   */
-201  long getEarliestMemStoreSeqNum(byte[] encodedRegionName, byte[] familyName);
-202
-203  /**
-204   * Human readable identifying information about the state of this WAL.
-205   * Implementors are encouraged to include information appropriate for debugging.
-206   * Consumers are advised not to rely on the details of the returned String; it does
-207   * not have a defined structure.
-208   */
-209  @Override
-210  String toString();
+143   * @param forceSync Flag to force sync rather than flushing to the buffer. Example - Hadoop hflush
+144   *  vs hsync.
+145   */
+146  default void sync(boolean forceSync) throws IOException {
+147    sync();
+148  }
+149
+150  /**
+151   * @param txid Transaction id to sync to.
+152   * @param forceSync Flag to force sync rather than flushing to the buffer. Example - Hadoop hflush
+153   *  vs hsync.
+154   */
+155  default void sync(long txid, boolean forceSync) throws IOException {
+156    sync(txid);
+157  }
+158
+159  /**
+160   * WAL keeps track of the sequence numbers that are as yet not flushed in memstores
+161   * in order to be able to do accounting to figure which WALs can be let go. This method tells WAL
+162   * that some region is about to flush. The flush can be the whole region or for a column family
+163   * of the region only.
+164   *
+165   * <p>Currently, it is expected that the update lock is held for the region; i.e. no
+166   * concurrent appends while we set up
    

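The WAL hunk above adds sync(boolean forceSync) overloads as default methods that fall back to the old sync() calls, so existing WAL implementations keep compiling while new ones can honor an hsync-style durable flush. A minimal sketch of that interface-evolution pattern on a hypothetical Log interface (not the real WAL type):

    import java.io.IOException;

    // Sketch: evolving an interface with default-method overloads, as the WAL diff does.
    interface Log {
      void sync() throws IOException; // original contract: flush to the buffer (hflush-like)

      default void sync(boolean forceSync) throws IOException {
        // Old implementations inherit this and ignore forceSync; new ones
        // override it to force a durable (hsync-style) flush when asked.
        sync();
      }
    }

    class BufferedLog implements Log {
      @Override
      public void sync() throws IOException {
        System.out.println("hflush: push to OS buffers");
      }

      @Override
      public void sync(boolean forceSync) throws IOException {
        if (forceSync) {
          System.out.println("hsync: force to disk");
        } else {
          sync();
        }
      }
    }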
    [05/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
    index 3bc66bb..97aa79c 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.Testing.html
    @@ -1435,459 +1435,460 @@
 1427   */
 1428  private void execProcedure(final RootProcedureState procStack,
 1429      final Procedure<TEnvironment> procedure) {
-1430    Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432    // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException.
-1433    // The exception is caught below and then we hurry to the exit without disturbing state. The
-1434    // idea is that the processing of this procedure will be unsuspended later by an external event
-1435    // such the report of a region open. TODO: Currently, its possible for two worker threads
-1436    // to be working on the same procedure concurrently (locking in procedures is NOT about
-1437    // concurrency but about tying an entity to a procedure; i.e. a region to a particular
-1438    // procedure instance). This can make for issues if both threads are changing state.
-1439    // See env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440    // in RegionTransitionProcedure#reportTransition for example of Procedure putting
-1441    // itself back on the scheduler making it possible for two threads running against
-1442    // the one Procedure. Might be ok if they are both doing different, idempotent sections.
-1443    boolean suspended = false;
-1444
-1445    // Whether to 're-' -execute; run through the loop again.
-1446    boolean reExecute = false;
-1447
-1448    Procedure<TEnvironment>[] subprocs = null;
-1449    do {
-1450      reExecute = false;
-1451      try {
-1452        subprocs = procedure.doExecute(getEnvironment());
-1453        if (subprocs != null && subprocs.length == 0) {
-1454          subprocs = null;
-1455        }
-1456      } catch (ProcedureSuspendedException e) {
-1457        if (LOG.isTraceEnabled()) {
-1458          LOG.trace("Suspend " + procedure);
-1459        }
-1460        suspended = true;
-1461      } catch (ProcedureYieldException e) {
-1462        if (LOG.isTraceEnabled()) {
-1463          LOG.trace("Yield " + procedure + ": " + e.getMessage(), e);
-1464        }
-1465        scheduler.yield(procedure);
-1466        return;
-1467      } catch (InterruptedException e) {
-1468        if (LOG.isTraceEnabled()) {
-1469          LOG.trace("Yield interrupt " + procedure + ": " + e.getMessage(), e);
-1470        }
-1471        handleInterruptedException(procedure, e);
-1472        scheduler.yield(procedure);
-1473        return;
-1474      } catch (Throwable e) {
-1475        // Catch NullPointerExceptions or similar errors...
-1476        String msg = "CODE-BUG: Uncaught runtime exception: " + procedure;
-1477        LOG.error(msg, e);
-1478        procedure.setFailure(new RemoteProcedureException(msg, e));
-1479      }
-1480
-1481      if (!procedure.isFailed()) {
-1482        if (subprocs != null) {
-1483          if (subprocs.length == 1 && subprocs[0] == procedure) {
-1484            // Procedure returned itself. Quick-shortcut for a state machine-like procedure;
-1485            // i.e. we go around this loop again rather than go back out on the scheduler queue.
-1486            subprocs = null;
-1487            reExecute = true;
-1488            if (LOG.isTraceEnabled()) {
-1489              LOG.trace("Short-circuit to next step on pid=" + procedure.getProcId());
-1490            }
-1491          } else {
-1492            // Yield the current procedure, and make the subprocedure runnable
-1493            // subprocs may come back 'null'.
-1494            subprocs = initializeChildren(procStack, procedure, subprocs);
-1495            LOG.info("Initialized subprocedures=" +
-1496              (subprocs == null? null:
-1497              Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-1498              collect(Collectors.toList()).toString()));
-1499          }
-1500        } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
-1501          if (LOG.isTraceEnabled()) {
-1502            LOG.trace("Added to timeoutExecutor " + procedure);
-1503          }
-1504          timeoutExecutor.add(procedure);
-1505        } else if (!suspended) {
-1506          // No subtask, so we are done
-1507          procedure.setState(ProcedureState.SUCCESS);
-1508        }
-1509      }
-1510
-1511      // Add the procedure to

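The execProcedure loop above re-runs a procedure in place when doExecute returns the procedure itself (the reExecute short-circuit), instead of round-tripping through the scheduler queue. A stripped-down sketch of that control flow, with a hypothetical Step interface and driver rather than the real ProcedureExecutor API:

    import java.util.Optional;

    // Sketch: the reExecute short-circuit from the loop above, reduced to its skeleton.
    interface Step {
      // Returns itself to continue immediately, or empty when done.
      Optional<Step> doExecute();
    }

    class Driver {
      void run(Step procedure) {
        boolean reExecute;
        do {
          reExecute = false;
          Optional<Step> next = procedure.doExecute();
          if (next.isPresent() && next.get() == procedure) {
            // State-machine style: go around the loop again, skipping the scheduler queue.
            reExecute = true;
          }
        } while (reExecute);
      }
    }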
    [05/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
    --
    diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
    index d43317c..3385a0a 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
    @@ -742,497 +742,501 @@
     
     
     
    -TestDelayingRunner
    +TestCoprocessorDescriptor
     
     
     
    -TestDeleteTimeStamp
    +TestDelayingRunner
     
     
     
    +TestDeleteTimeStamp
    +
    +
    +
     TestDropTimeoutRequest
     
     Test a drop timeout request.
     
     
    -
    +
     TestDropTimeoutRequest.SleepLongerAtFirstCoprocessor
     
     Coprocessor that sleeps a while the first time you do a 
    Get
     
     
    -
    +
     TestEnableTable
     
     
    -
    +
     TestEnableTable.MasterSyncObserver
     
     
    -
    +
     TestFastFail
     
     
    -
    +
     TestFastFail.CallQueueTooBigPffeInterceptor
     
     
    -
    +
     TestFastFail.MyPreemptiveFastFailInterceptor
     
     
    -
    +
     TestFlushFromClient
     
     
    -
    +
     TestFromClientSide
     
     Run tests that use the HBase clients; 
    Table.
     
     
    -
    +
     TestFromClientSide3
     
     
    -
    +
     TestFromClientSide3.WaitingForMultiMutationsObserver
     
     
    -
    +
     TestFromClientSide3.WaitingForScanObserver
     
     
    -
    +
     TestFromClientSideNoCodec
     
     Do some ops and prove that client and server can work w/o 
    codecs; that we can pb all the time.
     
     
    -
    +
     TestFromClientSideScanExcpetion
     
     
    -
    +
     TestFromClientSideScanExcpetion.MyHRegion
     
     
    -
    +
     TestFromClientSideScanExcpetion.MyHStore
     
     
    -
    +
     TestFromClientSideScanExcpetion.MyStoreScanner
     
     
    -
    +
     TestFromClientSideScanExcpetionWithCoprocessor
     
     Test all client operations with a coprocessor that just 
    implements the default flush/compact/scan
      policy.
     
     
    -
    +
     TestFromClientSideWithCoprocessor
     
     Test all client operations with a coprocessor that just 
    implements the default flush/compact/scan
      policy.
     
     
    -
    +
     TestGet
     
     
    -
    +
     TestGetProcedureResult
     
     Testcase for HBASE-19608.
     
     
    -
    +
     TestGetProcedureResult.DummyProcedure
     
     
    -
    +
     TestHBaseAdminNoCluster
     
     
    -
    +
     TestHTableMultiplexer
     
     
    -
    +
     TestHTableMultiplexerFlushCache
     
     
    -
    +
     TestHTableMultiplexerViaMocks
     
     
    -
    +
     TestIllegalTableDescriptor
     
     
    -
    +
     TestImmutableHColumnDescriptor
     
     
    -
    +
     TestImmutableHRegionInfo
     
     Test ImmutableHRegionInfo
     
     
    -
    +
     TestImmutableHTableDescriptor
     
     
    -
    +
     TestIncrement
     
     
    -
    +
     TestIncrementFromClientSideWithCoprocessor
     
     Test all Increment client operations with a 
    coprocessor that
      just implements the default flush/compact/scan policy.
     
     
    -
    +
     TestIncrementsFromClientSide
     
     Run Increment tests that use the HBase clients; 
    HTable.
     
     
    -
    +
     TestInterfaceAlign
     
     
    -
    +
     TestIntraRowPagination
     
     Test scan/get offset and limit settings within one row 
    through HRegion API.
     
     
    -
    +
     TestLeaseRenewal
     
     
    -
    +
     TestLimitedScanWithFilter
     
     With filter we may stop at a middle of row and think that 
    we still have more cells for the
      current row but actually all the remaining cells will be filtered out by the 
    filter.
     
     
    -
    +
     TestMalformedCellFromClient
     
     The purpose of this test is to ensure whether rs deals with 
    the malformed cells correctly.
     
     
    -
    +
     TestMetaCache
     
     
    -
    +
     TestMetaCache.CallQueueTooBigExceptionInjector
     
     Throws CallQueueTooBigException for all gets.
     
     
    -
    +
     TestMetaCache.ExceptionInjector
     
     
    -
    +
     TestMetaCache.FakeRSRpcServices
     
     
    -
    +
     TestMetaCache.RegionServerWithFakeRpcServices
     
     
    -
    +
     TestMetaCache.RoundRobinExceptionInjector
     
     Rotates through the possible cache clearing and non-cache 
    clearing exceptions
      for requests.
     
     
    -
    +
     TestMetaWithReplicas
     
     Tests the scenarios where replicas are enabled for the meta 
    table
     
     
    -
    +
     TestMetricsConnection
     
     
    -
    +
     TestMobCloneSnapshotFromClient
     
     Test clone snapshots from the client
     
     
    -
    +
     TestMobCloneSnapshotFromClient.DelayFlushCoprocessor
     
     This coprocessor is used to delay the flush.
     
     
    -
    +
     TestMobRestoreSnapshotFromClient
     
     Test restore snapshots from the client
     
     
    -
    +
     TestMobSnapshotCloneIndependence
     
     Test to verify that the cloned table is independent of the 
    table from which it was cloned
     
     
    -
    +
     TestMobSnapshotFromClient
     
     Test create/using/deleting snapshots from the client
     
     
    -
    +
     TestMultiParallel
     
     
    -
    +
     TestMultiParallel.MyMasterObserver
     
     
    -
    +
     TestMultipleTimestamps
     
     Run tests related to TimestampsFilter using 
    HBase client APIs.
     
     
    -
    +
     TestMultiRespectsLimits
     
     This test sets the multi size WAY low and then checks 
    to make sure that gets will still make
      progress.
     
     
    -
    +
     TestMutation
     
     
    -
    +
     TestMvccConsistentScanner
     
     
    -
    +
     TestOperation
     
     Run tests that use the functionality of the Operation 
    superclass for
      Puts, Gets, Deletes, Scans, and MultiPuts.
     
     
    -
    +
     TestProcedureFuture
     
     
    -
    +
     TestProcedureFuture.TestFuture
     
     
    -
    +
     TestPutDeleteEtcCellIteration
     
     Test that I can Iterate Client Actions that hold Cells (Get 
    does not have Cells).
     
     
    -
    +
     

    [05/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
    index 0ca6e81..bf375f2 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.html
    @@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
     
     
     @InterfaceAudience.Private
    -public class RegionCoprocessorHost
    +public class RegionCoprocessorHost
     extends CoprocessorHostRegionCoprocessor,RegionCoprocessorEnvironment
     Implements the coprocessor environment and runtime support 
    for coprocessors
      loaded within a Region.
    @@ -796,7 +796,7 @@ extends 
     
     LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
     
     
     
    @@ -805,7 +805,7 @@ extends 
     
     SHARED_DATA_MAP
-private static final org.apache.commons.collections4.map.ReferenceMap<String, ConcurrentMap<String, Object>> SHARED_DATA_MAP
+private static final org.apache.commons.collections4.map.ReferenceMap<String, ConcurrentMap<String, Object>> SHARED_DATA_MAP
     
     
     
    @@ -814,7 +814,7 @@ extends 
     
     hasCustomPostScannerFilterRow
-private final boolean hasCustomPostScannerFilterRow
+private final boolean hasCustomPostScannerFilterRow
     
     
     
    @@ -823,7 +823,7 @@ extends 
     
     rsServices
    -RegionServerServices rsServices
    +RegionServerServices rsServices
     The region server services
     
     
    @@ -833,7 +833,7 @@ extends 
     
     region
    -HRegion region
    +HRegion region
     The region
     
     
    @@ -843,7 +843,7 @@ extends 
     
     regionObserverGetter
-private CoprocessorHost.ObserverGetter<RegionCoprocessor, RegionObserver> regionObserverGetter
+private CoprocessorHost.ObserverGetter<RegionCoprocessor, RegionObserver> regionObserverGetter
     
     
     
    @@ -852,7 +852,7 @@ extends 
     
     endpointObserverGetter
-private CoprocessorHost.ObserverGetter<RegionCoprocessor, EndpointObserver> endpointObserverGetter
+private CoprocessorHost.ObserverGetter<RegionCoprocessor, EndpointObserver> endpointObserverGetter
     
     
     
    @@ -869,7 +869,7 @@ extends 
     
     RegionCoprocessorHost
-public RegionCoprocessorHost(HRegion region,
+public RegionCoprocessorHost(HRegion region,
   RegionServerServices rsServices,
   org.apache.hadoop.conf.Configuration conf)
     Constructor
    @@ -895,7 +895,7 @@ extends 
     
     getTableCoprocessorAttrsFromSchema
-static List<RegionCoprocessorHost.TableCoprocessorAttribute> getTableCoprocessorAttrsFromSchema(org.apache.hadoop.conf.Configuration conf,
+static List<RegionCoprocessorHost.TableCoprocessorAttribute> getTableCoprocessorAttrsFromSchema(org.apache.hadoop.conf.Configuration conf,
   TableDescriptor htd)
     
     
    @@ -905,7 +905,7 @@ extends 
     
     testTableCoprocessorAttrs
-public static void testTableCoprocessorAttrs(org.apache.hadoop.conf.Configuration conf,
+public static void testTableCoprocessorAttrs(org.apache.hadoop.conf.Configuration conf,
   TableDescriptor htd)
   throws IOException
 Sanity check the table coprocessor attributes of the supplied schema. Will
    @@ -925,7 +925,7 @@ extends 
     
     loadTableCoprocessors
-void loadTableCoprocessors(org.apache.hadoop.conf.Configuration conf)
    

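getTableCoprocessorAttrsFromSchema above reads coprocessor specifications back off a TableDescriptor so the host can load them. For context, a hedged sketch of how such a spec gets onto a descriptor in the first place (the example class and observer class name are hypothetical):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch: attach a coprocessor class to a table schema; the region coprocessor
    // host later parses it back out via getTableCoprocessorAttrsFromSchema.
    public class CoprocessorAttrSketch {
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
            .setCoprocessor("com.example.MyRegionObserver") // hypothetical observer class
            .build();
      }
    }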
    [05/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html 
    b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
    index 99718bf..228e2d0 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/class-use/MetricsMasterWrapper.html
    @@ -139,29 +139,29 @@
     
     
     
-MetricsMasterSource
-MetricsMasterSourceFactory.create(MetricsMasterWrapper masterWrapper)
+MetricsMasterProcSource
+MetricsMasterProcSourceFactory.create(MetricsMasterWrapper masterWrapper)
 
 
 MetricsMasterQuotaSource
 MetricsMasterQuotaSourceFactory.create(MetricsMasterWrapper masterWrapper)
 
 
-MetricsMasterProcSource
-MetricsMasterProcSourceFactory.create(MetricsMasterWrapper masterWrapper)
+MetricsMasterSource
+MetricsMasterSourceFactory.create(MetricsMasterWrapper masterWrapper)
 
 
 MetricsMasterQuotaSource
 MetricsMasterQuotaSourceFactoryImpl.create(MetricsMasterWrapper masterWrapper)
 
 
-MetricsMasterProcSource
-MetricsMasterProcSourceFactoryImpl.create(MetricsMasterWrapper masterWrapper)
-
-
 MetricsMasterSource
 MetricsMasterSourceFactoryImpl.create(MetricsMasterWrapper masterWrapper)
 
+
+MetricsMasterProcSource
+MetricsMasterProcSourceFactoryImpl.create(MetricsMasterWrapper masterWrapper)
+
    +
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html 
    b/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
    index bfbcbd9..b76b306 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/RackManager.html
    @@ -116,11 +116,11 @@
     
     
     private RackManager
    -FavoredNodesManager.rackManager
    +FavoredNodeLoadBalancer.rackManager
     
     
     private RackManager
    -FavoredNodeLoadBalancer.rackManager
    +FavoredNodesManager.rackManager
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html 
    b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
    index 9630dc4..f5c3b0f 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/class-use/RegionPlan.html
    @@ -282,7 +282,10 @@
     
     
 List<RegionPlan>
-FavoredStochasticBalancer.balanceCluster(Map<ServerName, List<RegionInfo>> clusterState)
+SimpleLoadBalancer.balanceCluster(Map<ServerName, List<RegionInfo>> clusterMap)
+Generate a global load balancing plan according to the specified map of
+ server information to the most loaded regions of each server.
+
 
 
 List<RegionPlan>
@@ -292,19 +295,16 @@
 
 
 List<RegionPlan>
-SimpleLoadBalancer.balanceCluster(Map<ServerName, List<RegionInfo>> clusterMap)
-Generate a global load balancing plan according to the specified map of
- server information to the most loaded regions of each server.
-
+FavoredStochasticBalancer.balanceCluster(Map<ServerName, List<RegionInfo>> clusterState)
     
     
     https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListRegionPlan
    

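The rows above document LoadBalancer.balanceCluster(Map<ServerName, List<RegionInfo>>) returning a list of RegionPlan moves. A hedged sketch of driving it, assuming an already-initialized balancer and a cluster-state snapshot are supplied by the caller (the wrapper class is hypothetical):

    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.master.LoadBalancer;
    import org.apache.hadoop.hbase.master.RegionPlan;

    // Sketch: ask the balancer for moves and print them; executing the moves is the master's job.
    public class BalancePlanSketch {
      static void printPlans(LoadBalancer balancer,
          Map<ServerName, List<RegionInfo>> clusterState) throws Exception {
        List<RegionPlan> plans = balancer.balanceCluster(clusterState);
        if (plans == null) {
          return; // a balancer may return null when the cluster is already balanced
        }
        for (RegionPlan plan : plans) {
          System.out.println(plan.getRegionName() + ": "
              + plan.getSource() + " -> " + plan.getDestination());
        }
      }
    }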
    [05/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html 
    b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    index 58f9a7c..2253191 100644
    --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    +++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    @@ -114,15 +114,15 @@
     
     
     private PriorityFunction
    -SimpleRpcScheduler.priority
    +RpcExecutor.priority
     
     
     private PriorityFunction
    -RpcExecutor.priority
    +RpcExecutor.CallPriorityComparator.priority
     
     
     private PriorityFunction
    -RpcExecutor.CallPriorityComparator.priority
    +SimpleRpcScheduler.priority
     
     
     
    @@ -319,7 +319,7 @@
     
     
     RpcScheduler
-RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority)
 Deprecated.
 
@@ -333,18 +333,16 @@
 
 
 RpcScheduler
-FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority)
 Deprecated.
 
 
 
 RpcScheduler
-RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority,
-  Abortable server)
-Constructs a RpcScheduler.
-
+  Abortable server)
 
 
 RpcScheduler
@@ -354,9 +352,11 @@
 
 
 RpcScheduler
-FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority,
-  Abortable server)
+  Abortable server)
+Constructs a RpcScheduler.
+
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html 
    b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    index c3d3cc2..e491ef8 100644
    --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    +++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    @@ -123,14 +123,14 @@
     
     
     void
-ServerCall.setCallBack(RpcCallback callback)
-
-
-void
 RpcCallContext.setCallBack(RpcCallback callback)
 Sets a callback which has to be executed at the end of this RPC call.
 
 
+
+void
+ServerCall.setCallBack(RpcCallback callback)
    +
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html 
    b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    index f6ddc97..c3eee11 100644
    --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    +++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    @@ -131,32 +131,24 @@
     
     
     
    -private RpcControllerFactory
    -ConnectionImplementation.rpcControllerFactory
    -
    -
    -protected RpcControllerFactory
    -ClientScanner.rpcControllerFactory
    -
    -
     protected RpcControllerFactory
     RegionAdminServiceCallable.rpcControllerFactory
     
     
    -(package private) RpcControllerFactory
    -AsyncConnectionImpl.rpcControllerFactory
    +private RpcControllerFactory
    +ConnectionImplementation.rpcControllerFactory
     
     
    -private RpcControllerFactory
    -HTable.rpcControllerFactory
    +(package private) RpcControllerFactory
    +AsyncConnectionImpl.rpcControllerFactory
     
     
     private RpcControllerFactory
    -HBaseAdmin.rpcControllerFactory
    +HTable.rpcControllerFactory
     
     
     private RpcControllerFactory
    -SecureBulkLoadClient.rpcControllerFactory
    +RpcRetryingCallerWithReadReplicas.rpcControllerFactory
     
     
     protected RpcControllerFactory
    @@ -164,7 +156,15 @@
     
     
     private RpcControllerFactory
    -RpcRetryingCallerWithReadReplicas.rpcControllerFactory
    +HBaseAdmin.rpcControllerFactory
    +
    +
    +private RpcControllerFactory
    +SecureBulkLoadClient.rpcControllerFactory
    +
    +
    +protected RpcControllerFactory
    +ClientScanner.rpcControllerFactory
     
     
     (package private) RpcControllerFactory
    @@ -181,11 +181,11 @@
     
     
     RpcControllerFactory
    -ConnectionImplementation.getRpcControllerFactory()
    +ClusterConnection.getRpcControllerFactory()
     
     
     RpcControllerFactory
    -ClusterConnection.getRpcControllerFactory()
    +ConnectionImplementation.getRpcControllerFactory()
     
     
     private RpcControllerFactory
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.Handler.html
    --
    diff --git 
    

    [05/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
    index 3563b1c..0cc71bf 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
    @@ -1577,387 +1577,386 @@
     1569  }
     1570
     1571  public void markRegionAsSplit(final 
    RegionInfo parent, final ServerName serverName,
    -1572  final RegionInfo daughterA, final 
    RegionInfo daughterB)
    -1573  throws IOException {
    -1574// Update hbase:meta. Parent will be 
    marked offline and split up in hbase:meta.
    -1575// The parent stays in regionStates 
    until cleared when removed by CatalogJanitor.
    -1576// Update its state in regionStates 
    to it shows as offline and split when read
    -1577// later figuring what regions are 
    in a table and what are not: see
    -1578// regionStates#getRegionsOfTable
    -1579final RegionStateNode node = 
    regionStates.getOrCreateRegionStateNode(parent);
    -1580node.setState(State.SPLIT);
    -1581final RegionStateNode nodeA = 
    regionStates.getOrCreateRegionStateNode(daughterA);
    -1582
    nodeA.setState(State.SPLITTING_NEW);
    -1583final RegionStateNode nodeB = 
    regionStates.getOrCreateRegionStateNode(daughterB);
    -1584
    nodeB.setState(State.SPLITTING_NEW);
    -1585
    -1586regionStateStore.splitRegion(parent, 
    daughterA, daughterB, serverName);
    -1587if 
    (shouldAssignFavoredNodes(parent)) {
    -1588  ListServerName 
    onlineServers = this.master.getServerManager().getOnlineServersList();
    -1589  
    ((FavoredNodesPromoter)getBalancer()).
    -1590  
    generateFavoredNodesForDaughter(onlineServers, parent, daughterA, daughterB);
    -1591}
    -1592  }
    -1593
-1594  /**
-1595   * When called here, the merge has happened. The two merged regions have been
-1596   * unassigned and the above markRegionClosed has been called on each so they have been
-1597   * disassociated from a hosting Server. The merged region will be open after this call. The
-1598   * merged regions are removed from hbase:meta below. Later they are deleted from the filesystem
-1599   * by the catalog janitor running against hbase:meta. It notices when the merged region no
-1600   * longer holds references to the old regions.
-1601   */
-1602  public void markRegionAsMerged(final RegionInfo child, final ServerName serverName,
-1603      final RegionInfo mother, final RegionInfo father) throws IOException {
-1604    final RegionStateNode node = regionStates.getOrCreateRegionStateNode(child);
-1605    node.setState(State.MERGED);
-1606    regionStates.deleteRegion(mother);
-1607    regionStates.deleteRegion(father);
-1608    regionStateStore.mergeRegions(child, mother, father, serverName);
-1609    if (shouldAssignFavoredNodes(child)) {
-1610      ((FavoredNodesPromoter) getBalancer()).
-1611          generateFavoredNodesForMergedRegion(child, mother, father);
-1612    }
-1613  }
    -1614
-1615  /*
-1616   * Favored nodes should be applied only when FavoredNodes balancer is configured and the region
-1617   * belongs to a non-system table.
-1618   */
-1619  private boolean shouldAssignFavoredNodes(RegionInfo region) {
-1620    return this.shouldAssignRegionsWithFavoredNodes &&
-1621        FavoredNodesManager.isFavoredNodeApplicable(region);
-1622  }
    -1623
-1624  // ============================================================================
-1625  //  Assign Queue (Assign/Balance)
-1626  // ============================================================================
-1627  private final ArrayList<RegionStateNode> pendingAssignQueue = new ArrayList<RegionStateNode>();
-1628  private final ReentrantLock assignQueueLock = new ReentrantLock();
-1629  private final Condition assignQueueFullCond = assignQueueLock.newCondition();
-1630
-1631  /**
-1632   * Add the assign operation to the assignment queue.
-1633   * The pending assignment operation will be processed,
-1634   * and each region will be assigned by a server using the balancer.
-1635   */
-1636  protected void queueAssign(final RegionStateNode regionNode) {
-1637    regionNode.getProcedureEvent().suspend();
-1638
-1639    // TODO: quick-start for meta and the other sys-tables?
-1640    assignQueueLock.lock();
-1641    try {
-1642      pendingAssignQueue.add(regionNode);
-1643      if (regionNode.isSystemTable() ||
-1644          pendingAssignQueue.size() == 1 ||
-1645          pendingAssignQueue.size() >= assignDispatchWaitQueueMaxSize) {
-1646
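
queueAssign() above uses a classic lock/condition batching pattern: producers append under a lock and signal a waiting dispatcher early for urgent work, for the first element, or once the batch is full. A minimal, self-contained sketch of that pattern (illustrative names, not HBase code):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.Condition;
    import java.util.concurrent.locks.ReentrantLock;

    class BatchingQueue<T> {
      private final ArrayList<T> pending = new ArrayList<>();
      private final ReentrantLock lock = new ReentrantLock();
      private final Condition fullCond = lock.newCondition();
      private final int maxBatch;

      BatchingQueue(int maxBatch) { this.maxBatch = maxBatch; }

      void add(T item, boolean urgent) {
        lock.lock();
        try {
          pending.add(item);
          // Wake the consumer for urgent items (the system-table case above),
          // for the first element, or once the batch is full enough.
          if (urgent || pending.size() == 1 || pending.size() >= maxBatch) {
            fullCond.signal();
          }
        } finally {
          lock.unlock();
        }
      }

      List<T> drain(long waitMillis) throws InterruptedException {
        lock.lock();
        try {
          if (pending.isEmpty()) {
            fullCond.await(waitMillis, TimeUnit.MILLISECONDS);
          }
          List<T> batch = new ArrayList<>(pending);
          pending.clear();
          return batch;
        } finally {
          lock.unlock();
        }
      }
    }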

    [05/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/Table.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/client/Table.html 
    b/apidocs/org/apache/hadoop/hbase/client/Table.html
    index 9e72f68..db20dae 100644
    --- a/apidocs/org/apache/hadoop/hbase/client/Table.html
    +++ b/apidocs/org/apache/hadoop/hbase/client/Table.html
    @@ -101,7 +101,7 @@ var activeTableTab = "activeTableTab";
     
     
     All Superinterfaces:
    -http://docs.oracle.com/javase/8/docs/api/java/lang/AutoCloseable.html?is-external=true;
     title="class or interface in java.lang">AutoCloseable, http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
     title="class or interface in java.io">Closeable
    +https://docs.oracle.com/javase/8/docs/api/java/lang/AutoCloseable.html?is-external=true;
     title="class or interface in java.lang">AutoCloseable, https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
     title="class or interface in java.io">Closeable
     
     
     All Known Implementing Classes:
    @@ -111,7 +111,7 @@ var activeTableTab = "activeTableTab";
     
     @InterfaceAudience.Public
     public interface Table
    -extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
     title="class or interface in java.io">Closeable
    +extends https://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html?is-external=true;
     title="class or interface in java.io">Closeable
 Used to communicate with a single HBase table.
  Obtain an instance from a Connection and call close() afterwards.
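
A hedged sketch of typical Table usage as the description above suggests (obtain from a Connection, close afterwards); the connection setup and the "demo" table, "f" family and "q" qualifier names are assumptions for illustration only:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableDemo {
      public static void main(String[] args) throws Exception {
        // Connection is heavyweight and thread-safe; Table is a light per-use handle.
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("demo"))) {
          table.put(new Put(Bytes.toBytes("row1"))
              .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1")));
          Result r = table.get(new Get(Bytes.toBytes("row1")));
          System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("f"), Bytes.toBytes("q"))));
        }
      }
    }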
     
    @@ -151,21 +151,21 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
     
     
 void
-batch(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<? extends Row> actions,
- http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[] results)
+batch(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<? extends Row> actions,
+ https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[] results)
 Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
     
     
     
 <R> void
-batchCallback(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<? extends Row> actions,
- http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[] results,
+batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<? extends Row> actions,
+ https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[] results,
  org.apache.hadoop.hbase.client.coprocessor.Batch.Callback<R> callback)
 Same as batch(List, Object[]), but with a callback.
     
     
     
-<R extends com.google.protobuf.Message> http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Map<byte[], R>
+<R extends com.google.protobuf.Message> https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Map<byte[], R>
 batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
     com.google.protobuf.Message request,
     byte[] startKey,
    @@ -312,8 +312,8 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
     
     
     
-<T extends com.google.protobuf.Service, R> http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Map<byte[], R>
-coprocessorService(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class<T> service,
+<T extends com.google.protobuf.Service, R> https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Map<byte[], R>
+coprocessorService(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class<T> service,
   byte[] startKey,
   byte[] endKey,
   org.apache.hadoop.hbase.client.coprocessor.Batch.Call<T, R> callable)
    @@ -325,7 +325,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/Closeable.html
     
     
 <T extends com.google.protobuf.Service, R> void
-coprocessorService(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class<T> service,
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    index 7edb3ff..665071c 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
    @@ -1221,2378 +1221,2377 @@
 1213    configurationManager.registerObserver(procEnv);
 1214
 1215    int cpus = Runtime.getRuntime().availableProcessors();
-1216    final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
-1217        Math.max((cpus > 0 ? cpus / 4 : 0),
-1218            MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
-1219    final boolean abortOnCorruption = conf.getBoolean(
-1220        MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
-1221        MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
-1222    procedureStore.start(numThreads);
-1223    procedureExecutor.start(numThreads, abortOnCorruption);
-1224    procEnv.getRemoteDispatcher().start();
-1225  }
-1226
-1227  private void stopProcedureExecutor() {
-1228    if (procedureExecutor != null) {
-1229      configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
-1230      procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
-1231      procedureExecutor.stop();
-1232      procedureExecutor.join();
-1233      procedureExecutor = null;
-1234    }
-1235
-1236    if (procedureStore != null) {
-1237      procedureStore.stop(isAborted());
-1238      procedureStore = null;
-1239    }
-1240  }
-1241
-1242  private void stopChores() {
-1243    if (this.expiredMobFileCleanerChore != null) {
-1244      this.expiredMobFileCleanerChore.cancel(true);
-1245    }
-1246    if (this.mobCompactChore != null) {
-1247      this.mobCompactChore.cancel(true);
-1248    }
-1249    if (this.balancerChore != null) {
-1250      this.balancerChore.cancel(true);
-1251    }
-1252    if (this.normalizerChore != null) {
-1253      this.normalizerChore.cancel(true);
-1254    }
-1255    if (this.clusterStatusChore != null) {
-1256      this.clusterStatusChore.cancel(true);
-1257    }
-1258    if (this.catalogJanitorChore != null) {
-1259      this.catalogJanitorChore.cancel(true);
-1260    }
-1261    if (this.clusterStatusPublisherChore != null){
-1262      clusterStatusPublisherChore.cancel(true);
-1263    }
-1264    if (this.mobCompactThread != null) {
-1265      this.mobCompactThread.close();
-1266    }
-1267
-1268    if (this.quotaObserverChore != null) {
-1269      quotaObserverChore.cancel();
-1270    }
-1271    if (this.snapshotQuotaChore != null) {
-1272      snapshotQuotaChore.cancel();
-1273    }
-1274  }
-1275
-1276  /**
-1277   * @return Get remote side's InetAddress
-1278   */
-1279  InetAddress getRemoteInetAddress(final int port,
-1280      final long serverStartCode) throws UnknownHostException {
-1281    // Do it out here in its own little method so we can fake an address when
-1282    // mocking up in tests.
-1283    InetAddress ia = RpcServer.getRemoteIp();
-1284
-1285    // The call could be from the local regionserver,
-1286    // in which case, there is no remote address.
-1287    if (ia == null && serverStartCode == startcode) {
-1288      InetSocketAddress isa = rpcServices.getSocketAddress();
-1289      if (isa != null && isa.getPort() == port) {
-1290        ia = isa.getAddress();
-1291      }
-1292    }
-1293    return ia;
-1294  }
-1295
-1296  /**
-1297   * @return Maximum time we should run balancer for
-1298   */
-1299  private int getMaxBalancingTime() {
-1300    int maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, -1);
-1301    if (maxBalancingTime == -1) {
-1302      // if max balancing time isn't set, default it to the balancer period
-1303      maxBalancingTime = getConfiguration().getInt(HConstants.HBASE_BALANCER_PERIOD,
-1304          HConstants.DEFAULT_HBASE_BALANCER_PERIOD);
-1305    }
-1306    return maxBalancingTime;
-1307  }
-1308
-1309  /**
-1310   * @return Maximum number of regions in transition
-1311   */
-1312  private int getMaxRegionsInTransition() {
-1313    int numRegions = this.assignmentManager.getRegionStates().getRegionAssignments().size();
-1314    return Math.max((int) Math.floor(numRegions * this.maxRitPercent), 1);
-1315  }
-1316
-1317  /**
-1318   * It first sleeps to the next balance plan start time. Meanwhile, throttling by the max
-1319   * number of regions in transition to protect availability.
-1320   * @param nextBalanceStartTime The next balance plan start time
-1321   * @param maxRegionsInTransition max number of regions in 
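
The formula in getMaxRegionsInTransition() above caps concurrent regions in transition at maxRitPercent of all regions, but never below one. A hypothetical standalone check of that arithmetic (class name and sample values are illustrative only):

    public class RitThrottle {
      static int maxRegionsInTransition(int numRegions, double maxRitPercent) {
        return Math.max((int) Math.floor(numRegions * maxRitPercent), 1);
      }

      public static void main(String[] args) {
        System.out.println(maxRegionsInTransition(1000, 0.01)); // 10
        System.out.println(maxRegionsInTransition(50, 0.01));   // 1: floor(0.5) would be 0
      }
    }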

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
    index 802b925..a3e80ab 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
    @@ -73,229 +73,229 @@
     065import 
    java.util.concurrent.TimeoutException;
     066import 
    java.util.concurrent.atomic.AtomicBoolean;
     067import 
    java.util.concurrent.atomic.AtomicInteger;
    -068import 
    java.util.concurrent.atomic.AtomicLong;
    -069import 
    java.util.concurrent.atomic.LongAdder;
    -070import java.util.concurrent.locks.Lock;
    -071import 
    java.util.concurrent.locks.ReadWriteLock;
    -072import 
    java.util.concurrent.locks.ReentrantReadWriteLock;
    -073import java.util.function.Function;
    -074import 
    org.apache.hadoop.conf.Configuration;
    -075import org.apache.hadoop.fs.FileStatus;
    -076import org.apache.hadoop.fs.FileSystem;
    -077import 
    org.apache.hadoop.fs.LocatedFileStatus;
    -078import org.apache.hadoop.fs.Path;
    -079import org.apache.hadoop.hbase.Cell;
    -080import 
    org.apache.hadoop.hbase.CellBuilderType;
    -081import 
    org.apache.hadoop.hbase.CellComparator;
    -082import 
    org.apache.hadoop.hbase.CellComparatorImpl;
    -083import 
    org.apache.hadoop.hbase.CellScanner;
    -084import 
    org.apache.hadoop.hbase.CellUtil;
    -085import 
    org.apache.hadoop.hbase.CompareOperator;
    -086import 
    org.apache.hadoop.hbase.CompoundConfiguration;
    -087import 
    org.apache.hadoop.hbase.DoNotRetryIOException;
    -088import 
    org.apache.hadoop.hbase.DroppedSnapshotException;
    -089import 
    org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
    -090import 
    org.apache.hadoop.hbase.HConstants;
    -091import 
    org.apache.hadoop.hbase.HConstants.OperationStatusCode;
    -092import 
    org.apache.hadoop.hbase.HDFSBlocksDistribution;
    -093import 
    org.apache.hadoop.hbase.HRegionInfo;
    -094import 
    org.apache.hadoop.hbase.KeyValue;
    -095import 
    org.apache.hadoop.hbase.KeyValueUtil;
    -096import 
    org.apache.hadoop.hbase.NamespaceDescriptor;
    -097import 
    org.apache.hadoop.hbase.NotServingRegionException;
    -098import 
    org.apache.hadoop.hbase.PrivateCellUtil;
    -099import 
    org.apache.hadoop.hbase.RegionTooBusyException;
    -100import 
    org.apache.hadoop.hbase.TableName;
    -101import org.apache.hadoop.hbase.Tag;
    -102import org.apache.hadoop.hbase.TagUtil;
    -103import 
    org.apache.hadoop.hbase.UnknownScannerException;
    -104import 
    org.apache.hadoop.hbase.client.Append;
    -105import 
    org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    -106import 
    org.apache.hadoop.hbase.client.CompactionState;
    -107import 
    org.apache.hadoop.hbase.client.Delete;
    -108import 
    org.apache.hadoop.hbase.client.Durability;
    -109import 
    org.apache.hadoop.hbase.client.Get;
    -110import 
    org.apache.hadoop.hbase.client.Increment;
    -111import 
    org.apache.hadoop.hbase.client.IsolationLevel;
    -112import 
    org.apache.hadoop.hbase.client.Mutation;
    -113import 
    org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
    -114import 
    org.apache.hadoop.hbase.client.Put;
    -115import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -116import 
    org.apache.hadoop.hbase.client.RegionReplicaUtil;
    -117import 
    org.apache.hadoop.hbase.client.Result;
    -118import 
    org.apache.hadoop.hbase.client.RowMutations;
    -119import 
    org.apache.hadoop.hbase.client.Scan;
    -120import 
    org.apache.hadoop.hbase.client.TableDescriptor;
    -121import 
    org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    -122import 
    org.apache.hadoop.hbase.conf.ConfigurationManager;
    -123import 
    org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
    -124import 
    org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
    -125import 
    org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
    -126import 
    org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
    -127import 
    org.apache.hadoop.hbase.exceptions.TimeoutIOException;
    -128import 
    org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
    -129import 
    org.apache.hadoop.hbase.filter.ByteArrayComparable;
    -130import 
    org.apache.hadoop.hbase.filter.FilterWrapper;
    -131import 
    org.apache.hadoop.hbase.filter.IncompatibleFilterException;
    -132import 
    org.apache.hadoop.hbase.io.HFileLink;
    -133import 
    org.apache.hadoop.hbase.io.HeapSize;
    -134import 
    org.apache.hadoop.hbase.io.TimeRange;
    -135import 
    org.apache.hadoop.hbase.io.hfile.HFile;
    -136import 
    org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
    -137import 
    org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
    -138import 
    org.apache.hadoop.hbase.ipc.RpcCall;
    -139import 
    org.apache.hadoop.hbase.ipc.RpcServer;
    -140import 
    org.apache.hadoop.hbase.monitoring.MonitoredTask;
    -141import 
    org.apache.hadoop.hbase.monitoring.TaskMonitor;
    -142import 
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/hbase-build-configuration/hbase-archetypes/dependency-info.html
    --
    diff --git a/hbase-build-configuration/hbase-archetypes/dependency-info.html 
    b/hbase-build-configuration/hbase-archetypes/dependency-info.html
    index e9437e1..9e4ee47 100644
    --- a/hbase-build-configuration/hbase-archetypes/dependency-info.html
    +++ b/hbase-build-configuration/hbase-archetypes/dependency-info.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Archetypes  Dependency Information
     
    @@ -148,7 +148,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-02-16
    +  Last Published: 
    2018-02-17
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/hbase-build-configuration/hbase-archetypes/dependency-management.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/dependency-management.html 
    b/hbase-build-configuration/hbase-archetypes/dependency-management.html
    index 960eaa4..ac7bc36 100644
    --- a/hbase-build-configuration/hbase-archetypes/dependency-management.html
    +++ b/hbase-build-configuration/hbase-archetypes/dependency-management.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Archetypes  Project Dependency 
    Management
     
    @@ -810,7 +810,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-02-16
    +  Last Published: 
    2018-02-17
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependencies.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependencies.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependencies.html
    index eab7442..213185b 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependencies.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependencies.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Archetype builder  Project 
    Dependencies
     
    @@ -330,7 +330,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-02-16
    +  Last Published: 
    2018-02-17
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
    index 1d71236..f6ffd0b 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Archetype builder  Reactor Dependency 
    Convergence
     
    @@ -865,7 +865,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-02-16
    +  Last Published: 
    2018-02-17
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
    index a002a90..8ce8895 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Archetype builder  Dependency 
    Information
     
    @@ -148,7 +148,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2018-02-16
    +  Last 

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
     
    b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
    index ed15d9b..3d03e17 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
    @@ -248,7 +248,7 @@ the order they are declared.
     
     
     values
-public static WALProcedureStore.PushType[] values()
+public static WALProcedureStore.PushType[] values()
     Returns an array containing the constants of this enum 
    type, in
     the order they are declared.  This method may be used to iterate
     over the constants as follows:
    @@ -268,7 +268,7 @@ for (WALProcedureStore.PushType c : 
    WALProcedureStore.PushType.values())
     
     
     valueOf
-public static WALProcedureStore.PushType valueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String name)
+public static WALProcedureStore.PushType valueOf(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String name)
     Returns the enum constant of this type with the specified 
    name.
     The string must match exactly an identifier used to declare an
     enum constant in this type.  (Extraneous whitespace characters are 
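
As the javadoc above describes, values() supports iteration over an enum's constants and valueOf(String) resolves a constant by its exact name. A generic sketch of both calls (the PushType constants here are illustrative stand-ins, not the real ones):

    public class EnumDemo {
      enum PushType { INSERT, UPDATE, DELETE }  // hypothetical constants

      public static void main(String[] args) {
        for (PushType c : PushType.values()) {
          System.out.println(c);
        }
        // Throws IllegalArgumentException if the name does not match exactly.
        PushType p = PushType.valueOf("INSERT");
        System.out.println(p);
      }
    }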
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
     
    b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
    index c6f6a46..5bd2115 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/util/class-use/DelayedUtil.DelayedWithTimeout.html
    @@ -141,11 +141,11 @@
     
     
 private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/DelayQueue.html?is-external=true;
 title="class or interface in java.util.concurrent">DelayQueue<DelayedUtil.DelayedWithTimeout>
-ProcedureExecutor.TimeoutExecutorThread.queue
+RemoteProcedureDispatcher.TimeoutExecutorThread.queue
 
 
 private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/DelayQueue.html?is-external=true;
 title="class or interface in java.util.concurrent">DelayQueue<DelayedUtil.DelayedWithTimeout>
-RemoteProcedureDispatcher.TimeoutExecutorThread.queue
+ProcedureExecutor.TimeoutExecutorThread.queue
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html 
    b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
    index 934c2fa..dd6045b 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/MasterQuotaManager.html
    @@ -125,11 +125,11 @@
     
     
     MasterQuotaManager
    -MasterServices.getMasterQuotaManager()
    +HMaster.getMasterQuotaManager()
     
     
     MasterQuotaManager
    -HMaster.getMasterQuotaManager()
    +MasterServices.getMasterQuotaManager()
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html 
    b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
    index a495cd1..d81fa5e 100644
    --- a/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
    +++ b/devapidocs/org/apache/hadoop/hbase/quotas/class-use/QuotaFilter.html
    @@ -110,9 +110,7 @@
     
     
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuture<http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<QuotaSettings>>
-AsyncAdmin.getQuota(QuotaFilter filter)
-List the quotas based on the filter.
-
+AsyncHBaseAdmin.getQuota(QuotaFilter filter)
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<QuotaSettings>
    @@ -121,16 +119,18 @@
     
     
     
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html 
    b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
    index 93ad2d1..0fb72f0 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
    @@ -493,89 +493,89 @@ implements getBalancer()
     
     
-private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<RegionInfo>
-getCarryingSystemTables(ServerName serverName)
-
    -
     org.apache.hadoop.conf.Configuration
     getConfiguration()
     
    -
    +
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<ServerName>
     getExcludedServersForSystemTable()
     Get a list of servers that this region cannot be assigned 
    to.
     
     
    -
    +
     ProcedureEvent
     getFailoverCleanupEvent()
     
    -
    +
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<ServerName>
 getFavoredNodes(RegionInfo regionInfo)
     
    -
    +
     private RegionInfo
 getMetaForRegion(RegionInfo regionInfo)
     
    -
    +
     private ProcedureEvent
 getMetaInitializedEvent(RegionInfo metaRegionInfo)
     
    -
    +
     RegionInfo
 getMetaRegionFromName(byte[] regionName)
     
    -
    +
 http://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Set<RegionInfo>
     getMetaRegionSet()
     
    -
    +
     int
     getNumRegionsOpened()
     
    -
    +
     private MasterProcedureEnv
     getProcedureEnvironment()
     
    -
    +
     private MasterProcedureScheduler
     getProcedureScheduler()
     
    -
    +
     RegionInfo
 getRegionInfo(byte[] regionName)
     
    -
    +
     RegionNormalizer
     getRegionNormalizer()
     
    -
    +
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<RegionStates.RegionStateNode>
     getRegionsInTransition()
     
    -
    +
     RegionStates
     getRegionStates()
     
    -
    +
     RegionStateStore
     getRegionStateStore()
     
    -
    +
 Pair<http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer,http://docs.oracle.com/javase/8/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer>
 getReopenStatus(TableName tableName)
     Used by the client (via master) to identify if all regions 
    have the schema updates
     
     
    -
    +
     int
 getServerVersion(ServerName serverName)
     
    -
    +
 http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Map<ServerName,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<RegionInfo>>
 getSnapShotOfAssignment(http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">Collection<RegionInfo> regions)
     
    +
+private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<RegionInfo>
+getSystemTables(ServerName serverName)
    +
     
     (package private) TableStateManager
     getTableStateManager()
    @@ -1279,7 +1279,7 @@ implements 
     
     ASSIGN_PROCEDURE_ARRAY_TYPE
-private static final AssignProcedure[] ASSIGN_PROCEDURE_ARRAY_TYPE
+private static final AssignProcedure[] ASSIGN_PROCEDURE_ARRAY_TYPE
     
     
     
    @@ -1288,7 +1288,7 @@ implements 
     
     UNASSIGN_PROCEDURE_ARRAY_TYPE
-private static final UnassignProcedure[] UNASSIGN_PROCEDURE_ARRAY_TYPE
+private static final UnassignProcedure[] UNASSIGN_PROCEDURE_ARRAY_TYPE
     
     
     
    @@ -1297,7 +1297,7 @@ implements 
     
     pendingAssignQueue
-private final http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
 title="class or interface in java.util">ArrayList<RegionStates.RegionStateNode> pendingAssignQueue
+private final http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true;
 title="class or interface in java.util">ArrayList<RegionStates.RegionStateNode> pendingAssignQueue
     
     
     
    @@ -1306,7 +1306,7 @@ implements 
     
     assignQueueLock
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">ReentrantLock assignQueueLock
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantLock.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">ReentrantLock assignQueueLock
     
     
     
    @@ -1315,7 +1315,7 @@ implements 
     
     assignQueueFullCond
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/Condition.html?is-external=true;
 title="class or interface in java.util.concurrent.locks">Condition 

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    index 29b9507..bd8ccff 100644
    --- a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    +++ b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    @@ -132,13 +132,13 @@
     
     
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<Procedure<?>>
-MasterServices.getProcedures()
-Get procedures
-
+HMaster.getProcedures()
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<Procedure<?>>
-HMaster.getProcedures()
+MasterServices.getProcedures()
+Get procedures
+
     
     
     
    @@ -920,44 +920,44 @@
     
     
     
-protected Procedure
-SimpleProcedureScheduler.dequeue()
-
-
 protected abstract Procedure
 AbstractProcedureScheduler.dequeue()
 Fetch one Procedure from the queue
  NOTE: this method is called with the sched lock held.
 
 
-
-protected Procedure[]
-SequentialProcedure.doExecute(TEnvironment env)
-
 
+protected Procedure
+SimpleProcedureScheduler.dequeue()
+
+
 protected Procedure<TEnvironment>[]
 Procedure.doExecute(TEnvironment env)
 Internal method called by the ProcedureExecutor that starts the user-level code execute().
 
 
-
-protected Procedure<TEnvironment>[]
-ProcedureExecutor.FailedProcedure.execute(TEnvironment env)
-
 
 protected Procedure[]
+SequentialProcedure.doExecute(TEnvironment env)
+
+
+protected Procedure[]
 StateMachineProcedure.execute(TEnvironment env)
 
+
+protected Procedure<TEnvironment>[]
+ProcedureInMemoryChore.execute(TEnvironment env)
+
 
+protected Procedure<TEnvironment>[]
+ProcedureExecutor.FailedProcedure.execute(TEnvironment env)
+
+
 protected abstract Procedure<TEnvironment>[]
 Procedure.execute(TEnvironment env)
 The main code of the procedure.
 
 
-
-protected Procedure<TEnvironment>[]
-ProcedureInMemoryChore.execute(TEnvironment env)
-
 
 Procedure<?>
 LockedResource.getExclusiveLockOwnerProcedure()
    @@ -1115,14 +1115,14 @@
     
     
 void
-SimpleProcedureScheduler.completionCleanup(Procedure proc)
-
-
-void
 ProcedureScheduler.completionCleanup(Procedure proc)
 The procedure in execution completed.
 
 
+
+void
+SimpleProcedureScheduler.completionCleanup(Procedure proc)
+
     
     static 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure
 ProcedureUtil.convertToProtoProcedure(Procedure proc)
    @@ -1135,17 +1135,17 @@
  Procedure procedure)
     
     
-protected void
-SimpleProcedureScheduler.enqueue(Procedure procedure,
-   boolean addFront)
-
-
 protected abstract void
 AbstractProcedureScheduler.enqueue(Procedure procedure,
    boolean addFront)
 Add the procedure to the queue.
 
 
+
+protected void
+SimpleProcedureScheduler.enqueue(Procedure procedure,
+   boolean addFront)
+
     
     private void
 ProcedureExecutor.execCompletionCleanup(Procedure proc)
    @@ -1327,14 +1327,14 @@
     
     
 void
-SimpleProcedureScheduler.yield(Procedure proc)
-
-
-void
 ProcedureScheduler.yield(Procedure proc)
 The procedure can't run at the moment.
 
 
+
+void
+SimpleProcedureScheduler.yield(Procedure proc)
+
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureEvent.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureEvent.html 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureEvent.html
    index a9a3870..5f35947 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureEvent.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureEvent.html
    @@ -141,11 +141,11 @@
     
     
 ProcedureEvent<?>
-MasterServices.getInitializedEvent()
+HMaster.getInitializedEvent()
 
 
 ProcedureEvent<?>
-HMaster.getInitializedEvent()
+MasterServices.getInitializedEvent()
 
 
 ProcedureEvent<?>
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
     
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
    index 9c6d034..8c7413e 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
    @@ -121,11 +121,11 @@
     
     
 ProcedureExecutor<MasterProcedureEnv>
-MasterServices.getMasterProcedureExecutor()
+HMaster.getMasterProcedureExecutor()
 
 
 ProcedureExecutor<MasterProcedureEnv>
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
    b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
    index 49b9aea..2c232be 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
    @@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
     
     
     @InterfaceAudience.LimitedPrivate(value="Tools")
    -public class HMaster
    +public class HMaster
     extends HRegionServer
     implements MasterServices
     HMaster is the "master server" for HBase. An HBase cluster 
    has one active
    @@ -1471,7 +1471,7 @@ implements 
     
     LOG
-private static org.slf4j.Logger LOG
+private static org.slf4j.Logger LOG
     
     
     
    @@ -1480,7 +1480,7 @@ implements 
     
     MASTER
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MASTER
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String MASTER
     
     See Also:
     Constant
     Field Values
    @@ -1493,7 +1493,7 @@ implements 
     
     activeMasterManager
-private final ActiveMasterManager activeMasterManager
+private final ActiveMasterManager activeMasterManager
     
     
     
    @@ -1502,7 +1502,7 @@ implements 
     
     regionServerTracker
    -RegionServerTracker regionServerTracker
    +RegionServerTracker regionServerTracker
     
     
     
    @@ -1511,7 +1511,7 @@ implements 
     
     drainingServerTracker
-private DrainingServerTracker drainingServerTracker
+private DrainingServerTracker drainingServerTracker
     
     
     
    @@ -1520,7 +1520,7 @@ implements 
     
     loadBalancerTracker
    -LoadBalancerTracker loadBalancerTracker
    +LoadBalancerTracker loadBalancerTracker
     
     
     
    @@ -1529,7 +1529,7 @@ implements 
     
     splitOrMergeTracker
-private SplitOrMergeTracker splitOrMergeTracker
+private SplitOrMergeTracker splitOrMergeTracker
     
     
     
    @@ -1538,7 +1538,7 @@ implements 
     
     regionNormalizerTracker
-private RegionNormalizerTracker regionNormalizerTracker
+private RegionNormalizerTracker regionNormalizerTracker
     
     
     
    @@ -1547,7 +1547,7 @@ implements 
     
     maintenanceModeTracker
-private MasterMaintenanceModeTracker maintenanceModeTracker
+private MasterMaintenanceModeTracker maintenanceModeTracker
     
     
     
    @@ -1556,7 +1556,7 @@ implements 
     
     clusterSchemaService
-private ClusterSchemaService clusterSchemaService
+private ClusterSchemaService clusterSchemaService
     
     
     
    @@ -1565,7 +1565,7 @@ implements 
     
     HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
-public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
+public static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
     
     See Also:
     Constant
     Field Values
    @@ -1578,7 +1578,7 @@ implements 
     
     DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
-public static final int DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
+public static final int DEFAULT_HBASE_MASTER_WAIT_ON_SERVICE_IN_SECONDS
     
     See Also:
     Constant
     Field Values
    @@ -1591,7 +1591,7 @@ implements 
     
     metricsMaster
-final MetricsMaster metricsMaster
+final MetricsMaster metricsMaster
     
     
     
    @@ -1600,7 +1600,7 @@ implements 
     
     fileSystemManager
-private MasterFileSystem fileSystemManager
+private MasterFileSystem fileSystemManager
     
     
     
    @@ -1609,7 +1609,7 @@ implements 
     
     walManager
-private MasterWalManager walManager
+private MasterWalManager walManager
     
     
     
    @@ -1618,7 +1618,7 @@ implements 
     
     serverManager
-private volatile ServerManager serverManager
+private volatile ServerManager serverManager
     
     
     
    @@ -1627,7 +1627,7 @@ implements 
     
     assignmentManager
-private AssignmentManager assignmentManager
+private AssignmentManager assignmentManager
     
     
     
    @@ -1636,7 +1636,7 @@ implements 
     
     replicationPeerManager
-private ReplicationPeerManager replicationPeerManager
+private ReplicationPeerManager replicationPeerManager
     
     
     
    @@ -1645,7 +1645,7 @@ implements 
     
     rsFatals
    -MemoryBoundedLogMessageBuffer rsFatals
    +MemoryBoundedLogMessageBuffer rsFatals
     
     
     
    @@ -1654,7 +1654,7 @@ implements 
     
     activeMaster
-private volatile boolean activeMaster
+private volatile boolean activeMaster
     
     
     
    @@ -1663,7 +1663,7 @@ implements 
     
     initialized
-private final ProcedureEvent<?> initialized
+private final ProcedureEvent<?> initialized
     
     
     
    @@ -1672,7 +1672,7 @@ implements 
     
     serviceStarted
-volatile boolean serviceStarted
+volatile boolean serviceStarted
     
     
     
    @@ -1681,7 +1681,7 @@ implements 
     
     serverCrashProcessingEnabled
-private final ProcedureEvent<?> serverCrashProcessingEnabled
+private final ProcedureEvent<?> serverCrashProcessingEnabled
     
     
     
    @@ -1690,7 +1690,7 @@ implements 
     
     maxBlancingTime
-private final int 


    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
     
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
    index 733e376..01a50f5 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
    @@ -120,19 +120,19 @@
     
     
 protected void
-UnassignProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+MoveRegionProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-MoveRegionProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+GCMergedRegionsProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-GCRegionProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+AssignProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-GCMergedRegionsProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+GCRegionProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
    @@ -144,23 +144,23 @@
     
     
 protected void
-AssignProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+UnassignProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-UnassignProcedure.serializeStateData(ProcedureStateSerializer serializer)
+MoveRegionProcedure.serializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-MoveRegionProcedure.serializeStateData(ProcedureStateSerializer serializer)
+GCMergedRegionsProcedure.serializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-GCRegionProcedure.serializeStateData(ProcedureStateSerializer serializer)
+AssignProcedure.serializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-GCMergedRegionsProcedure.serializeStateData(ProcedureStateSerializer serializer)
+GCRegionProcedure.serializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
@@ -172,7 +172,7 @@
 
 
 protected void
-AssignProcedure.serializeStateData(ProcedureStateSerializer serializer)
+UnassignProcedure.serializeStateData(ProcedureStateSerializer serializer)
     
     
     
    @@ -212,115 +212,115 @@
     
     
 protected void
-DeleteTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+CloneSnapshotProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-DisableTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+DeleteTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-DeleteNamespaceProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+TruncateTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-CreateNamespaceProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+DeleteNamespaceProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-EnableTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+ServerCrashProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-CreateTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+DisableTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-ModifyNamespaceProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+RecoverMetaProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-AbstractStateMachineRegionProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+CreateNamespaceProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-CloneSnapshotProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+AbstractStateMachineRegionProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-ServerCrashProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+EnableTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-ModifyTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+CreateTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-RecoverMetaProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+ModifyNamespaceProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-RestoreSnapshotProcedure.deserializeStateData(ProcedureStateSerializer serializer)
+ModifyTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
 
 
 protected void
-TruncateTableProcedure.deserializeStateData(ProcedureStateSerializer serializer)
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.Task.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
    index 2939a56..681e263 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.Task.html
    @@ -61,602 +61,608 @@
 053import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 054import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 055import org.apache.hadoop.hbase.util.FSUtils;
-056import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-057import org.apache.yetus.audience.InterfaceAudience;
-058import org.slf4j.Logger;
-059import org.slf4j.LoggerFactory;
-060import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-061
-062/**
-063 * Distributes the task of log splitting to the available region servers.
-064 * Coordination happens via coordination engine. For every log file that has to be split a
-065 * task is created. SplitLogWorkers race to grab a task.
-066 *
-067 * <p>SplitLogManager monitors the tasks that it creates using the
-068 * timeoutMonitor thread. If a task's progress is slow then
-069 * {@link SplitLogManagerCoordination#checkTasks} will take away the
-070 * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}
-071 * and the task will be up for grabs again. When the task is done then it is
-072 * deleted by SplitLogManager.
-073 *
-074 * <p>Clients call {@link #splitLogDistributed(Path)} to split a region server's
-075 * log files. The caller thread waits in this method until all the log files
-076 * have been split.
-077 *
-078 * <p>All the coordination calls made by this class are asynchronous. This is mainly
-079 * to help reduce response time seen by the callers.
-080 *
-081 * <p>There is a race in this design between the SplitLogManager and the
-082 * SplitLogWorker. SplitLogManager might re-queue a task that has in reality
-083 * already been completed by a SplitLogWorker. We rely on the idempotency of
-084 * the log splitting task for correctness.
-085 *
-086 * <p>It is also assumed that every log splitting task is unique and once
-087 * completed (either with success or with error) it will not be submitted
-088 * again. If a task is resubmitted then there is a risk that old "delete task"
-089 * can delete the re-submission.
-090 */
-091@InterfaceAudience.Private
-092public class SplitLogManager {
-093  private static final Logger LOG = LoggerFactory.getLogger(SplitLogManager.class);
-094
-095  private final MasterServices server;
-096
-097  private final Configuration conf;
-098  private final ChoreService choreService;
-099
-100  public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min
-101
-102  private long unassignedTimeout;
-103  private long lastTaskCreateTime = Long.MAX_VALUE;
-104
-105  @VisibleForTesting
-106  final ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<>();
-107  private TimeoutMonitor timeoutMonitor;
-108
-109  private volatile Set<ServerName> deadWorkers = null;
-110  private final Object deadWorkersLock = new Object();
-111
-112  /**
-113   * It's OK to construct this object even when region-servers are not online. It does look up the
-114   * orphan tasks in the coordination engine but it doesn't block waiting for them to be done.
-115   * @param master the master services
-116   * @param conf the HBase configuration
-117   * @throws IOException
-118   */
-119  public SplitLogManager(MasterServices master, Configuration conf)
-120      throws IOException {
-121    this.server = master;
-122    this.conf = conf;
-123    this.choreService = new ChoreService(master.getServerName() + "_splitLogManager_");
-124    if (server.getCoordinatedStateManager() != null) {
-125      SplitLogManagerCoordination coordination = getSplitLogManagerCoordination();
-126      Set<String> failedDeletions = Collections.synchronizedSet(new HashSet<String>());
-127      SplitLogManagerDetails details = new SplitLogManagerDetails(tasks, master, failedDeletions);
-128      coordination.setDetails(details);
-129      coordination.init();
-130    }
-131    this.unassignedTimeout =
-132        conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT);
-133    this.timeoutMonitor =
-134        new TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 1000),
-135            master);
-136    choreService.scheduleChore(timeoutMonitor);
-137  }
-138
-139  private SplitLogManagerCoordination getSplitLogManagerCoordination() {
-140    return server.getCoordinatedStateManager().getSplitLogManagerCoordination();
-141  }
-142
-143  private FileStatus[]
    
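A toy model, not HBase code, of the coordination pattern that class comment describes: one task per WAL file, workers race to own tasks, and a chore re-queues tasks whose owner stalls; re-queueing is only safe because log splitting is idempotent. The 3-minute constant mirrors DEFAULT_UNASSIGNED_TIMEOUT above; everything else is illustrative:

import java.util.concurrent.*;

public class SplitTaskMonitorSketch {
  enum State { UNASSIGNED, OWNED, DONE }

  static final class Task {
    volatile State state = State.UNASSIGNED;
    volatile long lastUpdate = System.currentTimeMillis();
  }

  static final ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<>();
  static final long UNASSIGNED_TIMEOUT_MS = 3 * 60 * 1000; // mirrors DEFAULT_UNASSIGNED_TIMEOUT

  public static void main(String[] args) throws InterruptedException {
    Task stale = new Task();
    stale.state = State.OWNED;                       // a worker grabbed it...
    stale.lastUpdate -= UNASSIGNED_TIMEOUT_MS + 1;   // ...then stopped making progress
    tasks.put("wal-1", stale);

    ScheduledExecutorService chore = Executors.newSingleThreadScheduledExecutor();
    // Analogue of the timeoutMonitor chore: reclaim tasks whose owner went quiet.
    chore.scheduleAtFixedRate(() -> tasks.values().forEach(t -> {
      if (t.state == State.OWNED
          && System.currentTimeMillis() - t.lastUpdate > UNASSIGNED_TIMEOUT_MS) {
        t.state = State.UNASSIGNED; // up for grabs again; splitting is idempotent
      }
    }), 10, 10, TimeUnit.MILLISECONDS);

    Thread.sleep(100);
    System.out.println("wal-1 is " + tasks.get("wal-1").state); // UNASSIGNED again
    chore.shutdownNow();
  }
}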

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
    index b05cc00..c352c2f 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
    @@ -271,555 +271,561 @@
 263    return Flow.HAS_MORE_STATE;
 264  }
 265
-266  @Override
-267  protected void rollbackState(final MasterProcedureEnv env, final SplitTableRegionState state)
-268      throws IOException, InterruptedException {
-269    if (isTraceEnabled()) {
-270      LOG.trace(this + " rollback state=" + state);
-271    }
-272
-273    try {
-274      switch (state) {
-275      case SPLIT_TABLE_REGION_POST_OPERATION:
-276      case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
-277      case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
-278      case SPLIT_TABLE_REGION_UPDATE_META:
-279        // PONR
-280        throw new UnsupportedOperationException(this + " unhandled state=" + state);
-281      case SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
-282        break;
-283      case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
-284        // Doing nothing, as re-open parent region would clean up daughter region directories.
-285        break;
-286      case SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
-287        openParentRegion(env);
+266  /**
+267   * To rollback {@link SplitTableRegionProcedure}, an AssignProcedure is asynchronously
+268   * submitted for the parent region to be split (rollback doesn't wait on the completion of the
+269   * AssignProcedure). This can be improved by changing rollback() to support sub-procedures.
+270   * See HBASE-19851 for details.
+271   */
+272  @Override
+273  protected void rollbackState(final MasterProcedureEnv env, final SplitTableRegionState state)
+274      throws IOException, InterruptedException {
+275    if (isTraceEnabled()) {
+276      LOG.trace(this + " rollback state=" + state);
+277    }
+278
+279    try {
+280      switch (state) {
+281      case SPLIT_TABLE_REGION_POST_OPERATION:
+282      case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
+283      case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
+284      case SPLIT_TABLE_REGION_UPDATE_META:
+285        // PONR
+286        throw new UnsupportedOperationException(this + " unhandled state=" + state);
+287      case SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
 288        break;
-289      case SPLIT_TABLE_REGION_PRE_OPERATION:
-290        postRollBackSplitRegion(env);
+289      case SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
+290        // Doing nothing, as re-open parent region would clean up daughter region directories.
 291        break;
-292      case SPLIT_TABLE_REGION_PREPARE:
-293        break; // nothing to do
-294      default:
-295        throw new UnsupportedOperationException(this + " unhandled state=" + state);
-296      }
-297    } catch (IOException e) {
-298      // This will be retried. Unless there is a bug in the code,
-299      // this should be just a "temporary error" (e.g. network down)
-300      LOG.warn("pid=" + getProcId() + " failed rollback attempt step " + state +
-301          " for splitting the region "
-302          + getParentRegion().getEncodedName() + " in table " + getTableName(), e);
-303      throw e;
-304    }
-305  }
-306
-307  /*
-308   * Check whether we are in a state that can be rolled back
-309   */
-310  @Override
-311  protected boolean isRollbackSupported(final SplitTableRegionState state) {
-312    switch (state) {
-313      case SPLIT_TABLE_REGION_POST_OPERATION:
-314      case SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
-315      case SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
-316      case SPLIT_TABLE_REGION_UPDATE_META:
-317        // It is not safe to rollback if we reach these states.
-318        return false;
-319      default:
-320        break;
-321    }
-322    return true;
-323  }
-324
-325  @Override
-326  protected SplitTableRegionState getState(final int stateId) {
-327    return SplitTableRegionState.forNumber(stateId);
-328  }
-329
-330  @Override
-331  protected int getStateId(final SplitTableRegionState state) {
-332    return state.getNumber();
-333  }
-334
-335  @Override
-336  protected SplitTableRegionState getInitialState() {
-337    return SplitTableRegionState.SPLIT_TABLE_REGION_PREPARE;
-338  }
-339
-340  @Override
-341  protected void serializeStateData(ProcedureStateSerializer serializer)
-342      throws IOException {
-343    super.serializeStateData(serializer);
-344
-345    final MasterProcedureProtos.SplitTableRegionStateData.Builder
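A standalone sketch of the rollbackState()/isRollbackSupported() dispatch shown in this hunk: states past the point of no return (PONR) refuse rollback, while earlier states undo their own side effects. The enum is abbreviated, and the println stands in for the asynchronous AssignProcedure submission noted in the new javadoc:

public class RollbackDispatchSketch {
  enum SplitState { PREPARE, PRE_OPERATION, CLOSE_PARENT, CREATE_DAUGHTERS, UPDATE_META, POST_OPERATION }

  static boolean isRollbackSupported(SplitState s) {
    switch (s) {
      case UPDATE_META:
      case POST_OPERATION:
        return false; // past the PONR: meta already updated, cannot safely undo
      default:
        return true;
    }
  }

  static void rollbackState(SplitState s) {
    switch (s) {
      case UPDATE_META:
      case POST_OPERATION:
        throw new UnsupportedOperationException("unhandled state=" + s);
      case CLOSE_PARENT:
        System.out.println("re-open parent region (async assign; see HBASE-19851)");
        break;
      case CREATE_DAUGHTERS:
        break; // nothing to do: re-opening the parent cleans daughter directories
      default:
        break;
    }
  }

  public static void main(String[] args) {
    for (SplitState s : SplitState.values()) {
      System.out.println(s + " rollbackSupported=" + isRollbackSupported(s));
    }
  }
}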

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionUtils.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionUtils.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionUtils.html
    index 234985c..a36b85e 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionUtils.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestConnectionUtils.html
    @@ -75,13 +75,13 @@ var activeTableTab = "activeTableTab";
     
     Summary:
     Nested|
    -Field|
    +Field|
     Constr|
     Method
     
     
     Detail:
    -Field|
    +Field|
     Constr|
     Method
     
    @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-public class TestConnectionUtils
+public class TestConnectionUtils
 extends java.lang.Object
     
     
    @@ -117,6 +117,25 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     
    +
    +
    +
    +
    +
    +Field Summary
    +
    +Fields
    +
    +Modifier and Type
    +Field and Description
    +
    +
    +static HBaseClassTestRule
    +CLASS_RULE
    +
    +
    +
    +
     
     
     
    @@ -170,6 +189,23 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     
    +
    +
    +
    +
    +
    +Field Detail
    +
    +
    +
    +
    +
    +CLASS_RULE
 public static final HBaseClassTestRule CLASS_RULE
    +
    +
    +
    +
     
     
     
    @@ -182,7 +218,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     TestConnectionUtils
-public TestConnectionUtils()
+public TestConnectionUtils()
     
     
     
    @@ -199,7 +235,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     testRetryTimeJitter
-public void testRetryTimeJitter()
+public void testRetryTimeJitter()
     
     
     
    @@ -208,7 +244,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     testGetPauseTime
-public void testGetPauseTime()
+public void testGetPauseTime()
     
     
     
    @@ -265,13 +301,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     Summary:
     Nested|
    -Field|
    +Field|
     Constr|
     Method
     
     
     Detail:
    -Field|
    +Field|
     Constr|
     Method
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestDelayingRunner.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestDelayingRunner.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestDelayingRunner.html
    index d133c31..f0d2d8e 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestDelayingRunner.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestDelayingRunner.html
    @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-public class TestDelayingRunner
+public class TestDelayingRunner
 extends java.lang.Object
     
     
    @@ -130,18 +130,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     Field and Description
     
     
    +static HBaseClassTestRule
    +CLASS_RULE
    +
    +
     private static byte[]
     DUMMY_BYTES_1
     
    -
    +
     private static byte[]
     DUMMY_BYTES_2
     
    -
    +
     private static 
    org.apache.hadoop.hbase.TableName
     DUMMY_TABLE
     
    -
    +
     private static 
    org.apache.hadoop.hbase.HRegionInfo
     hri1
     
    @@ -203,13 +207,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     Field Detail
    +
    +
    +
    +
    +
    +CLASS_RULE
 public static final HBaseClassTestRule CLASS_RULE
    +
    +
     
     
     
     
     
     DUMMY_TABLE
-private static final org.apache.hadoop.hbase.TableName DUMMY_TABLE
+private static final org.apache.hadoop.hbase.TableName DUMMY_TABLE
     
     
     
    @@ -218,7 +231,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     DUMMY_BYTES_1
-private static final byte[] DUMMY_BYTES_1
+private static final byte[] DUMMY_BYTES_1
     
     
     
    @@ -227,7 +240,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     DUMMY_BYTES_2
-private static final byte[] DUMMY_BYTES_2
+private static final byte[] DUMMY_BYTES_2
     
     
     
    @@ -236,7 +249,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     hri1
-private static org.apache.hadoop.hbase.HRegionInfo hri1
+private static org.apache.hadoop.hbase.HRegionInfo hri1
     
     
     
    @@ -253,7 +266,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     TestDelayingRunner
-public TestDelayingRunner()
+public TestDelayingRunner()
     
     
     
    @@ -270,7 +283,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     testDelayingRunner
-public void testDelayingRunner()
+public void testDelayingRunner()
                         throws java.lang.Exception
     
     Throws:
    
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.BackupSubprocedureBuilder.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.BackupSubprocedureBuilder.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.BackupSubprocedureBuilder.html
    index 2d7a3e0..2c2ce10 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.BackupSubprocedureBuilder.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.BackupSubprocedureBuilder.html
    @@ -35,14 +35,14 @@
 027import org.apache.hadoop.hbase.backup.impl.BackupManager;
 028import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
 029import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
-030import org.apache.yetus.audience.InterfaceAudience;
-031import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-032import org.apache.hadoop.hbase.procedure.ProcedureMember;
-033import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;
-034import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
-035import org.apache.hadoop.hbase.procedure.Subprocedure;
-036import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
-037import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+030import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+031import org.apache.hadoop.hbase.procedure.ProcedureMember;
+032import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;
+033import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
+034import org.apache.hadoop.hbase.procedure.Subprocedure;
+035import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
+036import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+037import org.apache.yetus.audience.InterfaceAudience;
 038import org.apache.zookeeper.KeeperException;
 039import org.slf4j.Logger;
 040import org.slf4j.LoggerFactory;
@@ -60,134 +60,129 @@
 052 */
 053@InterfaceAudience.Private
 054public class LogRollRegionServerProcedureManager extends RegionServerProcedureManager {
-055
-056  private static final Logger LOG =
-057      LoggerFactory.getLogger(LogRollRegionServerProcedureManager.class);
-058
-059  /** Conf key for number of request threads to start backup on region servers */
-060  public static final String BACKUP_REQUEST_THREADS_KEY = "hbase.backup.region.pool.threads";
-061  /** # of threads for backup work on the rs. */
-062  public static final int BACKUP_REQUEST_THREADS_DEFAULT = 10;
-063
-064  public static final String BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.timeout";
-065  public static final long BACKUP_TIMEOUT_MILLIS_DEFAULT = 60000;
-066
-067  /** Conf key for millis between checks to see if backup work completed or if there are errors */
-068  public static final String BACKUP_REQUEST_WAKE_MILLIS_KEY = "hbase.backup.region.wakefrequency";
-069  /** Default amount of time to check for errors while regions finish backup work */
-070  private static final long BACKUP_REQUEST_WAKE_MILLIS_DEFAULT = 500;
-071
-072  private RegionServerServices rss;
-073  private ProcedureMemberRpcs memberRpcs;
-074  private ProcedureMember member;
-075  private boolean started = false;
-076
-077  /**
-078   * Create a default backup procedure manager
-079   */
-080  public LogRollRegionServerProcedureManager() {
-081  }
-082
-083  /**
-084   * Start accepting backup procedure requests.
-085   */
-086  @Override
-087  public void start() {
-088    if (!BackupManager.isBackupEnabled(rss.getConfiguration())) {
-089      LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY
-090          + " setting");
-091      return;
-092    }
-093    this.memberRpcs.start(rss.getServerName().toString(), member);
-094    started = true;
-095    LOG.info("Started region server backup manager.");
-096  }
-097
-098  /**
-099   * Close <tt>this</tt> and all running backup procedure tasks
-100   * @param force forcefully stop all running tasks
-101   * @throws IOException exception
-102   */
-103  @Override
-104  public void stop(boolean force) throws IOException {
-105    if (!started) {
-106      return;
-107    }
-108    String mode = force ? "abruptly" : "gracefully";
-109    LOG.info("Stopping RegionServerBackupManager " + mode + ".");
-110
-111    try {
-112      this.member.close();
-113    } finally {
-114      this.memberRpcs.close();
-115    }
-116  }
-117
-118  /**
-119   * If in a running state, creates the specified subprocedure for handling a backup procedure.
-120   * @return Subprocedure to submit to the ProcedureMember.
-121   */
    
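For reference, a hedged sketch of how the configuration knobs declared above would be read. The thread and wake defaults mirror the listing; the 60000 ms timeout default is an assumption, since the archived line is garbled:

import org.apache.hadoop.conf.Configuration;

public class BackupConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    int threads = conf.getInt("hbase.backup.region.pool.threads", 10);
    long timeoutMs = conf.getLong("hbase.backup.timeout", 60000L); // assumed default
    long wakeMs = conf.getLong("hbase.backup.region.wakefrequency", 500L);
    System.out.printf("backup pool=%d timeout=%dms wake=%dms%n", threads, timeoutMs, wakeMs);
  }
}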

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatHRegion.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatHRegion.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatHRegion.html
    index 281c243..1a84ee1 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatHRegion.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestScannerHeartbeatMessages.HeartbeatHRegion.html
    @@ -152,433 +152,461 @@
 144
 145  /**
 146   * Make puts to put the input value into each combination of row, family, and qualifier
-147   * @param rows
-148   * @param families
-149   * @param qualifiers
-150   * @param value
-151   * @return
-152   * @throws IOException
-153   */
-154  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
-155      byte[] value) throws IOException {
-156    Put put;
-157    ArrayList<Put> puts = new ArrayList<>();
-158
-159    for (int row = 0; row < rows.length; row++) {
-160      put = new Put(rows[row]);
-161      for (int fam = 0; fam < families.length; fam++) {
-162        for (int qual = 0; qual < qualifiers.length; qual++) {
-163          KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
-164          put.add(kv);
-165        }
-166      }
-167      puts.add(put);
-168    }
-169
-170    return puts;
-171  }
-172
-173  @AfterClass
-174  public static void tearDownAfterClass() throws Exception {
-175    TEST_UTIL.shutdownMiniCluster();
-176  }
-177
-178  @Before
-179  public void setupBeforeTest() throws Exception {
-180    disableSleeping();
-181  }
-182
-183  @After
-184  public void teardownAfterTest() throws Exception {
-185    disableSleeping();
-186  }
-187
-188  /**
-189   * Run the test callable when heartbeats are enabled/disabled. We expect all tests to only pass
-190   * when heartbeat messages are enabled (otherwise the test is pointless). When heartbeats are
-191   * disabled, the test should throw an exception.
-192   * @param testCallable
-193   * @throws InterruptedException
-194   */
-195  private void testImportanceOfHeartbeats(Callable<Void> testCallable) throws InterruptedException {
-196    HeartbeatRPCServices.heartbeatsEnabled = true;
-197
+147   */
+148  static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
+149      byte[] value) throws IOException {
+150    Put put;
+151    ArrayList<Put> puts = new ArrayList<>();
+152
+153    for (int row = 0; row < rows.length; row++) {
+154      put = new Put(rows[row]);
+155      for (int fam = 0; fam < families.length; fam++) {
+156        for (int qual = 0; qual < qualifiers.length; qual++) {
+157          KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
+158          put.add(kv);
+159        }
+160      }
+161      puts.add(put);
+162    }
+163
+164    return puts;
+165  }
+166
+167  @AfterClass
+168  public static void tearDownAfterClass() throws Exception {
+169    TEST_UTIL.shutdownMiniCluster();
+170  }
+171
+172  @Before
+173  public void setupBeforeTest() throws Exception {
+174    disableSleeping();
+175  }
+176
+177  @After
+178  public void teardownAfterTest() throws Exception {
+179    disableSleeping();
+180  }
+181
+182  /**
+183   * Run the test callable when heartbeats are enabled/disabled. We expect all tests to only pass
+184   * when heartbeat messages are enabled (otherwise the test is pointless). When heartbeats are
+185   * disabled, the test should throw an exception.
+186   */
+187  private void testImportanceOfHeartbeats(Callable<Void> testCallable) throws InterruptedException {
+188    HeartbeatRPCServices.heartbeatsEnabled = true;
+189
+190    try {
+191      testCallable.call();
+192    } catch (Exception e) {
+193      fail("Heartbeat messages are enabled, exceptions should NOT be thrown. Exception trace:"
+194          + ExceptionUtils.getStackTrace(e));
+195    }
+196
+197    HeartbeatRPCServices.heartbeatsEnabled = false;
 198    try {
 199      testCallable.call();
 200    } catch (Exception e) {
-201      fail("Heartbeat messages are enabled, exceptions should NOT be thrown. Exception trace:"
-202          + ExceptionUtils.getStackTrace(e));
-203    }
-204
-205    HeartbeatRPCServices.heartbeatsEnabled = false;
-206    try {
-207      testCallable.call();
-208    } catch (Exception e) {
-209      return;
-210    } finally {
-211      HeartbeatRPCServices.heartbeatsEnabled = true;
-212    }
-213    fail("Heartbeat messages are disabled, an exception should be thrown. If an exception "
-214        + " is not thrown, the test case is not testing the importance of 
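A self-contained model of the testImportanceOfHeartbeats() harness above: the same callable must pass while heartbeats are enabled and fail while they are disabled, otherwise the test proves nothing. HeartbeatRPCServices is replaced by a local flag here; this is an illustration of the harness, not HBase code:

import java.util.concurrent.Callable;

public class HeartbeatToggleSketch {
  static volatile boolean heartbeatsEnabled = true;

  static void testImportanceOfHeartbeats(Callable<Void> testCallable) {
    heartbeatsEnabled = true;
    try {
      testCallable.call();
    } catch (Exception e) {
      throw new AssertionError("must pass with heartbeats enabled", e);
    }
    heartbeatsEnabled = false;
    try {
      testCallable.call();
    } catch (Exception expected) {
      return; // the scan timed out without heartbeats, as the test demands
    } finally {
      heartbeatsEnabled = true;
    }
    throw new AssertionError("must fail with heartbeats disabled");
  }

  public static void main(String[] args) {
    testImportanceOfHeartbeats(() -> {
      if (!heartbeatsEnabled) throw new RuntimeException("scan timed out");
      return null;
    });
    System.out.println("harness behaves as expected");
  }
}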

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.html
    index 4a166a4..7a49825 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.html
@@ -309,7 +309,7 @@ extends
 
 getUsage
-protected String getUsage()
+protected String getUsage()
 Description copied from class: ServerCommandLine
 Implementing subclasses should return a usage string to print out.
 
@@ -324,7 +324,7 @@ extends
 
 start
-private int start()
+private int start()
            throws java.lang.Exception
 
 Throws:
@@ -338,7 +338,7 @@ extends
 
 run
-public int run(String[] args)
+public int run(String[] args)
        throws java.lang.Exception
 
 Throws:
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
    index db4f709..44dae4e 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
    @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-private final class HStore.StoreFlusherImpl
+private final class HStore.StoreFlusherImpl
 extends java.lang.Object
 implements StoreFlushContext
 
@@ -279,7 +279,7 @@ implements 
 
 tracker
-private final FlushLifeCycleTracker tracker
+private final FlushLifeCycleTracker tracker
 
 
 
@@ -288,7 +288,7 @@ implements 
 
 cacheFlushSeqNum
-private final long cacheFlushSeqNum
+private final long cacheFlushSeqNum
 
 
 
@@ -297,7 +297,7 @@ implements 
 
 snapshot
-private MemStoreSnapshot snapshot
+private MemStoreSnapshot snapshot
 
 
 
@@ -306,7 +306,7 @@ implements 
 
 tempFiles
-private List<org.apache.hadoop.fs.Path> tempFiles
+private List<org.apache.hadoop.fs.Path> tempFiles
 
 
 
@@ -315,7 +315,7 @@ implements 
 
 committedFiles
-private List<org.apache.hadoop.fs.Path> committedFiles
+private List<org.apache.hadoop.fs.Path> committedFiles
 
 
 
@@ -324,7 +324,7 @@ implements 
 
 cacheFlushCount
-private long cacheFlushCount
+private long cacheFlushCount
 
 
 
@@ -333,7 +333,7 @@ implements 
 
 cacheFlushSize
-private long cacheFlushSize
+private long cacheFlushSize
 
 
 
@@ -342,7 +342,7 @@ implements 
 
 outputFileSize
-private long outputFileSize
+private long outputFileSize
 
 
 
@@ -359,7 +359,7 @@ implements 
 
 StoreFlusherImpl
-private StoreFlusherImpl(long cacheFlushSeqNum,
+private StoreFlusherImpl(long cacheFlushSeqNum,
                          FlushLifeCycleTracker tracker)
 
 
@@ -377,7 +377,7 @@ implements 
 
 prepare
-public MemStoreSize prepare()
+public MemStoreSize prepare()
 This is not thread safe. The caller should have a lock on the region or the store.
 If necessary, the lock can be added with the patch provided in HBASE-10087
 
@@ -394,7 +394,7 @@ implements 
 
 flushCache
-public void flushCache(MonitoredTask status)
+public void flushCache(MonitoredTask status)
                 throws java.io.IOException
 Description copied from 

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.html
    index f1db5ca..d8515d7 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.html
    @@ -32,813 +32,820 @@
 024import static org.junit.Assert.fail;
 025
 026import java.io.IOException;
-027import java.net.SocketTimeoutException;
-028import java.util.NavigableMap;
-029import java.util.Random;
-030import java.util.Set;
-031import java.util.SortedSet;
-032import java.util.concurrent.ConcurrentSkipListMap;
-033import java.util.concurrent.ConcurrentSkipListSet;
-034import java.util.concurrent.ExecutionException;
-035import java.util.concurrent.Executors;
-036import java.util.concurrent.Future;
-037import java.util.concurrent.ScheduledExecutorService;
-038import java.util.concurrent.TimeUnit;
-039
-040import org.apache.hadoop.conf.Configuration;
-041import org.apache.hadoop.hbase.CategoryBasedTimeout;
-042import org.apache.hadoop.hbase.DoNotRetryIOException;
-043import org.apache.hadoop.hbase.HBaseTestingUtility;
-044import org.apache.hadoop.hbase.NotServingRegionException;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.TableName;
-047import org.apache.hadoop.hbase.client.RegionInfo;
-048import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-049import org.apache.hadoop.hbase.client.RetriesExhaustedException;
-050import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-051import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-052import org.apache.hadoop.hbase.master.MasterServices;
-053import org.apache.hadoop.hbase.master.RegionState.State;
-054import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-055import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-056import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
-057import org.apache.hadoop.hbase.procedure2.Procedure;
-058import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-059import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-060import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-061import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-062import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-063import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-064import org.apache.hadoop.hbase.testclassification.MasterTests;
-065import org.apache.hadoop.hbase.testclassification.MediumTests;
-066import org.apache.hadoop.hbase.util.Bytes;
-067import org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.ipc.RemoteException;
-069import org.junit.After;
-070import org.junit.Before;
-071import org.junit.Ignore;
-072import org.junit.Rule;
-073import org.junit.Test;
-074import org.junit.experimental.categories.Category;
-075import org.junit.rules.ExpectedException;
-076import org.junit.rules.TestName;
-077import org.junit.rules.TestRule;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-092
-093@Category({MasterTests.class, MediumTests.class})
-094public class TestAssignmentManager {
-095  private static final Logger LOG =

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
    index a4b20ad..e46ae3f 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
    @@ -745,13 +745,8 @@
 737    public byte[] getTagsArray() {
 738      return this.kv.getTagsArray();
 739    }
-740
-741    @Override
-742    public Type getType() {
-743      return PrivateCellUtil.toType(getTypeByte());
-744    }
-745  }
-746}
+740  }
+741}
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html
    --
    diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html
    index a4b20ad..e46ae3f 100644
    --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html
    +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html
    @@ -745,13 +745,8 @@
 737    public byte[] getTagsArray() {
 738      return this.kv.getTagsArray();
 739    }
-740
-741    @Override
-742    public Type getType() {
-743      return PrivateCellUtil.toType(getTypeByte());
-744    }
-745  }
-746}
+740  }
+741}
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.html
    new file mode 100644
    index 000..68404b4
    --- /dev/null
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/AbstractTestCIOperationTimeout.html
    @@ -0,0 +1,155 @@
    +http://www.w3.org/TR/html4/loose.dtd;>
    +
    +
    +Source code
    +
    +
    +
    +
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 *     http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.client;
+019
+020import static org.junit.Assert.fail;
+021
+022import java.io.IOException;
+023import java.net.SocketTimeoutException;
+024import org.apache.hadoop.hbase.TableName;
+025import org.junit.Before;
+026import org.junit.Test;
+027import org.slf4j.Logger;
+028import org.slf4j.LoggerFactory;
+029
+030/**
+031 * Base class for testing operation timeout logic for {@link ConnectionImplementation}.
+032 */
+033public abstract class AbstractTestCIOperationTimeout extends AbstractTestCITimeout {
+034
+035  private static final Logger LOG = LoggerFactory.getLogger(AbstractTestCIOperationTimeout.class);
+036
+037  private TableName tableName;
+038
+039  @Before
+040  public void setUp() throws IOException {
+041    tableName = TableName.valueOf(name.getMethodName());
+042    TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
+043        .addCoprocessor(SleepAndFailFirstTime.class.getName())
+044        .addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAM_NAM)).build();
+045    TEST_UTIL.getAdmin().createTable(htd);
+046  }
+047
+048  protected abstract void execute(Table table) throws IOException;
+049
+050  /**
+051   * Test that an operation can fail if we read the global operation timeout, even if the
+052   * individual timeout is fine. We do that with:
+053   * <ul>
+054   * <li>client side: an operation timeout of 30 seconds</li>
+055   * <li>server side: we sleep 20 seconds at each attempt. The first attempt fails, the second one
+056   * succeeds. But the client won't wait that much, because 20 + 20 > 30, so the client timed out
+057   * when the server answers.</li>
+058   * </ul>
+059   */
+060  @Test
+061  public void testOperationTimeout() throws IOException {
+062    TableBuilder builder =
+063
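A worked version of the timing argument in that javadoc: each server attempt sleeps 20 s, so with a 30 s operation timeout the client gives up during the second attempt because 20 + 20 > 30. Plain Java arithmetic, no HBase dependencies:

public class OperationTimeoutSketch {
  public static void main(String[] args) {
    long operationTimeoutMs = 30_000;  // client-side global operation timeout
    long perAttemptSleepMs = 20_000;   // server sleeps this long before answering
    long elapsed = 0;
    int attempt = 0;
    while (elapsed < operationTimeoutMs) {
      attempt++;
      elapsed += perAttemptSleepMs;
      if (elapsed > operationTimeoutMs) {
        System.out.println("attempt " + attempt + ": client timed out at " + elapsed + "ms");
        return;
      }
      System.out.println("attempt " + attempt + " failed, retrying (elapsed=" + elapsed + "ms)");
    }
  }
}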

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.html 
    b/testdevapidocs/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.html
    index 4f02de9..124cf3b 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFiles.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
-var methods = {"i0":10,"i1":10,"i2":9,"i3":9,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":9,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10};
+var methods = {"i0":10,"i1":10,"i2":9,"i3":9,"i4":9,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":9,"i13":9,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10};
     var tabs = {65535:["t0","All Methods"],1:["t1","Static 
    Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
     
     
     
-public class TestLoadIncrementalHFiles
+public class TestLoadIncrementalHFiles
 extends java.lang.Object
 Test cases for the "load" half of the HFileOutputFormat bulk load functionality. These tests run
 faster than the full MR cluster tests in TestHFileOutputFormat
@@ -233,26 +233,43 @@ extends java.lang.Object
   int factor)
 
 
+static int
+loadHFiles(String testName,
+           org.apache.hadoop.hbase.client.TableDescriptor htd,
+           HBaseTestingUtility util,
+           byte[] fam,
+           byte[] qual,
+           boolean preCreateTable,
+           byte[][] tableSplitKeys,
+           byte[][][] hfileRanges,
+           boolean useMap,
+           boolean deleteFile,
+           boolean copyFiles,
+           int initRowCount,
+           int factor,
+           int depth)
+
+
 private void
 runTest(String testName,
         org.apache.hadoop.hbase.regionserver.BloomType bloomType,
         byte[][][] hfileRanges)
 
-
+
 private void
 runTest(String testName,
         org.apache.hadoop.hbase.regionserver.BloomType bloomType,
         byte[][][] hfileRanges,
         boolean useMap)
 
-
+
 private void
 runTest(String testName,
         org.apache.hadoop.hbase.regionserver.BloomType bloomType,
         byte[][] tableSplitKeys,
         byte[][][] hfileRanges)
 
-
+
 private void
 runTest(String testName,
         org.apache.hadoop.hbase.regionserver.BloomType bloomType,
@@ -260,58 +277,59 @@ extends java.lang.Object
         byte[][][] hfileRanges,
         boolean useMap)
 
-
+
 private void
-runTest(String testName,
+runTest(String testName,
         org.apache.hadoop.hbase.client.TableDescriptor htd,
-        org.apache.hadoop.hbase.regionserver.BloomType bloomType,
         boolean preCreateTable,
         byte[][] tableSplitKeys,
         byte[][][] hfileRanges,
         boolean useMap,
-        boolean copyFiles)
+        boolean copyFiles,
+        int depth)
 
-
+
 private void
-runTest(String testName,
+runTest(String testName,
         org.apache.hadoop.hbase.TableName tableName,
         org.apache.hadoop.hbase.regionserver.BloomType bloomType,
         boolean preCreateTable,
 
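A small illustration of the byte[][][] hfileRanges argument in the new loadHFiles(...) overload listed above: one entry per HFile, each holding a first-row/last-row pair. The keys and split points are made up for the example, and the pair layout is an inference from the test's conventions:

import java.nio.charset.StandardCharsets;

public class HFileRangesSketch {
  static byte[] b(String s) { return s.getBytes(StandardCharsets.UTF_8); }

  public static void main(String[] args) {
    byte[][] tableSplitKeys = { b("ddd"), b("ppp") };   // pre-split points for the table
    byte[][][] hfileRanges = {
      { b("aaaa"), b("cccc") },                         // HFile 1 covers aaaa..cccc
      { b("eeee"), b("oooo") },                         // HFile 2 covers eeee..oooo
      { b("qqqq"), b("zzzz") },                         // HFile 3 covers qqqq..zzzz
    };
    System.out.println("split keys=" + tableSplitKeys.length
        + " hfiles=" + hfileRanges.length);
  }
}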

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html
    index b90933c..e3182e9 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
-var methods = {"i0":9,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":10,"i13":10,"i14":9,"i15":10,"i16":9,"i17":10,"i18":9,"i19":9,"i20":10,"i21":10,"i22":10,"i23":10,"i24":9,"i25":10,"i26":9,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10};
+var methods = {"i0":10,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":9,"i13":10,"i14":10,"i15":9,"i16":10,"i17":9,"i18":10,"i19":9,"i20":10,"i21":9,"i22":9,"i23":9,"i24":10,"i25":10,"i26":10,"i27":10,"i28":9,"i29":10,"i30":10,"i31":9,"i32":9,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10};
     var tabs = {65535:["t0","All Methods"],1:["t1","Static 
    Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
     
     
     @InterfaceAudience.Private
    -public class MasterProcedureScheduler
    +public class MasterProcedureScheduler
     extends AbstractProcedureScheduler
     ProcedureScheduler for the Master Procedures.
      This ProcedureScheduler tries to provide to the ProcedureExecutor procedures
@@ -191,6 +191,14 @@ extends MasterProcedureScheduler.FairQueue<T extends Comparable<T>>
     
     
    +static class
    +MasterProcedureScheduler.PeerQueue
    +
    +
    +private static class
    +MasterProcedureScheduler.PeerQueueKeyComparator
    +
    +
     private static class
 MasterProcedureScheduler.Queue<TKey extends Comparable<TKey>>
     
    @@ -246,30 +254,42 @@ extends LOG
     
     
    +private static MasterProcedureScheduler.PeerQueueKeyComparator
    +PEER_QUEUE_KEY_COMPARATOR
    +
    +
    +private MasterProcedureScheduler.PeerQueue
    +peerMap
    +
    +
+private MasterProcedureScheduler.FairQueue<String>
+peerRunQueue
    +
    +
     private static MasterProcedureScheduler.ServerQueueKeyComparator
     SERVER_QUEUE_KEY_COMPARATOR
     
    -
    +
     private MasterProcedureScheduler.ServerQueue[]
     serverBuckets
     
    -
    +
 private MasterProcedureScheduler.FairQueue<ServerName>
     serverRunQueue
     
    -
    +
     private static MasterProcedureScheduler.TableQueueKeyComparator
     TABLE_QUEUE_KEY_COMPARATOR
     
    -
    +
     private MasterProcedureScheduler.TableQueue
     tableMap
     
    -
    +
     private MasterProcedureScheduler.TablePriorities
     tablePriorities
     
    -
    +
 private MasterProcedureScheduler.FairQueue<TableName>
     tableRunQueue
     
    @@ -306,163 +326,202 @@ extends Method and Description
     
     
+private <T> void
+addToLockedResources(List<LockedResource> lockedResources,
+                     Map<T, LockAndQueue> locks,
+                     Function<T, String> keyTransformer,
+                     LockedResourceType resourcesType)
    +
    +
 private static <T extends Comparable<T>> void
 addToRunQueue(MasterProcedureScheduler.FairQueue<T> fairq,
               MasterProcedureScheduler.Queue<T> queue)
     
    -
    +
     void
     clear()
     Clear current state of scheduler such that it is equivalent 
    to newly created scheduler.
     
     
    -
    +
 private <T extends Comparable<T>, TNode extends MasterProcedureScheduler.Queue<T>> void
 clear(TNode treeMap,
       MasterProcedureScheduler.FairQueue<T> fairq,
       AvlUtil.AvlKeyComparator<TNode> comparator)
     
    -
    +
     protected 

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
     
    b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
    index 7715243..398a35e 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityController.html
    @@ -551,7 +551,7 @@ implements MasterObserver
    -postAbortProcedure,
     postAddReplicationPeer,
     postAddRSGroup,
     postAssign,
     postBalance, postBalanceRSGroup,
     postBalanceSwitch,
     postClearDeadServers,
     postCloneSnapshot,
     postCompletedCreateTableAction,
     postCompletedDeleteTableAction,
     postCompletedDisableTableAction,
     postCompletedEnableTableAction,
     postCompletedMergeRegionsAction,
     postCompletedModifyTableAction,
     postCompletedSplitRegionAction,
     postCompletedTruncateTableAction, postCreateNamespace,
     postCreateTable,
     postDecommissionRegionServers,
     postDeleteNamespace,
     postDeleteSnapshot,
     postDeleteTable,
     postDisableReplicationPeer,
     postDisableTable,
     postEnableReplicationPeer, postEnableTable,
     postGetClusterStatus,
     postGetLocks,
     postGetNamespaceDescriptor,
     postGetProcedures, postGetReplicationPeerConfig,
     postGetTableDescriptors,
     postGetTableNames,
     postListDecommissionedRegionServers,
     postListNamespaceDescriptors,
     postListReplicationPeers,
     postListSnapshot,
     postLockHeartbeat,
     postMergeRegions,
     postMergeRegionsCommitAction,
     postModifyNamespace,
     postModifyTable,
     postMove, postMoveServers,
     postMoveServersAndTables,
     postMoveTables,
     postRecommissionRegionServer,
     postRegionOffline,
     postRemoveReplicationPeer,
     postRemoveRSGroup,
     postRemoveServers,
     postRequestLock, postRestoreSnapshot,
     postRollBackMergeRegionsAction,
     postRollBackSplitRegionAction,
 postSetNamespaceQuota, postSetTableQuota,
     postSetUserQuota,
     postSetUserQuota,
     postSetUserQuota, postSnapshot,
     postTableFlush,
     postTruncateTable,
     postUnassign,
     postUpdateReplicationPeerConfig,
     preAbortProcedure,
     preAddReplicationPeer,
     preAddRSGroup,
     preAssign,
     preBalance,
     preBalanceRSGroup,
     preBalanceSwitch,
     preClearDeadServers,
     preCloneSnapshot,
     preCreateNamespace,
     preCreateTable,
     preCreateTableAction,
     preDecommissionRegionServers,
     preDeleteNamespace,
     preDeleteSnapshot,
     preDeleteTable,
     preDeleteTableAction,
     preDisableReplicationPeer,
     preDisableTableAction,
     preEnableReplicationPeer,
     preEnableTable,
 preEnableTableAction, preGetClusterStatus,
     preGetLocks,
     preGetNamespaceDescriptor,
     preGetProcedures,
     preGetReplicationPeerConfig,
     preGetTableDescriptors,
     preGetTableNames,
     preListDecommissionedRegionServers,
     preListNamespaceDescriptors,
     preListReplicationPeers, preListSnapshot,
     preLockHeartbeat,
     preMasterInitialization,
     preMergeRegions,
     preMergeRegionsAction,
     preMergeRegionsCommitAction,
     preModifyNamespace,
     preModifyTableAction,
     preMove,
     preMoveServers,
     preMoveServersAndTables,
     preMoveTables,
     preRecommissionRegionServer, preRegionOffline,
     preRemoveReplicationPeer,
     preRemoveRSGroup,
     preRemoveServers,
     preRequestLock,
     preRestoreSnapshot,
     preSetNamespaceQuota,
     preSetSplitOrMergeEnabled,
     preSetTableQuota,
     preSetUserQuota,
     preSetUserQuota,
     preSetUserQuota,
     preShutdown,
     preSnapshot,
     preSplitRegion,
     preSplitRegionAction,
 preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction,
     preStopMaster,
     preTableFlush,
     preTruncateTable,
     preTruncateTableAction, preUnassign,
     preUpdateReplicationPeerConfig
    +postAbortProcedure,
     postAddReplicationPeer,
     postAddRSGroup,
     postAssign,
     postBalance, postBalanceRSGroup,
     postBalanceSwitch,
     postClearDeadServers,
     postCloneSnapshot,
     postCompletedCreateTableAction,
     postCompletedDeleteTableAction,
     postCompletedDisableTableAction,
     postCompletedEnableTableAction,
     postCompletedMergeRegionsAction,
     postCompletedModifyTableAction,
     postCompletedSplitRegionAction,
     postCompletedTruncateTableAction, postCreateNamespace,
     postCreateTable,
     postDecommissionRegionServers,
     postDeleteNamespace,
     postDeleteSnapshot,
     postDeleteTable,
     postDisableReplicationPeer,
     postDisableTable,
     postEnableReplicationPeer, postEnableTable,
     postGetClusterMetrics,
     postGetLocks,
     postGetNamespaceDescriptor,
     postGetProcedures, postGetReplicationPeerConfig,
     postGetTableDescriptors,
     postGetTableNames,
     postListDecommissionedRegionServers,
     postListNamespaceDescriptors,
     postListReplicationPeers,
     postListSnapshot,
     postLockHeartbeat,
     postMergeRegions,
     postMergeRegionsCommitAction,
     postModifyNamespace,
     postModifyTable,
     postMove, postMoveServers,
     postMoveServersAndTables,
     postMoveTables,
     postRecommissionRegionServer,
     postRegionOffline,
     postRemoveReplicationPeer,
     postRemoveRSGroup,
     postRemoveServers,
     postRequestLock, postRestoreSnapshot,
     
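The substantive change in this hunk is the rename of the cluster-status coprocessor hook: postGetClusterStatus on the removed side becomes postGetClusterMetrics on the added side. A minimal hedged sketch of an observer using the renamed hook (the exact signature is assumed from the HBase 2.0 MasterObserver API; the body is illustrative only):

    import java.io.IOException;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    public class ClusterMetricsAuditObserver implements MasterObserver {
      @Override
      public void postGetClusterMetrics(ObserverContext<MasterCoprocessorEnvironment> ctx,
          ClusterMetrics metrics) throws IOException {
        // Fires after the master serves a cluster-metrics request; replaces the
        // pre-rename postGetClusterStatus(ctx, ClusterStatus) callback.
        System.out.println("live servers: " + metrics.getLiveServerMetrics().size());
      }
    }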

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
    index 1318b95..841130a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterCoprocessorHost.MasterEnvironment.html
    @@ -55,1647 +55,1615 @@
     047import 
    org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
     048import 
    org.apache.hadoop.hbase.coprocessor.MasterObserver;
     049import 
    org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
    -050import 
    org.apache.hadoop.hbase.coprocessor.ObserverContext;
    -051import 
    org.apache.hadoop.hbase.master.locking.LockProcedure;
    -052import 
    org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
    -053import 
    org.apache.hadoop.hbase.metrics.MetricRegistry;
    -054import 
    org.apache.hadoop.hbase.net.Address;
    -055import 
    org.apache.hadoop.hbase.procedure2.LockType;
    -056import 
    org.apache.hadoop.hbase.procedure2.LockedResource;
    -057import 
    org.apache.hadoop.hbase.procedure2.Procedure;
    -058import 
    org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
    -059import 
    org.apache.hadoop.hbase.quotas.GlobalQuotaSettings;
    -060import 
    org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    -061import 
    org.apache.hadoop.hbase.security.User;
    -062import 
    org.apache.yetus.audience.InterfaceAudience;
    -063import org.slf4j.Logger;
    -064import org.slf4j.LoggerFactory;
    -065
    -066/**
    -067 * Provides the coprocessor framework and 
    environment for master oriented
    -068 * operations.  {@link HMaster} interacts 
    with the loaded coprocessors
    -069 * through this class.
    -070 */
    -071@InterfaceAudience.Private
    -072public class MasterCoprocessorHost
-073    extends CoprocessorHost<MasterCoprocessor, MasterCoprocessorEnvironment> {
    -074
    -075  private static final Logger LOG = 
    LoggerFactory.getLogger(MasterCoprocessorHost.class);
    -076
    -077  /**
    -078   * Coprocessor environment extension 
    providing access to master related
    -079   * services.
    -080   */
-081  private static class MasterEnvironment extends BaseEnvironment<MasterCoprocessor>
-082      implements MasterCoprocessorEnvironment {
    -083private final boolean 
    supportGroupCPs;
    -084private final MetricRegistry 
    metricRegistry;
    -085private final MasterServices 
    services;
    -086
    -087public MasterEnvironment(final 
    MasterCoprocessor impl, final int priority, final int seq,
    -088final Configuration conf, final 
    MasterServices services) {
    -089  super(impl, priority, seq, conf);
    -090  this.services = services;
    -091  supportGroupCPs = 
    !useLegacyMethod(impl.getClass(),
    -092  "preBalanceRSGroup", 
    ObserverContext.class, String.class);
    -093  this.metricRegistry =
    -094  
    MetricsCoprocessor.createRegistryForMasterCoprocessor(impl.getClass().getName());
    -095}
    -096
    -097@Override
    -098public ServerName getServerName() {
    -099  return 
    this.services.getServerName();
    -100}
    -101
    -102@Override
    -103public Connection getConnection() {
    -104  return new 
    SharedConnection(this.services.getConnection());
    -105}
    -106
    -107@Override
    -108public Connection 
    createConnection(Configuration conf) throws IOException {
    -109  return 
    this.services.createConnection(conf);
    -110}
    -111
    -112@Override
    -113public MetricRegistry 
    getMetricRegistryForMaster() {
    -114  return metricRegistry;
    -115}
    -116
    -117@Override
    -118public void shutdown() {
    -119  super.shutdown();
    -120  
    MetricsCoprocessor.removeRegistry(this.metricRegistry);
    -121}
    -122  }
    -123
    -124  /**
    -125   * Special version of MasterEnvironment 
    that exposes MasterServices for Core Coprocessors only.
    -126   * Temporary hack until Core 
    Coprocessors are integrated into Core.
    -127   */
    -128  private static class 
    MasterEnvironmentForCoreCoprocessors extends MasterEnvironment
    -129  implements HasMasterServices {
    -130private final MasterServices 
    masterServices;
    -131
    -132public 
    MasterEnvironmentForCoreCoprocessors(final MasterCoprocessor impl, final int 
    priority,
    -133final int seq, final 
    Configuration conf, final MasterServices services) {
    -134  super(impl, priority, seq, conf, 
    services);
    -135  this.masterServices = services;
    -136}
    -137
    -138/**
    -139 * @return An instance of 
    MasterServices, an object NOT for general user-space Coprocessor
    -140 * consumption.
    -141 */
    -142public MasterServices 
    getMasterServices() {
    -143  return this.masterServices;
    -144}
    -145  }
    -146
    -147  private MasterServices 
    masterServices;
    -148
    -149  public MasterCoprocessorHost(final 
    MasterServices services, 

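The fragment cuts off inside the constructor, but the two environment classes above imply a selection step somewhere in the host. A hedged sketch of that dispatch (hypothetical method name; the real logic lives in the truncated remainder of the file):

    // Hedged sketch: core coprocessors get the environment flavor that exposes
    // MasterServices; everything else gets the plain MasterEnvironment.
    private MasterEnvironment chooseEnvironment(MasterCoprocessor impl, int priority, int seq,
        Configuration conf, MasterServices services, boolean isCoreCoprocessor) {
      return isCoreCoprocessor
          ? new MasterEnvironmentForCoreCoprocessors(impl, priority, seq, conf, services)
          : new MasterEnvironment(impl, priority, seq, conf, services);
    }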
    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/Import.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/Import.html 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/Import.html
    index d626531..9a46853 100644
    --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/Import.html
    +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/Import.html
    @@ -120,6 +120,6 @@
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.BadTsvLineException.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.BadTsvLineException.html
     
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.BadTsvLineException.html
    index 1702c97..bd804f4 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.BadTsvLineException.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.BadTsvLineException.html
    @@ -182,6 +182,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.ParsedLine.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.ParsedLine.html
     
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.ParsedLine.html
    index b1d461c..383a97b 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.ParsedLine.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.ParsedLine.html
    @@ -180,6 +180,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.html
     
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.html
    index ebbfc2d..1534ccd 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.TsvParser.html
    @@ -171,6 +171,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
    utility methods.
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.html 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.html
    index fc2be7a..57c8a12 100644
    --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.html
    +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ImportTsv.html
    @@ -120,6 +120,6 @@
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/IndexBuilder.Map.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/IndexBuilder.Map.html 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/IndexBuilder.Map.html
    index 7f8040d..202aa2a 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/IndexBuilder.Map.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/IndexBuilder.Map.html
    @@ -120,6 +120,6 @@
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.TestRegistry.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.TestRegistry.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.TestRegistry.html
    index bbd91b8..4f76302 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.TestRegistry.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.TestRegistry.html
    @@ -56,1641 +56,1753 @@
     048import 
    java.util.concurrent.atomic.AtomicBoolean;
     049import 
    java.util.concurrent.atomic.AtomicInteger;
     050import 
    java.util.concurrent.atomic.AtomicLong;
    -051
    -052import 
    org.apache.hadoop.conf.Configuration;
    -053import 
    org.apache.hadoop.hbase.CallQueueTooBigException;
    -054import 
    org.apache.hadoop.hbase.CategoryBasedTimeout;
    -055import org.apache.hadoop.hbase.Cell;
    -056import 
    org.apache.hadoop.hbase.HConstants;
    -057import 
    org.apache.hadoop.hbase.HRegionInfo;
    -058import 
    org.apache.hadoop.hbase.HRegionLocation;
    -059import 
    org.apache.hadoop.hbase.RegionLocations;
    -060import 
    org.apache.hadoop.hbase.ServerName;
    -061import 
    org.apache.hadoop.hbase.TableName;
    -062import 
    org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
    -063import 
    org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
    -064import 
    org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
    -065import 
    org.apache.hadoop.hbase.client.backoff.ServerStatistics;
    -066import 
    org.apache.hadoop.hbase.client.coprocessor.Batch;
    -067import 
    org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    -068import 
    org.apache.hadoop.hbase.testclassification.ClientTests;
    -069import 
    org.apache.hadoop.hbase.testclassification.MediumTests;
    -070import 
    org.apache.hadoop.hbase.util.Bytes;
    -071import 
    org.apache.hadoop.hbase.util.Threads;
    -072import org.junit.Assert;
    -073import org.junit.BeforeClass;
    -074import org.junit.Ignore;
    -075import org.junit.Rule;
    -076import org.junit.Test;
    -077import 
    org.junit.experimental.categories.Category;
    -078import org.junit.rules.TestRule;
    -079import org.mockito.Mockito;
    -080import org.slf4j.Logger;
    -081import org.slf4j.LoggerFactory;
    -082
    -083@Category({ClientTests.class, 
    MediumTests.class})
    -084public class TestAsyncProcess {
    -085  @Rule public final TestRule timeout = 
    CategoryBasedTimeout.builder().withTimeout(this.getClass()).
    -086  
    withLookingForStuckThread(true).build();
    -087  private static final Logger LOG = 
    LoggerFactory.getLogger(TestAsyncProcess.class);
    -088  private static final TableName 
    DUMMY_TABLE =
    -089  TableName.valueOf("DUMMY_TABLE");
    -090  private static final byte[] 
    DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
    -091  private static final byte[] 
    DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
    -092  private static final byte[] 
    DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
    -093  private static final byte[] FAILS = 
    Bytes.toBytes("FAILS");
    -094  private static final Configuration CONF 
    = new Configuration();
    -095  private static final 
    ConnectionConfiguration CONNECTION_CONFIG =
    -096  new 
    ConnectionConfiguration(CONF);
    -097  private static final ServerName sn = 
    ServerName.valueOf("s1,1,1");
    -098  private static final ServerName sn2 = 
    ServerName.valueOf("s2,2,2");
    -099  private static final ServerName sn3 = 
    ServerName.valueOf("s3,3,3");
    -100  private static final HRegionInfo hri1 
    =
    -101  new HRegionInfo(DUMMY_TABLE, 
    DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
    -102  private static final HRegionInfo hri2 
    =
    -103  new HRegionInfo(DUMMY_TABLE, 
    DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
    -104  private static final HRegionInfo hri3 
    =
    -105  new HRegionInfo(DUMMY_TABLE, 
    DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
    -106  private static final HRegionLocation 
    loc1 = new HRegionLocation(hri1, sn);
    -107  private static final HRegionLocation 
    loc2 = new HRegionLocation(hri2, sn);
    -108  private static final HRegionLocation 
    loc3 = new HRegionLocation(hri3, sn2);
    -109
    -110  // Replica stuff
    -111  private static final RegionInfo hri1r1 
    = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
    -112  private static final RegionInfo hri1r2 
    = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
    -113  private static final RegionInfo hri2r1 
    = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
    -114  private static final RegionLocations 
    hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
    -115  new HRegionLocation(hri1r1, sn2), 
    new HRegionLocation(hri1r2, sn3));
    -116  private static final RegionLocations 
    hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
    -117  new HRegionLocation(hri2r1, 
    sn3));
    -118  private static final RegionLocations 
    hrls3 =
    -119  new RegionLocations(new 
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
    index 36c5b3b..dc87d63 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
    @@ -231,7 +231,7 @@ extends Segment
    -close,
     compare,
     compareRows,
     decScannerCount,
     dump,
     getCellLength,
     getCellsCount,
     getCellSet,
     getComparator,
     getFirstAfter,
     getMemStoreLAB,
     getMinSequenceId,
     getScanner,
     getScanner,
     getScanners,
     getTimeRangeTracker,
     headSet, heapSize,
     heapSizeChange,
     incScannerCount,
     incSize,
     indexEntrySize,
     internalAdd,
     isEmpty,
     isTagsPresent, iterator,
     keySize,
     last,
     maybeCloneWithAllocator,
     setCellSet,
     shouldSeek,
     tailSet,
     updateMetaInfo,
     updateMetaInfo
    +close,
     compare,
     compareRows,
     decScannerCount,
     dump,
     getCellLength,
     getCellsCount,
     getCellSet,
     getComparator,
     getFirstAfter,
     getMemStoreLAB,
     getMinSequenceId,
     getScanner,
     getScanner,
     getScanners,
     getTimeRangeTracker,
     headSet, heapSize,
     heapSizeChange,
     incScannerCount,
     incSize,
     indexEntrySize,
     internalAdd,
     isEmpty,
     isTagsPresent, iterator,
     keySize,
     last,
     maybeCloneWithAllocator,
     setCellSet,
     shouldSeek,
     tailSet,
     updateMetaInfo,
     updateMetaInfo
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLAB.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLAB.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLAB.html
    index eb53ee2..b761b3f 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLAB.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLAB.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":17,"i7":17};
    +var methods = 
    {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":17,"i8":17};
     var tabs = {65535:["t0","All Methods"],1:["t1","Static 
    Methods"],2:["t2","Instance Methods"],4:["t3","Abstract 
    Methods"],16:["t5","Default Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
@@ -225,24 +225,30 @@ public interface
+Cell
+forceCopyOfBigCellInto(Cell cell)
+Allocates slice in this LAB and copy the passed Cell into this area.
+
 Chunk
 getNewExternalChunk()
 
 Chunk
 getNewExternalJumboChunk(int size)
 
 void
 incScannerCount()
 Called when opening a scanner on the data of this MemStoreLAB
 
 static boolean
 isEnabled(org.apache.hadoop.conf.Configuration conf)
 
 static MemStoreLAB
 newInstance(org.apache.hadoop.conf.Configuration conf)
 
    @@ -423,13 +429,29 @@ public interface 
+forceCopyOfBigCellInto
+Cell forceCopyOfBigCellInto(Cell cell)
+Allocates slice in this LAB and copy the passed Cell into this area. Returns a new Cell
+ instance over the copied data. When this MemStoreLAB cannot copy this Cell, it returns null.
+
+ Since the process of flattening to CellChunkMap assumes all cells are allocated on MSLAB,
+ and since copyCellInto does not copy big cells (for whom size > maxAlloc) into MSLAB,
+ this method is called while the process of flattening to CellChunkMap is running,
+ for forcing the allocation of big cells on this MSLAB.
+
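Per the contract above, a flattening routine first attempts the ordinary copy and only then falls back to the forced big-cell copy. A hedged usage sketch (helper name hypothetical; the real caller is the CellChunkMap flattening code):

    // Ensure a cell is MSLAB-backed before indexing it in a CellChunkMap.
    // copyCellInto declines cells larger than maxAlloc and returns null, so
    // fall back to forceCopyOfBigCellInto, which allocates a dedicated chunk.
    static Cell ensureOnMslab(MemStoreLAB lab, Cell cell) {
      Cell copied = lab.copyCellInto(cell);        // null for big cells
      if (copied == null) {
        copied = lab.forceCopyOfBigCellInto(cell); // force the big-cell allocation
      }
      return copied;
    }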
     
     
     
     
     
     close
-void close()
+void close()
     Close instance since it won't be used any more, try to put 
    the chunks back to pool
     
     
    @@ -439,7 +461,7 @@ public interface 
     
     incScannerCount
-void incScannerCount()
+void incScannerCount()
     Called when opening a scanner on the data of this 
    MemStoreLAB
     
     
    @@ -449,7 +471,7 @@ public interface 
     
     decScannerCount
-void decScannerCount()
+void decScannerCount()
     Called when closing a scanner on the data of this 
    MemStoreLAB
     
     
    @@ -459,7 +481,7 @@ public interface 
     
     getNewExternalChunk
-Chunk getNewExternalChunk()
+Chunk getNewExternalChunk()
     
     
     
    @@ -468,7 +490,7 @@ public interface 
     
     getNewExternalJumboChunk
-Chunk getNewExternalJumboChunk(int size)
+Chunk getNewExternalJumboChunk(int size)
     
     
     
    @@ -477,7 +499,7 @@ public interface 
     
     newInstance
-static MemStoreLAB newInstance(org.apache.hadoop.conf.Configuration conf)
+static MemStoreLAB newInstance(org.apache.hadoop.conf.Configuration conf)
     
     
     
    @@ -486,7 +508,7 @@ public interface 
     
     isEnabled
-static boolean isEnabled(org.apache.hadoop.conf.Configuration conf)
+static boolean isEnabled(org.apache.hadoop.conf.Configuration conf)
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.html
    --
    diff --git 
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
     
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
    index 8bf6d61..73e6f14 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
    @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -private static interface FileIOEngine.FileAccessor
    +private static interface FileIOEngine.FileAccessor
     
     
     
    @@ -155,7 +155,7 @@ var activeTableTab = "activeTableTab";
     
     
     access
-int access(FileChannel fileChannel,
+int access(FileChannel fileChannel,
            ByteBuffer byteBuffer,
            long accessOffset)
     throws IOException
    
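The interface reduces both directions of file I/O to one positional-access method, which is what lets FileIOEngine share its offset bookkeeping between reads and writes. A hedged sketch of the pattern (lambda-based rendition; the actual implementations are the FileReadAccessor and FileWriteAccessor classes below):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    class FileAccessorSketch {
      interface FileAccessor {
        int access(FileChannel fileChannel, ByteBuffer byteBuffer, long accessOffset)
            throws IOException;
      }

      // Positional read/write never move the channel's own position, so both
      // accessors are safe to use concurrently on one shared FileChannel.
      static final FileAccessor READER = (ch, buf, off) -> ch.read(buf, off);
      static final FileAccessor WRITER = (ch, buf, off) -> ch.write(buf, off);
    }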
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
     
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
    index 5357055..3475837 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
    @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -private static class FileIOEngine.FileReadAccessor
    +private static class FileIOEngine.FileReadAccessor
 extends Object
     implements FileIOEngine.FileAccessor
     
    @@ -191,7 +191,7 @@ implements 
     
     FileReadAccessor
-private FileReadAccessor()
+private FileReadAccessor()
     
     
     
    @@ -208,7 +208,7 @@ implements 
     
     access
-public int access(FileChannel fileChannel,
+public int access(FileChannel fileChannel,
                   ByteBuffer byteBuffer,
                   long accessOffset)
            throws IOException
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
     
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
    index 9c1b746..2f8ba60 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
    @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -private static class FileIOEngine.FileWriteAccessor
    +private static class FileIOEngine.FileWriteAccessor
 extends Object
     implements FileIOEngine.FileAccessor
     
    @@ -191,7 +191,7 @@ implements 
     
     FileWriteAccessor
-private FileWriteAccessor()
+private FileWriteAccessor()
     
     
     
    @@ -208,7 +208,7 @@ implements 
     
     access
-public int access(FileChannel fileChannel,
+public int access(FileChannel fileChannel,

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSCell.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSCell.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSCell.html
    index 3400507..2baa140 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSCell.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSCell.html
    @@ -28,3034 +28,2926 @@
     020import static 
    org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
     021import static 
    org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
     022
    -023import 
    com.google.common.annotations.VisibleForTesting;
    -024
    -025import java.io.DataOutput;
    -026import java.io.DataOutputStream;
    -027import java.io.IOException;
    -028import java.io.OutputStream;
    -029import java.math.BigDecimal;
    -030import java.nio.ByteBuffer;
    -031import java.util.ArrayList;
    -032import java.util.Iterator;
    -033import java.util.List;
    -034import java.util.Optional;
    -035
    -036import 
    org.apache.hadoop.hbase.KeyValue.Type;
    -037import 
    org.apache.hadoop.hbase.filter.ByteArrayComparable;
    -038import 
    org.apache.hadoop.hbase.io.HeapSize;
    -039import 
    org.apache.hadoop.hbase.io.TagCompressionContext;
    -040import 
    org.apache.hadoop.hbase.io.util.Dictionary;
    -041import 
    org.apache.hadoop.hbase.io.util.StreamUtils;
    -042import 
    org.apache.hadoop.hbase.util.ByteBufferUtils;
    -043import 
    org.apache.hadoop.hbase.util.ByteRange;
    -044import 
    org.apache.hadoop.hbase.util.Bytes;
    -045import 
    org.apache.hadoop.hbase.util.ClassSize;
    -046import 
    org.apache.yetus.audience.InterfaceAudience;
    -047
    -048
    -049/**
    -050 * Utility methods helpful slinging 
    {@link Cell} instances. It has more powerful and
    -051 * rich set of APIs than those in {@link 
    CellUtil} for internal usage.
    -052 */
    -053@InterfaceAudience.Private
    -054public final class PrivateCellUtil {
    -055
    -056  /**
    -057   * Private constructor to keep this 
    class from being instantiated.
    -058   */
    -059  private PrivateCellUtil() {
    -060  }
    +023import java.io.DataOutput;
    +024import java.io.DataOutputStream;
    +025import java.io.IOException;
    +026import java.io.OutputStream;
    +027import java.math.BigDecimal;
    +028import java.nio.ByteBuffer;
    +029import java.util.ArrayList;
    +030import java.util.Iterator;
    +031import java.util.List;
    +032import java.util.Optional;
    +033import 
    org.apache.hadoop.hbase.KeyValue.Type;
    +034import 
    org.apache.hadoop.hbase.filter.ByteArrayComparable;
    +035import 
    org.apache.hadoop.hbase.io.HeapSize;
    +036import 
    org.apache.hadoop.hbase.io.TagCompressionContext;
    +037import 
    org.apache.hadoop.hbase.io.util.Dictionary;
    +038import 
    org.apache.hadoop.hbase.io.util.StreamUtils;
    +039import 
    org.apache.hadoop.hbase.util.ByteBufferUtils;
    +040import 
    org.apache.hadoop.hbase.util.ByteRange;
    +041import 
    org.apache.hadoop.hbase.util.Bytes;
    +042import 
    org.apache.hadoop.hbase.util.ClassSize;
    +043import 
    org.apache.yetus.audience.InterfaceAudience;
    +044
    +045import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    +046
    +047/**
    +048 * Utility methods helpful slinging 
    {@link Cell} instances. It has more powerful and
    +049 * rich set of APIs than those in {@link 
    CellUtil} for internal usage.
    +050 */
    +051@InterfaceAudience.Private
    +052public final class PrivateCellUtil {
    +053
    +054  /**
    +055   * Private constructor to keep this 
    class from being instantiated.
    +056   */
    +057  private PrivateCellUtil() {
    +058  }
    +059
    +060  /*** ByteRange 
    ***/
     061
    -062  /*** ByteRange 
    ***/
    -063
    -064  public static ByteRange 
    fillRowRange(Cell cell, ByteRange range) {
    -065return range.set(cell.getRowArray(), 
    cell.getRowOffset(), cell.getRowLength());
    -066  }
    -067
    -068  public static ByteRange 
    fillFamilyRange(Cell cell, ByteRange range) {
    -069return 
    range.set(cell.getFamilyArray(), cell.getFamilyOffset(), 
    cell.getFamilyLength());
    -070  }
    -071
    -072  public static ByteRange 
    fillQualifierRange(Cell cell, ByteRange range) {
    -073return 
    range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
    -074  cell.getQualifierLength());
    -075  }
    -076
    -077  public static ByteRange 
    fillValueRange(Cell cell, ByteRange range) {
    -078return 
    range.set(cell.getValueArray(), cell.getValueOffset(), 
    cell.getValueLength());
    -079  }
    -080
    -081  public static ByteRange 
    fillTagRange(Cell cell, ByteRange range) {
    -082return range.set(cell.getTagsArray(), 
    cell.getTagsOffset(), cell.getTagsLength());
    -083  }
    +062  public static ByteRange 
    fillRowRange(Cell cell, ByteRange range) {
    +063return range.set(cell.getRowArray(), 
    cell.getRowOffset(), cell.getRowLength());
    +064  }
    +065
    +066  public static ByteRange 
    fillFamilyRange(Cell cell, ByteRange range) {
    +067return 
    
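For orientation, a hedged usage sketch of the fill*Range helpers above: each one points a caller-owned ByteRange at the cell's backing bytes without copying (SimpleMutableByteRange is one concrete ByteRange in org.apache.hadoop.hbase.util; the final copy is optional and shown only for illustration):

    ByteRange range = new SimpleMutableByteRange();
    PrivateCellUtil.fillRowRange(cell, range);   // range now covers the row bytes in place
    byte[] row = range.deepCopyToNewArray();     // copy out only when a copy is needed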

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
    index 07b6abe..f51c693 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
    @@ -78,2190 +78,2184 @@
     070import 
    org.apache.hadoop.hbase.procedure2.LockType;
     071import 
    org.apache.hadoop.hbase.procedure2.LockedResource;
     072import 
    org.apache.hadoop.hbase.procedure2.Procedure;
    -073import 
    org.apache.hadoop.hbase.procedure2.ProcedureUtil;
    -074import 
    org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
    -075import 
    org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;
    -076import 
    org.apache.hadoop.hbase.quotas.MasterQuotaManager;
    -077import 
    org.apache.hadoop.hbase.quotas.QuotaObserverChore;
    -078import 
    org.apache.hadoop.hbase.quotas.QuotaUtil;
    -079import 
    org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
    -080import 
    org.apache.hadoop.hbase.regionserver.RSRpcServices;
    -081import 
    org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
    -082import 
    org.apache.hadoop.hbase.replication.ReplicationException;
    -083import 
    org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    -084import 
    org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
    -085import 
    org.apache.hadoop.hbase.security.User;
    -086import 
    org.apache.hadoop.hbase.security.access.AccessController;
    -087import 
    org.apache.hadoop.hbase.security.visibility.VisibilityController;
    -088import 
    org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
    -089import 
    org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
    -090import 
    org.apache.hadoop.hbase.util.Bytes;
    -091import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -092import 
    org.apache.hadoop.hbase.util.ForeignExceptionUtil;
    -093import 
    org.apache.hadoop.hbase.util.Pair;
    -094import 
    org.apache.yetus.audience.InterfaceAudience;
    -095import 
    org.apache.zookeeper.KeeperException;
    -096import org.slf4j.Logger;
    -097import org.slf4j.LoggerFactory;
    -098
    -099import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
    -100import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
    -101import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
    -102import 
    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -103import 
    org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
    -104import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
    -105import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
    -106import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
    -107import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
    -108import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    -109import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
    -110import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
    -111import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
    -112import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
    -113import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
    -114import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
    -115import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
    -116import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
    -117import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
    -118import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
    -119import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
    -120import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
    -121import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
    -122import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
    -123import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
    -124import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
    -125import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
    -126import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
    -127import 
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
    index f7fbfbf..88ebcbc 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
    @@ -34,1583 +34,1583 @@
     026import java.io.IOException;
     027import java.util.ArrayList;
     028import java.util.Arrays;
    -029import java.util.Collection;
    -030import java.util.Collections;
    -031import java.util.EnumSet;
    -032import java.util.HashMap;
    -033import java.util.List;
    -034import java.util.Map;
    -035import java.util.Optional;
    -036import java.util.Set;
    -037import 
    java.util.concurrent.CompletableFuture;
    -038import java.util.concurrent.TimeUnit;
    -039import 
    java.util.concurrent.atomic.AtomicReference;
    -040import java.util.function.BiConsumer;
    -041import java.util.function.Function;
    -042import java.util.regex.Pattern;
    -043import java.util.stream.Collectors;
    -044import java.util.stream.Stream;
    -045import org.apache.commons.io.IOUtils;
    -046import 
    org.apache.hadoop.conf.Configuration;
    -047import 
    org.apache.hadoop.hbase.AsyncMetaTableAccessor;
    -048import 
    org.apache.hadoop.hbase.ClusterMetrics.Option;
    -049import 
    org.apache.hadoop.hbase.ClusterStatus;
    -050import 
    org.apache.hadoop.hbase.HConstants;
    -051import 
    org.apache.hadoop.hbase.HRegionLocation;
    -052import 
    org.apache.hadoop.hbase.MetaTableAccessor;
    -053import 
    org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
    -054import 
    org.apache.hadoop.hbase.NamespaceDescriptor;
    -055import 
    org.apache.hadoop.hbase.RegionLoad;
    -056import 
    org.apache.hadoop.hbase.RegionLocations;
    -057import 
    org.apache.hadoop.hbase.ServerName;
    -058import 
    org.apache.hadoop.hbase.TableExistsException;
    -059import 
    org.apache.hadoop.hbase.TableName;
    -060import 
    org.apache.hadoop.hbase.TableNotDisabledException;
    -061import 
    org.apache.hadoop.hbase.TableNotEnabledException;
    -062import 
    org.apache.hadoop.hbase.TableNotFoundException;
    -063import 
    org.apache.hadoop.hbase.UnknownRegionException;
    -064import 
    org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
    -065import 
    org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
    -066import 
    org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
    -067import 
    org.apache.hadoop.hbase.client.Scan.ReadType;
    -068import 
    org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
    -069import 
    org.apache.hadoop.hbase.client.replication.TableCFs;
    -070import 
    org.apache.hadoop.hbase.client.security.SecurityCapability;
    -071import 
    org.apache.hadoop.hbase.exceptions.DeserializationException;
    -072import 
    org.apache.hadoop.hbase.ipc.HBaseRpcController;
    -073import 
    org.apache.hadoop.hbase.quotas.QuotaFilter;
    -074import 
    org.apache.hadoop.hbase.quotas.QuotaSettings;
    -075import 
    org.apache.hadoop.hbase.quotas.QuotaTableUtil;
    -076import 
    org.apache.hadoop.hbase.replication.ReplicationException;
    -077import 
    org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    -078import 
    org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
    -079import 
    org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
    -080import 
    org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
    -081import 
    org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
    -082import 
    org.apache.hadoop.hbase.util.Bytes;
    -083import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -084import 
    org.apache.hadoop.hbase.util.ForeignExceptionUtil;
    -085import 
    org.apache.yetus.audience.InterfaceAudience;
    -086import org.slf4j.Logger;
    -087import org.slf4j.LoggerFactory;
    -088
    -089import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
    -090import 
    org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer;
    -091import 
    org.apache.hadoop.hbase.shaded.io.netty.util.Timeout;
    -092import 
    org.apache.hadoop.hbase.shaded.io.netty.util.TimerTask;
    -093import 
    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -094import 
    org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
    -095import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
    -096import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
    -097import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
    -098import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
    -099import 
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html 
    b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
    index 350b409..7f7a20f 100644
    --- a/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
    +++ b/devapidocs/org/apache/hadoop/hbase/MetaTableAccessor.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = 
    {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":41,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9,"i36":9,"i37":41,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":9,"i44":9,"i45":9,"i46":9,"i47":9,"i48":9,"i49":9,"i50":9,"i51":9,"i52":9,"i53":41,"i54":9,"i55":9,"i56":9,"i57":9,"i58":9,"i59":9,"i60":9,"i61":9,"i62":9,"i63":9,"i64":9,"i65":9,"i66":9,"i67":9,"i68":9,"i69":9,"i70":9,"i71":9,"i72":9,"i73":9,"i74":9,"i75":9,"i76":9,"i77":9,"i78":9,"i79":9,"i80":9,"i81":9,"i82":9,"i83":9,"i84":9,"i85":9,"i86":9,"i87":9,"i88":9,"i89":9,"i90":9,"i91":9,"i92":9,"i93":9,"i94":9,"i95":9,"i96":9,"i97":9,"i98":9,"i99":9,"i100":9,"i101":9,"i102":9,"i103":9,"i104":9,"i105":9,"i106":9,"i107":9,"i108":9};
    +var methods = 
    {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":41,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9,"i36":41,"i37":9,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":9,"i44":9,"i45":9,"i46":9,"i47":9,"i48":9,"i49":9,"i50":9,"i51":9,"i52":41,"i53":9,"i54":9,"i55":9,"i56":9,"i57":9,"i58":9,"i59":9,"i60":9,"i61":9,"i62":9,"i63":9,"i64":9,"i65":9,"i66":9,"i67":9,"i68":9,"i69":9,"i70":9,"i71":9,"i72":9,"i73":9,"i74":9,"i75":9,"i76":9,"i77":9,"i78":9,"i79":9,"i80":9,"i81":9,"i82":9,"i83":9,"i84":9,"i85":9,"i86":9,"i87":9,"i88":9,"i89":9,"i90":9,"i91":9,"i92":9,"i93":9,"i94":9,"i95":9,"i96":9,"i97":9,"i98":9,"i99":9,"i100":9,"i101":9,"i102":9,"i103":9,"i104":9,"i105":9};
     var tabs = {65535:["t0","All Methods"],1:["t1","Static 
    Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
@@ -261,15 +261,6 @@ extends Object
 Method and Description
 
-static void
-addDaughter(Connection connection,
-            RegionInfo regionInfo,
-            ServerName sn,
-            long openSeqNum)
-Adds a daughter region entry to meta.
-
 static Put
 addDaughtersToPut(Put put,
                   RegionInfo splitA,
@@ -277,12 +268,12 @@ extends Object
 Adds split daughters to the Put
 
 static Put
 addEmptyLocation(Put p,
                  int replicaId)
 
 static Put
 addLocation(Put p,
             ServerName sn,
@@ -290,17 +281,17 @@ extends Object
             long time,
             int replicaId)
 
 static Put
 addRegionInfo(Put p,
               RegionInfo hri)
 
 (package private) static void
 addRegionStateToPut(Put put,
                     RegionState.State state)
 
 static void
 addRegionsToMeta(Connection connection,
                  List<RegionInfo> regionInfos,
@@ -308,7 +299,7 @@ extends Object
 Adds a hbase:meta row for each of the specified new regions.
 
 static void
 addRegionsToMeta(Connection connection,
                  List<RegionInfo> regionInfos,
@@ -317,46 +308,30 @@ extends Object
 Adds a hbase:meta row for each of the specified new regions.
 
-(package private) static void
+static void
 addRegionToMeta(Connection connection,
                 RegionInfo regionInfo)
 Adds a hbase:meta row for the specified new region.
 
-static void
-addRegionToMeta(Connection connection,
-                RegionInfo regionInfo,
-                RegionInfo splitA,
-                RegionInfo splitB)
-Adds a (single) hbase:meta row for the specified new region and its daughters.
-
-static void
-addRegionToMeta(Table meta,
-                RegionInfo regionInfo)
-Adds a hbase:meta row for the specified new region to the given catalog table.
-
-static void
-addRegionToMeta(Table meta,
-                RegionInfo regionInfo,
-                RegionInfo splitA,
-                RegionInfo splitB)
-Adds a (single) hbase:meta row for the specified new 

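The net effect of this hunk is an API narrowing: the Table-based and daughter-carrying addRegionToMeta overloads disappear, and the Connection-based single-region form becomes the public entry point. A hedged sketch of a caller after the change (wrapper name hypothetical):

    // After the change, a new region is recorded in hbase:meta via a Connection.
    static void recordNewRegion(Connection connection, RegionInfo regionInfo)
        throws IOException {
      MetaTableAccessor.addRegionToMeta(connection, regionInfo);
    }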
    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html 
    b/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
    index 897cf16..8731466 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.html
    @@ -217,7 +217,7 @@ implements hasAnyReplicaGets
     
     
    -private static 
    org.apache.commons.logging.Log
    +private static org.slf4j.Logger
     LOG
     
     
    @@ -533,7 +533,7 @@ implements 
     
     LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
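This -/+ pair is one instance of the tree-wide commons-logging to slf4j migration that repeats through the files below. A minimal sketch of the declaration change (class name taken from the file above; the commented-out line is the pre-migration form):

    // Before: private static final Log LOG = LogFactory.getLog(AsyncRequestFutureImpl.class);
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class AsyncRequestFutureImpl {
      private static final Logger LOG = LoggerFactory.getLogger(AsyncRequestFutureImpl.class);
    }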
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.html 
    b/devapidocs/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.html
    index 5e9367f..2e49153 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.html
@@ -151,7 +151,7 @@ extends Object
     future
     
     
    -private static 
    org.apache.commons.logging.Log
    +private static org.slf4j.Logger
     LOG
     
     
@@ -282,7 +282,7 @@ extends Object
     
     
     LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.html
     
    b/devapidocs/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.html
    index 09f4f1a..29068cd 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.html
    @@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
     
     
     @InterfaceAudience.Private
    -class AsyncScanSingleRegionRpcRetryingCaller
    +class AsyncScanSingleRegionRpcRetryingCaller
 extends Object
     Retry caller for scanning a region.
      
@@ -194,7 +194,7 @@ extends Object
     loc
     
     
    -private static 
    org.apache.commons.logging.Log
    +private static org.slf4j.Logger
     LOG
     
     
@@ -415,7 +415,7 @@ extends Object
     
     
     LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/AsyncTableResultScanner.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableResultScanner.html 
    b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableResultScanner.html
    index fa53027..80b31e3 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/AsyncTableResultScanner.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncTableResultScanner.html
    @@ -166,7 +166,7 @@ implements error
     
     
    -private static 
    org.apache.commons.logging.Log
    +private static org.slf4j.Logger
     LOG
     
     
    @@ -341,7 +341,7 @@ implements 
     
     LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/client/BatchErrors.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/client/BatchErrors.html 
    b/devapidocs/org/apache/hadoop/hbase/client/BatchErrors.html
    index 2ed1bab..947c2bc 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/BatchErrors.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/BatchErrors.html
@@ -138,7 +138,7 @@ extends Object
     addresses
     
     
    -private static 
    org.apache.commons.logging.Log
    +private static org.slf4j.Logger
     LOG
     
     
@@ -227,7 +227,7 @@ extends Object
     
     
     LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
     
     
     
    
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
    --
    diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html 
    b/apidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
    index c869024..0fdf3e6 100644
    --- a/apidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
    +++ b/apidocs/src-html/org/apache/hadoop/hbase/util/ByteBufferUtils.html
    @@ -24,26 +24,26 @@
     016 */
     017package org.apache.hadoop.hbase.util;
     018
    -019import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    -020import java.io.ByteArrayOutputStream;
    -021import java.io.DataInput;
    -022import java.io.DataInputStream;
    -023import java.io.DataOutput;
    -024import java.io.IOException;
    -025import java.io.InputStream;
    -026import java.io.OutputStream;
    -027import java.math.BigDecimal;
    -028import java.math.BigInteger;
    -029import java.nio.ByteBuffer;
    -030import java.util.Arrays;
    -031
    -032import 
    org.apache.yetus.audience.InterfaceAudience;
    -033import 
    org.apache.hadoop.hbase.io.ByteBufferWriter;
    -034import 
    org.apache.hadoop.hbase.io.util.StreamUtils;
    -035import org.apache.hadoop.io.IOUtils;
    -036import 
    org.apache.hadoop.io.WritableUtils;
    +019import java.io.ByteArrayOutputStream;
    +020import java.io.DataInput;
    +021import java.io.DataInputStream;
    +022import java.io.DataOutput;
    +023import java.io.IOException;
    +024import java.io.InputStream;
    +025import java.io.OutputStream;
    +026import java.math.BigDecimal;
    +027import java.math.BigInteger;
    +028import java.nio.ByteBuffer;
    +029import java.util.Arrays;
    +030
    +031import 
    org.apache.hadoop.hbase.io.ByteBufferWriter;
    +032import 
    org.apache.hadoop.hbase.io.util.StreamUtils;
    +033import org.apache.hadoop.io.IOUtils;
    +034import 
    org.apache.hadoop.io.WritableUtils;
    +035import 
    org.apache.yetus.audience.InterfaceAudience;
    +036import sun.nio.ch.DirectBuffer;
     037
    -038import sun.nio.ch.DirectBuffer;
    +038import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
     039
     040/**
     041 * Utility functions for working with 
    byte buffers, such as reading/writing
    @@ -253,27 +253,27 @@
     245  return 8;
     246}
     247
-248if (value < (1l << 4 * 8)) {
+248if (value < (1L << (4 * 8))) {
 249  // no more than 4 bytes
-250  if (value < (1l << 2 * 8)) {
-251if (value < (1l << 1 * 8)) {
+250  if (value < (1L << (2 * 8))) {
+251if (value < (1L << (1 * 8))) {
 252  return 1;
 253}
 254return 2;
 255  }
-256  if (value < (1l << 3 * 8)) {
+256  if (value < (1L << (3 * 8))) {
 257return 3;
 258  }
 259  return 4;
 260}
 261// more than 4 bytes
-262if (value < (1l << 6 * 8)) {
-263  if (value < (1l << 5 * 8)) {
+262if (value < (1L << (6 * 8))) {
+263  if (value < (1L << (5 * 8))) {
 264return 5;
 265  }
 266  return 6;
 267}
-268if (value < (1l << 7 * 8)) {
+268if (value < (1L << (7 * 8))) {
 269  return 7;
 270}
 271return 8;
@@ -289,13 +289,13 @@
 281  return 4;
 282}
 283
-284if (value < (1 << 2 * 8)) {
-285  if (value < (1 << 1 * 8)) {
+284if (value < (1 << (2 * 8))) {
+285  if (value < (1 << (1 * 8))) {
 286return 1;
 287  }
 288  return 2;
 289}
-290if (value <= (1 << 3 * 8)) {
+290if (value <= (1 << (3 * 8))) {
 291  return 3;
 292}
 293return 4;
@@ -345,7 +345,7 @@
 337  throws IOException {
 338long tmpLong = 0;
 339for (int i = 0; i < fitInBytes; ++i) {
-340  tmpLong |= (in.read() & 0xffl) << (8 * i);
+340  tmpLong |= (in.read() & 0xffL) << (8 * i);
 341}
 342return tmpLong;
 343  }
@@ -358,7 +358,7 @@
 350  public static long readLong(ByteBuffer in, final int fitInBytes) {
 351long tmpLength = 0;
 352for (int i = 0; i < fitInBytes; ++i) {
-353  tmpLength |= (in.get() & 0xffl) << (8l * i);
+353  tmpLength |= (in.get() & 0xffL) << (8L * i);
 354}
 355return tmpLength;
 356  }
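
The hunks above touch a byte-length computation: the value is compared against increasing powers of 256 (1L << (n * 8)) to find the smallest n bytes that can hold it, and readLong reassembles such a truncated little-endian value. A self-contained sketch of the same idea (method names here are illustrative, not the ByteBufferUtils signatures):

    public final class FitsInSketch {
      // Smallest number of bytes able to hold value; negatives use all 8,
      // mirroring the "return 8" fallthrough visible in the hunk's context.
      static int bytesNeeded(long value) {
        if (value < 0) {
          return 8;
        }
        int n = 1;
        while (n < 8 && value >= (1L << (n * 8))) {
          n++;
        }
        return n;
      }

      // Rebuild a value stored little-endian in fitInBytes bytes, as readLong does.
      static long readLong(byte[] in, int fitInBytes) {
        long tmp = 0;
        for (int i = 0; i < fitInBytes; ++i) {
          tmp |= (in[i] & 0xffL) << (8 * i);
        }
        return tmp;
      }

      public static void main(String[] args) {
        System.out.println(bytesNeeded(255L));                            // 1
        System.out.println(bytesNeeded(256L));                            // 2
        System.out.println(Long.toHexString(readLong(new byte[] {1, 2}, 2)));  // 201
      }
    }

The commit itself only changes style: lowercase 1l becomes 1L and the shift distances gain parentheses; since << binds looser than *, 1L << 4 * 8 already parses as 1L << (4 * 8), it just reads ambiguously.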
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
    --
    diff --git 
    a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html 
    b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
    index 6ab259f..51d92c2 100644
    --- 
    a/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
    +++ 
    b/apidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
    @@ -44,240 +44,240 @@
     036import java.util.Iterator;
     037import java.util.List;
     038
    -039import org.apache.commons.logging.Log;
    -040import 
    org.apache.commons.logging.LogFactory;
    -041import org.apache.hadoop.hbase.Cell;
    -042import 
    org.apache.hadoop.hbase.CellComparator;
    -043import 
    org.apache.hadoop.hbase.KeyValue;
    -044import 
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
    index 5e45072..34535d8 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
    @@ -50,754 +50,751 @@
     042import 
    org.apache.hadoop.hbase.CallQueueTooBigException;
     043import 
    org.apache.hadoop.hbase.CellScanner;
     044import 
    org.apache.hadoop.hbase.DoNotRetryIOException;
    -045import 
    org.apache.hadoop.hbase.HBaseInterfaceAudience;
    -046import 
    org.apache.hadoop.hbase.HConstants;
    -047import org.apache.hadoop.hbase.Server;
    -048import 
    org.apache.hadoop.hbase.conf.ConfigurationObserver;
    -049import 
    org.apache.hadoop.hbase.exceptions.RequestTooBigException;
    -050import 
    org.apache.hadoop.hbase.io.ByteBufferPool;
    -051import 
    org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
    -052import 
    org.apache.hadoop.hbase.monitoring.TaskMonitor;
    -053import 
    org.apache.hadoop.hbase.nio.ByteBuff;
    -054import 
    org.apache.hadoop.hbase.nio.MultiByteBuff;
    -055import 
    org.apache.hadoop.hbase.nio.SingleByteBuff;
    -056import 
    org.apache.hadoop.hbase.regionserver.RSRpcServices;
    -057import 
    org.apache.hadoop.hbase.security.SaslUtil;
    -058import 
    org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
    -059import 
    org.apache.hadoop.hbase.security.User;
    -060import 
    org.apache.hadoop.hbase.security.UserProvider;
    -061import 
    org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
    -062import 
    org.apache.hadoop.hbase.util.Pair;
    -063import 
    org.apache.hadoop.security.UserGroupInformation;
    -064import 
    org.apache.hadoop.security.authorize.AuthorizationException;
    -065import 
    org.apache.hadoop.security.authorize.PolicyProvider;
    -066import 
    org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
    -067import 
    org.apache.hadoop.security.token.SecretManager;
    -068import 
    org.apache.hadoop.security.token.TokenIdentifier;
    -069import 
    org.apache.yetus.audience.InterfaceAudience;
    -070import 
    org.apache.yetus.audience.InterfaceStability;
    -071
    -072import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    -073import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
    -074import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
    -075import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
    -076import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
    -077import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
    -078import 
    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -079import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    -080import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader;
    -081
    -082/**
    -083 * An RPC server that hosts protobuf 
    described Services.
    -084 *
    -085 */
    -086@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC,
     HBaseInterfaceAudience.PHOENIX})
    -087@InterfaceStability.Evolving
    -088public abstract class RpcServer 
    implements RpcServerInterface,
    -089ConfigurationObserver {
    -090  // LOG is being used in CallRunner and 
    the log level is being changed in tests
    -091  public static final Log LOG = 
    LogFactory.getLog(RpcServer.class);
    -092  protected static final 
    CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION
    -093  = new CallQueueTooBigException();
    +045import 
    org.apache.hadoop.hbase.HConstants;
    +046import org.apache.hadoop.hbase.Server;
    +047import 
    org.apache.hadoop.hbase.conf.ConfigurationObserver;
    +048import 
    org.apache.hadoop.hbase.exceptions.RequestTooBigException;
    +049import 
    org.apache.hadoop.hbase.io.ByteBufferPool;
    +050import 
    org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
    +051import 
    org.apache.hadoop.hbase.monitoring.TaskMonitor;
    +052import 
    org.apache.hadoop.hbase.nio.ByteBuff;
    +053import 
    org.apache.hadoop.hbase.nio.MultiByteBuff;
    +054import 
    org.apache.hadoop.hbase.nio.SingleByteBuff;
    +055import 
    org.apache.hadoop.hbase.regionserver.RSRpcServices;
    +056import 
    org.apache.hadoop.hbase.security.SaslUtil;
    +057import 
    org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
    +058import 
    org.apache.hadoop.hbase.security.User;
    +059import 
    org.apache.hadoop.hbase.security.UserProvider;
    +060import 
    org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
    +061import 
    org.apache.hadoop.hbase.util.Pair;
    +062import 
    org.apache.hadoop.security.UserGroupInformation;
    +063import 
    org.apache.hadoop.security.authorize.AuthorizationException;
    +064import 
    org.apache.hadoop.security.authorize.PolicyProvider;
    +065import 
    org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
    +066import 
    org.apache.hadoop.security.token.SecretManager;
    +067import 
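
A side note on the RpcServer fragment above: the class keeps one preallocated CALL_QUEUE_TOO_BIG_EXCEPTION instance rather than constructing an exception per rejected call, which avoids per-call allocation on an overloaded hot path (whether HBase's exception also suppresses stack traces is not shown here; the sketch below demonstrates one common way to do so, with illustrative names):

    public class HotPathExceptionSketch {
      // Constructed once, thrown many times. Overriding fillInStackTrace
      // skips the expensive stack capture; a shared instance's trace would
      // be misleading anyway.
      private static final RuntimeException QUEUE_FULL =
          new RuntimeException("Call queue is full") {
            @Override
            public synchronized Throwable fillInStackTrace() {
              return this;
            }
          };

      void submit(boolean queueHasRoom) {
        if (!queueHasRoom) {
          throw QUEUE_FULL;  // no per-call allocation under overload
        }
      }
    }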
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
    index 219283e..2b5d70b 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
    @@ -435,1198 +435,1203 @@
     427
     428if (backingMap.containsKey(cacheKey)) 
    {
     429  Cacheable existingBlock = 
    getBlock(cacheKey, false, false, false);
    -430  if 
    (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
    -431throw new 
    RuntimeException("Cached block contents differ, which should not have 
    happened."
    -432+ "cacheKey:" + cacheKey);
    -433  }
    -434   String msg = "Caching an already 
    cached block: " + cacheKey;
    -435   msg += ". This is harmless and can 
    happen in rare cases (see HBASE-8547)";
    -436   LOG.warn(msg);
    -437  return;
    -438}
    -439
    -440/*
    -441 * Stuff the entry into the RAM cache 
    so it can get drained to the persistent store
    -442 */
    -443RAMQueueEntry re =
    -444new RAMQueueEntry(cacheKey, 
    cachedItem, accessCount.incrementAndGet(), inMemory);
    -445if (ramCache.putIfAbsent(cacheKey, 
    re) != null) {
    -446  return;
    -447}
-448int queueNum = (cacheKey.hashCode() & 0x7FFF) % writerQueues.size();
-449BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
    -450boolean successfulAddition = false;
    -451if (wait) {
    -452  try {
    -453successfulAddition = bq.offer(re, 
    DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
    -454  } catch (InterruptedException e) 
    {
    -455
    Thread.currentThread().interrupt();
    -456  }
    -457} else {
    -458  successfulAddition = 
    bq.offer(re);
    -459}
    -460if (!successfulAddition) {
    -461  ramCache.remove(cacheKey);
    -462  cacheStats.failInsert();
    -463} else {
    -464  this.blockNumber.increment();
    -465  
    this.heapSize.add(cachedItem.heapSize());
    -466  blocksByHFile.add(cacheKey);
    -467}
    -468  }
    -469
    -470  /**
    -471   * Get the buffer of the block with the 
    specified key.
    -472   * @param key block's cache key
    -473   * @param caching true if the caller 
    caches blocks on cache misses
    -474   * @param repeat Whether this is a 
    repeat lookup for the same block
    -475   * @param updateCacheMetrics Whether we 
    should update cache metrics or not
    -476   * @return buffer of specified cache 
    key, or null if not in cache
    -477   */
    -478  @Override
    -479  public Cacheable getBlock(BlockCacheKey 
    key, boolean caching, boolean repeat,
    -480  boolean updateCacheMetrics) {
    -481if (!cacheEnabled) {
    -482  return null;
    -483}
    -484RAMQueueEntry re = 
    ramCache.get(key);
    -485if (re != null) {
    -486  if (updateCacheMetrics) {
    -487cacheStats.hit(caching, 
    key.isPrimary(), key.getBlockType());
    -488  }
    -489  
    re.access(accessCount.incrementAndGet());
    -490  return re.getData();
    -491}
    -492BucketEntry bucketEntry = 
    backingMap.get(key);
    -493if (bucketEntry != null) {
    -494  long start = System.nanoTime();
    -495  ReentrantReadWriteLock lock = 
    offsetLock.getLock(bucketEntry.offset());
    -496  try {
    -497lock.readLock().lock();
    -498// We can not read here even if 
    backingMap does contain the given key because its offset
    -499// maybe changed. If we lock 
    BlockCacheKey instead of offset, then we can only check
    -500// existence here.
    -501if 
    (bucketEntry.equals(backingMap.get(key))) {
    -502  // TODO : change this area - 
    should be removed after server cells and
    -503  // 12295 are available
    -504  int len = 
    bucketEntry.getLength();
    -505  if (LOG.isTraceEnabled()) {
    -506LOG.trace("Read offset=" + 
    bucketEntry.offset() + ", len=" + len);
    -507  }
    -508  Cacheable cachedBlock = 
    ioEngine.read(bucketEntry.offset(), len,
    -509  
    bucketEntry.deserializerReference(this.deserialiserMap));
    -510  long timeTaken = 
    System.nanoTime() - start;
    -511  if (updateCacheMetrics) {
    -512cacheStats.hit(caching, 
    key.isPrimary(), key.getBlockType());
    -513
    cacheStats.ioHit(timeTaken);
    -514  }
    -515  if (cachedBlock.getMemoryType() 
    == MemoryType.SHARED) {
    -516
    bucketEntry.refCount.incrementAndGet();
    -517  }
    -518  
    bucketEntry.access(accessCount.incrementAndGet());
-519  if (this.ioErrorStartTime > 0) {
    -520ioErrorStartTime = -1;
    -521  }
    -522  return cachedBlock;
    -523}
    -524  } catch (IOException ioex) {
    -525
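
The BucketCache fragment above shows its two-stage write path: cacheBlock stages the entry in an in-memory ramCache and offers it to one of several writer queues for asynchronous persistence, rolling the staging back if the queue is full, while getBlock consults the ramCache before the persisted backingMap. A stripped-down sketch of that staging pattern (types and names are simplified stand-ins, not the BucketCache API):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    class RamStagingSketch<K, V> {
      private final ConcurrentMap<K, V> ramCache = new ConcurrentHashMap<>();
      private final List<BlockingQueue<K>> writerQueues = new ArrayList<>();

      RamStagingSketch(int queues) {
        for (int i = 0; i < queues; i++) {
          writerQueues.add(new ArrayBlockingQueue<>(64));
        }
      }

      // Mirrors cacheBlock: stage in RAM, then hand off to a writer queue.
      boolean cache(K key, V value) {
        if (ramCache.putIfAbsent(key, value) != null) {
          return false;  // someone already staged this key
        }
        // Spread keys over queues using a non-negative hash.
        int queueNum = (key.hashCode() & 0x7FFFFFFF) % writerQueues.size();
        boolean queued = writerQueues.get(queueNum).offer(key);
        if (!queued) {
          ramCache.remove(key);  // roll back staging, as the hunk does on offer failure
        }
        return queued;
      }

      // Mirrors the start of getBlock: RAM first; the persisted map lookup
      // under an offset lock is omitted here.
      V get(K key) {
        return ramCache.get(key);
      }
    }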

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
    index 7cece5c..6361a24 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
    @@ -248,379 +248,383 @@
     240 */
     241CheckAndMutateBuilder 
    ifNotExists();
     242
    -243default CheckAndMutateBuilder 
    ifEquals(byte[] value) {
    -244  return 
    ifMatches(CompareOperator.EQUAL, value);
    -245}
    -246
    -247/**
    -248 * @param compareOp comparison 
    operator to use
    -249 * @param value the expected value
    -250 */
    -251CheckAndMutateBuilder 
    ifMatches(CompareOperator compareOp, byte[] value);
    -252
    -253/**
    -254 * @param put data to put if check 
    succeeds
    -255 * @return {@code true} if the new 
    put was executed, {@code false} otherwise. The return value
    -256 * will be wrapped by a 
    {@link CompletableFuture}.
    -257 */
-258CompletableFuture<Boolean> thenPut(Put put);
    -259
    -260/**
    -261 * @param delete data to delete if 
    check succeeds
    -262 * @return {@code true} if the new 
    delete was executed, {@code false} otherwise. The return
    -263 * value will be wrapped by a 
    {@link CompletableFuture}.
    -264 */
-265CompletableFuture<Boolean> thenDelete(Delete delete);
    -266
    -267/**
    -268 * @param mutation mutations to 
    perform if check succeeds
    -269 * @return true if the new mutation 
    was executed, false otherwise. The return value will be
    -270 * wrapped by a {@link 
    CompletableFuture}.
    -271 */
-272CompletableFuture<Boolean> thenMutate(RowMutations mutation);
    -273  }
    -274
    -275  /**
    -276   * Performs multiple mutations 
    atomically on a single row. Currently {@link Put} and
    -277   * {@link Delete} are supported.
    -278   * @param mutation object that 
    specifies the set of mutations to perform atomically
    -279   * @return A {@link CompletableFuture} 
    that always returns null when complete normally.
    -280   */
-281  CompletableFuture<Void> mutateRow(RowMutations mutation);
    -282
    -283  /**
    -284   * The scan API uses the observer 
    pattern.
    -285   * @param scan A configured {@link 
    Scan} object.
    -286   * @param consumer the consumer used to 
    receive results.
    -287   * @see ScanResultConsumer
    -288   * @see AdvancedScanResultConsumer
    -289   */
    -290  void scan(Scan scan, C consumer);
    -291
    -292  /**
    -293   * Gets a scanner on the current table 
    for the given family.
    -294   * @param family The column family to 
    scan.
    -295   * @return A scanner.
    -296   */
    -297  default ResultScanner getScanner(byte[] 
    family) {
    -298return getScanner(new 
    Scan().addFamily(family));
    -299  }
    -300
    -301  /**
    -302   * Gets a scanner on the current table 
    for the given family and qualifier.
    -303   * @param family The column family to 
    scan.
    -304   * @param qualifier The column 
    qualifier to scan.
    -305   * @return A scanner.
    -306   */
    -307  default ResultScanner getScanner(byte[] 
    family, byte[] qualifier) {
    -308return getScanner(new 
    Scan().addColumn(family, qualifier));
    -309  }
    -310
    -311  /**
    -312   * Returns a scanner on the current 
    table as specified by the {@link Scan} object.
    -313   * @param scan A configured {@link 
    Scan} object.
    -314   * @return A scanner.
    -315   */
    -316  ResultScanner getScanner(Scan scan);
    -317
    -318  /**
    -319   * Return all the results that match 
    the given scan object.
-320   * <p>
-321   * Notice that usually you should use this method with a {@link Scan} object that has limit set.
-322   * For example, if you want to get the closest row after a given row, you could do this:
-323   * <p>
-324   *
-325   * <pre>
-326   * <code>
-327   * table.scanAll(new Scan().withStartRow(row, false).setLimit(1)).thenAccept(results -> {
-328   *   if (results.isEmpty()) {
-329   *      System.out.println("No row after " + Bytes.toStringBinary(row));
-330   *   } else {
-331   *     System.out.println("The closest row after " + Bytes.toStringBinary(row) + " is "
-332   *         + Bytes.toStringBinary(results.stream().findFirst().get().getRow()));
-333   *   }
-334   * });
-335   * </code>
-336   * </pre>
-337   * <p>
-338   * If your result set is very large, you should use other scan method to get a scanner or use
-339   * callback to process the results. They will do chunking to prevent OOM. The scanAll method will
-340   * fetch all the results and store them in a List and then return the list to you.
    +243/**
    +244 * Check for equality.
    +245 * @param value the expected value
    +246 */
    +247default CheckAndMutateBuilder 
    ifEquals(byte[] value) {
    +248  return 
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.html
     
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.html
    index 075273c..6494589 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.html
    @@ -256,7 +256,7 @@ implements RegionObserver
    -postAppend,
     postBatchMutate,
     postBatchMutateIndispensably,
     postBulkLoadHFile,
     postCheckAndDelete,
     postCheckAndPut,
     postClose,
     postCloseRegionOperation, postCommitStoreFile,
     postCompact,
     postCompactSelection,
     postDelete,
     postExists,
     postFlush,
     postFlush,
     postGetOp,
     postIncrement,
     postInstantiateDeleteTracker,
     postMemStoreCompaction, postMutationBeforeWAL,
     postOpen,
     postPut,
     postReplayWALs,
     postScannerClose,
     postScannerFilterRow,
     postScannerNext,
     postScannerOpen,
     postStartRegionOperation,
     postStoreFileReaderOpen,
     postWALRestore,
     preAppend,
     preAppendAfterRowLock,
     preBatchMutate,
     preBulkLoadHFile,
     preCheckAndDelete,
     preCheckAndDeleteAfterRowLock,
     preCheckAndPut,
     preCheckAndPutAfterRowLock,
     preClose,
     preCommitStoreFile,
     preCompactScannerOpen,
     preCompactSelection,
     preDelete,
     preExists,
     preFlush,
     preFlush,
     preFlushScannerOpen,
     preGetOp, preIncrement,
     preIncrementAfterRowLock,
     preMemStoreCompaction,
preMemStoreCompactionCompact, preMemStoreCompactionCompactScannerOpen,
     preOpen,
     prePrepareTimeStampForDeleteVersion,
     prePut,
     preReplayWALs,
     preScannerClose,
     preScannerNext,
preScannerOpen, preStoreFileReaderOpen,
     preStoreScannerOpen,
preWALRestore
    +postAppend,
     postBatchMutate,
     postBatchMutateIndispensably,
     postBulkLoadHFile,
     postCheckAndDelete,
     postCheckAndPut,
     postClose,
     postCloseRegionOperation, postCommitStoreFile,
     postCompact,
     postCompactSelection,
     postDelete,
     postExists,
     postFlush,
     postFlush,
     postGetOp,
     postIncrement,
     postInstantiateDeleteTracker,
     postMemStoreCompaction, postMutationBeforeWAL,
     postOpen,
     postPut,
     postReplayWALs,
     postScannerClose,
     postScannerFilterRow,
     postScannerNext,
     postScannerOpen,
     postStartRegionOperation,
     postStoreFileReaderOpen,
     postWALRestore,
     preAppend,
     preAppendAfterRowLock,
     preBatchMutate,
     preBulkLoadHFile,
     preCheckAndDelete,
     preCheckAndDeleteAfterRowLock,
     preCheckAndPut,
     preCheckAndPutAfterRowLock,
     preClose,
     preCommitStoreFile,
     preCompactScannerOpen,
     preCompactSelection,
     preDelete,
     preExists,
     preFlush,
     preFlush,
     preFlushScannerOpen,
     preGetOp, preIncrement,
     preIncrementAfterRowLock,
     preMemStoreCompaction,
preMemStoreCompactionCompact, preMemStoreCompactionCompactScannerOpen,
     preOpen,
     prePrepareTimeStampForDeleteVersion,
     prePut, preReplayWALs,
     preScannerClose,
     preScannerNext,
     preScannerOpen, preStoreFileReaderOpen,
     preStoreScannerOpen,
     preWALRestore
     
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
     
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
    index 9726790..a8b23e7 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.html
    @@ -338,7 +338,7 @@ implements RegionObserver
    -postAppend,
     postBatchMutate,
     postBatchMutateIndispensably,
     postBulkLoadHFile,
     postCheckAndDelete,
     postCheckAndPut,
     postClose,
     postCloseRegionOperation, postCommitStoreFile,
     postCompact,
     postCompactSelection,
     postDelete,
     postExists,
     postFlush,
     postFlush,
     postGetOp,
     postIncrement,
     postInstantiateDeleteTracker,
     postMemStoreCompaction, postMutationBeforeWAL,
     postOpen,
     postPut,
     postReplayWALs,
     postScannerClose,
     postScannerFilterRow,
     postScannerNext,
     postScannerOpen,
     postStartRegionOperation,
     postStoreFileReaderOpen,
     postWALRestore,
     preAppend,
     preAppendAfterRowLock,
     preBatchMutate,
     preBulkLoadHFile,
     preCheckAndDelete,
     preCheckAndDeleteAfterRowLock,
     preCheckAndPut,
     preCheckAndPutAfterRowLock,
     preClose,
     preCommitStoreFile,
preCompactSelection, preDelete,
     preExists,
     preFlush,
     preIncrementAfterRowLock,
     preMemStoreCompaction,
     preOpen,
     prePrepareTimeStampForDeleteVersion,
     prePut,
     preReplayWALs,
     preScannerClose,
     preScannerNext,
     preScannerOpen,
     preStoreFileReaderOpen,
     preWALRestore
    +postAppend,
     postBatchMutate,
     postBatchMutateIndispensably,
     postBulkLoadHFile,
     postCheckAndDelete,
     postCheckAndPut,
     postClose,
     postCloseRegionOperation, postCommitStoreFile,
     

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
    --
    diff --git 
    a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
     
    b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
    index 9dfdaa4..9cb6590 100644
    --- 
    a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
    +++ 
    b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Exemplar for hbase-shaded-client archetype  
    Reactor Dependency Convergence
     
    @@ -488,22 +488,22 @@
     3.4.10
     
     
    -org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|+-org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile
     - version managed from 3.4.6; omitted for duplicate)|+-org.apache.zookeeper:zookeeper:jar:3.4.10:compile|+-org.apache.hadoop:hadoop-common:jar:2.7.4:compile||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile
     - version managed from 3.4.6; omitted for duplicate)|+-org.apache.hadoop:hadoop-auth:jar:2.7.4:compile||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)|\-org.apache.hadoop:hadoop-client:jar:2.7.4:compile|\-org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.4:compile|\-org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile|\-org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile
     - version managed from 3.4.6; omitted for duplicate)+-org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT:compile|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
    version managed from 3.4.6; omitted for duplicate)+-org.apache.hbase:hbase-mapreduce:test-jar:tests:3.0.0-SNAPSHOT:test|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:test - 
version managed from 3.4.6; omitted for duplicate)+-org.apache.hbase:hbase-testing-util:jar:3.0.0-SNAPSHOT:test|+-org.apache.hbase:hbase-server:test-jar:tests:3.0.0-SNAPSHOT:test||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:test
     - version managed from 3.4.6; omitted for duplicate)|\-org.apache.hadoop:hadoop-minicluster:jar:2.7.4:test|+-org.apache.hadoop:hadoop-common:test-jar:tests:2.7.4:test||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:test
     - version managed from 3.4.6; omitted for duplicate)|\-org.apache.hadoop:hadoop-yarn-server-tests:test-jar:tests:2.7.4:test|\-org.apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.4:test|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:test
 - version managed from 3.4.6; omitted for duplicate)+-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
    version managed from 3.4.6; omitted for duplicate)+-org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
    version managed from 3.4.6; omitted for duplicate)+-org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT:compile|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
    version managed from 3.4.6; omitted for duplicate)+-org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
    version managed from 3.4.6; omitted for duplicate)\-org.apache.hbase:hbase-rsgroup:jar:3.0.0-SNAPSHOT:compile\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile 
- version managed from 3.4.6; omitted for duplicate)
    -org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile|+-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
    version managed from 3.4.6; omitted for duplicate)|\-org.apache.hadoop:hadoop-auth:jar:2.7.4:compile|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile
     - version managed from 3.4.6; omitted for duplicate)+-org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|+-org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile
     - version managed from 3.4.6; omitted for duplicate)|+-org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)|+-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
    version managed from 3.4.6; omitted for duplicate)|\-org.apache.hadoop:hadoop-client:jar:2.7.4:compile|\-org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.4:compile|\-org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile|\-org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile
     - version managed from 3.4.6; omitted for 

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-spark/dependency-convergence.html
    --
    diff --git a/hbase-build-configuration/hbase-spark/dependency-convergence.html 
    b/hbase-build-configuration/hbase-spark/dependency-convergence.html
    index 042dcf3..deb4bd8 100644
    --- a/hbase-build-configuration/hbase-spark/dependency-convergence.html
    +++ b/hbase-build-configuration/hbase-spark/dependency-convergence.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Spark  Reactor Dependency Convergence
     
    @@ -838,7 +838,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2017-12-02
    +  Last Published: 
    2017-12-03
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-spark/dependency-info.html
    --
    diff --git a/hbase-build-configuration/hbase-spark/dependency-info.html 
    b/hbase-build-configuration/hbase-spark/dependency-info.html
    index c4f5e75..9bcde02 100644
    --- a/hbase-build-configuration/hbase-spark/dependency-info.html
    +++ b/hbase-build-configuration/hbase-spark/dependency-info.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Spark  Dependency Information
     
    @@ -147,7 +147,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2017-12-02
    +  Last Published: 
    2017-12-03
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-spark/dependency-management.html
    --
    diff --git a/hbase-build-configuration/hbase-spark/dependency-management.html 
    b/hbase-build-configuration/hbase-spark/dependency-management.html
    index 7a1bf0b..a740cf8 100644
    --- a/hbase-build-configuration/hbase-spark/dependency-management.html
    +++ b/hbase-build-configuration/hbase-spark/dependency-management.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Spark  Project Dependency Management
     
    @@ -804,7 +804,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2017-12-02
    +  Last Published: 
    2017-12-03
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-spark/index.html
    --
    diff --git a/hbase-build-configuration/hbase-spark/index.html 
    b/hbase-build-configuration/hbase-spark/index.html
    index 23626e8..955708c 100644
    --- a/hbase-build-configuration/hbase-spark/index.html
    +++ b/hbase-build-configuration/hbase-spark/index.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Spark  About
     
    @@ -119,7 +119,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2017-12-02
    +  Last Published: 
    2017-12-03
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-spark/integration.html
    --
    diff --git a/hbase-build-configuration/hbase-spark/integration.html 
    b/hbase-build-configuration/hbase-spark/integration.html
    index 2c49e04..b1a7524 100644
    --- a/hbase-build-configuration/hbase-spark/integration.html
    +++ b/hbase-build-configuration/hbase-spark/integration.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Spark  CI Management
     
    @@ -126,7 +126,7 @@
     https://www.apache.org/;>The Apache Software 
    Foundation.
     All rights reserved.  
     
    -  Last Published: 
    2017-12-02
    +  Last Published: 
    2017-12-03
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-spark/issue-tracking.html
    --
    diff --git a/hbase-build-configuration/hbase-spark/issue-tracking.html 
    b/hbase-build-configuration/hbase-spark/issue-tracking.html
    index f9e4bb8..8d6f488 100644
    --- a/hbase-build-configuration/hbase-spark/issue-tracking.html
    +++ b/hbase-build-configuration/hbase-spark/issue-tracking.html
    @@ -7,7 +7,7 @@
       
     
     
    -
    +
     
     Apache HBase - Spark  Issue 

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
    index 25e368d..d0f781f 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
    @@ -25,798 +25,798 @@
     017 */
     018package 
    org.apache.hadoop.hbase.io.asyncfs;
     019
    -020import static 
    org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
    -021import static 
    org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
    -022import static 
    org.apache.hadoop.fs.CreateFlag.CREATE;
    -023import static 
    org.apache.hadoop.fs.CreateFlag.OVERWRITE;
    -024import static 
    org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
    -025import static 
    org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
    +020import static 
    org.apache.hadoop.fs.CreateFlag.CREATE;
    +021import static 
    org.apache.hadoop.fs.CreateFlag.OVERWRITE;
    +022import static 
    org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
    +023import static 
    org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
    +024import static 
    org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
    +025import static 
    org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
     026import static 
    org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
     027import static 
    org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
     028import static 
    org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
     029import static 
    org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
     030
    -031import 
    org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
    -032import 
    org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
    -033import 
    com.google.protobuf.CodedOutputStream;
    -034
    -035import 
    org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
    -036import 
    org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
    -037import 
    org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
    -038import 
    org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
    -039import 
    org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
    -040import 
    org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
    -041import 
    org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
    -042import 
    org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
    -043import 
    org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
    -044import 
    org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
    -045import 
    org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
    -046import 
    org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
    -047import 
    org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
    -048import 
    org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
    -049import 
    org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
    -050import 
    org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
    -051import 
    org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
    -052import 
    org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
    -053import 
    org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
    -054import 
    org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
    -055import 
    org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
    -056
    -057import java.io.IOException;
    -058import 
    java.lang.reflect.InvocationTargetException;
    -059import java.lang.reflect.Method;
    -060import java.util.ArrayList;
    -061import java.util.EnumSet;
    -062import java.util.List;
    -063import java.util.concurrent.TimeUnit;
    -064
    -065import org.apache.commons.logging.Log;
    -066import 
    org.apache.commons.logging.LogFactory;
    -067import 
    org.apache.hadoop.conf.Configuration;
    -068import 
    org.apache.hadoop.crypto.CryptoProtocolVersion;
    -069import 
    org.apache.hadoop.crypto.Encryptor;
    -070import org.apache.hadoop.fs.CreateFlag;
    -071import org.apache.hadoop.fs.FileSystem;
    -072import 
    org.apache.hadoop.fs.FileSystemLinkResolver;
    -073import org.apache.hadoop.fs.Path;
    -074import 
    org.apache.hadoop.fs.UnresolvedLinkException;
    -075import 
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
    index d438f22..7c59e27 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
    @@ -1290,8 +1290,8 @@
     1282   CompactType 
    compactType) throws IOException {
     1283switch (compactType) {
     1284  case MOB:
    -1285
    compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
    major,
    -1286  columnFamily);
    +1285
    compact(this.connection.getAdminForMaster(), 
    RegionInfo.createMobRegionInfo(tableName),
    +1286major, columnFamily);
     1287break;
     1288  case NORMAL:
     1289checkTableExists(tableName);
    @@ -3248,7 +3248,7 @@
 3240  new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
     3241@Override
     3242public 
    AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
    -3243  RegionInfo info = 
    getMobRegionInfo(tableName);
    +3243  RegionInfo info = 
    RegionInfo.createMobRegionInfo(tableName);
     3244  GetRegionInfoRequest 
    request =
     3245
    RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
     3246  GetRegionInfoResponse 
    response = masterAdmin.getRegionInfo(rpcController, request);
    @@ -3312,7 +3312,7 @@
     3304}
     3305break;
     3306  default:
    -3307throw new 
    IllegalArgumentException("Unknowne compactType: " + compactType);
    +3307throw new 
    IllegalArgumentException("Unknown compactType: " + compactType);
     3308}
     3309if (state != null) {
     3310  return 
    ProtobufUtil.createCompactionState(state);
    @@ -3847,325 +3847,320 @@
     3839});
     3840  }
     3841
    -3842  private RegionInfo 
    getMobRegionInfo(TableName tableName) {
    -3843return 
    RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
    -3844.build();
    -3845  }
    -3846
    -3847  private RpcControllerFactory 
    getRpcControllerFactory() {
    -3848return this.rpcControllerFactory;
    -3849  }
    -3850
    -3851  @Override
    -3852  public void addReplicationPeer(String 
    peerId, ReplicationPeerConfig peerConfig, boolean enabled)
    -3853  throws IOException {
-3854executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
    -3855  @Override
    -3856  protected Void rpcCall() throws 
    Exception {
    -3857
    master.addReplicationPeer(getRpcController(),
    -3858  
    RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
    enabled));
    -3859return null;
    -3860  }
    -3861});
    -3862  }
    -3863
    -3864  @Override
    -3865  public void 
    removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
    -3867  @Override
    -3868  protected Void rpcCall() throws 
    Exception {
    -3869
    master.removeReplicationPeer(getRpcController(),
    -3870  
    RequestConverter.buildRemoveReplicationPeerRequest(peerId));
    -3871return null;
    -3872  }
    -3873});
    -3874  }
    -3875
    -3876  @Override
    -3877  public void 
    enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
    -3879  @Override
    -3880  protected Void rpcCall() throws 
    Exception {
    -3881
    master.enableReplicationPeer(getRpcController(),
    -3882  
    RequestConverter.buildEnableReplicationPeerRequest(peerId));
    -3883return null;
    -3884  }
    -3885});
    -3886  }
    -3887
    -3888  @Override
    -3889  public void 
    disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
    -3891  @Override
    -3892  protected Void rpcCall() throws 
    Exception {
    -3893
    master.disableReplicationPeer(getRpcController(),
    -3894  
    RequestConverter.buildDisableReplicationPeerRequest(peerId));
    -3895return null;
    -3896  }
    -3897});
    -3898  }
    -3899
    -3900  @Override
    -3901  public ReplicationPeerConfig 
    getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(),
    -3903getRpcControllerFactory()) {
    -3904  @Override
    -3905  protected ReplicationPeerConfig 
    rpcCall() throws Exception {
    -3906GetReplicationPeerConfigResponse 
    response = 
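
This HBaseAdmin hunk swaps a private getMobRegionInfo helper for the shared RegionInfo.createMobRegionInfo(tableName) factory, so the synthetic region that stands in for a table's MOB data is built in one place (it also fixes the "Unknowne compactType" message typo). The deleted helper, reconstructed from the removed lines:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class MobRegionInfoSketch {
      // The removed private helper: a pseudo-region keyed ".mob" with region id 0.
      static RegionInfo getMobRegionInfo(TableName tableName) {
        return RegionInfoBuilder.newBuilder(tableName)
            .setStartKey(Bytes.toBytes(".mob"))
            .setRegionId(0)
            .build();
      }
      // Callers now use RegionInfo.createMobRegionInfo(tableName) instead.
    }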

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
    index 29ea7b3..6ed75c9 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
    @@ -1313,7093 +1313,7082 @@
     1305
     1306  @Override
     1307  public boolean isSplittable() {
-1308boolean result = isAvailable() && !hasReferences();
    -1309LOG.info("ASKED IF SPLITTABLE " + 
    result + " " + getRegionInfo().getShortNameToLog(),
    -1310  new Throwable("LOGGING: 
    REMOVE"));
    -1311// REMOVE BELOW
    -1312LOG.info("DEBUG LIST ALL FILES");
    -1313for (HStore store : 
    this.stores.values()) {
    -1314  LOG.info("store " + 
    store.getColumnFamilyName());
    -1315  for (HStoreFile sf : 
    store.getStorefiles()) {
    -1316
    LOG.info(sf.toStringDetailed());
    -1317  }
    -1318}
    -1319return result;
    -1320  }
    -1321
    -1322  @Override
    -1323  public boolean isMergeable() {
    -1324if (!isAvailable()) {
    -1325  LOG.debug("Region " + this
    -1326  + " is not mergeable because 
    it is closing or closed");
    -1327  return false;
    -1328}
    -1329if (hasReferences()) {
    -1330  LOG.debug("Region " + this
    -1331  + " is not mergeable because 
    it has references");
    -1332  return false;
    -1333}
    -1334
    -1335return true;
+1308return isAvailable() && !hasReferences();
    +1309  }
    +1310
    +1311  @Override
    +1312  public boolean isMergeable() {
    +1313if (!isAvailable()) {
    +1314  LOG.debug("Region " + this
    +1315  + " is not mergeable because 
    it is closing or closed");
    +1316  return false;
    +1317}
    +1318if (hasReferences()) {
    +1319  LOG.debug("Region " + this
    +1320  + " is not mergeable because 
    it has references");
    +1321  return false;
    +1322}
    +1323
    +1324return true;
    +1325  }
    +1326
    +1327  public boolean areWritesEnabled() {
    +1328synchronized(this.writestate) {
    +1329  return 
    this.writestate.writesEnabled;
    +1330}
    +1331  }
    +1332
    +1333  @VisibleForTesting
    +1334  public MultiVersionConcurrencyControl 
    getMVCC() {
    +1335return mvcc;
     1336  }
     1337
    -1338  public boolean areWritesEnabled() {
    -1339synchronized(this.writestate) {
    -1340  return 
    this.writestate.writesEnabled;
    -1341}
    -1342  }
    -1343
    -1344  @VisibleForTesting
    -1345  public MultiVersionConcurrencyControl 
    getMVCC() {
    -1346return mvcc;
    -1347  }
    -1348
    -1349  @Override
    -1350  public long getMaxFlushedSeqId() {
    -1351return maxFlushedSeqId;
    +1338  @Override
    +1339  public long getMaxFlushedSeqId() {
    +1340return maxFlushedSeqId;
    +1341  }
    +1342
    +1343  /**
    +1344   * @return readpoint considering given 
    IsolationLevel. Pass {@code null} for default
    +1345   */
    +1346  public long 
    getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
    +1348  // This scan can read even 
    uncommitted transactions
    +1349  return Long.MAX_VALUE;
    +1350}
    +1351return mvcc.getReadPoint();
     1352  }
     1353
    -1354  /**
    -1355   * @return readpoint considering given 
    IsolationLevel. Pass {@code null} for default
    -1356   */
    -1357  public long 
    getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
    -1359  // This scan can read even 
    uncommitted transactions
    -1360  return Long.MAX_VALUE;
    -1361}
    -1362return mvcc.getReadPoint();
    -1363  }
    -1364
    -1365  public boolean 
    isLoadingCfsOnDemandDefault() {
    -1366return 
    this.isLoadingCfsOnDemandDefault;
    -1367  }
    -1368
    -1369  /**
    -1370   * Close down this HRegion.  Flush the 
    cache, shut down each HStore, don't
    -1371   * service any more calls.
    -1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
    -1374   * time-sensitive thread.
    -1375   *
    -1376   * @return Vector of all the storage 
    files that the HRegion's component
    -1377   * HStores make use of.  It's a list 
    of all StoreFile objects. Returns empty
    -1378   * vector if already closed and null 
    if judged that it should not close.
    -1379   *
    -1380   * @throws IOException e
    -1381   * @throws DroppedSnapshotException 
    Thrown when replay of wal is required
    -1382   * because a Snapshot was not properly 
    persisted. The region is put in closing mode, and the
    -1383   * caller MUST abort after this.
    -1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
    -1386return close(false);
    -1387  }
    -1388
    -1389  private final Object closeLock = new 
    Object();
    -1390
    -1391  /** Conf key for the periodic flush 
    interval */
    -1392  public static final String 
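
Besides dropping the temporary "ASKED IF SPLITTABLE" debug logging from isSplittable(), the HRegion hunk keeps getReadPoint(IsolationLevel), which returns Long.MAX_VALUE for READ_UNCOMMITTED so such scans see every cell regardless of MVCC state. A toy illustration of that rule (the enum and the mvcc stand-in are local to the sketch):

    class ReadPointSketch {
      enum IsolationLevel { READ_COMMITTED, READ_UNCOMMITTED }

      private long mvccReadPoint = 42L;  // stand-in for MultiVersionConcurrencyControl

      long getReadPoint(IsolationLevel isolationLevel) {
        if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
          return Long.MAX_VALUE;  // uncommitted reads ignore MVCC entirely
        }
        return mvccReadPoint;  // otherwise only writes up to the read point are visible
      }
    }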
    

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyCell.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyCell.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyCell.html
    index 9098105..b05691f 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyCell.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyCell.html
    @@ -37,1514 +37,1514 @@
     029import java.util.ArrayList;
     030import java.util.Iterator;
     031import java.util.List;
    -032
    -033import org.apache.hadoop.hbase.KeyValue.Type;
    -034import org.apache.hadoop.hbase.filter.ByteArrayComparable;
    -035import org.apache.hadoop.hbase.io.HeapSize;
    -036import org.apache.hadoop.hbase.io.TagCompressionContext;
    -037import org.apache.hadoop.hbase.io.util.Dictionary;
    -038import org.apache.hadoop.hbase.io.util.StreamUtils;
    -039import org.apache.hadoop.hbase.util.ByteBufferUtils;
    -040import org.apache.hadoop.hbase.util.ByteRange;
    -041import org.apache.hadoop.hbase.util.Bytes;
    -042import org.apache.hadoop.hbase.util.ClassSize;
    -043import org.apache.yetus.audience.InterfaceAudience;
    -044
    -045import com.google.common.annotations.VisibleForTesting;
    -046
    -047/**
    -048 * Utility methods helpful slinging {@link Cell} instances. It has more powerful and
    -049 * rich set of APIs than those in {@link CellUtil} for internal usage.
    -050 */
    -051@InterfaceAudience.Private
    -052// TODO : Make Tag IA.LimitedPrivate and move some of the Util methods to CP exposed Util class
    -053public class PrivateCellUtil {
    +032import java.util.Optional;
    +033
    +034import org.apache.hadoop.hbase.KeyValue.Type;
    +035import org.apache.hadoop.hbase.filter.ByteArrayComparable;
    +036import org.apache.hadoop.hbase.io.HeapSize;
    +037import org.apache.hadoop.hbase.io.TagCompressionContext;
    +038import org.apache.hadoop.hbase.io.util.Dictionary;
    +039import org.apache.hadoop.hbase.io.util.StreamUtils;
    +040import org.apache.hadoop.hbase.util.ByteBufferUtils;
    +041import org.apache.hadoop.hbase.util.ByteRange;
    +042import org.apache.hadoop.hbase.util.Bytes;
    +043import org.apache.hadoop.hbase.util.ClassSize;
    +044import org.apache.yetus.audience.InterfaceAudience;
    +045
    +046import com.google.common.annotations.VisibleForTesting;
    +047
    +048/**
    +049 * Utility methods helpful slinging {@link Cell} instances. It has more powerful and
    +050 * rich set of APIs than those in {@link CellUtil} for internal usage.
    +051 */
    +052@InterfaceAudience.Private
    +053public final class PrivateCellUtil {
     054
     055  /**
     056   * Private constructor to keep this class from being instantiated.
     057   */
     058  private PrivateCellUtil() {
    -059
    -060  }
    -061
    -062  /*** ByteRange ***/
    -063
    -064  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
    -065return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
    -066  }
    -067
    -068  public static ByteRange fillFamilyRange(Cell cell, ByteRange range) {
    -069return range.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
    -070  }
    -071
    -072  public static ByteRange fillQualifierRange(Cell cell, ByteRange range) {
    -073return range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
    -074  cell.getQualifierLength());
    -075  }
    -076
    -077  public static ByteRange fillValueRange(Cell cell, ByteRange range) {
    -078return range.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
    -079  }
    -080
    -081  public static ByteRange fillTagRange(Cell cell, ByteRange range) {
    -082return range.set(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
    -083  }
    -084
    -085  /**
    -086   * Returns tag value in a new byte array. If server-side, use {@link Tag#getValueArray()} with
    -087   * appropriate {@link Tag#getValueOffset()} and {@link Tag#getValueLength()} instead to save on
    -088   * allocations.
    -089   * @param cell
    -090   * @return tag value in a new byte array.
    -091   */
    -092  public static byte[] getTagsArray(Cell cell) {
    -093byte[] output = new byte[cell.getTagsLength()];
    -094copyTagsTo(cell, output, 0);
    -095return output;
    -096  }
    -097
    -098  public static byte[] cloneTags(Cell cell) {
    -099byte[] output = new byte[cell.getTagsLength()];
    -100copyTagsTo(cell, output, 0);
    -101return output;
    -102  }
    -103
    -104  /**
    -105   * Copies the tags info into the tag portion of the cell
    -106   * @param cell
    -107   * @param destination
    -108   * @param destinationOffset
    -109   * @return position after tags
    +059  }
    +060
    +061  /*** ByteRange ***/
    +062
    +063  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
    +064return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
    +065  }
    +066
    +067
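
    For orientation on the hunk above: fillRowRange and its siblings point a
    reusable ByteRange at one component of a Cell's backing array instead of
    copying it, while cloneTags allocates a fresh copy of the tag block. A
    minimal sketch of that usage, assuming HBase 2.x and the stock
    SimpleMutableByteRange implementation; note PrivateCellUtil is
    @InterfaceAudience.Private, so this is illustration, not client API:

        import org.apache.hadoop.hbase.Cell;
        import org.apache.hadoop.hbase.PrivateCellUtil;
        import org.apache.hadoop.hbase.util.ByteRange;
        import org.apache.hadoop.hbase.util.Bytes;
        import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

        public class ByteRangeFillSketch {
          // Reads the row bytes of a cell without copying them: the range is
          // re-pointed at the cell's existing backing array.
          static String rowAsString(Cell cell) {
            ByteRange range = new SimpleMutableByteRange();
            PrivateCellUtil.fillRowRange(cell, range);
            return Bytes.toString(range.getBytes(), range.getOffset(), range.getLength());
          }

          // Copies the tag block into a new array; this allocates, which is
          // exactly what the getTagsArray javadoc above warns about server-side.
          static byte[] tagsCopy(Cell cell) {
            return PrivateCellUtil.cloneTags(cell);
          }
        }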

    [05/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9118853f/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
    --
    diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
    index c2d5961..1ccb0f6 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = {"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":9,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":9,"i130":10};
    +var methods = {"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":9,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":9,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":9,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":9,"i129":10};
     var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
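
    (The integer values in the generated "methods" map above are javadoc
    bitmasks -- 1 = static, 2 = instance, 8 = concrete -- which the page's tab
    script tests with a bitwise AND; the "+" map is one entry shorter because
    one instance method, flag 10, was removed. A tiny, hypothetical Java
    mirror of that check:)

        final class JavadocTabFilter {
          // javadoc shows a method under a tab when its flag bits intersect
          // the tab's mask, e.g. tabs key 2 = "Instance Methods".
          static boolean shownUnderTab(int methodFlags, int tabMask) {
            return (methodFlags & tabMask) != 0;
          }

          public static void main(String[] args) {
            System.out.println(shownUnderTab(10, 2)); // true: 10 = instance | concrete
            System.out.println(shownUnderTab(9, 2));  // false: 9 = static | concrete
          }
        }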
    @@ -732,142 +732,136 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     void
    -testMemstoreSizeWithFlushCanceling()
    -Test for HBASE-14229: Flushing canceled by coprocessor still leads to memstoreSize set down
    -
    -
    -
    -void
     testMemstoreSnapshotSize()
     
    -
    +
     void
     testMutateRow_WriteRequestCount()
     
    -
    +
     void
     testOpenRegionWrittenToWAL()
     
    -
    +
     void
     testParallelAppendWithMemStoreFlush()
     Test case to check append function with memstore flushing
     
     
    -
    +
     void
     testParallelIncrementWithMemStoreFlush()
     Test case to check increment function with memstore flushing
     
     
    -
    +
     void
     testPutWithLatestTS()
     Tests that the special LATEST_TIMESTAMP option for puts gets replaced by the actual timestamp
     
     
    -
    +
     void
     testPutWithMemStoreFlush()
     Test case to check put function with memstore flushing for same row, same ts
     
     
    -
    +
     void
     testPutWithTsSlop()
     Tests that there is server-side filtering for invalid timestamp upper bound.
     
     
    -
    +
     void
     testRecoveredEditsReplayCompaction()
     
    -
    +
     void
     testRecoveredEditsReplayCompaction(boolean mismatchedRegionName)
     
    -
    +
     void
     testRegionInfoFileCreation()
     Verifies that the .regioninfo file is written on region creation and that is recreated if missing during region opening.
     
     
    -
    +
     void
     testRegionReplicaSecondary()
     
    -
    +
     void
     testRegionReplicaSecondaryIsReadOnly()
     
    -
    +
     void
     testRegionScanner_Next()
     
    -
    +
     void
     testRegionTooBusy()
     Test RegionTooBusyException thrown when region is busy
     
     
    -
    +
     void
     testReverseScanner_FromMemStore_SingleCF_FullScan()
     
    -
    +
     void
     testReverseScanner_FromMemStore_SingleCF_LargerKey()
     
    -
    +
     void
     testReverseScanner_FromMemStore_SingleCF_Normal()
     
    -
    +
     void
     testReverseScanner_FromMemStoreAndHFiles_MultiCFs1()
     
    -
    +
     void
     

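    Several of the methods listed above (testPutWithMemStoreFlush,
    testParallelAppendWithMemStoreFlush, and similar) exercise a
    put/flush/verify cycle. A minimal, hypothetical sketch of that cycle
    against the public HBaseTestingUtility mini-cluster; the table and family
    names are invented:

        import static org.junit.Assert.assertArrayEquals;

        import org.apache.hadoop.hbase.HBaseTestingUtility;
        import org.apache.hadoop.hbase.TableName;
        import org.apache.hadoop.hbase.client.Get;
        import org.apache.hadoop.hbase.client.Put;
        import org.apache.hadoop.hbase.client.Result;
        import org.apache.hadoop.hbase.client.Table;
        import org.apache.hadoop.hbase.util.Bytes;

        public class PutWithFlushSketch {
          public static void main(String[] args) throws Exception {
            HBaseTestingUtility util = new HBaseTestingUtility();
            util.startMiniCluster();
            TableName name = TableName.valueOf("sketch");
            byte[] cf = Bytes.toBytes("cf");
            try (Table table = util.createTable(name, cf)) {
              byte[] row = Bytes.toBytes("r1");
              byte[] qual = Bytes.toBytes("q");
              byte[] val = Bytes.toBytes("v");
              table.put(new Put(row).addColumn(cf, qual, val));
              // Force the memstore out to an HFile, then check the read path
              // still returns the cell from disk.
              util.getAdmin().flush(name);
              Result r = table.get(new Get(row));
              assertArrayEquals(val, r.getValue(cf, qual));
            } finally {
              util.shutdownMiniCluster();
            }
          }
        }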

  • Packages that use DoNotRetryIOException
    Package / Description
    org.apache.hadoop.hbase.client
    -Provides HBase Client
    +Provides HBase Client
    +
    +Table of Contents
    +
    + Overview
    +Example API Usage
    +
    +
    + Overview
    + To administer HBase, create and drop tables, list and alter tables,
    + use Admin.
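
    A minimal sketch of that Admin workflow (create, list, drop), assuming a
    reachable cluster; the table name "example" and family "cf" are invented:

        import org.apache.hadoop.hbase.HBaseConfiguration;
        import org.apache.hadoop.hbase.TableName;
        import org.apache.hadoop.hbase.client.Admin;
        import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
        import org.apache.hadoop.hbase.client.Connection;
        import org.apache.hadoop.hbase.client.ConnectionFactory;
        import org.apache.hadoop.hbase.client.TableDescriptor;
        import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

        public class AdminSketch {
          public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
              TableName name = TableName.valueOf("example");
              TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                  .build();
              admin.createTable(desc);                 // create
              for (TableDescriptor t : admin.listTableDescriptors()) {
                System.out.println(t.getTableName());  // list
              }
              admin.disableTable(name);                // tables must be disabled
              admin.deleteTable(name);                 // before they can be dropped
            }
          }
        }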
    org.apache.hadoop.hbase.coprocessor
    -Table of Contents
    +Table of Contents
    +
    +Overview
    +Coprocessor
    +RegionObserver
    +Endpoint
    +Coprocessor loading
    +
    +
    +Overview
    +Coprocessors are code that runs in-process on each region server.
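
    A minimal, hypothetical RegionObserver of the kind that overview
    describes; the class name is invented, and the coprocessor would still
    have to be loaded (e.g. via hbase.coprocessor.region.classes or a table
    descriptor attribute), which is not shown:

        import java.util.Optional;

        import org.apache.hadoop.hbase.client.Durability;
        import org.apache.hadoop.hbase.client.Put;
        import org.apache.hadoop.hbase.coprocessor.ObserverContext;
        import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
        import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
        import org.apache.hadoop.hbase.coprocessor.RegionObserver;
        import org.apache.hadoop.hbase.wal.WALEdit;

        public class LoggingRegionObserver implements RegionCoprocessor, RegionObserver {
          @Override
          public Optional<RegionObserver> getRegionObserver() {
            return Optional.of(this); // expose this instance as the observer
          }

          @Override
          public void prePut(ObserverContext<RegionCoprocessorEnvironment> ctx, Put put,
              WALEdit edit, Durability durability) {
            // Runs in-process on the region server before every Put that
            // reaches a region this coprocessor is loaded on.
            System.out.println("prePut on "
                + ctx.getEnvironment().getRegionInfo().getRegionNameAsString());
          }
        }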