[06/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.html
new file mode 100644
index 000..fee41a8
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.html
@@ -0,0 +1,394 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+
+Source code
+
+
+
+
+001/**
+002 *
+003 * Licensed to the Apache Software Foundation (ASF) under one
+004 * or more contributor license agreements.  See the NOTICE file
+005 * distributed with this work for additional information
+006 * regarding copyright ownership.  The ASF licenses this file
+007 * to you under the Apache License, Version 2.0 (the
+008 * "License"); you may not use this file except in compliance
+009 * with the License.  You may obtain a copy of the License at
+010 *
+011 *     http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or agreed to in writing, software
+014 * distributed under the License is distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+016 * See the License for the specific language governing permissions and
+017 * limitations under the License.
+018 */
+019package org.apache.hadoop.hbase.thrift2.client;
+020
+021import static org.apache.hadoop.hbase.ipc.RpcClient.DEFAULT_SOCKET_TIMEOUT_CONNECT;
+022import static org.apache.hadoop.hbase.ipc.RpcClient.SOCKET_TIMEOUT_CONNECT;
+023
+024import java.io.IOException;
+025import java.lang.reflect.Constructor;
+026import java.util.HashMap;
+027import java.util.Map;
+028import java.util.concurrent.ExecutorService;
+029
+030import org.apache.commons.lang3.NotImplementedException;
+031import org.apache.hadoop.conf.Configuration;
+032import org.apache.hadoop.hbase.HConstants;
+033import org.apache.hadoop.hbase.TableName;
+034import org.apache.hadoop.hbase.client.Admin;
+035import org.apache.hadoop.hbase.client.BufferedMutator;
+036import org.apache.hadoop.hbase.client.BufferedMutatorParams;
+037import org.apache.hadoop.hbase.client.Connection;
+038import org.apache.hadoop.hbase.client.RegionLocator;
+039import org.apache.hadoop.hbase.client.Table;
+040import org.apache.hadoop.hbase.client.TableBuilder;
+041import org.apache.hadoop.hbase.security.User;
+042import org.apache.hadoop.hbase.thrift.Constants;
+043import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
+044import org.apache.hadoop.hbase.util.Pair;
+045import org.apache.http.client.HttpClient;
+046import org.apache.http.client.config.RequestConfig;
+047import org.apache.http.client.utils.HttpClientUtils;
+048import org.apache.http.impl.client.HttpClientBuilder;
+049import org.apache.thrift.protocol.TBinaryProtocol;
+050import org.apache.thrift.protocol.TCompactProtocol;
+051import org.apache.thrift.protocol.TProtocol;
+052import org.apache.thrift.transport.TFramedTransport;
+053import org.apache.thrift.transport.THttpClient;
+054import org.apache.thrift.transport.TSocket;
+055import org.apache.thrift.transport.TTransport;
+056import org.apache.thrift.transport.TTransportException;
+057import org.apache.yetus.audience.InterfaceAudience;
+058
+059import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+060
+061@InterfaceAudience.Private
+062public class ThriftConnection implements Connection {
+063  private Configuration conf;
+064  private User user;
+065  // For HTTP protocol
+066  private HttpClient httpClient;
+067  private boolean httpClientCreated = false;
+068  private boolean isClosed = false;
+069
+070  private String host;
+071  private int port;
+072  private boolean isFramed = false;
+073  private boolean isCompact = false;
+074
+075  private ThriftClientBuilder clientBuilder;
+076
+077  private int operationTimeout;
+078  private int connectTimeout;
+079
+080  public ThriftConnection(Configuration conf, ExecutorService pool, final User user)
+081      throws IOException {
+082    this.conf = conf;
+083    this.user = user;
+084    this.host = conf.get(Constants.HBASE_THRIFT_SERVER_NAME);
+085    this.port = conf.getInt(Constants.HBASE_THRIFT_SERVER_PORT, -1);
+086    Preconditions.checkArgument(port > 0);
+087    Preconditions.checkArgument(host != null);
+088    this.isFramed = conf.getBoolean(Constants.FRAMED_CONF_KEY, Constants.FRAMED_CONF_DEFAULT);
+089    this.isCompact = conf.getBoolean(Constants.COMPACT_CONF_KEY, Constants.COMPACT_CONF_DEFAULT);
+090    this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+091        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+092    this.connectTimeout = conf.getInt(SOCKET_TIMEOUT_CONNECT, 
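
The excerpt ends mid-assignment in the archive. For orientation, here is a minimal sketch of standing up this thrift2 connection purely from configuration. The Constants keys mirror the reads in the constructor above; routing through ConnectionFactory via "hbase.client.connection.impl" is an assumed (though conventional) wiring, not something this diff itself demonstrates, and the host, port, and table names are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.thrift.Constants;

public class ThriftConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed wiring: point the generic client factory at the thrift2 Connection;
    // ConnectionFactory instantiates it reflectively via the
    // (Configuration, ExecutorService, User) constructor shown above.
    conf.set("hbase.client.connection.impl",
        "org.apache.hadoop.hbase.thrift2.client.ThriftConnection");
    // Same keys the constructor reads; values here are placeholders.
    conf.set(Constants.HBASE_THRIFT_SERVER_NAME, "thrift-gateway.example.com");
    conf.setInt(Constants.HBASE_THRIFT_SERVER_PORT, 9090);
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(TableName.valueOf("t1"))) {
      // From here the normal Table API applies, proxied over thrift.
      System.out.println("connected to " + table.getName());
    }
  }
}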

[06/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html
index 1126570..cd96401 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.Status.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static enum SplitLogWorker.TaskExecutor.Status
+public static enum SplitLogWorker.TaskExecutor.Status
 extends Enum<SplitLogWorker.TaskExecutor.Status>
 
 
@@ -216,7 +216,7 @@ the order they are declared.
 
 
 DONE
-public static final SplitLogWorker.TaskExecutor.Status DONE
+public static final SplitLogWorker.TaskExecutor.Status DONE
 
 
 
@@ -225,7 +225,7 @@ the order they are declared.
 
 
 ERR
-public static final SplitLogWorker.TaskExecutor.Status ERR
+public static final SplitLogWorker.TaskExecutor.Status ERR
 
 
 
@@ -234,7 +234,7 @@ the order they are declared.
 
 
 RESIGNED
-public static final SplitLogWorker.TaskExecutor.Status RESIGNED
+public static final SplitLogWorker.TaskExecutor.Status RESIGNED
 
 
 
@@ -243,7 +243,7 @@ the order they are declared.
 
 
 PREEMPTED
-public static final SplitLogWorker.TaskExecutor.Status PREEMPTED
+public static final SplitLogWorker.TaskExecutor.Status PREEMPTED
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.html
index 8ae8dd4..36ae80d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.TaskExecutor.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @FunctionalInterface
-public static interface SplitLogWorker.TaskExecutor
+public static interface SplitLogWorker.TaskExecutor
 Objects implementing this interface actually do the task that has been
 acquired by a SplitLogWorker. Since there isn't a water-tight
 guarantee that two workers will not be executing the same task therefore it
@@ -180,7 +180,7 @@ public static interface 
 
 exec
-SplitLogWorker.TaskExecutor.Status exec(String name,
+SplitLogWorker.TaskExecutor.Status exec(String name,
                                         CancelableProgressable p)
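
Because the interface is annotated @FunctionalInterface and exec returns a Status, a worker's task body can be supplied as a lambda. A hedged sketch follows; the body is illustrative only (a real executor replays WAL entries for the named task), and per the javadoc above it must tolerate two workers running the same task.

SplitLogWorker.TaskExecutor executor = (name, p) -> {
  try {
    // ... perform the acquired task, calling p.progress() periodically
    // so the SplitLogManager does not consider the worker dead ...
    return SplitLogWorker.TaskExecutor.Status.DONE;
  } catch (Exception e) {
    // ERR signals a retryable failure; RESIGNED hands the task back.
    return SplitLogWorker.TaskExecutor.Status.ERR;
  }
};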
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.html
index 322dc6b..36f1ac8 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/SplitLogWorker.html
@@ -248,7 +248,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable
 run()
 
 
-private static SplitLogWorker.TaskExecutor.Status
+(package private) static SplitLogWorker.TaskExecutor.Status
 splitLog(String name,
          CancelableProgressable p,
          org.apache.hadoop.conf.Configuration conf,
@@ -396,12 +396,12 @@ implements https://docs.oracle.com/javase/8/docs/api/java/lang/Runnable
 
 
 splitLog
-private static SplitLogWorker.TaskExecutor.Status splitLog(String name,
-                                                           CancelableProgressable p,
-                                                           org.apache.hadoop.conf.Configuration conf,
-                                                           RegionServerServices server,
-   

[06/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
index 736388b..197b99d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteNamespaceProcedureBiConsumer.html
@@ -26,3624 +26,3599 @@
 018package org.apache.hadoop.hbase.client;
 019
 020import static 
org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-021
-022import com.google.protobuf.Message;
-023import com.google.protobuf.RpcChannel;
-024import java.io.IOException;
-025import java.util.ArrayList;
-026import java.util.Arrays;
-027import java.util.Collections;
-028import java.util.EnumSet;
-029import java.util.HashMap;
-030import java.util.List;
-031import java.util.Map;
-032import java.util.Optional;
-033import java.util.Set;
-034import 
java.util.concurrent.CompletableFuture;
-035import 
java.util.concurrent.ConcurrentHashMap;
-036import java.util.concurrent.TimeUnit;
-037import 
java.util.concurrent.atomic.AtomicReference;
-038import java.util.function.BiConsumer;
-039import java.util.function.Function;
-040import java.util.function.Supplier;
-041import java.util.regex.Pattern;
-042import java.util.stream.Collectors;
-043import java.util.stream.Stream;
-044import org.apache.commons.io.IOUtils;
-045import 
org.apache.hadoop.conf.Configuration;
-046import 
org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-047import 
org.apache.hadoop.hbase.CacheEvictionStats;
-048import 
org.apache.hadoop.hbase.CacheEvictionStatsAggregator;
-049import 
org.apache.hadoop.hbase.ClusterMetrics;
-050import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-051import 
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-052import 
org.apache.hadoop.hbase.HConstants;
-053import 
org.apache.hadoop.hbase.HRegionLocation;
-054import 
org.apache.hadoop.hbase.MetaTableAccessor;
-055import 
org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-056import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-057import 
org.apache.hadoop.hbase.RegionLocations;
-058import 
org.apache.hadoop.hbase.RegionMetrics;
-059import 
org.apache.hadoop.hbase.RegionMetricsBuilder;
-060import 
org.apache.hadoop.hbase.ServerName;
-061import 
org.apache.hadoop.hbase.TableExistsException;
-062import 
org.apache.hadoop.hbase.TableName;
-063import 
org.apache.hadoop.hbase.TableNotDisabledException;
-064import 
org.apache.hadoop.hbase.TableNotEnabledException;
-065import 
org.apache.hadoop.hbase.TableNotFoundException;
-066import 
org.apache.hadoop.hbase.UnknownRegionException;
-067import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import 
org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-070import 
org.apache.hadoop.hbase.client.Scan.ReadType;
-071import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-072import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-073import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-074import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-075import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-076import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-077import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-078import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-079import 
org.apache.hadoop.hbase.replication.ReplicationException;
-080import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-081import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-082import 
org.apache.hadoop.hbase.replication.SyncReplicationState;
-083import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-084import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-085import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-086import 
org.apache.hadoop.hbase.util.Bytes;
-087import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-088import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-089import 
org.apache.yetus.audience.InterfaceAudience;
-090import org.slf4j.Logger;
-091import org.slf4j.LoggerFactory;
-092
-093import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-094import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-095import 
org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
-096import 

[06/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
index f17e275..ec3348b 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html
@@ -479,58 +479,62 @@
 
 
 
-TestRegionObserverBypass
+TestRegionCoprocessorHost
 
 
 
-TestRegionObserverBypass.TestCoprocessor
+TestRegionObserverBypass
 
 
 
+TestRegionObserverBypass.TestCoprocessor
+
+
+
 TestRegionObserverBypass.TestCoprocessor2
 
 Calls through to TestCoprocessor.
 
 
-
+
 TestRegionObserverBypass.TestCoprocessor3
 
 Calls through to TestCoprocessor.
 
 
-
+
 TestRegionObserverForAddingMutationsFromCoprocessors
 
 
-
+
 TestRegionObserverForAddingMutationsFromCoprocessors.TestDeleteCellCoprocessor
 
 
-
+
 TestRegionObserverForAddingMutationsFromCoprocessors.TestDeleteFamilyCoprocessor
 
 
-
+
 TestRegionObserverForAddingMutationsFromCoprocessors.TestDeleteRowCoprocessor
 
 
-
+
 TestRegionObserverForAddingMutationsFromCoprocessors.TestMultiMutationCoprocessor
 
 
-
+
 TestRegionObserverForAddingMutationsFromCoprocessors.TestWALObserver
 
 
-
+
 TestRegionObserverInterface
 
 
-
+
 TestRegionObserverInterface.EvenOnlyCompactor
 
 
-
+
 TestRegionObserverPreFlushAndPreCompact
 
 Test that we fail if a Coprocessor tries to return a null scanner out
@@ -539,131 +539,131 @@
  CompactionLifeCycleTracker, CompactionRequest)
 
 
-
+
 TestRegionObserverPreFlushAndPreCompact.TestRegionObserver
 
 Coprocessor that returns null when preCompact or preFlush 
is called.
 
 
-
+
 TestRegionObserverScannerOpenHook
 
 
-
+
 TestRegionObserverScannerOpenHook.CompactionCompletionNotifyingRegion
 
 
-
+
 TestRegionObserverScannerOpenHook.EmptyRegionObsever
 
 Do the default logic in RegionObserver 
interface.
 
 
-
+
 TestRegionObserverScannerOpenHook.NoDataFilter
 
 
-
+
 TestRegionObserverScannerOpenHook.NoDataFromCompaction
 
 Don't allow any data to be written out in the compaction by 
creating a custom
  StoreScanner.
 
 
-
+
 TestRegionObserverScannerOpenHook.NoDataFromFlush
 
 Don't allow any data in a flush by creating a custom 
StoreScanner.
 
 
-
+
 TestRegionObserverScannerOpenHook.NoDataFromScan
 
 Don't return any data from a scan by creating a custom 
StoreScanner.
 
 
-
+
 TestRegionObserverStacking
 
 
-
+
 TestRegionObserverStacking.ObserverA
 
 
-
+
 TestRegionObserverStacking.ObserverB
 
 
-
+
 TestRegionObserverStacking.ObserverC
 
 
-
+
 TestRegionServerCoprocessorEndpoint
 
 
-
+
 TestRegionServerCoprocessorEndpoint.DummyRegionServerEndpoint
 
 
-
+
 TestRegionServerCoprocessorExceptionWithAbort
 
 Tests unhandled exceptions thrown by coprocessors running on a regionserver.
 
 
-
+
 TestRegionServerCoprocessorExceptionWithAbort.BuggyRegionObserver
 
 
-
+
 TestRegionServerCoprocessorExceptionWithAbort.FailedInitializationObserver
 
 
-
+
 TestRegionServerCoprocessorExceptionWithRemove
 
 Tests unhandled exceptions thrown by coprocessors running on a regionserver.
 
 
-
+
 TestRegionServerCoprocessorExceptionWithRemove.BuggyRegionObserver
 
 
-
+
 TestRowProcessorEndpoint
 
 Verifies ProcessEndpoint works.
 
 
-
+
 TestRowProcessorEndpoint.RowProcessorEndpointS
 extends com.google.protobuf.Message,T extends 
com.google.protobuf.Message
 
 This class defines two RowProcessors:
  IncrementCounterProcessor and FriendsOfFriendsProcessor.
 
 
-
+
 TestRowProcessorEndpoint.RowProcessorEndpoint.FriendsOfFriendsProcessor
 
 
-
+
 TestRowProcessorEndpoint.RowProcessorEndpoint.IncrementCounterProcessor
 
 
-
+
 TestRowProcessorEndpoint.RowProcessorEndpoint.RowSwapProcessor
 
 
-
+
 TestRowProcessorEndpoint.RowProcessorEndpoint.TimeoutProcessor
 
 
-
+
 TestSecureExport
 
 
-
+
 TestWALObserver
 
 Tests invocation of the

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
index b6f3005..98006bc 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/coprocessor/package-tree.html
@@ -240,6 +240,7 @@
 org.apache.hadoop.hbase.coprocessor.TestPostIncrementAndAppendBeforeWAL.ChangeCellWithDifferntColumnFamilyObserver
 (implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionObserver)
 org.apache.hadoop.hbase.coprocessor.TestPostIncrementAndAppendBeforeWAL.ChangeCellWithNotExistColumnFamilyObserver
 (implements 

[06/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/MutationSerialization.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/MutationSerialization.html
 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/MutationSerialization.html
index a7ab356..eefc000 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/MutationSerialization.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/MutationSerialization.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutCombiner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutCombiner.html 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutCombiner.html
index 8ad5ca0..475a882 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutCombiner.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutCombiner.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutSortReducer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutSortReducer.html 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutSortReducer.html
index d246f74..56dd2a5 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutSortReducer.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/PutSortReducer.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/RegionSizeCalculator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/RegionSizeCalculator.html
 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/RegionSizeCalculator.html
index 61b7bde..e08521b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/RegionSizeCalculator.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/RegionSizeCalculator.html
@@ -164,6 +164,6 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.Result94Deserializer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.Result94Deserializer.html
 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.Result94Deserializer.html
index 06e0de7..72b0648 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.Result94Deserializer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.Result94Deserializer.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.ResultDeserializer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.ResultDeserializer.html
 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.ResultDeserializer.html
index b380dc1..dbd9375 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.ResultDeserializer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/class-use/ResultSerialization.ResultDeserializer.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright  

[06/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 0f5a095..50bf692 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -78,8712 +78,8714 @@
 070import 
java.util.concurrent.locks.ReadWriteLock;
 071import 
java.util.concurrent.locks.ReentrantReadWriteLock;
 072import java.util.function.Function;
-073import 
org.apache.hadoop.conf.Configuration;
-074import org.apache.hadoop.fs.FileStatus;
-075import org.apache.hadoop.fs.FileSystem;
-076import 
org.apache.hadoop.fs.LocatedFileStatus;
-077import org.apache.hadoop.fs.Path;
-078import org.apache.hadoop.hbase.Cell;
-079import 
org.apache.hadoop.hbase.CellBuilderType;
-080import 
org.apache.hadoop.hbase.CellComparator;
-081import 
org.apache.hadoop.hbase.CellComparatorImpl;
-082import 
org.apache.hadoop.hbase.CellScanner;
-083import 
org.apache.hadoop.hbase.CellUtil;
-084import 
org.apache.hadoop.hbase.CompareOperator;
-085import 
org.apache.hadoop.hbase.CompoundConfiguration;
-086import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-087import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-088import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-089import 
org.apache.hadoop.hbase.HConstants;
-090import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-091import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-092import 
org.apache.hadoop.hbase.KeyValue;
-093import 
org.apache.hadoop.hbase.KeyValueUtil;
-094import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-095import 
org.apache.hadoop.hbase.NotServingRegionException;
-096import 
org.apache.hadoop.hbase.PrivateCellUtil;
-097import 
org.apache.hadoop.hbase.RegionTooBusyException;
-098import org.apache.hadoop.hbase.Tag;
-099import org.apache.hadoop.hbase.TagUtil;
-100import 
org.apache.hadoop.hbase.UnknownScannerException;
-101import 
org.apache.hadoop.hbase.client.Append;
-102import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-103import 
org.apache.hadoop.hbase.client.CompactionState;
-104import 
org.apache.hadoop.hbase.client.Delete;
-105import 
org.apache.hadoop.hbase.client.Durability;
-106import 
org.apache.hadoop.hbase.client.Get;
-107import 
org.apache.hadoop.hbase.client.Increment;
-108import 
org.apache.hadoop.hbase.client.IsolationLevel;
-109import 
org.apache.hadoop.hbase.client.Mutation;
-110import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-111import 
org.apache.hadoop.hbase.client.Put;
-112import 
org.apache.hadoop.hbase.client.RegionInfo;
-113import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-114import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-115import 
org.apache.hadoop.hbase.client.Result;
-116import 
org.apache.hadoop.hbase.client.RowMutations;
-117import 
org.apache.hadoop.hbase.client.Scan;
-118import 
org.apache.hadoop.hbase.client.TableDescriptor;
-119import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-120import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-121import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-122import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-123import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-124import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-125import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-126import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-127import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-128import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-129import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-130import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-131import 
org.apache.hadoop.hbase.io.HFileLink;
-132import 
org.apache.hadoop.hbase.io.HeapSize;
-133import 
org.apache.hadoop.hbase.io.TimeRange;
-134import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.mob.MobFileCache;
-141import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-142import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-143import 
org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-144import 
org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-145import 

[06/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
index 79cb21b..d8d391b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
@@ -378,1508 +378,1510 @@
 370
 371  @Override
 372  public void returnBlock(HFileBlock 
block) {
-373    BlockCache blockCache = this.cacheConf.getBlockCache();
-374    if (blockCache != null && block != null) {
-375      BlockCacheKey cacheKey = new BlockCacheKey(this.getFileContext().getHFileName(),
-376          block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType());
-377      blockCache.returnBlock(cacheKey, block);
-378    }
-379  }
-380  /**
-381   * @return the first key in the file. May be null if file has no entries. Note
-382   *         that this is not the first row key, but rather the byte form of the
-383   *         first KeyValue.
-384   */
-385  @Override
-386  public Optional<Cell> getFirstKey() {
-387    if (dataBlockIndexReader == null) {
-388      throw new BlockIndexNotLoadedException();
-389    }
-390    return dataBlockIndexReader.isEmpty() ? Optional.empty()
-391        : Optional.of(dataBlockIndexReader.getRootBlockKey(0));
-392  }
-393
-394  /**
-395   * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's
-396   * patch goes in to eliminate {@link KeyValue} here.
-397   *
-398   * @return the first row key, or null if the file is empty.
-399   */
-400  @Override
-401  public Optional<byte[]> getFirstRowKey() {
-402    // We have to copy the row part to form the row key alone
-403    return getFirstKey().map(CellUtil::cloneRow);
-404  }
-405
-406  /**
-407   * TODO left from {@link HFile} version 1: move this to StoreFile after
-408   * Ryan's patch goes in to eliminate {@link KeyValue} here.
-409   *
-410   * @return the last row key, or null if the file is empty.
-411   */
-412  @Override
-413  public Optional<byte[]> getLastRowKey() {
-414    // We have to copy the row part to form the row key alone
-415    return getLastKey().map(CellUtil::cloneRow);
-416  }
-418  /** @return number of KV entries in 
this HFile */
-419  @Override
-420  public long getEntries() {
-421return trailer.getEntryCount();
-422  }
-423
-424  /** @return comparator */
-425  @Override
-426  public CellComparator getComparator() 
{
-427return comparator;
-428  }
-429
-430  /** @return compression algorithm */
-431  @Override
-432  public Compression.Algorithm 
getCompressionAlgorithm() {
-433return compressAlgo;
-434  }
-435
-436  /**
-437   * @return the total heap size of data 
and meta block indexes in bytes. Does
-438   * not take into account 
non-root blocks of a multilevel data index.
-439   */
-440  @Override
-441  public long indexSize() {
-442return (dataBlockIndexReader != null 
? dataBlockIndexReader.heapSize() : 0)
-443+ ((metaBlockIndexReader != null) 
? metaBlockIndexReader.heapSize()
-444: 0);
-445  }
-446
-447  @Override
-448  public String getName() {
-449return name;
-450  }
-451
-452  @Override
-453  public HFileBlockIndex.BlockIndexReader 
getDataBlockIndexReader() {
-454return dataBlockIndexReader;
-455  }
-456
-457  @Override
-458  public FixedFileTrailer getTrailer() 
{
-459return trailer;
-460  }
-461
-462  @Override
-463  public boolean isPrimaryReplicaReader() 
{
-464return primaryReplicaReader;
-465  }
-466
-467  @Override
-468  public FileInfo loadFileInfo() throws 
IOException {
-469return fileInfo;
-470  }
-471
-472  /**
-473   * An exception thrown when an 
operation requiring a scanner to be seeked
-474   * is invoked on a scanner that is not 
seeked.
-475   */
-476  @SuppressWarnings("serial")
-477  public static class NotSeekedException 
extends IllegalStateException {
-478public NotSeekedException() {
-479  super("Not seeked to a 
key/value");
-480}
-481  }
-482
-483  protected static class HFileScannerImpl 
implements HFileScanner {
-484private ByteBuff blockBuffer;
-485protected final boolean 
cacheBlocks;
-486protected final boolean pread;
-487protected final boolean 
isCompaction;
-488private int currKeyLen;
-489private int currValueLen;
-490private int currMemstoreTSLen;
-491private long currMemstoreTS;
-492// Updated but never read?
-493protected AtomicInteger blockFetches 
= new AtomicInteger(0);
-494protected final HFile.Reader 
reader;
-495private int currTagsLen;
-496// buffer backed keyonlyKV
-497private 

[06/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
index a957d31..62f81b6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import 
org.apache.hadoop.hbase.wal.WALFactory;
 136import 
org.apache.hadoop.hbase.wal.WALSplitter;
-137import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import 
org.apache.hadoop.ipc.RemoteException;
-143import 
org.apache.hadoop.security.UserGroupInformation;
-144import 
org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import 
org.apache.hadoop.util.ToolRunner;
-147import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import 
org.apache.yetus.audience.InterfaceAudience;
-150import 
org.apache.yetus.audience.InterfaceStability;
-151import 
org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import 
org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import 
org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import 
org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import 
org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for 
checking and repairing region consistency and
-168 * table integrity problems in a 
corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state 
but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck 
tool for hbase2.
-171 *
-172 * p
-173 * Region consistency checks verify that 
hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS 
(.regioninfo files) all are in
-175 * accordance.
-176 * p
-177 * Table integrity checks verify that all 
possible row keys resolve to exactly
-178 * one region of a table.  This means 
there are no individual degenerate
-179 * or backwards regions; no holes between 
regions; and that there are no
-180 * overlapping regions.
-181 * p
-182 * The general repair strategy works in 
two phases:
-183 * ol
-184 * li Repair Table Integrity on 
HDFS. (merge or fabricate regions)
-185 * li Repair Region Consistency 
with hbase:meta and assignments
-186 * /ol
-187 * p
-188 * For table integrity repairs, the 
tables' region directories are scanned
-189 * for .regioninfo files.  Each table's 
integrity is then verified.  If there
-190 * are any orphan regions (regions with 
no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards 
regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. 
 If there are any overlapping regions,
-193 * a new region is created and all data 
is merged into the new region.
-194 * p
-195 * Table integrity repairs deal solely 
with HDFS and could potentially be done
-196 * offline -- the hbase region servers or 
master do not need to be running.
-197 * This phase can eventually be used to 
completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * p
-200 * Region consistency requires three 
conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) 
valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at 
the regionserver that was assigned to
-203 * with proper state in the master.
-204 * p
-205 * Region consistency repairs require 
hbase to be online so that hbck can
-206 * contact the HBase master and region 
servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of 
the region consistency information
-208 * is transient and less risky to 
repair.
-209 * p
-210 * If 

[06/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html
index 10f7ae8..99abe5f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/TableName.html
@@ -88,462 +88,467 @@
 080  public static final TableName META_TABLE_NAME =
 081      valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta");
 082
-083  /** The Namespace table's name. */
-084  public static final TableName NAMESPACE_TABLE_NAME =
-085      valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace");
-086
-087  public static final String OLD_META_STR = ".META.";
-088  public static final String OLD_ROOT_STR = "-ROOT-";
-089
-090  /** One globally disallowed name */
-091  public static final String DISALLOWED_TABLE_NAME = "zookeeper";
-092
-093  /**
-094   * @return True if <code>tn</code> is the hbase:meta table name.
-095   */
-096  public static boolean isMetaTableName(final TableName tn) {
-097    return tn.equals(TableName.META_TABLE_NAME);
-098  }
-099
-100  /**
-101   * TableName for old -ROOT- table. It is used to read/process old WALs which have
-102   * ROOT edits.
-103   */
-104  public static final TableName OLD_ROOT_TABLE_NAME = getADummyTableName(OLD_ROOT_STR);
+083  /**
+084   * The Namespace table's name.
+085   * @deprecated We have folded the data in namespace table into meta table, so do not use it any
+086   * more.
+087   */
+088  @Deprecated
+089  public static final TableName NAMESPACE_TABLE_NAME =
+090      valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace");
+091
+092  public static final String OLD_META_STR = ".META.";
+093  public static final String OLD_ROOT_STR = "-ROOT-";
+094
+095  /** One globally disallowed name */
+096  public static final String DISALLOWED_TABLE_NAME = "zookeeper";
+097
+098  /**
+099   * @return True if <code>tn</code> is the hbase:meta table name.
+100   */
+101  public static boolean isMetaTableName(final TableName tn) {
+102    return tn.equals(TableName.META_TABLE_NAME);
+103  }
+104
 105  /**
-106   * TableName for old .META. table. Used in testing.
-107   */
-108  public static final TableName OLD_META_TABLE_NAME = getADummyTableName(OLD_META_STR);
-109
-110  private final byte[] name;
-111  private final String nameAsString;
-112  private final byte[] namespace;
-113  private final String namespaceAsString;
-114  private final byte[] qualifier;
-115  private final String qualifierAsString;
-116  private final boolean systemTable;
-117  private final int hashCode;
-118
-119  /**
-120   * Check passed byte array, "tableName", is legal user-space table name.
-121   * @return Returns passed <code>tableName</code> param
-122   * @throws IllegalArgumentException if passed a tableName is null or
-123   * is made of other than 'word' characters or underscores: i.e.
-124   * <code>[\p{IsAlphabetic}\p{Digit}.-:]</code>. The ':' is used to delimit the namespace
-125   * from the table name and can be used for nothing else.
-126   *
-127   * Namespace names can only contain 'word' characters
-128   * <code>[\p{IsAlphabetic}\p{Digit}]</code> or '_'
-129   *
-130   * Qualifier names can only contain 'word' characters
-131   * <code>[\p{IsAlphabetic}\p{Digit}]</code> or '_', '.' or '-'.
-132   * The name may not start with '.' or '-'.
-133   *
-134   * Valid fully qualified table names:
-135   * foo:bar, namespace=&gt;foo, table=&gt;bar
-136   * org:foo.bar, namespace=org, table=&gt;foo.bar
-137   */
-138  public static byte [] isLegalFullyQualifiedTableName(final byte[] tableName) {
-139    if (tableName == null || tableName.length <= 0) {
-140      throw new IllegalArgumentException("Name is null or empty");
-141    }
-142
-143    int namespaceDelimIndex =
-144        org.apache.hbase.thirdparty.com.google.common.primitives.Bytes.lastIndexOf(tableName,
-145            (byte) NAMESPACE_DELIM);
-146    if (namespaceDelimIndex < 0){
-147      isLegalTableQualifierName(tableName);
-148    } else {
-149      isLegalNamespaceName(tableName, 0, namespaceDelimIndex);
-150      isLegalTableQualifierName(tableName, namespaceDelimIndex + 1, tableName.length);
-151    }
-152    return tableName;
-153  }
-154
-155  public static byte [] isLegalTableQualifierName(final byte[] qualifierName) {
-156    isLegalTableQualifierName(qualifierName, 0, qualifierName.length, false);
-157    return qualifierName;
+106   * TableName for old -ROOT- table. It is used to read/process old WALs which have
+107   * ROOT edits.
+108   */
+109  public static final TableName OLD_ROOT_TABLE_NAME = getADummyTableName(OLD_ROOT_STR);
+110  /**
+111   * TableName for old .META. table. Used in testing.
+112   */
+113  public static final TableName 
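
To ground the naming rules documented in the diff above, a small hedged illustration; the table and namespace names are arbitrary examples, and Bytes is org.apache.hadoop.hbase.util.Bytes.

// "foo" is the namespace, "bar" the qualifier; ':' may appear only as the delimiter.
TableName t1 = TableName.valueOf("foo:bar");
// Qualifiers may contain '.' and '-', but may not start with either.
TableName t2 = TableName.valueOf("org:foo.bar");
// The static check can also be applied to raw bytes; it throws
// IllegalArgumentException on null/empty input or illegal characters.
byte[] checked = TableName.isLegalFullyQualifiedTableName(Bytes.toBytes("foo:bar"));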

[06/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/src-html/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.html
index 1222951..6f09c2f 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.html
@@ -51,389 +51,414 @@
 043import org.apache.hadoop.hbase.Tag;
 044import 
org.apache.hadoop.hbase.io.ByteArrayOutputStream;
 045import 
org.apache.hadoop.hbase.io.compress.Compression;
-046import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-047import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-048import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-049import 
org.apache.hadoop.hbase.testclassification.IOTests;
-050import 
org.apache.hadoop.hbase.testclassification.LargeTests;
-051import 
org.apache.hadoop.hbase.util.Bytes;
-052import 
org.apache.hadoop.hbase.util.RedundantKVGenerator;
-053import org.junit.ClassRule;
-054import org.junit.Test;
-055import 
org.junit.experimental.categories.Category;
-056import org.junit.runner.RunWith;
-057import org.junit.runners.Parameterized;
-058import 
org.junit.runners.Parameterized.Parameters;
-059import org.slf4j.Logger;
-060import org.slf4j.LoggerFactory;
-061
-062/**
-063 * Test all of the data block encoding 
algorithms for correctness. Most of the
-064 * class generate data which will test 
different branches in code.
-065 */
-066@Category({IOTests.class, 
LargeTests.class})
-067@RunWith(Parameterized.class)
-068public class TestDataBlockEncoders {
-069
-070  @ClassRule
-071  public static final HBaseClassTestRule 
CLASS_RULE =
-072  
HBaseClassTestRule.forClass(TestDataBlockEncoders.class);
-073
-074  private static final Logger LOG = 
LoggerFactory.getLogger(TestDataBlockEncoders.class);
+046import 
org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
+047import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
+048import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+049import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
+050import 
org.apache.hadoop.hbase.testclassification.IOTests;
+051import 
org.apache.hadoop.hbase.testclassification.LargeTests;
+052import 
org.apache.hadoop.hbase.util.Bytes;
+053import 
org.apache.hadoop.hbase.util.RedundantKVGenerator;
+054import org.junit.Assert;
+055import org.junit.ClassRule;
+056import org.junit.Test;
+057import 
org.junit.experimental.categories.Category;
+058import org.junit.runner.RunWith;
+059import org.junit.runners.Parameterized;
+060import 
org.junit.runners.Parameterized.Parameters;
+061import org.slf4j.Logger;
+062import org.slf4j.LoggerFactory;
+063
+064/**
+065 * Test all of the data block encoding 
algorithms for correctness. Most of the
+066 * class generate data which will test 
different branches in code.
+067 */
+068@Category({IOTests.class, 
LargeTests.class})
+069@RunWith(Parameterized.class)
+070public class TestDataBlockEncoders {
+071
+072  @ClassRule
+073  public static final HBaseClassTestRule 
CLASS_RULE =
+074  
HBaseClassTestRule.forClass(TestDataBlockEncoders.class);
 075
-076  private static int NUMBER_OF_KV = 
1;
-077  private static int NUM_RANDOM_SEEKS = 
1000;
-078
-079  private static int ENCODED_DATA_OFFSET 
= HConstants.HFILEBLOCK_HEADER_SIZE
-080  + DataBlockEncoding.ID_SIZE;
-081  static final byte[] 
HFILEBLOCK_DUMMY_HEADER = new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
-082
-083  private RedundantKVGenerator generator 
= new RedundantKVGenerator();
-084  private Random randomizer = new 
Random(42L);
-085
-086  private final boolean 
includesMemstoreTS;
-087  private final boolean includesTags;
-088  private final boolean useOffheapData;
-089
-090  @Parameters
-091  public static 
CollectionObject[] parameters() {
-092return 
HBaseTestingUtility.memStoreTSTagsAndOffheapCombination();
-093  }
-094
-095  public TestDataBlockEncoders(boolean 
includesMemstoreTS, boolean includesTag,
-096  boolean useOffheapData) {
-097this.includesMemstoreTS = 
includesMemstoreTS;
-098this.includesTags = includesTag;
-099this.useOffheapData = 
useOffheapData;
-100  }
-101
-102  private HFileBlockEncodingContext 
getEncodingContext(Compression.Algorithm algo,
-103  DataBlockEncoding encoding) {
-104DataBlockEncoder encoder = 
encoding.getEncoder();
-105HFileContext meta = new 
HFileContextBuilder()
-106
.withHBaseCheckSum(false)
-107
.withIncludesMvcc(includesMemstoreTS)
-108
.withIncludesTags(includesTags)
-109
.withCompression(algo).build();
-110if (encoder != null) {
-111  return 
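
The excerpt ends inside getEncodingContext. Condensing the pattern visible above into a standalone fragment; the types come from the test's imports, FAST_DIFF is just one of the encodings under test, and HFILEBLOCK_DUMMY_HEADER is the constant defined earlier in the class.

// Pick an encoding, build the HFileContext, then ask the encoder for the
// context object used to encode data blocks under test.
DataBlockEncoding encoding = DataBlockEncoding.FAST_DIFF;
DataBlockEncoder encoder = encoding.getEncoder();
HFileContext meta = new HFileContextBuilder()
    .withHBaseCheckSum(false)
    .withIncludesMvcc(true)
    .withIncludesTags(false)
    .withCompression(Compression.Algorithm.NONE)
    .build();
HFileBlockEncodingContext ctx =
    encoder.newDataBlockEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);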

[06/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
index 8a925d1..a6c6c1a 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
@@ -1039,452 +1039,464 @@
 
 
 
-TestRegionServerAccounting
+TestRegionServerAbortTimeout
+
+
+
+TestRegionServerAbortTimeout.SleepWhenCloseCoprocessor
+
+
+
+TestRegionServerAbortTimeout.TestAbortTimeoutTask
 
 
 
+TestRegionServerAccounting
+
+
+
 TestRegionServerCrashDisableWAL
 
 Testcase for HBASE-20742
 
 
-
+
 TestRegionServerHostname
 
 Tests for the hostname specification by region server
 
 
-
+
 TestRegionServerMetrics
 
 
-
+
 TestRegionServerNoMaster
 
 Tests on the region server, without the master.
 
 
-
+
 TestRegionServerOnlineConfigChange
 
 Verify that the Online config Changes on the HRegionServer 
side are actually
  happening.
 
 
-
+
 TestRegionServerReadRequestMetrics
 
 
-
+
 TestRegionServerReadRequestMetrics.ScanRegionCoprocessor
 
 
-
+
 TestRegionServerRegionSpaceUseReport
 
 Test class for isolated (non-cluster) tests surrounding the 
report
  of Region space use to the Master by RegionServers.
 
 
-
+
 TestRegionServerReportForDuty
 
 
-
+
 TestRegionServerReportForDuty.LogCapturer
 
 LogCapturer is similar to 
GenericTestUtils.LogCapturer
  except that this implementation has a default appender to the root 
logger.
 
 
-
+
 TestRegionServerReportForDuty.MyRegionServer
 
 
-
+
 TestRegionServerReportForDuty.NeverInitializedMaster
 
 This test HMaster class will always throw 
ServerNotRunningYetException if checked.
 
 
-
+
 TestRegionSplitPolicy
 
 
-
+
 TestRemoveRegionMetrics
 
 
-
+
 TestResettingCounters
 
 
-
+
 TestReversibleScanners
 
 Test cases against ReversibleKeyValueScanner
 
 
-
+
 TestRowPrefixBloomFilter
 
 Test TestRowPrefixBloomFilter
 
 
-
+
 TestRowTooBig
 
 Test case to check HRS throws 
RowTooBigException
  when row size exceeds configured limits.
 
 
-
+
 TestRpcSchedulerFactory
 
 A silly test that does nothing but make sure an 
rpcscheduler factory makes what it says
  it is going to make.
 
 
-
+
 TestRSKilledWhenInitializing
 
 Tests that a regionserver that dies after reporting for 
duty gets removed
  from list of online regions.
 
 
-
+
 TestRSKilledWhenInitializing.RegisterAndDieRegionServer
 
 A RegionServer that reports for duty and then immediately 
dies if it is the first to receive
  the response to a reportForDuty.
 
 
-
+
 TestRSStatusServlet
 
 Tests for the region server status page and its 
template.
 
 
-
+
 TestScanner
 
 Test of a long-lived scanner validating as we go.
 
 
-
+
 TestScannerHeartbeatMessages
 
 Here we test to make sure that scans return the expected 
Results when the server is sending the
  Client heartbeat messages.
 
 
-
+
 TestScannerHeartbeatMessages.HeartbeatHRegion
 
 Custom HRegion class that instantiates 
RegionScanners with configurable sleep times
  between fetches of row Results and/or column family cells.
 
 
-
+
 TestScannerHeartbeatMessages.HeartbeatHRegionServer
 
 Custom HRegionServer instance that instantiates TestScannerHeartbeatMessages.HeartbeatRPCServices
 in place of
  RSRpcServices to allow us to toggle support for heartbeat 
messages
 
 
-
+
 TestScannerHeartbeatMessages.HeartbeatKVHeap
 
 Custom KV Heap that can be configured to sleep/wait in 
between retrievals of column family
  cells.
 
 
-
+
 TestScannerHeartbeatMessages.HeartbeatRegionScanner
 
 Custom RegionScanner that can be configured to sleep 
between retrievals of row Results and/or
  column family cells
 
 
-
+
 TestScannerHeartbeatMessages.HeartbeatReversedKVHeap
 
 Custom reversed KV Heap that can be configured to sleep in 
between retrievals of column family
  cells.
 
 
-
+
 TestScannerHeartbeatMessages.HeartbeatReversedRegionScanner
 
 Custom ReversedRegionScanner that can be configured to 
sleep between retrievals of row Results
  and/or column family cells
 
 
-
+
 TestScannerHeartbeatMessages.HeartbeatRPCServices
 
 Custom RSRpcServices instance that allows heartbeat support 
to be toggled
 
 
-
+
 TestScannerHeartbeatMessages.SparseCellFilter
 
 
-
+
 TestScannerHeartbeatMessages.SparseRowFilter
 
 
-
+
 TestScannerRetriableFailure
 
 
-
+
 TestScannerRetriableFailure.FaultyScannerObserver
 
 
-
+
 TestScannerWithBulkload
 
 
-
+
 TestScannerWithCorruptHFile
 
 Tests a scanner on a corrupt hfile.
 
 
-
+
 TestScannerWithCorruptHFile.CorruptHFileCoprocessor
 
 
-
+
 TestScanWithBloomError
 
 Test a multi-column scanner when there is a Bloom filter 
false-positive.
 
 
-
+
 TestSCVFWithMiniCluster
 
 
-
+
 TestSecureBulkLoadManager
 
 
-
+
 TestSeekOptimizations
 
 Test 

[06/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
index 9b964f6..98ef11a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
@@ -105,7 +105,7 @@
 097 * will first be initialized to the oldest file's tracker(which is stored in the trailer), using the
 098 * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge it
 099 * with the tracker of every newer wal files, using the
-100 * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker, boolean)}.
+100 * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}.
 101 * If we find out
 102 * that all the modified procedures for the oldest wal file are modified or deleted in newer wal
 103 * files, then we can delete it. This is because that, every time we call
 104 * 
 1173    }
 1174
 1175    // compute the holding tracker.
-1176    //  - the first WAL is used for the 'updates'
-1177    //  - the global tracker is passed in first to decide which procedures do not
-1178    //    exist anymore, so we can mark them as deleted in holdingCleanupTracker.
-1179    //    Only the global tracker has the whole picture here.
-1180    //  - the other WALs are scanned to remove procs already updated in a newer wal.
-1181    //    If it is updated in a newer wal, we can mark it as deleted in holdingCleanupTracker.
-1182    //    But, we can not delete it if it was shown deleted in the newer wal, as said
-1183    //    above.
-1184    // TODO: exit early if holdingCleanupTracker.isEmpty()
-1185    holdingCleanupTracker.resetTo(logs.getFirst().getTracker(), true);
-1186    // Passing in the global tracker, we can delete the procedures not in the global
-1187    // tracker, because they are deleted in the later logs
-1188    holdingCleanupTracker.setDeletedIfModifiedInBoth(storeTracker, true);
-1189    for (int i = 1, size = logs.size() - 1; i < size; ++i) {
-1190      // Set deleteIfNotExists to false since a single log's tracker is passed in.
-1191      // Since a specific procedure may not show up in the log at all (not executed or
-1192      // updated during the time), we can not delete the procedure just because this log
-1193      // doesn't have the info of the procedure. We can delete the procedure only if,
-1194      // in this log's tracker, it was cleanly shown that the procedure is modified or deleted
-1195      // in the corresponding BitSetNode.
-1196      holdingCleanupTracker.setDeletedIfModifiedInBoth(logs.get(i).getTracker(), false);
-1197    }
-1198  }
-1199
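The merge rule described above is easiest to see with plain bitsets. The following is a minimal, hypothetical sketch; the BitSet model is illustrative, not the real ProcedureStoreTracker API. A procedure pinning the oldest WAL becomes reclaimable once a newer WAL shows it modified again.

import java.util.BitSet;

// Illustrative model of setDeletedIfModifiedInBoth: anything modified in both
// the oldest WAL and a newer WAL no longer needs the oldest WAL.
public class HoldingTrackerSketch {
  public static void main(String[] args) {
    BitSet oldestModified = new BitSet();  // procs updated in the oldest WAL
    oldestModified.set(1); oldestModified.set(2); oldestModified.set(3);

    BitSet newerModified = new BitSet();   // procs updated again in a newer WAL
    newerModified.set(1); newerModified.set(3);

    BitSet stillHeld = (BitSet) oldestModified.clone();
    stillHeld.andNot(newerModified);       // drop procs superseded by the newer WAL

    System.out.println("procs still pinning the oldest WAL: " + stillHeld); // {2}
  }
}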
-1200  /**
-1201   * Remove all logs with logId <= {@code lastLogId}.
-1202   */
-1203  private void removeAllLogs(long lastLogId, String why) {
-1204    if (logs.size() <= 1) {
-1205      return;
-1206    }
-1207
-1208    LOG.info("Remove all state logs with ID less than {}, since {}", lastLogId, why);
-1209
-1210    boolean removed = false;
-1211    while (logs.size() > 1) {
-1212      ProcedureWALFile log = logs.getFirst();
-1213      if (lastLogId < log.getLogId()) {
-1214        break;
-1215      }
-1216      removeLogFile(log, walArchiveDir);
-1217      removed = true;
-1218    }
-1219
-1220    if (removed) {
-1221      buildHoldingCleanupTracker();
-1222    }
-1223  }
-1224
-1225  private boolean removeLogFile(final ProcedureWALFile log, final Path walArchiveDir) {
-1226    try {
-1227      LOG.trace("Removing log={}", log);
-1228      log.removeFile(walArchiveDir);
-1229      logs.remove(log);
-1230      LOG.debug("Removed log={}, activeLogs={}", log, logs);
-1231      assert logs.size() > 0 : "expected at least one log";
-1232    } catch (IOException e) {
-1233      LOG.error("Unable to remove log: " + log, e);
-1234      return false;
-1235    }
-1236    return true;
-1237  }
-1238
-1239  // ==========================================================================
-1240  //  FileSystem Log Files helpers
-1241  // ==========================================================================
-1242  public Path getWALDir() {
-1243    return this.walDir;
-1244  }
-1245
-1246  @VisibleForTesting
-1247  Path getWalArchiveDir() {
-1248    return this.walArchiveDir;
-1249  }
-1250
-1251  public FileSystem getFileSystem() {
-1252    return this.fs;
-1253  }
-1254
-1255  protected Path getLogFilePath(final long logId) throws IOException {
-1256    return new Path(walDir, String.format(LOG_PREFIX + "%020d.log", logId));
-1257  

[06/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
index ed3db7a..156dabb 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
@@ -5542,785 +5542,825 @@
 5534  }
 5535
 5536  @Test
-5537  public void testWriteRequestsCounter() throws IOException {
-5538    byte[] fam = Bytes.toBytes("info");
-5539    byte[][] families = { fam };
-5540    this.region = initHRegion(tableName, method, CONF, families);
+5537  public void testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception {
+5538    byte[] cf1 = Bytes.toBytes("CF1");
+5539    byte[][] families = { cf1 };
+5540    byte[] col = Bytes.toBytes("C");
 5541
-5542    Assert.assertEquals(0L, region.getWriteRequestsCount());
-5543
-5544    Put put = new Put(row);
-5545    put.addColumn(fam, fam, fam);
-5546
-5547    Assert.assertEquals(0L, region.getWriteRequestsCount());
-5548    region.put(put);
-5549    Assert.assertEquals(1L, region.getWriteRequestsCount());
-5550    region.put(put);
-5551    Assert.assertEquals(2L, region.getWriteRequestsCount());
-5552    region.put(put);
-5553    Assert.assertEquals(3L, region.getWriteRequestsCount());
-5554
-5555    region.delete(new Delete(row));
-5556    Assert.assertEquals(4L, region.getWriteRequestsCount());
-5557  }
-5558
-5559  @Test
-5560  public void testOpenRegionWrittenToWAL() throws Exception {
-5561    final ServerName serverName = ServerName.valueOf(name.getMethodName(), 100, 42);
-5562    final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName));
-5563
-5564    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
-5565    htd.addFamily(new HColumnDescriptor(fam1));
-5566    htd.addFamily(new HColumnDescriptor(fam2));
-5567
-5568    HRegionInfo hri = new HRegionInfo(htd.getTableName(),
-5569      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
-5570
-5571    // open the region w/o rss and wal and flush some files
-5572    region =
-5573      HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL
-5574        .getConfiguration(), htd);
-5575    assertNotNull(region);
-5576
-5577    // create a file in fam1 for the region before opening in OpenRegionHandler
-5578    region.put(new Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
-5579    region.flush(true);
-5580    HBaseTestingUtility.closeRegionAndWAL(region);
+5542    HBaseConfiguration conf = new HBaseConfiguration();
+5543    this.region = initHRegion(tableName, method, conf, families);
+5544
+5545    Put put = new Put(Bytes.toBytes("16"));
+5546    put.addColumn(cf1, col, Bytes.toBytes("val"));
+5547    region.put(put);
+5548    Put put2 = new Put(Bytes.toBytes("15"));
+5549    put2.addColumn(cf1, col, Bytes.toBytes("val"));
+5550    region.put(put2);
+5551
+5552    // Create a reverse scan
+5553    Scan scan = new Scan(Bytes.toBytes("16"));
+5554    scan.setReversed(true);
+5555    RegionScannerImpl scanner = region.getScanner(scan);
+5556
+5557    // Put a lot of cells that have sequenceIDs greater than the readPt of the reverse scan
+5558    for (int i = 10; i < 20; i++) {
+5559      Put p = new Put(Bytes.toBytes("" + i));
+5560      p.addColumn(cf1, col, Bytes.toBytes("" + i));
+5561      region.put(p);
+5562    }
+5563    List<Cell> currRow = new ArrayList<>();
+5564    boolean hasNext;
+5565    do {
+5566      hasNext = scanner.next(currRow);
+5567    } while (hasNext);
+5568
+5569    assertEquals(2, currRow.size());
+5570    assertEquals("16", Bytes.toString(currRow.get(0).getRowArray(),
+5571      currRow.get(0).getRowOffset(), currRow.get(0).getRowLength()));
+5572    assertEquals("15", Bytes.toString(currRow.get(1).getRowArray(),
+5573      currRow.get(1).getRowOffset(), currRow.get(1).getRowLength()));
+5574  }
+5575
+5576  @Test
+5577  public void testWriteRequestsCounter() throws IOException {
+5578    byte[] fam = Bytes.toBytes("info");
+5579    byte[][] families = { fam };
+5580    this.region = initHRegion(tableName, method, CONF, families);
 5581
-5582    ArgumentCaptor<WALEdit> editCaptor = ArgumentCaptor.forClass(WALEdit.class);
+5582    Assert.assertEquals(0L, region.getWriteRequestsCount());
 5583
-5584    // capture append() calls
-5585    WAL wal = mockWAL();
-5586    when(rss.getWAL((HRegionInfo) any())).thenReturn(wal);
-5587
-5588    region = HRegion.openHRegion(hri, htd, rss.getWAL(hri),
-5589      
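The test above drives the reversed scan against HRegion directly; the same pattern from the client API, as a hedged sketch. The Connection `conn` and the table name "t1" are assumptions, and withStartRow/setReversed are the non-deprecated counterparts of the Scan(byte[]) constructor used in the test.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative: walk rows in descending order starting from row "16".
static void printReversed(Connection conn) throws Exception {
  try (Table table = conn.getTable(TableName.valueOf("t1"))) {
    Scan scan = new Scan().withStartRow(Bytes.toBytes("16")).setReversed(true);
    try (ResultScanner rs = table.getScanner(scan)) {
      for (Result r : rs) {
        System.out.println(Bytes.toString(r.getRow()));  // "16", then "15", ...
      }
    }
  }
}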

[06/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/Bytes.Converter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/Bytes.Converter.html 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.Converter.html
new file mode 100644
index 000..a8a22f1
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/util/Bytes.Converter.html
@@ -0,0 +1,369 @@
+org.apache.hadoop.hbase.util
+Class Bytes.Converter
+
+java.lang.Object
+  org.apache.hadoop.hbase.util.Bytes.Converter
+
+Direct Known Subclasses:
+Bytes.ConverterHolder.PureJavaConverter, Bytes.ConverterHolder.UnsafeConverter
+
+Enclosing class:
+Bytes
+
+abstract static class Bytes.Converter
+extends Object
+
+Constructor Summary
+
+Constructor and Description
+Converter()
+
+Method Summary
+
+Modifier and Type                 Method and Description
+(package private) abstract int    putInt(byte[] bytes, int offset, int val)
+(package private) abstract int    putLong(byte[] bytes, int offset, long val)
+(package private) abstract int    putShort(byte[] bytes, int offset, short val)
+(package private) abstract int    toInt(byte[] bytes, int offset, int length)
+(package private) abstract long   toLong(byte[] bytes, int offset, int length)
+(package private) abstract short  toShort(byte[] bytes, int offset, int length)
+
+
+Methods inherited from class java.lang.Object:
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait
+
+Constructor Detail
+
+Converter()
+
+Method Detail
+
+toLong
+abstract long toLong(byte[] bytes,
+                     int offset,
+ 
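A hedged sketch of what a pure-Java Converter might do for the short variants; the signatures come from the summary above, while the big-endian layout and the bounds check are assumptions.

// Illustrative only, not the real Bytes.ConverterHolder.PureJavaConverter.
static short toShort(byte[] bytes, int offset, int length) {
  if (length != Short.BYTES || offset + length > bytes.length) {
    throw new IllegalArgumentException("need exactly 2 readable bytes");
  }
  short n = 0;
  n = (short) ((n ^ bytes[offset]) & 0xFF);       // high byte first (big-endian)
  n = (short) (n << 8);
  n = (short) ((n ^ bytes[offset + 1]) & 0xFF);   // then the low byte
  return n;
}

static int putShort(byte[] bytes, int offset, short val) {
  bytes[offset + 1] = (byte) val;                 // low byte last
  bytes[offset] = (byte) (val >> 8);              // high byte first
  return offset + Short.BYTES;                    // next writable position
}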

[06/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.StepHook.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.StepHook.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.StepHook.html
index 8c7d5c9..0c87f49 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.StepHook.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.StepHook.html
@@ -387,7 +387,7 @@
 379   */
 380  public static void testRecoveryAndDoubleExecution(
 381      final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId,
-382      final int numSteps, final boolean expectExecRunning) throws Exception {
+382      final int lastStep, final boolean expectExecRunning) throws Exception {
 383    ProcedureTestingUtility.waitProcedure(procExec, procId);
 384    assertEquals(false, procExec.isRunning());
 385
@@ -405,201 +405,204 @@
 397    // fix would be get all visited states by the procedure and then check if the user-specified
 398    // state is in that list. The current assumption of sequential progression of steps/states is
 399    // made at multiple places so we can keep the while condition below for simplicity.
-400    Procedure proc = procExec.getProcedure(procId);
+400    Procedure<?> proc = procExec.getProcedure(procId);
 401    int stepNum = proc instanceof StateMachineProcedure ?
 402        ((StateMachineProcedure) proc).getCurrentStateId() : 0;
-403    while (stepNum < numSteps) {
-404      LOG.info("Restart " + stepNum + " exec state=" + proc);
-405      ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
-406      restartMasterProcedureExecutor(procExec);
-407      ProcedureTestingUtility.waitProcedure(procExec, procId);
-408      // Old proc object is stale, need to get the new one after ProcedureExecutor restart
-409      proc = procExec.getProcedure(procId);
-410      stepNum = proc instanceof StateMachineProcedure ?
-411          ((StateMachineProcedure) proc).getCurrentStateId() : stepNum + 1;
-412    }
-413
-414    assertEquals(expectExecRunning, procExec.isRunning());
-415  }
+403    for (;;) {
+404      if (stepNum == lastStep) {
+405        break;
+406      }
+407      LOG.info("Restart " + stepNum + " exec state=" + proc);
+408      ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
+409      restartMasterProcedureExecutor(procExec);
+410      ProcedureTestingUtility.waitProcedure(procExec, procId);
+411      // Old proc object is stale, need to get the new one after ProcedureExecutor restart
+412      proc = procExec.getProcedure(procId);
+413      stepNum = proc instanceof StateMachineProcedure ?
+414          ((StateMachineProcedure) proc).getCurrentStateId() : stepNum + 1;
+415    }
 416
-417  /**
-418   * Run through all procedure flow states TWICE while also restarting
-419   * the procedure executor at each step; i.e. force a reread of the procedure store.
-420   *
-421   * <p>It does
-422   * <ol><li>Execute step N - kill the executor before store update
-423   * <li>Restart executor/store
-424   * <li>Executes hook for each step twice
-425   * <li>Execute step N - and then save to store
-426   * </ol>
-427   *
-428   * <p>This is a good test for finding state that needs persisting and steps that are not
-429   * idempotent. Use this version of the test when the order in which flow steps are executed is
-430   * not start to finish; where the procedure may vary the flow steps dependent on circumstance
-431   * found.
-432   * @see #testRecoveryAndDoubleExecution(ProcedureExecutor, long, int, boolean)
-433   */
-434  public static void testRecoveryAndDoubleExecution(
-435      final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId, final StepHook hook)
-436      throws Exception {
-437    ProcedureTestingUtility.waitProcedure(procExec, procId);
-438    assertEquals(false, procExec.isRunning());
-439    for (int i = 0; !procExec.isFinished(procId); ++i) {
-440      LOG.info("Restart " + i + " exec state=" + procExec.getProcedure(procId));
-441      if (hook != null) {
-442        assertTrue(hook.execute(i));
-443      }
-444      restartMasterProcedureExecutor(procExec);
-445      ProcedureTestingUtility.waitProcedure(procExec, procId);
-446    }
-447    assertEquals(true, procExec.isRunning());
-448    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
-449  }
-450
-451  public static void testRecoveryAndDoubleExecution(
-452      final ProcedureExecutor<MasterProcedureEnv> procExec, final long procId) throws Exception {
-453    testRecoveryAndDoubleExecution(procExec, procId, null);
-454  }
-455
-456  /**
-457   * Hook which 
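A hedged sketch of driving the hook-based overload above from a test. StepHook's boolean execute(int) is inferred from the assertTrue(hook.execute(i)) call site, and procExec/procId are assumed to be set up by the test harness.

// Illustrative: log each restart round and abort a runaway procedure.
MasterProcedureTestingUtility.StepHook hook = step -> {
  System.out.println("about to restart the executor at step " + step);
  return step < 50;  // assumed safety budget; returning false fails the assertTrue above
};
MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, hook);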

[06/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
index d69bb8c..92967f2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.Providers.html
@@ -88,428 +88,404 @@
 080
 081  public static final String WAL_PROVIDER = "hbase.wal.provider";
 082  static final String DEFAULT_WAL_PROVIDER = Providers.defaultProvider.name();
-083  public static final String WAL_PROVIDER_CLASS = "hbase.wal.provider.class";
-084  static final Class<? extends WALProvider> DEFAULT_WAL_PROVIDER_CLASS = AsyncFSWALProvider.class;
+083
+084  public static final String META_WAL_PROVIDER = "hbase.wal.meta_provider";
 085
-086  public static final String META_WAL_PROVIDER = "hbase.wal.meta_provider";
-087  public static final String META_WAL_PROVIDER_CLASS = "hbase.wal.meta_provider.class";
-088
-089  final String factoryId;
-090  private final WALProvider provider;
-091  // The meta updates are written to a different wal. If this
-092  // regionserver holds meta regions, then this ref will be non-null.
-093  // lazily initialized; most RegionServers don't deal with META
-094  private final AtomicReference<WALProvider> metaProvider = new AtomicReference<>();
-095
-096  /**
-097   * Configuration-specified WAL Reader used when a custom reader is requested
-098   */
-099  private final Class<? extends AbstractFSWALProvider.Reader> logReaderClass;
-100
-101  /**
-102   * How long to attempt opening in-recovery wals
-103   */
-104  private final int timeoutMillis;
-105
-106  private final Configuration conf;
-107
-108  // Used for the singleton WALFactory, see below.
-109  private WALFactory(Configuration conf) {
-110    // this code is duplicated here so we can keep our members final.
-111    // until we've moved reader/writer construction down into providers, this initialization must
-112    // happen prior to provider initialization, in case they need to instantiate a reader/writer.
-113    timeoutMillis = conf.getInt("hbase.hlog.open.timeout", 300000);
-114    /* TODO Both of these are probably specific to the fs wal provider */
-115    logReaderClass = conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
-116      AbstractFSWALProvider.Reader.class);
-117    this.conf = conf;
-118    // end required early initialization
-119
-120    // this instance can't create wals, just reader/writers.
-121    provider = null;
-122    factoryId = SINGLETON_ID;
-123  }
-124
-125  @VisibleForTesting
-126  Providers getDefaultProvider() {
-127    return Providers.defaultProvider;
-128  }
-129
-130  @VisibleForTesting
-131  /*
-132   * @param clsKey config key for provider classname
-133   * @param key config key for provider enum
-134   * @param defaultValue default value for provider enum
-135   * @return Class which extends WALProvider
-136   */
-137  public Class<? extends WALProvider> getProviderClass(String clsKey, String key,
-138      String defaultValue) {
-139    String clsName = conf.get(clsKey);
-140    if (clsName == null || clsName.isEmpty()) {
-141      clsName = conf.get(key, defaultValue);
-142    }
-143    if (clsName != null && !clsName.isEmpty()) {
-144      try {
-145        return (Class<? extends WALProvider>) Class.forName(clsName);
-146      } catch (ClassNotFoundException exception) {
-147        // try with enum key next
-148      }
-149    }
-150    try {
-151      Providers provider = Providers.valueOf(conf.get(key, defaultValue));
-152
-153      // AsyncFSWALProvider is not guaranteed to work on all Hadoop versions, when it's chosen as
-154      // the default and we can't use it, we want to fall back to FSHLog which we know works on
-155      // all versions.
-156      if (provider == getDefaultProvider() && provider.clazz == AsyncFSWALProvider.class
-157          && !AsyncFSWALProvider.load()) {
-158        // AsyncFSWAL has better performance in most cases, and also uses less resources, we will
-159        // try to use it if possible. It deeply hacks into the internals of DFSClient so will be
-160        // easily broken when upgrading hadoop.
-161        LOG.warn("Failed to load AsyncFSWALProvider, falling back to FSHLogProvider");
-162        return FSHLogProvider.class;
-163      }
-164
-165      // N.b. If the user specifically requested AsyncFSWALProvider but their environment doesn't
-166      // support using it (e.g. AsyncFSWALProvider.load() == false), we should let this fail and
-167      // not fall back to FSHLogProvider.
-168      return provider.clazz;
-169    } catch (IllegalArgumentException exception) {
-170      // Fall back to them 
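The lookup order implemented above (explicit class key first, then the enum key, then a load-check fallback) reduces to a few lines; a hedged, standalone sketch with illustrative names rather than the real WALFactory API:

import java.util.Map;

// Illustrative: resolve a provider name from configuration with a fallback.
static String resolveProvider(Map<String, String> conf, boolean asyncLoadable) {
  String cls = conf.get("hbase.wal.provider.class");     // an explicit class wins
  if (cls != null && !cls.isEmpty()) {
    return cls;
  }
  String name = conf.getOrDefault("hbase.wal.provider", "asyncfs");
  if (name.equals("asyncfs") && !asyncLoadable) {
    return "filesystem";                                 // known-good FSHLog-style fallback
  }
  return name;
}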

[06/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestMultiStepProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestMultiStepProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestMultiStepProcedure.html
index f2124e0..bfe7950 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestMultiStepProcedure.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestMultiStepProcedure.html
@@ -238,7 +238,7 @@ extends 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestSingleStepProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestSingleStepProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestSingleStepProcedure.html
index 22c5028..2339e3b 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestSingleStepProcedure.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestSingleStepProcedure.html
@@ -230,7 +230,7 @@ extends 
org.apache.hadoop.hbase.procedure2.SequentialProcedure
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestStateMachineProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestStateMachineProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestStateMachineProcedure.html
index 434ca30..eb593ae 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestStateMachineProcedure.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.TestStateMachineProcedure.html
@@ -288,7 +288,7 @@ extends 
org.apache.hadoop.hbase.procedure2.StateMachineProcedure
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestProcedure.html
index 8d0d9a2..bbcfdca 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestProcedure.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestProcedure.html
@@ -234,7 +234,7 @@ extends org.apache.hadoop.hbase.procedure2.Procedure
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestSingleStepProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestSingleStepProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestSingleStepProcedure.html
index bb28784..36c883e 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestSingleStepProcedure.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestSingleStepProcedure.html
@@ -222,7 +222,7 @@ extends 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestTwoStepProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestTwoStepProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestTwoStepProcedure.html
index 20b22e0..d9e4839 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestTwoStepProcedure.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.TestTwoStepProcedure.html
@@ -222,7 +222,7 @@ extends 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.TestProcedureWithEvent.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.TestProcedureWithEvent.html
 

[06/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
index 566f410..da040ad 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
@@ -341,8361 +341,8425 @@
 333  private final int rowLockWaitDuration;
 334  static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
 335
-336  // The internal wait duration to acquire a lock before read/update
-337  // from the region. It is not per row. The purpose of this wait time
-338  // is to avoid waiting a long time while the region is busy, so that
-339  // we can release the IPC handler soon enough to improve the
-340  // availability of the region server. It can be adjusted by
-341  // tuning configuration "hbase.busy.wait.duration".
-342  final long busyWaitDuration;
-343  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-344
-345  // If updating multiple rows in one call, wait longer,
-346  // i.e. waiting for busyWaitDuration * # of rows. However,
-347  // we can limit the max multiplier.
-348  final int maxBusyWaitMultiplier;
-349
-350  // Max busy wait duration. There is no point to wait longer than the RPC
-351  // purge timeout, when a RPC call will be terminated by the RPC engine.
-352  final long maxBusyWaitDuration;
-353
-354  // Max cell size. If nonzero, the maximum allowed size for any given cell
-355  // in bytes
-356  final long maxCellSize;
-357
-358  // Number of mutations for minibatch processing.
-359  private final int miniBatchSize;
+336  private Path regionDir;
+337  private FileSystem walFS;
+338
+339  // The internal wait duration to acquire a lock before read/update
+340  // from the region. It is not per row. The purpose of this wait time
+341  // is to avoid waiting a long time while the region is busy, so that
+342  // we can release the IPC handler soon enough to improve the
+343  // availability of the region server. It can be adjusted by
+344  // tuning configuration "hbase.busy.wait.duration".
+345  final long busyWaitDuration;
+346  static final long DEFAULT_BUSY_WAIT_DURATION = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+347
+348  // If updating multiple rows in one call, wait longer,
+349  // i.e. waiting for busyWaitDuration * # of rows. However,
+350  // we can limit the max multiplier.
+351  final int maxBusyWaitMultiplier;
+352
+353  // Max busy wait duration. There is no point to wait longer than the RPC
+354  // purge timeout, when a RPC call will be terminated by the RPC engine.
+355  final long maxBusyWaitDuration;
+356
+357  // Max cell size. If nonzero, the maximum allowed size for any given cell
+358  // in bytes
+359  final long maxCellSize;
 360
-361  // negative number indicates infinite timeout
-362  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
-363  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
-364
-365  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
-366
-367  /**
-368   * The sequence ID that was encountered when this region was opened.
-369   */
-370  private long openSeqNum = HConstants.NO_SEQNUM;
-371
-372  /**
-373   * The default setting for whether to enable on-demand CF loading for
-374   * scan requests to this region. Requests can override it.
-375   */
-376  private boolean isLoadingCfsOnDemandDefault = false;
-377
-378  private final AtomicInteger majorInProgress = new AtomicInteger(0);
-379  private final AtomicInteger minorInProgress = new AtomicInteger(0);
+361  // Number of mutations for minibatch processing.
+362  private final int miniBatchSize;
+363
+364  // negative number indicates infinite timeout
+365  static final long DEFAULT_ROW_PROCESSOR_TIMEOUT = 60 * 1000L;
+366  final ExecutorService rowProcessorExecutor = Executors.newCachedThreadPool();
+367
+368  private final ConcurrentHashMap<RegionScanner, Long> scannerReadPoints;
+369
+370  /**
+371   * The sequence ID that was encountered when this region was opened.
+372   */
+373  private long openSeqNum = HConstants.NO_SEQNUM;
+374
+375  /**
+376   * The default setting for whether to enable on-demand CF loading for
+377   * scan requests to this region. Requests can override it.
+378   */
+379  private boolean isLoadingCfsOnDemandDefault = false;
 380
-381  //
-382  // Context: During replay we want to ensure that we do not lose any data. So, we
-383  // have to be conservative in how we replay wals. For each store, we calculate
-384  // the maxSeqId up to which the 
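The capped multiplier described in the comments above is a small computation; a hedged sketch (the method name and example numbers are illustrative):

// Illustrative: busy-wait budget for a multi-row batch, scaled per row but
// capped by the multiplier and by the absolute maximum.
static long effectiveBusyWaitMillis(long busyWaitDuration, int rows,
    int maxBusyWaitMultiplier, long maxBusyWaitDuration) {
  long multiplier = Math.min(Math.max(rows, 1), maxBusyWaitMultiplier);
  return Math.min(busyWaitDuration * multiplier, maxBusyWaitDuration);
}

// e.g. effectiveBusyWaitMillis(60000, 10, 5, 120000) == 120000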

[06/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html
index acc491f..e6c6561 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionServerServices.RegionStateTransitionContext.html
@@ -26,256 +26,255 @@
 018 */
 019package org.apache.hadoop.hbase.regionserver;
 020
-021import java.io.IOException;
-022import java.util.Collection;
-023import java.util.List;
-024import java.util.Map.Entry;
-025import java.util.concurrent.ConcurrentMap;
-026
+021import com.google.protobuf.Service;
+022import java.io.IOException;
+023import java.util.Collection;
+024import java.util.List;
+025import java.util.Map.Entry;
+026import java.util.concurrent.ConcurrentMap;
 027import org.apache.hadoop.hbase.Abortable;
 028import org.apache.hadoop.hbase.Server;
-029import org.apache.hadoop.hbase.TableName;
-030import org.apache.hadoop.hbase.client.RegionInfo;
-031import org.apache.hadoop.hbase.client.locking.EntityLock;
-032import org.apache.hadoop.hbase.executor.ExecutorService;
-033import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-034import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
-035import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-036import org.apache.hadoop.hbase.quotas.RegionSizeStore;
-037import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
-038import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-039import org.apache.hadoop.hbase.wal.WAL;
-040import org.apache.yetus.audience.InterfaceAudience;
-041import org.apache.zookeeper.KeeperException;
+029import org.apache.hadoop.hbase.TableDescriptors;
+030import org.apache.hadoop.hbase.TableName;
+031import org.apache.hadoop.hbase.client.RegionInfo;
+032import org.apache.hadoop.hbase.client.locking.EntityLock;
+033import org.apache.hadoop.hbase.executor.ExecutorService;
+034import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+035import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
+036import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
+037import org.apache.hadoop.hbase.quotas.RegionSizeStore;
+038import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
+039import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
+040import org.apache.hadoop.hbase.wal.WAL;
+041import org.apache.yetus.audience.InterfaceAudience;
 042
 043import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 044
-045import com.google.protobuf.Service;
-046
-047/**
-048 * A curated subset of services provided by {@link HRegionServer}.
-049 * For use internally only. Passed to Managers, Services and Chores so can pass less-than-a
-050 * full-on HRegionServer at test-time. Be judicious adding API. Changes cause ripples through
-051 * the code base.
-052 */
-053@InterfaceAudience.Private
-054public interface RegionServerServices extends Server, MutableOnlineRegions, FavoredNodesForRegion {
-055
-056  /** @return the WAL for a particular region. Pass null for getting the
-057   * default (common) WAL */
-058  WAL getWAL(RegionInfo regionInfo) throws IOException;
-059
-060  /** @return the List of WALs that are used by this server
-061   *  Doesn't include the meta WAL
-062   */
-063  List<WAL> getWALs() throws IOException;
-064
-065  /**
-066   * @return Implementation of {@link FlushRequester} or null. Usually it will not be null unless
-067   * during initialization.
-068   */
-069  FlushRequester getFlushRequester();
-070
-071  /**
-072   * @return Implementation of {@link CompactionRequester} or null. Usually it will not be null
-073   * unless during initialization.
-074   */
-075  CompactionRequester getCompactionRequestor();
-076
-077  /**
-078   * @return the RegionServerAccounting for this Region Server
-079   */
-080  RegionServerAccounting getRegionServerAccounting();
-081
-082  /**
-083   * @return RegionServer's instance of {@link RegionServerRpcQuotaManager}
-084   */
-085  RegionServerRpcQuotaManager getRegionServerRpcQuotaManager();
-086
-087  /**
-088   * @return RegionServer's instance of {@link SecureBulkLoadManager}
-089   */
-090  SecureBulkLoadManager getSecureBulkLoadManager();
-091
-092  /**
-093   * @return RegionServer's instance of {@link RegionServerSpaceQuotaManager}
-094   */
-095  RegionServerSpaceQuotaManager 
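Per the javadoc's test-time intent, a hedged sketch of handing a chore a narrow stub instead of a full HRegionServer. It assumes Mockito on the classpath; the method names come from the listing above.

import java.util.Collections;
import org.mockito.Mockito;

// Illustrative: stub only the services the code under test actually touches.
RegionServerServices rss = Mockito.mock(RegionServerServices.class);
Mockito.when(rss.getWALs()).thenReturn(Collections.emptyList());
// pass `rss` to the Manager/Service/Chore under test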

[06/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
index 2c14c50..43c66a8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerMonitor.html
@@ -46,2104 +46,2113 @@
 038import java.util.concurrent.atomic.AtomicLong;
 039import java.util.stream.Collectors;
 040import java.util.stream.Stream;
-041import org.apache.hadoop.conf.Configuration;
-042import org.apache.hadoop.hbase.HConstants;
-043import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-044import org.apache.hadoop.hbase.log.HBaseMarkers;
-045import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-046import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-047import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-048import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-049import org.apache.hadoop.hbase.security.User;
-050import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-051import org.apache.hadoop.hbase.util.IdLock;
-052import org.apache.hadoop.hbase.util.NonceKey;
-053import org.apache.hadoop.hbase.util.Threads;
-054import org.apache.yetus.audience.InterfaceAudience;
-055import org.slf4j.Logger;
-056import org.slf4j.LoggerFactory;
-057
-058import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-059import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-060
-061import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-062
-063/**
-064 * Thread Pool that executes the submitted procedures.
-065 * The executor has a ProcedureStore associated.
-066 * Each operation is logged and on restart the pending procedures are resumed.
-067 *
-068 * Unless the Procedure code throws an error (e.g. invalid user input)
-069 * the procedure will complete (at some point in time). On restart the pending
-070 * procedures are resumed and the ones that failed will be rolled back.
-071 *
-072 * The user can add procedures to the executor via submitProcedure(proc),
-073 * check for the finished state via isFinished(procId),
-074 * and get the result via getResult(procId).
-075 */
-076@InterfaceAudience.Private
-077public class ProcedureExecutor<TEnvironment> {
-078  private static final Logger LOG = LoggerFactory.getLogger(ProcedureExecutor.class);
-079
-080  public static final String CHECK_OWNER_SET_CONF_KEY = "hbase.procedure.check.owner.set";
-081  private static final boolean DEFAULT_CHECK_OWNER_SET = false;
-082
-083  public static final String WORKER_KEEP_ALIVE_TIME_CONF_KEY =
-084      "hbase.procedure.worker.keep.alive.time.msec";
-085  private static final long DEFAULT_WORKER_KEEP_ALIVE_TIME = TimeUnit.MINUTES.toMillis(1);
-086
-087  /**
-088   * {@link #testing} is non-null when ProcedureExecutor is being tested. Tests will try to
-089   * break PE having it fail at various junctures. When non-null, testing is set to an instance of
-090   * the below internal {@link Testing} class with flags set for the particular test.
-091   */
-092  Testing testing = null;
-093
-094  /**
-095   * Class with parameters describing how to fail/die when in testing-context.
-096   */
-097  public static class Testing {
-098    protected boolean killIfHasParent = true;
-099    protected boolean killIfSuspended = false;
-100
-101    /**
-102     * Kill the PE BEFORE we store state to the WAL. Good for figuring out if a Procedure is
-103     * persisting all the state it needs to recover after a crash.
-104     */
-105    protected boolean killBeforeStoreUpdate = false;
-106    protected boolean toggleKillBeforeStoreUpdate = false;
-107
-108    /**
-109     * Set when we want to fail AFTER state has been stored into the WAL. Rarely used. HBASE-20978
-110     * is about a case where memory-state was being set after store to WAL where a crash could
-111     * cause us to get stuck. This flag allows killing at what was a vulnerable time.
-112     */
-113    protected boolean killAfterStoreUpdate = false;
-114    protected boolean toggleKillAfterStoreUpdate = false;
-115
-116    protected boolean shouldKillBeforeStoreUpdate() {
-117      final boolean kill = this.killBeforeStoreUpdate;
-118      if (this.toggleKillBeforeStoreUpdate) {
-119        this.killBeforeStoreUpdate = !kill;
-120        LOG.warn("Toggle KILL before store update to: " + this.killBeforeStoreUpdate);
-121      }
-122      return kill;
-123    }
-124
-125    protected boolean shouldKillBeforeStoreUpdate(boolean 
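A hedged sketch of the submit/poll lifecycle the class javadoc above describes. The executor `procExec` and procedure `myProc` are assumed to exist, getResult is assumed to return the completed Procedure, and sleep-polling is for illustration only.

// Illustrative: submit a root procedure and wait for its result.
long procId = procExec.submitProcedure(myProc);
while (!procExec.isFinished(procId)) {
  Thread.sleep(100);  // polling only to keep the sketch short
}
Procedure<?> result = procExec.getResult(procId);
System.out.println("finished, failed=" + result.isFailed());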

[06/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
index c372545..af3b364 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.DefaultRackManager.html
@@ -1279,322 +1279,339 @@
 1271    List<RegionInfo> lastFewRegions = new ArrayList<>();
 1272    // assign the remaining by going through the list and try to assign to servers one-by-one
 1273    int serverIdx = RANDOM.nextInt(numServers);
-1274    for (RegionInfo region : unassignedRegions) {
+1274    OUTER : for (RegionInfo region : unassignedRegions) {
 1275      boolean assigned = false;
-1276      for (int j = 0; j < numServers; j++) { // try all servers one by one
+1276      INNER : for (int j = 0; j < numServers; j++) { // try all servers one by one
 1277        ServerName serverName = servers.get((j + serverIdx) % numServers);
 1278        if (!cluster.wouldLowerAvailability(region, serverName)) {
 1279          List<RegionInfo> serverRegions =
 1280              assignments.computeIfAbsent(serverName, k -> new ArrayList<>());
-1281          serverRegions.add(region);
-1282          cluster.doAssignRegion(region, serverName);
-1283          serverIdx = (j + serverIdx + 1) % numServers; // remain from next server
-1284          assigned = true;
-1285          break;
-1286        }
-1287      }
-1288      if (!assigned) {
-1289        lastFewRegions.add(region);
-1290      }
-1291    }
-1292    // just sprinkle the rest of the regions on random regionservers. The balanceCluster will
-1293    // make it optimal later. we can end up with this if numReplicas > numServers.
-1294    for (RegionInfo region : lastFewRegions) {
-1295      int i = RANDOM.nextInt(numServers);
-1296      ServerName server = servers.get(i);
-1297      List<RegionInfo> serverRegions = assignments.computeIfAbsent(server, k -> new ArrayList<>());
-1298      serverRegions.add(region);
-1299      cluster.doAssignRegion(region, server);
-1300    }
-1301    return assignments;
-1302  }
-1303
-1304  protected Cluster createCluster(List<ServerName> servers, Collection<RegionInfo> regions) {
-1305    // Get the snapshot of the current assignments for the regions in question, and then create
-1306    // a cluster out of it. Note that we might have replicas already assigned to some servers
-1307    // earlier. So we want to get the snapshot to see those assignments, but this will only contain
-1308    // replicas of the regions that are passed (for performance).
-1309    Map<ServerName, List<RegionInfo>> clusterState = getRegionAssignmentsByServer(regions);
-1310
-1311    for (ServerName server : servers) {
-1312      if (!clusterState.containsKey(server)) {
-1313        clusterState.put(server, EMPTY_REGION_LIST);
-1314      }
-1315    }
-1316    return new Cluster(regions, clusterState, null, this.regionFinder,
-1317        rackManager);
-1318  }
-1319
-1320  private List<ServerName> findIdleServers(List<ServerName> servers) {
-1321    return this.services.getServerManager()
-1322        .getOnlineServersListWithPredicator(servers, IDLE_SERVER_PREDICATOR);
-1323  }
-1324
-1325  /**
-1326   * Used to assign a single region to a random server.
-1327   */
-1328  @Override
-1329  public ServerName randomAssignment(RegionInfo regionInfo, List<ServerName> servers)
-1330      throws HBaseIOException {
-1331    metricsBalancer.incrMiscInvocations();
-1332    if (servers != null && servers.contains(masterServerName)) {
-1333      if (shouldBeOnMaster(regionInfo)) {
-1334        return masterServerName;
-1335      }
-1336      if (!LoadBalancer.isTablesOnMaster(getConf())) {
-1337        // Guarantee we do not put any regions on master
-1338        servers = new ArrayList<>(servers);
-1339        servers.remove(masterServerName);
-1340      }
-1341    }
-1342
-1343    int numServers = servers == null ? 0 : servers.size();
-1344    if (numServers == 0) {
-1345      LOG.warn("Wanted to retain assignment but no servers to assign to");
-1346      return null;
-1347    }
-1348    if (numServers == 1) { // Only one server, nothing fancy we can do here
-1349      return servers.get(0);
-1350    }
-1351    List<ServerName> idleServers = findIdleServers(servers);
-1352    if (idleServers.size() == 1) {
-1353      return idleServers.get(0);
-1354    }
-1355    final List<ServerName> finalServers = idleServers.isEmpty() ?
-1356        servers : idleServers;
-1357    List<RegionInfo> regions = Lists.newArrayList(regionInfo);
-1358    Cluster cluster = 
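The retained loop above is a guarded round-robin with a random sprinkle for leftovers; a hedged standalone sketch of the same idea, with types simplified to strings and the availability guard stubbed out:

import java.util.*;

// Illustrative round-robin assignment echoing the OUTER/INNER loops above.
static Map<String, List<String>> assign(List<String> regions, List<String> servers, Random rnd) {
  Map<String, List<String>> assignments = new HashMap<>();
  List<String> leftovers = new ArrayList<>();
  int serverIdx = rnd.nextInt(servers.size());
  for (String region : regions) {
    boolean assigned = false;
    for (int j = 0; j < servers.size(); j++) {           // try all servers one by one
      String server = servers.get((j + serverIdx) % servers.size());
      if (wouldNotLowerAvailability(region, server)) {   // stubbed guard, see below
        assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
        serverIdx = (j + serverIdx + 1) % servers.size(); // resume from the next server
        assigned = true;
        break;
      }
    }
    if (!assigned) {
      leftovers.add(region);
    }
  }
  for (String region : leftovers) {                      // random sprinkle
    String server = servers.get(rnd.nextInt(servers.size()));
    assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
  }
  return assignments;
}

static boolean wouldNotLowerAvailability(String region, String server) {
  return true;  // placeholder for the real replica-aware check
}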

[06/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
index d11176a..2c14c50 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -982,1050 +982,1168 @@
 974  }
 975
 976  /**
-977   * Add a new root-procedure to the executor.
-978   * @param proc the new procedure to execute.
-979   * @param nonceKey the registered unique identifier for this operation from the client or process.
-980   * @return the procedure id, that can be used to monitor the operation
-981   */
-982  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
-983      justification = "FindBugs is blind to the check-for-null")
-984  public long submitProcedure(Procedure<TEnvironment> proc, NonceKey nonceKey) {
-985    Preconditions.checkArgument(lastProcId.get() >= 0);
-986
-987    prepareProcedure(proc);
-988
-989    final Long currentProcId;
-990    if (nonceKey != null) {
-991      currentProcId = nonceKeysToProcIdsMap.get(nonceKey);
-992      Preconditions.checkArgument(currentProcId != null,
-993        "Expected nonceKey=" + nonceKey + " to be reserved, use registerNonce(); proc=" + proc);
-994    } else {
-995      currentProcId = nextProcId();
-996    }
-997
-998    // Initialize the procedure
-999    proc.setNonceKey(nonceKey);
-1000    proc.setProcId(currentProcId.longValue());
-1001
-1002    // Commit the transaction
-1003    store.insert(proc, null);
-1004    LOG.debug("Stored {}", proc);
-1005
-1006    // Add the procedure to the executor
-1007    return pushProcedure(proc);
-1008  }
-1009
-1010  /**
-1011   * Add a set of new root-procedures to the executor.
-1012   * @param procs the new procedures to execute.
-1013   */
-1014  // TODO: Do we need to take nonces here?
-1015  public void submitProcedures(Procedure<TEnvironment>[] procs) {
-1016    Preconditions.checkArgument(lastProcId.get() >= 0);
-1017    if (procs == null || procs.length <= 0) {
-1018      return;
-1019    }
-1020
-1021    // Prepare procedures
-1022    for (int i = 0; i < procs.length; ++i) {
-1023      prepareProcedure(procs[i]).setProcId(nextProcId());
-1024    }
-1025
-1026    // Commit the transaction
-1027    store.insert(procs);
-1028    if (LOG.isDebugEnabled()) {
-1029      LOG.debug("Stored " + Arrays.toString(procs));
-1030    }
-1031
-1032    // Add the procedures to the executor
-1033    for (int i = 0; i < procs.length; ++i) {
-1034      pushProcedure(procs[i]);
-1035    }
-1036  }
-1037
-1038  private Procedure<TEnvironment> prepareProcedure(Procedure<TEnvironment> proc) {
-1039    Preconditions.checkArgument(proc.getState() == ProcedureState.INITIALIZING);
-1040    Preconditions.checkArgument(!proc.hasParent(), "unexpected parent", proc);
-1041    if (this.checkOwnerSet) {
-1042      Preconditions.checkArgument(proc.hasOwner(), "missing owner");
-1043    }
-1044    return proc;
-1045  }
-1046
-1047  private long pushProcedure(Procedure<TEnvironment> proc) {
-1048    final long currentProcId = proc.getProcId();
+977   * Bypass a procedure. If the procedure is set to bypass, all the logic in
+978   * execute/rollback will be ignored and it will return success, whatever.
+979   * It is used to recover buggy stuck procedures, releasing the lock resources
+980   * and letting other procedures run. Bypassing one procedure (and its ancestors will
+981   * be bypassed automatically) may leave the cluster in a middle state, e.g. region
+982   * not assigned, or some hdfs files left behind. After getting rid of those stuck procedures,
+983   * the operators may have to do some clean up on hdfs or schedule some assign procedures
+984   * to let regions online. DO AT YOUR OWN RISK.
+985   * <p>
+986   * A procedure can be bypassed only if
+987   * 1. The procedure is in state of RUNNABLE, WAITING, WAITING_TIMEOUT
+988   * or it is a root procedure without any child.
+989   * 2. No other worker thread is executing it
+990   * 3. No child procedure has been submitted
+991   *
+992   * <p>
+993   * If all the requirements are met, the procedure and its ancestors will be
+994   * bypassed and persisted to WAL.
+995   *
+996   * <p>
+997   * If the procedure is in WAITING state, this will set it to RUNNABLE and add it to the run queue.
+998   * TODO: What about WAITING_TIMEOUT?
+999   * @param id the procedure id
+1000   * @param lockWait time to wait for the lock
+1001   * @param force if force set to true, we will bypass the procedure even if it is executing.
+1002   *          This is for procedures which 
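A hedged sketch of the eligibility rule enumerated in the javadoc above; the enum is trimmed to the states named there and the two boolean probes are stand-ins:

import java.util.EnumSet;

enum State { INITIALIZING, RUNNABLE, WAITING, WAITING_TIMEOUT, SUCCESS, FAILED }

// Illustrative check mirroring conditions 1-3 above.
static boolean canBypass(State state, boolean executingOnWorker, boolean hasChildren) {
  boolean stateOk =
      EnumSet.of(State.RUNNABLE, State.WAITING, State.WAITING_TIMEOUT).contains(state);
  return stateOk && !executingOnWorker && !hasChildren;
}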

[06/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RegionServerStoppedOnScannerOpenConnection.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RegionServerStoppedOnScannerOpenConnection.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RegionServerStoppedOnScannerOpenConnection.html
index f163123..6ab4ef2 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RegionServerStoppedOnScannerOpenConnection.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.RegionServerStoppedOnScannerOpenConnection.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":9,"i39":10,"i40":10,"i41":42,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":9,"i41":10,"i42":10,"i43":42,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -331,113 +331,121 @@ extends java.lang.Object
 getCurrentNrHRS()

+org.apache.hadoop.hbase.client.Hbck
+getHbck()
+
+org.apache.hadoop.hbase.client.Hbck
+getHbck(org.apache.hadoop.hbase.ServerName arg0)
+
 org.apache.hadoop.hbase.client.MasterKeepAliveConnection
 getMaster()

 org.apache.hadoop.hbase.client.RpcRetryingCallerFactory
 getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration arg0)

 org.apache.hadoop.hbase.client.NonceGenerator
 getNonceGenerator()

 (package private) int
 getNumberOfCachedRegionLocations(org.apache.hadoop.hbase.TableName arg0)

 org.apache.hadoop.hbase.HRegionLocation
 getRegionLocation(org.apache.hadoop.hbase.TableName arg0, byte[] arg1, boolean arg2)

 org.apache.hadoop.hbase.client.RegionLocator
 getRegionLocator(org.apache.hadoop.hbase.TableName arg0)

 (package private) org.apache.hadoop.hbase.ipc.RpcClient
 getRpcClient()

 org.apache.hadoop.hbase.ipc.RpcControllerFactory
 getRpcControllerFactory()

 org.apache.hadoop.hbase.client.RpcRetryingCallerFactory
 getRpcRetryingCallerFactory()

 org.apache.hadoop.hbase.client.ServerStatisticTracker
 getStatisticsTracker()

 org.apache.hadoop.hbase.client.Table
 getTable(org.apache.hadoop.hbase.TableName arg0)

 org.apache.hadoop.hbase.client.TableBuilder
 getTableBuilder(org.apache.hadoop.hbase.TableName arg0, java.util.concurrent.ExecutorService arg1)

 org.apache.hadoop.hbase.client.TableState
 getTableState(org.apache.hadoop.hbase.TableName arg0)

 boolean
 hasCellBlockSupport()

 (package private) static org.apache.hadoop.hbase.client.NonceGenerator
 injectNonceGeneratorForTesting(org.apache.hadoop.hbase.client.ClusterConnection arg0,
   org.apache.hadoop.hbase.client.NonceGenerator arg1)

 boolean
 isAborted()

 boolean
 isClosed()

 boolean
 isMasterRunning()
 Deprecated.

 boolean
 isTableAvailable(org.apache.hadoop.hbase.TableName arg0, byte[][] arg1)

 boolean
 isTableDisabled(org.apache.hadoop.hbase.TableName arg0)

 boolean
 isTableEnabled(org.apache.hadoop.hbase.TableName arg0)

 org.apache.hadoop.hbase.HRegionLocation
 locateRegion(byte[] arg0)

 org.apache.hadoop.hbase.HRegionLocation
 locateRegion(org.apache.hadoop.hbase.TableName arg0, byte[] arg1)

 org.apache.hadoop.hbase.RegionLocations
 locateRegion(org.apache.hadoop.hbase.TableName arg0, byte[] arg1, boolean arg2, boolean arg3)

 org.apache.hadoop.hbase.RegionLocations
 locateRegion(org.apache.hadoop.hbase.TableName arg0,
   

[06/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.html
index c6137d0..4ca69da 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ColumnCountGetFilter.html
@@ -29,118 +29,133 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-
-/**
- * Simple filter that returns first N columns on row only.
- * This filter was written to test filters in Get and as soon as it gets
- * its quota of columns, {@link #filterAllRemaining()} returns true.  This
- * makes this filter unsuitable as a Scan filter.
- */
-@InterfaceAudience.Public
-public class ColumnCountGetFilter extends FilterBase {
-  private int limit = 0;
-  private int count = 0;
-
-  public ColumnCountGetFilter(final int n) {
-    Preconditions.checkArgument(n >= 0, "limit be positive %s", n);
-    this.limit = n;
-  }
-
-  public int getLimit() {
-    return limit;
-  }
-
-  @Override
-  public boolean filterRowKey(Cell cell) throws IOException {
-    // Impl in FilterBase might do unnecessary copy for Off heap backed Cells.
-    if (filterAllRemaining()) return true;
-    return false;
-  }
-
-  @Override
-  public boolean filterAllRemaining() {
-    return this.count > this.limit;
-  }
-
-  @Deprecated
-  @Override
-  public ReturnCode filterKeyValue(final Cell c) {
-    return filterCell(c);
-  }
-
-  @Override
-  public ReturnCode filterCell(final Cell c) {
-    this.count++;
-    return filterAllRemaining() ? ReturnCode.NEXT_COL : ReturnCode.INCLUDE_AND_NEXT_COL;
-  }
-
-  @Override
-  public void reset() {
-    this.count = 0;
-  }
-
-  public static Filter createFilterFromArguments(ArrayList<byte[]> filterArguments) {
-    Preconditions.checkArgument(filterArguments.size() == 1,
-        "Expected 1 but got: %s", filterArguments.size());
-    int limit = ParseFilter.convertByteArrayToInt(filterArguments.get(0));
-    return new ColumnCountGetFilter(limit);
-  }
-
-  /**
-   * @return The filter serialized using pb
-   */
-  @Override
-  public byte[] toByteArray() {
-    FilterProtos.ColumnCountGetFilter.Builder builder =
-        FilterProtos.ColumnCountGetFilter.newBuilder();
-    builder.setLimit(this.limit);
-    return builder.build().toByteArray();
-  }
-
-  /**
-   * @param pbBytes A pb serialized {@link ColumnCountGetFilter} instance
-   * @return An instance of {@link ColumnCountGetFilter} made from <code>bytes</code>
-   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
-   * @see #toByteArray
-   */
-  public static ColumnCountGetFilter parseFrom(final byte[] pbBytes)
-      throws DeserializationException {
-    FilterProtos.ColumnCountGetFilter proto;
-    try {
-      proto = FilterProtos.ColumnCountGetFilter.parseFrom(pbBytes);
-    } catch (InvalidProtocolBufferException e) {
-      throw new DeserializationException(e);
-    }
-    return new ColumnCountGetFilter(proto.getLimit());
-  }
-
-  /**
-   * @param o the other filter to compare with
-   * @return true if and only if the fields of the filter that are serialized
-   * are equal to the corresponding fields in other.  Used for testing.
-   */
-  @Override
-  boolean areSerializedFieldsEqual(Filter o) {
-    if (o == this) return true;
-    if (!(o instanceof ColumnCountGetFilter)) return false;
-
-    ColumnCountGetFilter other = (ColumnCountGetFilter) o;
-    return this.getLimit() == other.getLimit();
-  }
-
-  @Override
-  public String toString() {
-    return this.getClass().getSimpleName() + " " + this.limit;
-  }
-}
+import java.util.Objects;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
+
+import 
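The page above only carries the generated source listing, so a hedged usage sketch of the filter it documents may help (table "t1" and row key "row1" are hypothetical, and a reachable cluster is assumed). As the class javadoc notes, the column quota is enforced per Get, which is what makes the filter unsuitable for Scans:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnCountGetFilterSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      Get get = new Get(Bytes.toBytes("row1"));
      // Return at most 5 columns of the row; once the quota is hit,
      // filterAllRemaining() flips to true and the Get stops.
      get.setFilter(new ColumnCountGetFilter(5));
      Result result = table.get(get);
      System.out.println("columns returned: " + result.size());
    }
  }
}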

[06/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
index 5d2c1df..7959886 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestCreateTableProcedure
+public class TestCreateTableProcedure
 extends TestTableDDLProcedureBase
 
 
@@ -166,10 +166,6 @@
 
-private static org.slf4j.Logger
-LOG
-
 org.junit.rules.TestName
 name
 
@@ -222,38 +218,34 @@
 
 void
-testMRegions()
-
-void
 testOnHDFSFailure()
 
 void
 testRecoveryAndDoubleExecution()
 
 void
 testRollbackAndDoubleExecution()
 
 private void
 testRollbackAndDoubleExecution(org.apache.hadoop.hbase.client.TableDescriptorBuilder builder)
 
 void
 testRollbackAndDoubleExecutionOnMobTable()
 
 void
 testSimpleCreate()
 
 private void
 testSimpleCreate(org.apache.hadoop.hbase.TableName tableName,
     byte[][] splitKeys)
 
 void
 testSimpleCreateWithSplits()
 
@@ -292,16 +284,7 @@
 
 CLASS_RULE
-public static final HBaseClassTestRule CLASS_RULE
-
-LOG
-private static final org.slf4j.Logger LOG
+public static final HBaseClassTestRule CLASS_RULE
 
@@ -310,7 +293,7 @@
 
 F1
 private static final String F1
 
 See Also:
 Constant Field Values
 
@@ -323,7 +306,7 @@
 
 F2
 private static final String F2
 
 See Also:
 Constant Field Values
 
@@ -336,7 +319,7 @@
 
 name
 public org.junit.rules.TestName name
 
@@ -353,7 +336,7 @@
 
 TestCreateTableProcedure
 public TestCreateTableProcedure()
 
@@ -370,7 +353,7 @@
 
 testSimpleCreate
 public void testSimpleCreate()
     throws Exception
 
 Throws:
 Exception
 
@@ -384,7 +367,7 @@
 
 testSimpleCreateWithSplits
 public void testSimpleCreateWithSplits()
     throws Exception
 
 Throws:
 Exception
 
@@ -398,7 +381,7 @@
 
 testSimpleCreate
 private void testSimpleCreate(org.apache.hadoop.hbase.TableName tableName,
     byte[][] splitKeys)
     throws Exception
 
@@ -413,7 +396,7 @@
 
 testCreateWithoutColumnFamily
 public void testCreateWithoutColumnFamily()
     throws Exception
 
 Throws:
 Exception
 
@@ -427,7 +410,7 @@
 
 testCreateExisting
 public void testCreateExisting()
     throws Exception
 
 Throws:
 Exception
 
@@ -441,7 +424,7 @@
 
 testRecoveryAndDoubleExecution
 public void testRecoveryAndDoubleExecution()
     throws 

[06/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.Flow.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.Flow.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.Flow.html
index 1a5a90b..bc44c38 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.Flow.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.Flow.html
@@ -115,6 +115,16 @@
 
 
 
+private StateMachineProcedure.Flow
+TransitRegionStateProcedure.confirmClosed(MasterProcedureEnv env,
+    RegionStateNode regionNode)
+
+private StateMachineProcedure.Flow
+TransitRegionStateProcedure.confirmOpened(MasterProcedureEnv env,
+    RegionStateNode regionNode)
+
 protected StateMachineProcedure.Flow
 GCMergedRegionsProcedure.executeFromState(MasterProcedureEnv env,
     org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.GCMergedRegionsState state)
@@ -132,10 +142,17 @@
 
 protected StateMachineProcedure.Flow
 MoveRegionProcedure.executeFromState(MasterProcedureEnv env,
-    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState state)
+    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState state)
+Deprecated.
+
+protected StateMachineProcedure.Flow
+TransitRegionStateProcedure.executeFromState(MasterProcedureEnv env,
+    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionStateTransitionState state)
+
 protected StateMachineProcedure.Flow
 SplitTableRegionProcedure.executeFromState(MasterProcedureEnv env,
     org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState state)
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
index ce402ec..dd470ad 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/StateMachineProcedure.html
@@ -131,7 +131,9 @@
 
 class
 MoveRegionProcedure
-Procedure that implements a RegionPlan.
+Deprecated.
+Do not use any more.
 
@@ -140,6 +142,12 @@
 The procedure to split a region in a table.
 
+class
+TransitRegionStateProcedure
+The procedure to deal with the state transition of a region.
+
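Every row above returns StateMachineProcedure.Flow from executeFromState(). A dependency-free sketch of that contract (DemoProcedure and its states are hypothetical, not HBase classes): the executor keeps re-invoking the method while it answers HAS_MORE_STATE and retires the procedure on NO_MORE_STATE.

public class DemoProcedure {
  enum Flow { HAS_MORE_STATE, NO_MORE_STATE }
  enum State { OPEN, CONFIRM }

  private State state = State.OPEN;

  Flow executeFromState(State state) {
    switch (state) {
      case OPEN:
        System.out.println("open region (hypothetical work)");
        this.state = State.CONFIRM;   // setNextState(...) in the real API
        return Flow.HAS_MORE_STATE;   // ask to be scheduled again
      case CONFIRM:
        System.out.println("confirm opened (hypothetical work)");
        return Flow.NO_MORE_STATE;    // procedure is done
      default:
        throw new IllegalStateException("unhandled state " + state);
    }
  }

  public static void main(String[] args) {
    DemoProcedure p = new DemoProcedure();
    while (p.executeFromState(p.state) == Flow.HAS_MORE_STATE) {
      // the ProcedureExecutor would re-schedule the procedure here
    }
  }
}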
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index bd09b84..22decce 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -216,11 +216,11 @@
 
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.procedure2.RootProcedureState.State
+org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
 org.apache.hadoop.hbase.procedure2.LockedResourceType
-org.apache.hadoop.hbase.procedure2.LockType
 org.apache.hadoop.hbase.procedure2.Procedure.LockState
-org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
+org.apache.hadoop.hbase.procedure2.RootProcedureState.State
+org.apache.hadoop.hbase.procedure2.LockType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index c5b39c0..f767f0e 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -229,13 +229,13 @@
 
java.lang.Enum<E> (implements java.lang.

[06/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
@@ -468,15 +468,15 @@
    * creating it if necessary.
    * @param logEntry
    * @param fileNameBeingSplit the file being split currently. Used to generate tmp file name.
-   * @param conf
-   * @return Path to file into which to dump split log edits.
-   * @throws IOException
-   */
-  @SuppressWarnings("deprecation")
-  @VisibleForTesting
-  static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-      Configuration conf)
-  throws IOException {
+   * @param tmpDirName of the directory used to sideline old recovered edits file
+   * @param conf
+   * @return Path to file into which to dump split log edits.
+   * @throws IOException
+   */
+  @SuppressWarnings("deprecation")
+  @VisibleForTesting
+  static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+      String tmpDirName, Configuration conf) throws IOException {
     FileSystem fs = FileSystem.get(conf);
     Path rootDir = FSUtils.getRootDir(conf);
     Path tableDir = FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
       return null;
     }
     if (fs.exists(dir) && fs.isFile(dir)) {
-      Path tmp = new Path("/tmp");
+      Path tmp = new Path(tmpDirName);
       if (!fs.exists(tmp)) {
         fs.mkdirs(tmp);
       }
@@ -1520,411 +1520,413 @@
    * @return a path with a write for that path. caller should close.
    */
   WriterAndPath createWAP(byte[] region, Entry entry) throws IOException {
-    Path regionedits = getRegionSplitEditsPath(entry,
-        fileBeingSplit.getPath().getName(), conf);
-    if (regionedits == null) {
-      return null;
-    }
-    FileSystem rootFs = FileSystem.get(conf);
-    if (rootFs.exists(regionedits)) {
-      LOG.warn("Found old edits file. It could be the "
-          + "result of a previous failed split attempt. Deleting " + regionedits + ", length="
-          + rootFs.getFileStatus(regionedits).getLen());
-      if (!rootFs.delete(regionedits, false)) {
-        LOG.warn("Failed delete of old {}", regionedits);
-      }
-    }
-    Writer w = createWriter(regionedits);
-    LOG.debug("Creating writer path={}", regionedits);
-    return new WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-  }
-
-  void filterCellByStore(Entry logEntry) {
-    Map<byte[], Long> maxSeqIdInStores =
-        regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-    if (MapUtils.isEmpty(maxSeqIdInStores)) {
-      return;
-    }
-    // Create the array list for the cells that aren't filtered.
-    // We make the assumption that most cells will be kept.
-    ArrayList<Cell> keptCells = new ArrayList<>(logEntry.getEdit().getCells().size());
-    for (Cell cell : logEntry.getEdit().getCells()) {
-      if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-        keptCells.add(cell);
-      } else {
-        byte[] family = CellUtil.cloneFamily(cell);
-        Long maxSeqId = maxSeqIdInStores.get(family);
-        // Do not skip cell even if maxSeqId is null. Maybe we are in a rolling upgrade,
-        // or the master was crashed before and we can not get the information.
-        if (maxSeqId == null || maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-          keptCells.add(cell);
-        }
-      }
-    }
-
-    // Anything in the keptCells array list is still live.
-    // So rather than removing the cells from the array list
-    // which would be an O(n^2) operation, we just replace the list
-    logEntry.getEdit().setCells(keptCells);
-  }
-
-  @Override
-  public void append(RegionEntryBuffer buffer) throws IOException {
-    appendBuffer(buffer, true);
-  }
-
-  WriterAndPath appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException {
-    List<Entry> entries = buffer.entryBuffer;
-    if (entries.isEmpty()) {
-      LOG.warn("got an empty buffer, skipping");
-      return null;
[06/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html 
b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
index 00c8bf0..1e87652 100644
--- a/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
+++ b/testapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":42,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":42,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class MiniHBaseCluster
+public class MiniHBaseCluster
 extends HBaseCluster
 This class creates a single process HBase cluster.
  each server.  The master uses the 'default' FileSystem.  The RegionServers,
@@ -416,38 +416,45 @@
 
 void
+killNameNode(ServerName serverName)
+Kills the namenode process if this is a distributed cluster, otherwise, this causes master to
+ exit doing basic clean up only.
+
+void
 killRegionServer(ServerName serverName)
 Kills the region server process if this is a distributed cluster, otherwise
  this causes the region server to exit doing basic clean up only.
 
 void
 killZkNode(ServerName serverName)
 Kills the zookeeper node process if this is a distributed cluster, otherwise,
  this causes master to exit doing basic clean up only.
 
 void
 shutdown()
 Shut down the mini HBase cluster
 
 void
 startDataNode(ServerName serverName)
 Starts a new datanode on the given hostname or if this is a mini/local cluster,
  silently logs warning message.
 
 JVMClusterUtil.MasterThread
 startMaster()
 Starts a master thread running
 
 void
 startMaster(String hostname, int port)
@@ -455,13 +462,20 @@
 
+void
+startNameNode(ServerName serverName)
+Starts a new namenode on the given hostname or if this is a mini/local cluster, silently logs
+ warning message.
+
 JVMClusterUtil.RegionServerThread
 startRegionServer()
 Starts a region server thread running
 
 void
 startRegionServer(String hostname, int port)
@@ -469,13 +483,13 @@
 
 JVMClusterUtil.RegionServerThread
 startRegionServerAndWait(long timeout)
 Starts a region server thread and waits until its processed by master.
 
 void
 startZkNode(String hostname, int port)
@@ -483,120 +497,140 @@
 
 void
 stopDataNode(ServerName serverName)
 Stops the datanode if this is a distributed cluster, otherwise
  silently logs warning message.
 
 JVMClusterUtil.MasterThread
 stopMaster(int serverNumber)
 Shut down the specified master cleanly
 
 JVMClusterUtil.MasterThread
 stopMaster(int serverNumber, boolean shutdownFS)
 Shut down the specified master cleanly
 
 void
 stopMaster(ServerName serverName)
 Stops the given master, by attempting a gradual stop.
 
+void
+stopNameNode(ServerName serverName)
+Stops the namenode if this is a distributed cluster, otherwise silently logs warning message.
+
 JVMClusterUtil.RegionServerThread
 stopRegionServer(int serverNumber)
 Shut down the specified region server cleanly
 

[06/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 95f2a65..073d0d0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -931,7 +931,7 @@
     InitMetaProcedure initMetaProc = null;
     if (assignmentManager.getRegionStates().getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO)
       .isOffline()) {
-      Optional<Procedure<?>> optProc = procedureExecutor.getProcedures().stream()
+      Optional<Procedure<MasterProcedureEnv>> optProc = procedureExecutor.getProcedures().stream()
         .filter(p -> p instanceof InitMetaProcedure).findAny();
       if (optProc.isPresent()) {
         initMetaProc = (InitMetaProcedure) optProc.get();
@@ -3210,566 +3210,567 @@
       cpHost.preGetProcedures();
     }
 
-    final List<Procedure<?>> procList = this.procedureExecutor.getProcedures();
-
-    if (cpHost != null) {
-      cpHost.postGetProcedures(procList);
-    }
-
-    return procList;
-  }
-
-  @Override
-  public List<LockedResource> getLocks() throws IOException {
-    if (cpHost != null) {
-      cpHost.preGetLocks();
-    }
-
-    MasterProcedureScheduler procedureScheduler =
-      procedureExecutor.getEnvironment().getProcedureScheduler();
-
-    final List<LockedResource> lockedResources = procedureScheduler.getLocks();
-
-    if (cpHost != null) {
-      cpHost.postGetLocks(lockedResources);
-    }
-
-    return lockedResources;
-  }
-
-  /**
-   * Returns the list of table descriptors that match the specified request
-   * @param namespace the namespace to query, or null if querying for all
-   * @param regex The regular expression to match against, or null if querying for all
-   * @param tableNameList the list of table names, or null if querying for all
-   * @param includeSysTables False to match only against userspace tables
-   * @return the list of table descriptors
-   */
-  public List<TableDescriptor> listTableDescriptors(final String namespace, final String regex,
-      final List<TableName> tableNameList, final boolean includeSysTables)
-  throws IOException {
-    List<TableDescriptor> htds = new ArrayList<>();
-    if (cpHost != null) {
-      cpHost.preGetTableDescriptors(tableNameList, htds, regex);
-    }
-    htds = getTableDescriptors(htds, namespace, regex, tableNameList, includeSysTables);
-    if (cpHost != null) {
-      cpHost.postGetTableDescriptors(tableNameList, htds, regex);
-    }
-    return htds;
-  }
-
-  /**
-   * Returns the list of table names that match the specified request
-   * @param regex The regular expression to match against, or null if querying for all
-   * @param namespace the namespace to query, or null if querying for all
-   * @param includeSysTables False to match only against userspace tables
-   * @return the list of table names
-   */
-  public List<TableName> listTableNames(final String namespace, final String regex,
-      final boolean includeSysTables) throws IOException {
-    List<TableDescriptor> htds = new ArrayList<>();
-    if (cpHost != null) {
-      cpHost.preGetTableNames(htds, regex);
-    }
-    htds = getTableDescriptors(htds, namespace, regex, null, includeSysTables);
-    if (cpHost != null) {
-      cpHost.postGetTableNames(htds, regex);
-    }
-    List<TableName> result = new ArrayList<>(htds.size());
-    for (TableDescriptor htd: htds) result.add(htd.getTableName());
-    return result;
-  }
-
-  /**
-   * @return list of table table descriptors after filtering by regex and whether to include system
-   *    tables, etc.
-   * @throws IOException
-   */
-  private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds,
-      final String namespace, final String regex, final List<TableName> tableNameList,
-      final boolean includeSysTables)
-  throws IOException {
-    if (tableNameList == null || tableNameList.isEmpty()) {
-      // request for all TableDescriptors
-      Collection<TableDescriptor> allHtds;
-      if (namespace != null && namespace.length() > 0) {
-        // Do a check on the namespace existence. Will fail if does not exist.
-        this.clusterSchemaService.getNamespace(namespace);
-        allHtds = tableDescriptors.getByNamespace(namespace).values();
-      } else {
-        allHtds = 
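On the client side, the master-side listing above (namespace/regex filtering wrapped in pre/post coprocessor hooks) is reached through Admin; a hedged sketch (the pattern "my_.*" is hypothetical and a reachable cluster is assumed):

import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class ListTablesSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // regex filter, userspace tables only (includeSysTables = false)
      List<TableDescriptor> tds =
          admin.listTableDescriptors(Pattern.compile("my_.*"), false);
      tds.forEach(td -> System.out.println(td.getTableName()));
    }
  }
}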

[06/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
@@ -540,1205 +540,1204 @@
     sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
       Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
       if (rLoads == null) {
-        // There was nothing there
-        rLoads = new ArrayDeque<>();
-      } else if (rLoads.size() >= numRegionLoadsToRemember) {
-        rLoads.remove();
-      }
-      rLoads.add(new BalancerRegionLoad(rm));
-      loads.put(Bytes.toString(regionName), rLoads);
-    });
-  });
-
-    for (CostFromRegionLoadFunction cost : regionLoadFunctions) {
-      cost.setLoads(loads);
-    }
-  }
-
-  protected void initCosts(Cluster cluster) {
-    for (CostFunction c : costFunctions) {
-      c.init(cluster);
-    }
-  }
-
-  protected void updateCostsWithAction(Cluster cluster, Action action) {
-    for (CostFunction c : costFunctions) {
-      c.postAction(action);
-    }
-  }
-
-  /**
-   * Get the names of the cost functions
-   */
-  public String[] getCostFunctionNames() {
-    if (costFunctions == null) return null;
-    String[] ret = new String[costFunctions.length];
-    for (int i = 0; i < costFunctions.length; i++) {
-      CostFunction c = costFunctions[i];
-      ret[i] = c.getClass().getSimpleName();
-    }
-
-    return ret;
-  }
-
-  /**
-   * This is the main cost function.  It will compute a cost associated with a proposed cluster
-   * state.  All different costs will be combined with their multipliers to produce a double cost.
-   *
-   * @param cluster The state of the cluster
-   * @param previousCost the previous cost. This is used as an early out.
-   * @return a double of a cost associated with the proposed cluster state.  This cost is an
-   *     aggregate of all individual cost functions.
-   */
-  protected double computeCost(Cluster cluster, double previousCost) {
-    double total = 0;
-
-    for (int i = 0; i < costFunctions.length; i++) {
-      CostFunction c = costFunctions[i];
-      this.tempFunctionCosts[i] = 0.0;
-
-      if (c.getMultiplier() <= 0) {
-        continue;
-      }
-
-      Float multiplier = c.getMultiplier();
-      Double cost = c.cost();
-
-      this.tempFunctionCosts[i] = multiplier * cost;
-      total += this.tempFunctionCosts[i];
-
-      if (total > previousCost) {
-        break;
-      }
-    }
-
-    return total;
-  }
-
-  /** Generates a candidate action to be applied to the cluster for cost function search */
-  abstract static class CandidateGenerator {
-    abstract Cluster.Action generate(Cluster cluster);
-
-    /**
-     * From a list of regions pick a random one. Null can be returned which
-     * {@link StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region move
-     * rather than swap.
-     *
-     * @param cluster The state of the cluster
-     * @param server index of the server
-     * @param chanceOfNoSwap Chance that this will decide to try a move rather
-     *          than a swap.
-     * @return a random {@link RegionInfo} or null if an asymmetrical move is
-     *     suggested.
-     */
-    protected int pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-      // Check to see if this is just a move.
-      if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
-        // signal a move only.
-        return -1;
-      }
-      int rand = RANDOM.nextInt(cluster.regionsPerServer[server].length);
-      return cluster.regionsPerServer[server][rand];
-    }
-
-    protected int pickRandomServer(Cluster cluster) {
-      if (cluster.numServers < 1) {
-        return -1;
-      }
-
-      return RANDOM.nextInt(cluster.numServers);
-    }
-
-    protected int pickRandomRack(Cluster cluster) {
-      if (cluster.numRacks < 1) {
-        return -1;
-      }
-
-      return RANDOM.nextInt(cluster.numRacks);
-    }
-
-    protected int 
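A dependency-free sketch of the weighted-sum-with-early-out logic in computeCost() above: each cost function's value is scaled by its multiplier, and the loop stops as soon as the running total already exceeds the previous cluster state's cost. The arrays are hypothetical stand-ins for the CostFunction objects.

public class ComputeCostSketch {
  static double computeCost(double[] multipliers, double[] costs, double previousCost) {
    double total = 0;
    for (int i = 0; i < costs.length; i++) {
      if (multipliers[i] <= 0) {
        continue;               // disabled cost functions are skipped
      }
      total += multipliers[i] * costs[i];
      if (total > previousCost) {
        break;                  // early out: candidate is already worse
      }
    }
    return total;
  }

  public static void main(String[] args) {
    double[] multipliers = {500, 5, 0};  // e.g. region-count, locality, disabled
    double[] costs = {0.2, 0.4, 0.9};
    System.out.println(computeCost(multipliers, costs, 150));  // prints 102.0
  }
}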

[06/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
--
diff --git 
a/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html 
b/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
index a652985..0a58d3f 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/DoNotRetryIOException.html
@@ -1,10 +1,10 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
-类 org.apache.hadoop.hbase.DoNotRetryIOException的使用 (Apache 
HBase 3.0.0-SNAPSHOT API)
+Uses of Class org.apache.hadoop.hbase.DoNotRetryIOException (Apache 
HBase 3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个
-下一个
+Prev
+Next
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 
 
-

类的使用
org.apache.hadoop.hbase.DoNotRetryIOException

+

Uses of Class
org.apache.hadoop.hbase.DoNotRetryIOException

使用DoNotRetryIOException的程序包 (Packages that use DoNotRetryIOException)
@@ -89,33 +89,13 @@
@@ -152,78 +132,78 @@
 Coprocessors are code that runs in-process on each region server.


    [06/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/class-use/CompareOperator.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/class-use/CompareOperator.html 
    b/apidocs/org/apache/hadoop/hbase/class-use/CompareOperator.html
    index 0c69e4c..315944d 100644
    --- a/apidocs/org/apache/hadoop/hbase/class-use/CompareOperator.html
    +++ b/apidocs/org/apache/hadoop/hbase/class-use/CompareOperator.html
    @@ -1,10 +1,10 @@
     http://www.w3.org/TR/html4/loose.dtd;>
     
    -
    +
     
     
     
    -Uses of Class org.apache.hadoop.hbase.CompareOperator (Apache HBase 
    3.0.0-SNAPSHOT API)
    +类 org.apache.hadoop.hbase.CompareOperator的使用 (Apache HBase 
    3.0.0-SNAPSHOT API)
     
     
     
    @@ -12,7 +12,7 @@
     
     
     
    -JavaScript is disabled on your browser.
    +您的浏览器已禁用 JavaScript。
     
     
     
     
     
    -Skip navigation links
    +跳过导航链接
     
     
     
    -
    -Overview
    -Package
    -Class
    -Use
    -Tree
    -Deprecated
    -Index
    -Help
    +
    +概览
    +程序包
+类
    +使用
    +树
    +已过时
    +索引
    +帮助
     
     
     
     
    -Prev
    -Next
    +上一个
    +下一个
     
     
    -Frames
    -NoFrames
    +框架
    +无框架
     
     
    -AllClasses
    +所有类
     
     
     

    [06/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.html
    index f118c08..cd9722f 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.html
@@ -206,140 +206,175 @@
    */
   public static int validateBlockAddition(Cacheable existing, Cacheable newBlock,
       BlockCacheKey cacheKey) {
-    int comparison = compareCacheBlock(existing, newBlock, true);
+    int comparison = compareCacheBlock(existing, newBlock, false);
     if (comparison != 0) {
-      LOG.warn("Cached block contents differ, trying to just compare the block contents " +
-          "without the next block. CacheKey: " + cacheKey);
-
-      // compare the contents, if they are not equal, we are in big trouble
-      int comparisonWithoutNextBlockMetadata = compareCacheBlock(existing, newBlock, false);
-
-      if (comparisonWithoutNextBlockMetadata != 0) {
-        throw new RuntimeException("Cached block contents differ, which should not have happened."
-            + "cacheKey:" + cacheKey);
-      }
-    }
-    return comparison;
-  }
-
-  /**
-   * Use one of these to keep a running account of cached blocks by file.  Throw it away when done.
-   * This is different than metrics in that it is stats on current state of a cache.
-   * See getLoadedCachedBlocksByFile
-   */
-  @JsonIgnoreProperties({"cachedBlockStatsByFile"})
-  public static class CachedBlocksByFile {
-    private int count;
-    private int dataBlockCount;
-    private long size;
-    private long dataSize;
-    private final long now = System.nanoTime();
-    /**
-     * How many blocks to look at before we give up.
-     * There could be many millions of blocks. We don't want the
-     * ui to freeze while we run through 1B blocks... users will
-     * think hbase dead. UI displays warning in red when stats
-     * are incomplete.
-     */
-    private final int max;
-    public static final int DEFAULT_MAX = 100;
-
-    CachedBlocksByFile() {
-      this(null);
-    }
-
-    CachedBlocksByFile(final Configuration c) {
-      this.max = c == null? DEFAULT_MAX: c.getInt("hbase.ui.blockcache.by.file.max", DEFAULT_MAX);
-    }
-
-    /**
-     * Map by filename. use concurent utils because we want our Map and contained blocks sorted.
-     */
-    private NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile =
-        new ConcurrentSkipListMap<>();
-    FastLongHistogram hist = new FastLongHistogram();
-
-    /**
-     * @param cb
-     * @return True if full if we won't be adding any more.
-     */
-    public boolean update(final CachedBlock cb) {
-      if (isFull()) return true;
-      NavigableSet<CachedBlock> set = this.cachedBlockByFile.get(cb.getFilename());
-      if (set == null) {
-        set = new ConcurrentSkipListSet<>();
-        this.cachedBlockByFile.put(cb.getFilename(), set);
-      }
-      set.add(cb);
-      this.size += cb.getSize();
-      this.count++;
-      BlockType bt = cb.getBlockType();
-      if (bt != null && bt.isData()) {
-        this.dataBlockCount++;
-        this.dataSize += cb.getSize();
-      }
-      long age = (this.now - cb.getCachedTime())/NANOS_PER_SECOND;
-      this.hist.add(age, 1);
-      return false;
-    }
-
-    /**
-     * @return True if full; i.e. there are more items in the cache but we only loaded up
-     * the maximum set in configuration <code>hbase.ui.blockcache.by.file.max</code>
-     * (Default: DEFAULT_MAX).
-     */
-    public boolean isFull() {
-      return this.count >= this.max;
-    }
-
-    public NavigableMap<String, NavigableSet<CachedBlock>> getCachedBlockStatsByFile() {
-      return this.cachedBlockByFile;
-    }
-
-    /**
-     * @return count of blocks in the cache
-     */
-    public int getCount() {
-      return count;
-    }
-
-    public int getDataCount() {
-      return dataBlockCount;
-    }
-
-    /**
-     * @return size of blocks in the cache
-     */
-    public long getSize() {
-      return size;
-    }
-
-    /**
-     * @return Size of data.
-     */
-    public long getDataSize() {
-      return dataSize;
-    }
-
-    public AgeSnapshot getAgeInCacheSnapshot() {
-      return new AgeSnapshot(this.hist);
-    }
-
-    @Override

    [06/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
    index c10cfbf..a3e2f4a 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
@@ -3371,7 +3371,7 @@
     private V result = null;
 
     private final HBaseAdmin admin;
-    private final Long procId;
+    protected final Long procId;
 
     public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
       this.admin = admin;
@@ -3653,653 +3653,651 @@
      * @return a description of the operation
      */
     protected String getDescription() {
-      return "Operation: " + getOperationType() + ", "
-          + "Table Name: " + tableName.getNameWithNamespaceInclAsString();
-
-    }
-
-    protected abstract class TableWaitForStateCallable implements WaitForStateCallable {
-      @Override
-      public void throwInterruptedException() throws InterruptedIOException {
-        throw new InterruptedIOException("Interrupted while waiting for operation: "
-            + getOperationType() + " on table: " + tableName.getNameWithNamespaceInclAsString());
-      }
-
-      @Override
-      public void throwTimeoutException(long elapsedTime) throws TimeoutException {
-        throw new TimeoutException("The operation: " + getOperationType() + " on table: " +
-            tableName.getNameAsString() + " has not completed after " + elapsedTime + "ms");
-      }
-    }
-
-    @Override
-    protected V postOperationResult(final V result, final long deadlineTs)
-        throws IOException, TimeoutException {
-      LOG.info(getDescription() + " completed");
-      return super.postOperationResult(result, deadlineTs);
-    }
-
-    @Override
-    protected V postOperationFailure(final IOException exception, final long deadlineTs)
-        throws IOException, TimeoutException {
-      LOG.info(getDescription() + " failed with " + exception.getMessage());
-      return super.postOperationFailure(exception, deadlineTs);
-    }
-
-    protected void waitForTableEnabled(final long deadlineTs)
-        throws IOException, TimeoutException {
-      waitForState(deadlineTs, new TableWaitForStateCallable() {
-        @Override
-        public boolean checkState(int tries) throws IOException {
-          try {
-            if (getAdmin().isTableAvailable(tableName)) {
-              return true;
-            }
-          } catch (TableNotFoundException tnfe) {
-            LOG.debug("Table " + tableName.getNameWithNamespaceInclAsString()
-                + " was not enabled, sleeping. tries=" + tries);
-          }
-          return false;
-        }
-      });
-    }
-
-    protected void waitForTableDisabled(final long deadlineTs)
-        throws IOException, TimeoutException {
-      waitForState(deadlineTs, new TableWaitForStateCallable() {
-        @Override
-        public boolean checkState(int tries) throws IOException {
-          return getAdmin().isTableDisabled(tableName);
-        }
-      });
-    }
-
-    protected void waitTableNotFound(final long deadlineTs)
-        throws IOException, TimeoutException {
-      waitForState(deadlineTs, new TableWaitForStateCallable() {
-        @Override
-        public boolean checkState(int tries) throws IOException {
-          return !getAdmin().tableExists(tableName);
-        }
-      });
-    }
-
-    protected void waitForSchemaUpdate(final long deadlineTs)
-        throws IOException, TimeoutException {
-      waitForState(deadlineTs, new TableWaitForStateCallable() {
-        @Override
-        public boolean checkState(int tries) throws IOException {
-          return getAdmin().getAlterStatus(tableName).getFirst() == 0;
-        }
-      });
-    }
-
-    protected void waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
-        throws IOException, TimeoutException {
-      final TableDescriptor desc = getTableDescriptor();
-      final AtomicInteger actualRegCount = new AtomicInteger(0);
-      final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
-
    [06/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.html
    index 6763db0..88d6b76 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.html
@@ -284,14 +284,14 @@ extends ModifyPeerProcedure
-enablePeerBeforeFinish, executeFromState, getInitialState, getNewPeerConfig, getOldPeerConfig, getState, getStateId, nextStateAfterRefresh, rollbackState, setLastPushedSequenceId, setLastPushedSequenceIdForTable, updateLastPushedSequenceIdForSerialPeer
+enablePeerBeforeFinish, executeFromState, getInitialState, getNewPeerConfig, getOldPeerConfig, getState, getStateId, nextStateAfterRefresh, setLastPushedSequenceId, setLastPushedSequenceIdForTable, updateLastPushedSequenceIdForSerialPeer
 
 Methods inherited from class org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure
-acquireLock, getLatch, getPeerId, hasLock, holdLock, releaseLock
+acquireLock, getLatch, getPeerId, hasLock, holdLock, refreshPeer, releaseLock, rollbackState
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
    index 890dc44..894ce37 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":6,"i12":6,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":6};
    +var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":6,"i12":6,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":6};
     var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
     
     
     PrevClass
    -NextClass
    +NextClass
     
     
     Frames
@@ -325,40 +325,28 @@
 
 private void
-refreshPeer(MasterProcedureEnv env,
-    PeerProcedureInterface.PeerOperationType type)
-
-private void
 releaseLatch()
 
 private void
 reopenRegions(MasterProcedureEnv env)
 
-protected void
-rollbackState(MasterProcedureEnv env,
-    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState state)
-called to perform the rollback of the specified state
-
 protected void
 setLastPushedSequenceId(MasterProcedureEnv env, ReplicationPeerConfig peerConfig)
 
 protected void
 setLastPushedSequenceIdForTable(MasterProcedureEnv env, TableName tableName,
     java.util.Map<String,Long> lastSeqIds)
 
 protected void
 updateLastPushedSequenceIdForSerialPeer(MasterProcedureEnv env)
 
 protected abstract void
 updatePeerStorage(MasterProcedureEnv env)
 
@@ -368,7 +356,7 @@
 Methods inherited from class org.apache.hadoop.hbase.master.replication.AbstractPeerProcedure
-acquireLock, deserializeStateData, getLatch, getPeerId, hasLock, holdLock, releaseLock, serializeStateData
+acquireLock, deserializeStateData, getLatch, getPeerId, hasLock, holdLock, refreshPeer, releaseLock, rollbackState, serializeStateData
 
@@ -570,23 +558,13 @@
-
-refreshPeer
-private void refreshPeer(MasterProcedureEnv env,
-    PeerProcedureInterface.PeerOperationType type)
-
 
 getOldPeerConfig
 protected ReplicationPeerConfig getOldPeerConfig()
 
@@ -595,7 +573,7 @@
 
 getNewPeerConfig
 protected ReplicationPeerConfig getNewPeerConfig()
 
@@ -604,7 +582,7 @@
 
 updateLastPushedSequenceIdForSerialPeer
 protected void updateLastPushedSequenceIdForSerialPeer(MasterProcedureEnv env)
    

    [06/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.Node.Region.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.Node.Region.html
     
    b/devapidocs/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.Node.Region.html
    index 841fc14..3d0efba 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.Node.Region.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.Node.Region.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10};
    +var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10};
     var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
     
     
     
public static class StorageClusterStatusModel.Node.Region
extends java.lang.Object
implements java.io.Serializable
Represents a region hosted on a region server.
@@ -145,57 +145,61 @@
    @@ -145,57 +145,61 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Serializab
     
     
     private long
    -currentCompactedKVs
    +cpRequestsCount
     
     
    +private long
    +currentCompactedKVs
    +
    +
     private int
     memstoreSizeMB
     
    -
    +
     private byte[]
     name
     
    -
    +
     private long
     readRequestsCount
     
    -
    +
     private int
     rootIndexSizeKB
     
    -
    +
     private static long
     serialVersionUID
     
    -
    +
     private long
     storefileIndexSizeKB
     
    -
    +
     private int
     storefiles
     
    -
    +
     private int
     storefileSizeMB
     
    -
    +
     private int
     stores
     
    -
    +
     private long
     totalCompactingKVs
     
    -
    +
     private int
     totalStaticBloomSizeKB
     
    -
    +
     private int
     totalStaticIndexSizeKB
     
    -
    +
     private long
     writeRequestsCount
     
    @@ -224,13 +228,14 @@ implements https://docs.oracle.com/javase/8/docs/api/java/io/Serializab
     
     
     
    -Region(byte[]name,
    +Region(byte[]name,
       intstores,
       intstorefiles,
       intstorefileSizeMB,
       intmemstoreSizeMB,
       longstorefileIndexSizeKB,
       longreadRequestsCount,
    +  longcpRequestsCount,
       longwriteRequestsCount,
       introotIndexSizeKB,
       inttotalStaticIndexSizeKB,
@@ -257,105 +262,113 @@
 
 long
-getCurrentCompactedKVs()
+getCpRequestsCount()
+
+long
+getCurrentCompactedKVs()
 
 int
 getMemStoreSizeMB()
 
 byte[]
 getName()
 
 long
 getReadRequestsCount()
 
 int
 getRootIndexSizeKB()
 
 long
 getStorefileIndexSizeKB()
 
 int
 getStorefiles()
 
 int
 getStorefileSizeMB()
 
 int
 getStores()
 
 long
 getTotalCompactingKVs()
 
 int
 getTotalStaticBloomSizeKB()
 
 int
 getTotalStaticIndexSizeKB()
 
 long
 getWriteRequestsCount()
 
+void
+setCpRequestsCount(long cpRequestsCount)
+
 void
 setCurrentCompactedKVs(long currentCompactedKVs)
 
 void
 setMemStoreSizeMB(int memstoreSizeMB)
 
 void
 setName(byte[] name)
 
 void
 setReadRequestsCount(long readRequestsCount)
 
 void
 setRootIndexSizeKB(int rootIndexSizeKB)
 
 void
 setStorefileIndexSizeKB(long storefileIndexSizeKB)
 
 void
 setStorefiles(int storefiles)
 
 void
 setStorefileSizeMB(int storefileSizeMB)
 
 void
 setStores(int stores)
 
 void
 setTotalCompactingKVs(long totalCompactingKVs)
 
 void
 setTotalStaticBloomSizeKB(int totalStaticBloomSizeKB)
 
 void
 setTotalStaticIndexSizeKB(int totalStaticIndexSizeKB)
 
 void
 setWriteRequestsCount(long writeRequestsCount)
     
@@ -387,7 +400,7 @@
 
 serialVersionUID
 private static final long serialVersionUID
 
 See Also:
 Constant Field Values
 
@@ -400,7 +413,7 @@
 
 name
 private byte[] name
 
@@ -409,7 +422,7 @@
 
 stores
 private int stores
 
@@ -418,7 +431,7 @@
 
 storefiles
 private int storefiles
 
@@ 
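A hedged sketch instantiating the bean documented above; the parameter order past totalStaticIndexSizeKB is inferred from the field list (an assumption, since the constructor row is truncated), and every number is made up:

import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionModelSketch {
  public static void main(String[] args) {
    StorageClusterStatusModel.Node.Region region =
        new StorageClusterStatusModel.Node.Region(
            Bytes.toBytes("t1,,1.abc."),  // region name (hypothetical)
            1,     // stores
            2,     // storefiles
            128,   // storefileSizeMB
            64,    // memstoreSizeMB
            16L,   // storefileIndexSizeKB
            1000L, // readRequestsCount
            5L,    // cpRequestsCount (the field this change adds)
            500L,  // writeRequestsCount
            4,     // rootIndexSizeKB
            8,     // totalStaticIndexSizeKB
            2,     // totalStaticBloomSizeKB (assumed position)
            0L,    // totalCompactingKVs (assumed position)
            0L);   // currentCompactedKVs (assumed position)
    System.out.println(region.getCpRequestsCount());
  }
}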

    [06/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    index 74bacd8..546d2b6 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    @@ -2249,1468 +2249,1484 @@
 2241  }
 2242
 2243  @Override
-2244  public long addColumn(
-2245      final TableName tableName,
-2246      final ColumnFamilyDescriptor column,
-2247      final long nonceGroup,
-2248      final long nonce)
-2249  throws IOException {
-2250    checkInitialized();
-2251    checkTableExists(tableName);
-2252
-2253    TableDescriptor old = getTableDescriptors().get(tableName);
-2254    if (old.hasColumnFamily(column.getName())) {
-2255      throw new InvalidFamilyOperationException("Column family '" + column.getNameAsString()
-2256          + "' in table '" + tableName + "' already exists so cannot be added");
-2257    }
+2244  public long addColumn(final TableName tableName, final ColumnFamilyDescriptor column,
+2245      final long nonceGroup, final long nonce) throws IOException {
+2246    checkInitialized();
+2247    checkTableExists(tableName);
+2248
+2249    return modifyTable(tableName, new TableDescriptorGetter() {
+2250
+2251      @Override
+2252      public TableDescriptor get() throws IOException {
+2253        TableDescriptor old = getTableDescriptors().get(tableName);
+2254        if (old.hasColumnFamily(column.getName())) {
+2255          throw new InvalidFamilyOperationException("Column family '" + column.getNameAsString()
+2256              + "' in table '" + tableName + "' already exists so cannot be added");
+2257        }
 2258
-2259    TableDescriptor newDesc = TableDescriptorBuilder
-2260        .newBuilder(old).setColumnFamily(column).build();
-2261    return modifyTable(tableName, newDesc, nonceGroup, nonce);
+2259        return TableDescriptorBuilder.newBuilder(old).setColumnFamily(column).build();
+2260      }
+2261    }, nonceGroup, nonce);
 2262  }
 2263
-2264  @Override
-2265  public long modifyColumn(
-2266      final TableName tableName,
-2267      final ColumnFamilyDescriptor descriptor,
-2268      final long nonceGroup,
-2269      final long nonce)
-2270  throws IOException {
-2271    checkInitialized();
-2272    checkTableExists(tableName);
-2273
-2274    TableDescriptor old = getTableDescriptors().get(tableName);
-2275    if (! old.hasColumnFamily(descriptor.getName())) {
-2276      throw new InvalidFamilyOperationException("Family '" + descriptor.getNameAsString()
-2277          + "' does not exist, so it cannot be modified");
-2278    }
-2279
-2280    TableDescriptor td = TableDescriptorBuilder
-2281        .newBuilder(old)
-2282        .modifyColumnFamily(descriptor)
-2283        .build();
-2284
-2285    return modifyTable(tableName, td, nonceGroup, nonce);
-2286  }
-2287
-2288  @Override
-2289  public long deleteColumn(
-2290      final TableName tableName,
-2291      final byte[] columnName,
-2292      final long nonceGroup,
-2293      final long nonce)
-2294  throws IOException {
-2295    checkInitialized();
-2296    checkTableExists(tableName);
-2297
-2298    TableDescriptor old = getTableDescriptors().get(tableName);
-2299
-2300    if (! old.hasColumnFamily(columnName)) {
-2301      throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName)
-2302          + "' does not exist, so it cannot be deleted");
-2303    }
-2304    if (old.getColumnFamilyCount() == 1) {
-2305      throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName)
-2306          + "' is the only column family in the table, so it cannot be deleted");
-2307    }
-2308
-2309    TableDescriptor td = TableDescriptorBuilder
-2310        .newBuilder(old).removeColumnFamily(columnName).build();
-2311    return modifyTable(tableName, td, nonceGroup, nonce);
-2312  }
-2313
-2314  @Override
-2315  public long enableTable(final TableName tableName, final long nonceGroup, final long nonce)
-2316      throws IOException {
-2317    checkInitialized();
-2318
-2319    return MasterProcedureUtil.submitProcedure(
-2320        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-2321      @Override
-2322      protected void run() throws IOException {
-2323        getMaster().getMasterCoprocessorHost().preEnableTable(tableName);
-2324
-2325        // Normally, it would make sense for this authorization check to exist inside
-2326        // AccessController, but because the authorization check is done based on internal state
-2327        // (rather than explicit permissions) we'll do the check here instead of in the
-2328        // coprocessor.
-2329
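
The refactor above folds addColumn into a single modifyTable entry point that takes a callback computing the target descriptor at execution time. A minimal sketch of that shape, assuming a TableDescriptorGetter interface as in the diff; the surrounding abstract class and helper methods are hypothetical scaffolding, not HMaster itself:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

// Hypothetical skeleton: add/modify/delete of a column family can all funnel
// into one modifyTable(tableName, getter, nonceGroup, nonce) call.
abstract class ColumnOpsSketch {

  interface TableDescriptorGetter {
    TableDescriptor get() throws IOException;
  }

  // assumed helper: fetch the current descriptor for the table
  abstract TableDescriptor current(TableName tableName) throws IOException;

  // assumed entry point: runs the getter and submits the modify procedure
  abstract long modifyTable(TableName tableName, TableDescriptorGetter getter,
      long nonceGroup, long nonce) throws IOException;

  long addColumn(final TableName tableName, final ColumnFamilyDescriptor column,
      long nonceGroup, long nonce) throws IOException {
    return modifyTable(tableName, new TableDescriptorGetter() {
      @Override
      public TableDescriptor get() throws IOException {
        // The getter recomputes the target descriptor when the procedure runs.
        TableDescriptor old = current(tableName);
        return TableDescriptorBuilder.newBuilder(old).setColumnFamily(column).build();
      }
    }, nonceGroup, nonce);
  }
}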

    [06/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
    index 6b9e2a8..ba6e4aa 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/ServerManager.html
    @@ -33,1047 +33,1151 @@
 025import java.util.ArrayList;
 026import java.util.Collections;
 027import java.util.HashMap;
-028import java.util.HashSet;
-029import java.util.Iterator;
-030import java.util.List;
-031import java.util.Map;
-032import java.util.Map.Entry;
-033import java.util.Set;
-034import java.util.concurrent.ConcurrentHashMap;
-035import java.util.concurrent.ConcurrentNavigableMap;
-036import java.util.concurrent.ConcurrentSkipListMap;
-037import java.util.concurrent.CopyOnWriteArrayList;
-038import java.util.concurrent.atomic.AtomicBoolean;
-039import java.util.function.Predicate;
-040import org.apache.hadoop.conf.Configuration;
-041import org.apache.hadoop.hbase.ClockOutOfSyncException;
-042import org.apache.hadoop.hbase.HConstants;
-043import org.apache.hadoop.hbase.NotServingRegionException;
-044import org.apache.hadoop.hbase.RegionMetrics;
-045import org.apache.hadoop.hbase.ServerMetrics;
-046import org.apache.hadoop.hbase.ServerMetricsBuilder;
-047import org.apache.hadoop.hbase.ServerName;
-048import org.apache.hadoop.hbase.YouAreDeadException;
-049import org.apache.hadoop.hbase.client.ClusterConnection;
-050import org.apache.hadoop.hbase.client.RegionInfo;
-051import org.apache.hadoop.hbase.client.RetriesExhaustedException;
-052import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-053import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-054import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-055import org.apache.hadoop.hbase.regionserver.HRegionServer;
-056import org.apache.hadoop.hbase.util.Bytes;
-057import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-058import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-059import org.apache.yetus.audience.InterfaceAudience;
-060import org.apache.zookeeper.KeeperException;
-061import org.slf4j.Logger;
-062import org.slf4j.LoggerFactory;
-063
-064import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-065import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-066
-067import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-068import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-069import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
-070import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
-071import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
+028import java.util.Iterator;
+029import java.util.List;
+030import java.util.Map;
+031import java.util.Map.Entry;
+032import java.util.Set;
+033import java.util.concurrent.ConcurrentNavigableMap;
+034import java.util.concurrent.ConcurrentSkipListMap;
+035import java.util.concurrent.CopyOnWriteArrayList;
+036import java.util.concurrent.atomic.AtomicBoolean;
+037import java.util.function.Predicate;
+038import org.apache.hadoop.conf.Configuration;
+039import org.apache.hadoop.fs.FSDataInputStream;
+040import org.apache.hadoop.fs.FSDataOutputStream;
+041import org.apache.hadoop.fs.FileSystem;
+042import org.apache.hadoop.fs.Path;
+043import org.apache.hadoop.hbase.ClockOutOfSyncException;
+044import org.apache.hadoop.hbase.HConstants;
+045import org.apache.hadoop.hbase.NotServingRegionException;
+046import org.apache.hadoop.hbase.RegionMetrics;
+047import org.apache.hadoop.hbase.ScheduledChore;
+048import org.apache.hadoop.hbase.ServerMetrics;
+049import org.apache.hadoop.hbase.ServerMetricsBuilder;
+050import org.apache.hadoop.hbase.ServerName;
+051import org.apache.hadoop.hbase.YouAreDeadException;
+052import org.apache.hadoop.hbase.client.ClusterConnection;
+053import org.apache.hadoop.hbase.client.RegionInfo;
+054import org.apache.hadoop.hbase.client.RetriesExhaustedException;
+055import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+056import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+057import org.apache.hadoop.hbase.master.assignment.RegionStates;
+058import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+059import org.apache.hadoop.hbase.regionserver.HRegionServer;
+060import org.apache.hadoop.hbase.util.Bytes;
+061import org.apache.hadoop.hbase.util.FSUtils;
+062import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+063import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+064import org.apache.yetus.audience.InterfaceAudience;
+065import

    [06/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.html
    index 83c17c0..9df0225 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.html
    @@ -54,323 +54,362 @@
 046import org.apache.hadoop.io.IOUtils;
 047
 048import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
-049
+049import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 050
-051/**
-052 * Compression in this class is lifted off Compressor/KeyValueCompression.
-053 * This is a pure coincidence... they are independent and don't have to be compatible.
-054 *
-055 * This codec is used at server side for writing cells to WAL as well as for sending edits
-056 * as part of the distributed splitting process.
-057 */
-058@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC,
-059  HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG})
-060public class WALCellCodec implements Codec {
-061  /** Configuration key for the class to use when encoding cells in the WAL */
-062  public static final String WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec";
-063
-064  protected final CompressionContext compression;
-065  protected final ByteStringUncompressor statelessUncompressor = new ByteStringUncompressor() {
-066    @Override
-067    public byte[] uncompress(ByteString data, Dictionary dict) throws IOException {
-068      return WALCellCodec.uncompressByteString(data, dict);
-069    }
-070  };
-071
-072  /**
-073   * <b>All subclasses must implement a no argument constructor</b>
-074   */
-075  public WALCellCodec() {
-076    this.compression = null;
-077  }
-078
-079  /**
-080   * Default constructor - <b>all subclasses must implement a constructor with this signature</b>
-081   * if they are to be dynamically loaded from the {@link Configuration}.
-082   * @param conf configuration to configure <tt>this</tt>
-083   * @param compression compression the codec should support, can be <tt>null</tt> to indicate no
-084   *  compression
-085   */
-086  public WALCellCodec(Configuration conf, CompressionContext compression) {
-087    this.compression = compression;
-088  }
-089
-090  public static String getWALCellCodecClass(Configuration conf) {
-091    return conf.get(WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
-092  }
-093
-094  /**
-095   * Create and setup a {@link WALCellCodec} from the {@code cellCodecClsName} and
-096   * CompressionContext, if {@code cellCodecClsName} is specified.
-097   * Otherwise Cell Codec classname is read from {@link Configuration}.
-098   * Fully prepares the codec for use.
-099   * @param conf {@link Configuration} to read for the user-specified codec. If none is specified,
-100   *  uses a {@link WALCellCodec}.
-101   * @param cellCodecClsName name of codec
-102   * @param compression compression the codec should use
-103   * @return a {@link WALCellCodec} ready for use.
-104   * @throws UnsupportedOperationException if the codec cannot be instantiated
-105   */
-106
-107  public static WALCellCodec create(Configuration conf, String cellCodecClsName,
-108      CompressionContext compression) throws UnsupportedOperationException {
-109    if (cellCodecClsName == null) {
-110      cellCodecClsName = getWALCellCodecClass(conf);
-111    }
-112    return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]
-113        { Configuration.class, CompressionContext.class }, new Object[] { conf, compression });
-114  }
-115
-116  /**
-117   * Create and setup a {@link WALCellCodec} from the
-118   * CompressionContext.
-119   * Cell Codec classname is read from {@link Configuration}.
-120   * Fully prepares the codec for use.
-121   * @param conf {@link Configuration} to read for the user-specified codec. If none is specified,
-122   *  uses a {@link WALCellCodec}.
-123   * @param compression compression the codec should use
-124   * @return a {@link WALCellCodec} ready for use.
-125   * @throws UnsupportedOperationException if the codec cannot be instantiated
-126   */
-127  public static WALCellCodec create(Configuration conf,
-128      CompressionContext compression) throws UnsupportedOperationException {
-129    String cellCodecClsName = getWALCellCodecClass(conf);
-130    return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]
-131        { Configuration.class, CompressionContext.class }, new Object[] { conf, compression });
-132  }
-133
-134  public interface ByteStringCompressor {
-135    ByteString compress(byte[] data,
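
Per the constructor contract in the javadoc above, the codec class is read from hbase.regionserver.wal.codec and instantiated reflectively through a (Configuration, CompressionContext) constructor. A minimal sketch of plugging in a custom codec; MyWALCellCodec is a hypothetical name, not something shipped with HBase:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;

// Hypothetical custom codec: keeping this constructor signature is what lets
// WALCellCodec.create() instantiate the class reflectively.
public class MyWALCellCodec extends WALCellCodec {
  public MyWALCellCodec(Configuration conf, CompressionContext compression) {
    super(conf, compression);
  }

  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Point the WAL at the custom codec by class name.
    conf.set("hbase.regionserver.wal.codec", MyWALCellCodec.class.getName());
    WALCellCodec codec = WALCellCodec.create(conf, null); // null = no compression
    System.out.println(codec.getClass().getName());
  }
}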

    [06/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteNode.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteNode.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteNode.html
    index 594ef24..17d5c40 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteNode.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.RemoteNode.html
    @@ -170,241 +170,242 @@
 162  }
 163
 164  /**
-165   * Add a remote rpc. Be sure to check result for successful add.
+165   * Add a remote rpc.
 166   * @param key the node identifier
-167   * @return True if we successfully added the operation.
-168   */
-169  public boolean addOperationToNode(final TRemote key, RemoteProcedure rp) {
+167   */
+168  public void addOperationToNode(final TRemote key, RemoteProcedure rp)
+169      throws NullTargetServerDispatchException, NoServerDispatchException, NoNodeDispatchException {
 170    if (key == null) {
-171      // Key is remote server name. Be careful. It could have been nulled by a concurrent
-172      // ServerCrashProcedure shutting down outstanding RPC requests. See remoteCallFailed.
-173      return false;
-174    }
-175    assert key != null : "found null key for node";
-176    BufferNode node = nodeMap.get(key);
-177    if (node == null) {
-178      return false;
-179    }
-180    node.add(rp);
-181    // Check our node still in the map; could have been removed by #removeNode.
-182    return nodeMap.containsValue(node);
-183  }
-184
-185  /**
-186   * Remove a remote node
-187   * @param key the node identifier
-188   */
-189  public boolean removeNode(final TRemote key) {
-190    final BufferNode node = nodeMap.remove(key);
-191    if (node == null) return false;
-192    node.abortOperationsInQueue();
-193    return true;
-194  }
-195
-196  // ============================================================================================
-197  //  Task Helpers
-198  // ============================================================================================
-199  protected Future<Void> submitTask(Callable<Void> task) {
-200    return threadPool.submit(task);
-201  }
-202
-203  protected Future<Void> submitTask(Callable<Void> task, long delay, TimeUnit unit) {
-204    final FutureTask<Void> futureTask = new FutureTask<>(task);
-205    timeoutExecutor.add(new DelayedTask(futureTask, delay, unit));
-206    return futureTask;
-207  }
-208
-209  protected abstract void remoteDispatch(TRemote key, Set<RemoteProcedure> operations);
-210  protected abstract void abortPendingOperations(TRemote key, Set<RemoteProcedure> operations);
-211
-212  /**
-213   * Data structure with reference to remote operation.
-214   */
-215  public static abstract class RemoteOperation {
-216    private final RemoteProcedure remoteProcedure;
-217
-218    protected RemoteOperation(final RemoteProcedure remoteProcedure) {
-219      this.remoteProcedure = remoteProcedure;
-220    }
-221
-222    public RemoteProcedure getRemoteProcedure() {
-223      return remoteProcedure;
-224    }
-225  }
-226
-227  /**
-228   * Remote procedure reference.
-229   */
-230  public interface RemoteProcedure<TEnv, TRemote> {
-231    /**
-232     * For building the remote operation.
-233     */
-234    RemoteOperation remoteCallBuild(TEnv env, TRemote remote);
-235
-236    /**
-237     * Called when the executeProcedure call is failed.
-238     */
-239    void remoteCallFailed(TEnv env, TRemote remote, IOException exception);
-240
-241    /**
-242     * Called when RS tells the remote procedure is succeeded through the
-243     * {@code reportProcedureDone} method.
-244     */
-245    void remoteOperationCompleted(TEnv env);
-246
-247    /**
-248     * Called when RS tells the remote procedure is failed through the {@code reportProcedureDone}
-249     * method.
-250     */
-251    void remoteOperationFailed(TEnv env, RemoteProcedureException error);
-252  }
-253
-254  /**
-255   * Account of what procedures are running on remote node.
-256   * @param <TEnv>
-257   * @param <TRemote>
-258   */
-259  public interface RemoteNode<TEnv, TRemote> {
-260    TRemote getKey();
-261    void add(RemoteProcedure<TEnv, TRemote> operation);
-262    void dispatch();
-263  }
-264
-265  protected ArrayListMultimap<Class<?>, RemoteOperation> buildAndGroupRequestByType(final TEnv env,
-266      final TRemote remote, final Set<RemoteProcedure> remoteProcedures) {
-267    final ArrayListMultimap<Class<?>, RemoteOperation> requestByType = ArrayListMultimap.create();
-268    for (RemoteProcedure proc: remoteProcedures) {
-269      RemoteOperation operation = proc.remoteCallBuild(env, remote);
-270      requestByType.put(operation.getClass(),
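
The change above replaces addOperationToNode's boolean return with three checked exceptions, so a caller learns why a dispatch failed rather than just that it failed. A caller-side sketch with stand-in types; the exception names come from the diff, everything else here is simplified scaffolding around the real generics-heavy classes:

import java.io.IOException;

// Stand-in types only; illustrates handling the three dispatch failure modes.
class DispatchCallerSketch {
  static class NullTargetServerDispatchException extends IOException {}
  static class NoServerDispatchException extends IOException {}
  static class NoNodeDispatchException extends IOException {}

  interface Dispatcher<TRemote, TProc> {
    void addOperationToNode(TRemote key, TProc proc)
        throws NullTargetServerDispatchException, NoServerDispatchException,
               NoNodeDispatchException;
  }

  static <TRemote, TProc> void dispatch(Dispatcher<TRemote, TProc> d, TRemote key, TProc p) {
    try {
      d.addOperationToNode(key, p);
    } catch (NullTargetServerDispatchException e) {
      // key was null, e.g. cleared by a concurrent ServerCrashProcedure
    } catch (NoServerDispatchException e) {
      // no buffer node is registered for this server
    } catch (NoNodeDispatchException e) {
      // node disappeared between lookup and enqueue (see removeNode)
    }
  }
}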

    [06/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/org/apache/hadoop/hbase/client/HConnectionTestingUtility.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/HConnectionTestingUtility.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/HConnectionTestingUtility.html
    index 815b946..a976c88 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/client/HConnectionTestingUtility.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/client/HConnectionTestingUtility.html
@@ -243,7 +243,7 @@

 throws org.apache.hadoop.hbase.ZooKeeperConnectionException
 Get a Mocked ClusterConnection that goes with the passed conf
 configuration instance.  Minimally the mock will return
-conf when Connection.getConfiguration() is invoked.
+<code>conf</conf> when Connection.getConfiguration() is invoked.
 Be sure to shutdown the connection when done by calling
 Connection.close() else it will stick around; this is probably not what you want.

@@ -317,10 +317,9 @@
 Returns:
 ClusterConnection object for conf
 Throws:
-org.apache.hadoop.hbase.ZooKeeperConnectionException
+org.apache.hadoop.hbase.ZooKeeperConnectionException - [Dead link]: See also
+ {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
 IOException
-See Also:
-{http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
    -{http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.html
    index f49fab7..e2cfb1e 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.html
    @@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
     
     
     PrevClass
    -NextClass
    +NextClass
     
     
     Frames
    @@ -553,7 +553,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
     
     
     PrevClass
    -NextClass
    +NextClass
     
     
     Frames
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testdevapidocs/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.html
    new file mode 100644
    index 000..aa76488
    --- /dev/null
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.html
    @@ -0,0 +1,308 @@
+TestTableDescriptorUtils (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase.client
+Class TestTableDescriptorUtils
+
+java.lang.Object
+  org.apache.hadoop.hbase.client.TestTableDescriptorUtils
+
+public class TestTableDescriptorUtils
+extends Object
+
+Field Summary
+
+Fields
+
+Modifier and Type      Field and Description
+static HBaseClassTestRule      CLASS_RULE
+
+Constructor Summary
+
+Constructors
+
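
For the mocked-connection helper documented above, the essential behaviour is Mockito answering getConfiguration() with the conf you supplied. A minimal sketch assuming only stock Mockito (the real helper does considerably more); note the close() reminder from the javadoc:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;

// Minimal mocked-connection sketch: getConfiguration() returns the passed conf.
public class MockedConnectionSketch {
  public static Connection mockConnection(Configuration conf) {
    Connection conn = mock(Connection.class);
    when(conn.getConfiguration()).thenReturn(conf);
    return conn;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // try-with-resources ensures the connection is closed, per the javadoc warning
    try (Connection conn = mockConnection(conf)) {
      assert conn.getConfiguration() == conf;
    }
  }
}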
    

    [06/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
    index 28539e5..c3f794b 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
    @@ -55,404 +55,406 @@
 047 * implementations:
 048 * <ul>
 049 *   <li><em>defaultProvider</em> : whatever provider is standard for the hbase version. Currently
-050 *       "filesystem"</li>
-051 *   <li><em>filesystem</em> : a provider that will run on top of an implementation of the Hadoop
-052 *       FileSystem interface, normally HDFS.</li>
-053 *   <li><em>multiwal</em> : a provider that will use multiple "filesystem" wal instances per region
-054 *       server.</li>
-055 * </ul>
-056 *
-057 * Alternatively, you may provide a custom implementation of {@link WALProvider} by class name.
-058 */
-059@InterfaceAudience.Private
-060public class WALFactory {
-061
-062  private static final Logger LOG = LoggerFactory.getLogger(WALFactory.class);
+050 *       "asyncfs"</li>
+051 *   <li><em>asyncfs</em> : a provider that will run on top of an implementation of the Hadoop
+052 *       FileSystem interface via an asynchronous client.</li>
+053 *   <li><em>filesystem</em> : a provider that will run on top of an implementation of the Hadoop
+054 *       FileSystem interface via HDFS's synchronous DFSClient.</li>
+055 *   <li><em>multiwal</em> : a provider that will use multiple "filesystem" wal instances per region
+056 *       server.</li>
+057 * </ul>
+058 *
+059 * Alternatively, you may provide a custom implementation of {@link WALProvider} by class name.
+060 */
+061@InterfaceAudience.Private
+062public class WALFactory {
 063
-064  /**
-065   * Maps between configuration names for providers and implementation classes.
-066   */
-067  static enum Providers {
-068    defaultProvider(AsyncFSWALProvider.class),
-069    filesystem(FSHLogProvider.class),
-070    multiwal(RegionGroupingProvider.class),
-071    asyncfs(AsyncFSWALProvider.class);
-072
-073    final Class<? extends WALProvider> clazz;
-074    Providers(Class<? extends WALProvider> clazz) {
-075      this.clazz = clazz;
-076    }
-077  }
-078
-079  public static final String WAL_PROVIDER = "hbase.wal.provider";
-080  static final String DEFAULT_WAL_PROVIDER = Providers.defaultProvider.name();
-081
-082  public static final String META_WAL_PROVIDER = "hbase.wal.meta_provider";
-083  static final String DEFAULT_META_WAL_PROVIDER = Providers.defaultProvider.name();
-084
-085  final String factoryId;
-086  private final WALProvider provider;
-087  // The meta updates are written to a different wal. If this
-088  // regionserver holds meta regions, then this ref will be non-null.
-089  // lazily intialized; most RegionServers don't deal with META
-090  private final AtomicReference<WALProvider> metaProvider = new AtomicReference<>();
-091
-092  /**
-093   * Configuration-specified WAL Reader used when a custom reader is requested
-094   */
-095  private final Class<? extends AbstractFSWALProvider.Reader> logReaderClass;
-096
-097  /**
-098   * How long to attempt opening in-recovery wals
-099   */
-100  private final int timeoutMillis;
-101
-102  private final Configuration conf;
+064  private static final Logger LOG = LoggerFactory.getLogger(WALFactory.class);
+065
+066  /**
+067   * Maps between configuration names for providers and implementation classes.
+068   */
+069  static enum Providers {
+070    defaultProvider(AsyncFSWALProvider.class),
+071    filesystem(FSHLogProvider.class),
+072    multiwal(RegionGroupingProvider.class),
+073    asyncfs(AsyncFSWALProvider.class);
+074
+075    final Class<? extends WALProvider> clazz;
+076    Providers(Class<? extends WALProvider> clazz) {
+077      this.clazz = clazz;
+078    }
+079  }
+080
+081  public static final String WAL_PROVIDER = "hbase.wal.provider";
+082  static final String DEFAULT_WAL_PROVIDER = Providers.defaultProvider.name();
+083
+084  public static final String META_WAL_PROVIDER = "hbase.wal.meta_provider";
+085  static final String DEFAULT_META_WAL_PROVIDER = Providers.defaultProvider.name();
+086
+087  final String factoryId;
+088  private final WALProvider provider;
+089  // The meta updates are written to a different wal. If this
+090  // regionserver holds meta regions, then this ref will be non-null.
+091  // lazily intialized; most RegionServers don't deal with META
+092  private final AtomicReference<WALProvider> metaProvider = new AtomicReference<>();
+093
+094  /**
+095   * Configuration-specified WAL Reader used when a custom reader is
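
The updated javadoc above makes asyncfs the provider behind defaultProvider. A configuration sketch using the provider names from the Providers enum in the diff; the choices shown in comments are the alternatives, not requirements:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Pick the WAL provider by its configuration name.
public class WalProviderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.wal.provider", "filesystem");      // FSHLogProvider (sync DFSClient)
    // conf.set("hbase.wal.provider", "asyncfs");      // AsyncFSWALProvider (the new default)
    // conf.set("hbase.wal.provider", "multiwal");     // RegionGroupingProvider
    conf.set("hbase.wal.meta_provider", "filesystem"); // separate provider for the meta WAL
  }
}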

    [06/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
    index 3f8844b..cdb9398 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
    @@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
 134  static final String RANDOM_READ = "randomRead";
-135  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper MAPPER = new ObjectMapper();
-137  static {
-138    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = "TestTable";
-142  public static final String FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO = Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO = Bytes.toBytes("" + 0);
-145  public static final int DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 26;
-147
-148  private static final int ONE_GB = 1024 * 1024 * 1000;
-149  private static final int DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this configurable
-151  private static final int TAG_LENGTH = 256;
-152  private static final DecimalFormat FMT = new DecimalFormat("0.##");
-153  private static final MathContext CXT = MathContext.DECIMAL64;
-154  private static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions DEFAULT_OPTS = new TestOptions();
-157
-158  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
-159  private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
-160
-161  static {
-162    addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163        "Run async random read test");
-164    addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165        "Run async random write test");
-166    addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167        "Run async sequential read test");
-168    addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169        "Run async sequential write test");
-170    addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171        "Run async scan test (read every row)");
-172    addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173      "Run random read test");
-174    addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175      "Run random seek and scan 100 test");
-176    addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177      "Run random seek scan with both start and stop row (max 10 rows)");
-178    addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179      "Run random seek scan with both start and stop row (max 100 rows)");
-180    addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181      "Run random seek scan with both start and stop row (max 1000 rows)");
-182    addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183      "Run random seek scan with both start and stop row (max 1 rows)");
-184    addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185      "Run random write test");
-186    addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187      "Run sequential read test");
-188    addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189      "Run sequential write test");
-190    addCommandDescriptor(ScanTest.class, "scan",
-191      "Run scan test (read every row)");
-192    addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193      "Run scan test using a filter to find a specific row based on it's value " +
-194      "(make sure to use --rows=20)");
-195    addCommandDescriptor(IncrementTest.class, "increment",
-196      "Increment on each row; clients overlap on keyspace so some concurrent operations");
-197    addCommandDescriptor(AppendTest.class, "append",
-198      "Append on each row; clients overlap on keyspace so some concurrent operations");
-199    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200      "CheckAndMutate on each row; clients overlap on keyspace so some concurrent operations");
-201
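
The static initializer above registers every test class under a CLI name in a sorted map. A self-contained sketch of that registry pattern; CmdDescriptor here is a stand-in for the real nested class, and the Runnable.class argument is a placeholder for a concrete test class:

import java.util.Map;
import java.util.TreeMap;

// Registry-pattern sketch: command name -> (class, name, description).
public class CommandRegistrySketch {
  static final class CmdDescriptor {
    final Class<?> cmdClass;
    final String name;
    final String description;
    CmdDescriptor(Class<?> cmdClass, String name, String description) {
      this.cmdClass = cmdClass;
      this.name = name;
      this.description = description;
    }
  }

  private static final Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();

  static void addCommandDescriptor(Class<?> cmdClass, String name, String description) {
    COMMANDS.put(name, new CmdDescriptor(cmdClass, name, description));
  }

  public static void main(String[] args) {
    addCommandDescriptor(Runnable.class, "randomRead", "Run random read test");
    // TreeMap keeps the names sorted, which is what a --help listing wants.
    System.out.println(COMMANDS.keySet());
  }
}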
    

    [06/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
    index 03a0b2a..cabb570 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ScannerContext.LimitFields.html
    @@ -561,209 +561,206 @@
 553
 554    /**
 555     * Set all fields together.
-556     * @param batch
-557     * @param sizeScope
-558     * @param dataSize
-559     */
-560    void setFields(int batch, LimitScope sizeScope, long dataSize, long heapSize,
-561        LimitScope timeScope, long time) {
-562      setBatch(batch);
-563      setSizeScope(sizeScope);
-564      setDataSize(dataSize);
-565      setHeapSize(heapSize);
-566      setTimeScope(timeScope);
-567      setTime(time);
-568    }
-569
-570    int getBatch() {
-571      return this.batch;
-572    }
-573
-574    void setBatch(int batch) {
-575      this.batch = batch;
-576    }
-577
-578    /**
-579     * @param checkerScope
-580     * @return true when the limit can be enforced from the scope of the checker
-581     */
-582    boolean canEnforceBatchLimitFromScope(LimitScope checkerScope) {
-583      return LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(checkerScope);
-584    }
-585
-586    long getDataSize() {
-587      return this.dataSize;
-588    }
-589
-590    long getHeapSize() {
-591      return this.heapSize;
-592    }
-593
-594    void setDataSize(long dataSize) {
-595      this.dataSize = dataSize;
-596    }
-597
-598    void setHeapSize(long heapSize) {
-599      this.heapSize = heapSize;
-600    }
-601
-602    /**
-603     * @return {@link LimitScope} indicating scope in which the size limit is enforced
-604     */
-605    LimitScope getSizeScope() {
-606      return this.sizeScope;
-607    }
-608
-609    /**
-610     * Change the scope in which the size limit is enforced
-611     */
-612    void setSizeScope(LimitScope scope) {
-613      this.sizeScope = scope;
-614    }
-615
-616    /**
-617     * @param checkerScope
-618     * @return true when the limit can be enforced from the scope of the checker
-619     */
-620    boolean canEnforceSizeLimitFromScope(LimitScope checkerScope) {
-621      return this.sizeScope.canEnforceLimitFromScope(checkerScope);
-622    }
-623
-624    long getTime() {
-625      return this.time;
-626    }
-627
-628    void setTime(long time) {
-629      this.time = time;
-630    }
-631
-632    /**
-633     * @return {@link LimitScope} indicating scope in which the time limit is enforced
-634     */
-635    LimitScope getTimeScope() {
-636      return this.timeScope;
-637    }
-638
-639    /**
-640     * Change the scope in which the time limit is enforced
-641     */
-642    void setTimeScope(LimitScope scope) {
-643      this.timeScope = scope;
-644    }
-645
-646    /**
-647     * @param checkerScope
-648     * @return true when the limit can be enforced from the scope of the checker
-649     */
-650    boolean canEnforceTimeLimitFromScope(LimitScope checkerScope) {
-651      return this.timeScope.canEnforceLimitFromScope(checkerScope);
-652    }
-653
-654    @Override
-655    public String toString() {
-656      StringBuilder sb = new StringBuilder();
-657      sb.append("{");
+556     */
+557    void setFields(int batch, LimitScope sizeScope, long dataSize, long heapSize,
+558        LimitScope timeScope, long time) {
+559      setBatch(batch);
+560      setSizeScope(sizeScope);
+561      setDataSize(dataSize);
+562      setHeapSize(heapSize);
+563      setTimeScope(timeScope);
+564      setTime(time);
+565    }
+566
+567    int getBatch() {
+568      return this.batch;
+569    }
+570
+571    void setBatch(int batch) {
+572      this.batch = batch;
+573    }
+574
+575    /**
+576     * @param checkerScope
+577     * @return true when the limit can be enforced from the scope of the checker
+578     */
+579    boolean canEnforceBatchLimitFromScope(LimitScope checkerScope) {
+580      return LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(checkerScope);
+581    }
+582
+583    long getDataSize() {
+584      return this.dataSize;
+585    }
+586
+587    long getHeapSize() {
+588      return this.heapSize;
+589    }
+590
+591    void setDataSize(long dataSize) {
+592      this.dataSize = dataSize;
+593    }
+594
+595    void setHeapSize(long heapSize) {
+596      this.heapSize = heapSize;
+597    }
+598
+599    /**
+600     * @return {@link LimitScope} indicating scope in which the size limit is enforced
+601     */
+602    LimitScope getSizeScope() {
+603      return this.sizeScope;
+604    }
+605
+606    /**
+607     * Change the scope in which
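
LimitFields above pairs each limit (batch, size, time) with a LimitScope, and the canEnforce*FromScope checks compare the checker's scope against the configured one. An illustrative, self-contained sketch of that comparison; the depth values are assumptions for the example, not HBase's actual encoding:

// A limit configured at some scope can only be enforced by a checker operating
// at that scope or a finer one.
public class LimitScopeSketch {
  enum LimitScope {
    BETWEEN_ROWS(0), BETWEEN_CELLS(1);

    private final int depth;
    LimitScope(int depth) { this.depth = depth; }

    boolean canEnforceLimitFromScope(LimitScope checkerScope) {
      // A checker deeper in the scan (cells) can enforce coarser limits (rows).
      return checkerScope != null && checkerScope.depth >= this.depth;
    }
  }

  public static void main(String[] args) {
    System.out.println(LimitScope.BETWEEN_ROWS.canEnforceLimitFromScope(LimitScope.BETWEEN_CELLS)); // true
    System.out.println(LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(LimitScope.BETWEEN_ROWS)); // false
  }
}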

    [06/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
    index 2510283..418c60c 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
    @@ -77,77 +77,77 @@
 069import org.apache.hadoop.hbase.client.RowMutations;
 070import org.apache.hadoop.hbase.client.Scan;
 071import org.apache.hadoop.hbase.client.Table;
-072import org.apache.hadoop.hbase.filter.BinaryComparator;
-073import org.apache.hadoop.hbase.filter.Filter;
-074import org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import org.apache.hadoop.hbase.filter.FilterList;
-076import org.apache.hadoop.hbase.filter.PageFilter;
-077import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import org.apache.hadoop.hbase.io.compress.Compression;
-080import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import org.apache.hadoop.hbase.regionserver.BloomType;
-084import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import org.apache.hadoop.hbase.trace.TraceUtil;
-088import org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import org.apache.hadoop.hbase.util.Bytes;
-090import org.apache.hadoop.hbase.util.Hash;
-091import org.apache.hadoop.hbase.util.MurmurHash;
-092import org.apache.hadoop.hbase.util.Pair;
-093import org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import org.apache.hadoop.mapreduce.Mapper;
-098import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import org.apache.hadoop.util.ToolRunner;
-103import org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import org.apache.htrace.core.TraceScope;
-106import org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase performance and scalability.  Runs a HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the
-121 * <a href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132  static final String RANDOM_READ = "randomRead";
-133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper MAPPER = new ObjectMapper();
-135  static {
-136    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = "TestTable";
-140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import org.apache.hadoop.hbase.filter.Filter;
+075import org.apache.hadoop.hbase.filter.FilterAllFilter;
+076import
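
As the class javadoc above notes, PerformanceEvaluation extends Configured and implements Tool, so its harness is the standard ToolRunner pattern. A minimal sketch of that pattern only, not the evaluation logic; ToolPatternSketch is a hypothetical class:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// ToolRunner strips generic Hadoop options, then hands the rest to run().
public class ToolPatternSketch extends Configured implements Tool {
  @Override
  public int run(String[] args) {
    System.out.println("would dispatch test: " + (args.length > 0 ? args[0] : "<none>"));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new Configuration(), new ToolPatternSketch(), args));
  }
}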
    

    [06/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
    --
    diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html 
    b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
    index 434a274..f7d6df6 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/wal/package-tree.html
    @@ -139,9 +139,9 @@
     
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
     
    -org.apache.hadoop.hbase.wal.FaultyFSLog.FailureType
     org.apache.hadoop.hbase.wal.IOTestProvider.AllowedOperations
     org.apache.hadoop.hbase.wal.TestWALSplit.Corruptions
    +org.apache.hadoop.hbase.wal.FaultyFSLog.FailureType
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/overview-tree.html
    --
    diff --git a/testdevapidocs/overview-tree.html 
    b/testdevapidocs/overview-tree.html
    index a0bac17..a7e9eab 100644
    --- a/testdevapidocs/overview-tree.html
    +++ b/testdevapidocs/overview-tree.html
    @@ -337,6 +337,7 @@
     org.apache.hadoop.hbase.replication.regionserver.TestReplicator.FailureInjectingReplicationEndpointForTest
     
     
    +org.apache.hadoop.hbase.replication.regionserver.TestSerialReplicationEndpoint.TestEndpoint
     
     
     
    @@ -2679,6 +2680,7 @@
     org.apache.hadoop.hbase.ipc.TestHBaseClient
     org.apache.hadoop.hbase.TestHBaseConfiguration
     org.apache.hadoop.hbase.TestHBaseConfiguration.ReflectiveCredentialProviderClient
    +org.apache.hadoop.hbase.util.TestHBaseFsckCleanReplicationBarriers
     org.apache.hadoop.hbase.util.TestHBaseFsckComparator
     org.apache.hadoop.hbase.util.TestHBaseFsckEncryption
     org.apache.hadoop.hbase.util.TestHBaseFsckReplication
    @@ -3258,11 +3260,6 @@
     org.apache.hadoop.hbase.replication.TestReplicationEndpoint.EverythingPassesWALEntryFilterSubclass
     
     
    -org.apache.hadoop.hbase.replication.TestReplicationEndpoint.InterClusterReplicationEndpointForTest.DummyReplicator
    -
    -org.apache.hadoop.hbase.replication.TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator
    -
    -
     org.apache.hadoop.hbase.master.cleaner.TestReplicationHFileCleaner
     org.apache.hadoop.hbase.master.cleaner.TestReplicationHFileCleaner.DummyServer 
    (implements org.apache.hadoop.hbase.Server)
     org.apache.hadoop.hbase.replication.TestReplicationPeerConfig
    @@ -3290,12 +3287,6 @@
     org.apache.hadoop.hbase.replication.TestReplicationWithTags
     org.apache.hadoop.hbase.replication.TestReplicationWithTags.TestCoprocessorForTagsAtSink
     (implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
    org.apache.hadoop.hbase.coprocessor.RegionObserver)
     org.apache.hadoop.hbase.replication.TestReplicationWithTags.TestCoprocessorForTagsAtSource
     (implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
    org.apache.hadoop.hbase.coprocessor.RegionObserver)
    -org.apache.hadoop.hbase.replication.regionserver.TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingBlockingInterface
     (implements 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface)
    -org.apache.hadoop.hbase.replication.regionserver.TestReplicator.ReplicationEndpointForTest.ReplicatorForTest
    -
    -org.apache.hadoop.hbase.replication.regionserver.TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingReplicatorForTest
    -
    -
     org.apache.hadoop.hbase.client.TestReplicaWithCluster
     org.apache.hadoop.hbase.client.TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowOrStopCopro
     (implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
    org.apache.hadoop.hbase.coprocessor.RegionObserver)
     org.apache.hadoop.hbase.client.TestReplicaWithCluster.RegionServerStoppedCopro 
    (implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
    org.apache.hadoop.hbase.coprocessor.RegionObserver)
    @@ -3406,6 +3397,7 @@
     org.apache.hadoop.hbase.TestSequenceIdMonotonicallyIncreasing
     org.apache.hadoop.hbase.TestSerialization
     org.apache.hadoop.hbase.replication.regionserver.TestSerialReplicationChecker
    +org.apache.hadoop.hbase.replication.regionserver.TestSerialReplicationEndpoint
     org.apache.hadoop.hbase.master.balancer.TestServerAndLoad
     org.apache.hadoop.hbase.client.TestServerBusyException
     org.apache.hadoop.hbase.client.TestServerBusyException.SleepCoprocessor 
    (implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
    org.apache.hadoop.hbase.coprocessor.RegionObserver)
    
    

    [06/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
    index 8302e28..c370eb9 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.TableInfo.IntegrityFixSuggester.html
    @@ -2113,3031 +2113,3033 @@
 2105            errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106                tableName + " unable to delete dangling table state " + tableState);
 2107          }
-2108        } else {
-2109          errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110              tableName + " has dangling table state " + tableState);
-2111        }
-2112      }
-2113    }
-2114    // check that all tables have states
-2115    for (TableName tableName : tablesInfo.keySet()) {
-2116      if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-2117        if (fixMeta) {
-2118          MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED);
-2119          TableState newState = MetaTableAccessor.getTableState(connection, tableName);
-2120          if (newState == null) {
-2121            errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122                "Unable to change state for table " + tableName + " in meta ");
-2123          }
-2124        } else {
-2125          errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126              tableName + " has no state in meta ");
-2127        }
-2128      }
-2129    }
-2130  }
-2131
-2132  private void preCheckPermission() throws IOException, AccessDeniedException {
-2133    if (shouldIgnorePreCheckPermission()) {
-2134      return;
-2135    }
-2136
-2137    Path hbaseDir = FSUtils.getRootDir(getConf());
-2138    FileSystem fs = hbaseDir.getFileSystem(getConf());
-2139    UserProvider userProvider = UserProvider.instantiate(getConf());
-2140    UserGroupInformation ugi = userProvider.getCurrent().getUGI();
-2141    FileStatus[] files = fs.listStatus(hbaseDir);
-2142    for (FileStatus file : files) {
-2143      try {
-2144        FSUtils.checkAccess(ugi, file, FsAction.WRITE);
-2145      } catch (AccessDeniedException ace) {
-2146        LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
-2147        errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
-2148            + " does not have write perms to " + file.getPath()
-2149            + ". Please rerun hbck as hdfs user " + file.getOwner());
-2150        throw ace;
-2151      }
-2152    }
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo hi) throws IOException {
-2159    deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] metaKey) throws IOException {
-2166    Delete d = new Delete(metaKey);
-2167    meta.delete(d);
-2168    LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo hi) throws IOException {
-2175    RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
-2176    Delete d = new Delete(hi.metaEntry.getRegionName());
-2177    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179    mutations.add(d);
-2180
-2181    RegionInfo hri = RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182        .setOffline(false)
-2183        .setSplit(false)
-2184        .build();
-2185    Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
-2186    mutations.add(p);
-2187
-2188    meta.mutateRow(mutations);
-2189    LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This backwards-compatibility wrapper for permanently offlining a region
-2194   * that should not be alive.  If the region server does not support the
-2195   * "offline" method, it will use the closest unassign method instead.  This
-2196   * will basically work until one attempts to disable or delete the affected
-2197   * table.  The problem has to do with in-memory only master state, so
-2198   * restarting the HMaster or failing over to another should fix this.
-2199   */
-2200  private void offline(byte[] regionName) throws IOException {
-2201    String regionString = 
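
The resetSplitParent() fragment above bundles a Delete (clearing the SPLITA/SPLITB columns) and a Put (the rewritten RegionInfo) into one RowMutations, so hbase:meta is updated atomically. A minimal standalone sketch of the same pattern against an ordinary table, assuming an already-open Connection and purely illustrative table, row, and column names:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicRowFix {
  // Clears one column and rewrites another in a single atomic step, the
  // same Delete-plus-Put pattern resetSplitParent() applies to hbase:meta.
  static void clearAndRewrite(Connection conn) throws IOException {
    byte[] row = Bytes.toBytes("row-1");                            // illustrative row key
    try (Table table = conn.getTable(TableName.valueOf("demo"))) {  // illustrative table
      RowMutations mutations = new RowMutations(row);
      Delete d = new Delete(row);
      d.addColumn(Bytes.toBytes("info"), Bytes.toBytes("splitA"));  // cf. SPLITA/SPLITB
      mutations.add(d);
      Put p = new Put(row);
      p.addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"), Bytes.toBytes("fixed"));
      mutations.add(p);
      table.mutateRow(mutations);  // both mutations apply atomically to this row
    }
  }
}

mutateRow is atomic only within a single row; that single-row guarantee is what lets resetSplitParent() clear the split markers and rewrite the region info without a window where only one of the two has landed.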
    

    [06/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    index 79bf967..c8b113b 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
    @@ -115,3514 +115,3517 @@
 107import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 108import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
 109import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-110import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-111import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-112import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-113import org.apache.hadoop.hbase.master.locking.LockManager;
-114import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-115import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-116import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-117import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-118import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
-119import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
-120import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
-121import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
-122import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-123import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
-124import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-125import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-126import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-127import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-128import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
-129import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-130import org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure;
-131import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
-132import org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
-133import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
-134import org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
-135import org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure;
-136import org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
-137import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
-138import org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
-139import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-140import org.apache.hadoop.hbase.mob.MobConstants;
-141import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
-142import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-143import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-144import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
-145import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
-146import org.apache.hadoop.hbase.procedure2.LockedResource;
-147import org.apache.hadoop.hbase.procedure2.Procedure;
-148import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-149import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-150import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-151import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
-152import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-153import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-154import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
-155import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-156import org.apache.hadoop.hbase.quotas.QuotaUtil;
-157import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
-158import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
-159import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
-160import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-161import org.apache.hadoop.hbase.regionserver.HRegionServer;
-162import org.apache.hadoop.hbase.regionserver.HStore;
-163import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-164import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-165import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-166import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
-167import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
-168import 
    

    [06/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html 
    b/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
    index 4b5ac40..5cac61b 100644
    --- a/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
    +++ b/devapidocs/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";

-private static final class Mutation.CellWrapper
+private static final class Mutation.CellWrapper
 extends Object
 implements ExtendedCell

@@ -395,7 +395,7 @@ implements

 FIXED_OVERHEAD
-private static final long FIXED_OVERHEAD
+private static final long FIXED_OVERHEAD

@@ -404,7 +404,7 @@ implements

 cell
-private final Cell cell
+private final Cell cell

@@ -413,7 +413,7 @@ implements

 sequenceId
-private long sequenceId
+private long sequenceId

@@ -422,7 +422,7 @@ implements

 timestamp
-private long timestamp
+private long timestamp

@@ -439,7 +439,7 @@ implements

 CellWrapper
-CellWrapper(Cell cell)
+CellWrapper(Cell cell)

@@ -456,7 +456,7 @@ implements

 setSequenceId
-public void setSequenceId(long seqId)
+public void setSequenceId(long seqId)
 Description copied from interface: ExtendedCell
 Sets with the given seqId.

@@ -473,7 +473,7 @@ implements

 setTimestamp
-public void setTimestamp(long ts)
+public void setTimestamp(long ts)
 Description copied from interface: ExtendedCell
 Sets with the given timestamp.

@@ -490,7 +490,7 @@ implements

 setTimestamp
-public void setTimestamp(byte[] ts)
+public void setTimestamp(byte[] ts)
 Description copied from interface: ExtendedCell
 Sets with the given timestamp.

@@ -507,7 +507,7 @@ implements

 getSequenceId
-public long getSequenceId()
+public long getSequenceId()
 Description copied from interface: ExtendedCell
 A region-specific unique monotonically increasing sequence ID given to each Cell. It always
 exists for cells in the memstore but is not retained forever. It will be kept for

@@ -529,7 +529,7 @@ implements

 getValueArray
-public byte[] getValueArray()
+public byte[] getValueArray()
 Description copied from interface: Cell
 Contiguous raw bytes that may start at any index in the containing array. Max length is
 Integer.MAX_VALUE which is 2,147,483,647 bytes.

@@ -547,7 +547,7 @@ implements

 getValueOffset
-public int getValueOffset()
+public int getValueOffset()

 Specified by:
 getValueOffset in interface Cell

@@ -562,7 +562,7 @@ implements

 getValueLength
-public int getValueLength()
+public int getValueLength()

 Specified by:
 getValueLength in interface Cell

@@ -577,7 +577,7 @@ implements

 getTagsArray
-public byte[] getTagsArray()
+public byte[] getTagsArray()
 Description copied from interface: ExtendedCell
 Contiguous raw bytes representing tags that may start at any index in the containing array.

@@ -596,7 +596,7 @@ implements

 getTagsOffset
-public int getTagsOffset()
+public int getTagsOffset()

 Specified by:
 getTagsOffset in interface Cell

@@ -613,7 +613,7 @@ implements

 getTagsLength
-public int getTagsLength()
+public int getTagsLength()
 Description copied from interface: ExtendedCell
 HBase internally uses 2 bytes to store tags length in Cell. As the tags length is always a
 non-negative number, to make good use of the sign bit, the max of tags length is defined 2 *

@@ -636,7 +636,7 @@ implements

 getRowArray
-public byte[] getRowArray()
+public byte[] getRowArray()
 Description copied from interface: Cell
 Contiguous raw bytes that may start at any index in the containing array. Max length is
 Short.MAX_VALUE which is 32,767 bytes.

@@ -654,7 +654,7 @@ implements

 getRowOffset
-public int getRowOffset()
+public int getRowOffset()

 Specified by:
 getRowOffset in interface Cell

@@ -669,7 +669,7 @@ implements

 getRowLength
-public short getRowLength()
+public short getRowLength()

 Specified by:
 getRowLength in interface Cell

@@ -684,7 +684,7 @@ implements

 getFamilyArray
-public byte[] getFamilyArray()
+public byte[] getFamilyArray()
 Description copied from interface: Cell
 Contiguous bytes composed of legal HDFS filename characters which may start at any index in the
 containing array. Max length is Byte.MAX_VALUE, which is 127 bytes.

@@ -702,7 +702,7 @@ implements

 getFamilyOffset
-public int getFamilyOffset()
+public int getFamilyOffset()

 Specified by:
 getFamilyOffset in interface Cell

@@ -717,7 +717,7 @@ implements

 getFamilyLength
-public byte getFamilyLength()
+public byte getFamilyLength()

 Specified by:
 getFamilyLength in interface Cell

@@ -732,7 +732,7 @@ implements

 getQualifierArray
-public byte[] getQualifierArray()
+public byte[] getQualifierArray()
 Description copied 
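
The accessor contract spelled out above (row, family, qualifier, value, and tags each exposed as a backing array plus an offset and a length) is easiest to see from client code. A hedged sketch using the public CellBuilder API of HBase 2.x; the row, family, qualifier, and value literals are illustrative:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.util.Bytes;

public class CellAccessorDemo {
  public static void main(String[] args) {
    Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
        .setRow(Bytes.toBytes("row-1"))
        .setFamily(Bytes.toBytes("f"))
        .setQualifier(Bytes.toBytes("q"))
        .setTimestamp(System.currentTimeMillis())
        .setType(Cell.Type.Put)
        .setValue(Bytes.toBytes("v"))
        .build();

    // Never assume the backing array starts at index 0: always pair the
    // array with its offset and length, exactly as the getters above specify.
    String row = Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
    String value = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
    System.out.println(row + " -> " + value);
  }
}

The offset/length pairing matters because a Cell may be a view into a larger shared buffer rather than a private array starting at index 0.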

    [06/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    index c7d05d1..abcb738 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    @@ -143,18 +143,18 @@
     
     
 void
-HFileDataBlockEncoderImpl.saveMetadata(HFile.Writer writer)
-
-void
 NoOpDataBlockEncoder.saveMetadata(HFile.Writer writer)

-
+
 void
 HFileDataBlockEncoder.saveMetadata(HFile.Writer writer)
 Save metadata in HFile which will be written to disk

+
+void
+HFileDataBlockEncoderImpl.saveMetadata(HFile.Writer writer)
+

@@ -203,18 +203,18 @@

-void
-RowColBloomContext.addLastBloomKey(HFile.Writer writer)
+abstract void
+BloomContext.addLastBloomKey(HFile.Writer writer)
+Adds the last bloom key to the HFile Writer as part of StorefileWriter close.

 void
 RowBloomContext.addLastBloomKey(HFile.Writer writer)

-abstract void
-BloomContext.addLastBloomKey(HFile.Writer writer)
-Adds the last bloom key to the HFile Writer as part of StorefileWriter close.
+void
+RowColBloomContext.addLastBloomKey(HFile.Writer writer)

 static BloomFilterWriter
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    index b55ecd8..e1139cc 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    @@ -106,15 +106,15 @@
     
     
     
    +private HFileBlock.Writer
    +HFileBlockIndex.BlockIndexWriter.blockWriter
    +
    +
     protected HFileBlock.Writer
     HFileWriterImpl.blockWriter
     block writer
     
     
    -
    -private HFileBlock.Writer
    -HFileBlockIndex.BlockIndexWriter.blockWriter
    -
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    index 29c8b1e..fabd03f 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    @@ -136,15 +136,15 @@
     
     
 HFileContext
-HFileBlockDecodingContext.getHFileContext()
+HFileBlockEncodingContext.getHFileContext()

 HFileContext
-HFileBlockDefaultDecodingContext.getHFileContext()
+HFileBlockDecodingContext.getHFileContext()

 HFileContext
-HFileBlockEncodingContext.getHFileContext()
+HFileBlockDefaultDecodingContext.getHFileContext()

 HFileContext
@@ -224,23 +224,23 @@

 private HFileContext
+HFile.WriterFactory.fileContext
+
+private HFileContext
 HFileBlock.fileContext
 Meta data that holds meta information on the hfileblock.

 private HFileContext
 HFileBlock.Writer.fileContext
 Meta data that holds information about the hfileblock

-private HFileContext
-HFileBlock.FSReaderImpl.fileContext
-
 private HFileContext
-HFile.WriterFactory.fileContext
+HFileBlock.FSReaderImpl.fileContext

 private HFileContext
@@ -277,20 +277,20 @@

 HFileContext
-HFileWriterImpl.getFileContext()
-
-HFileContext
 HFile.Writer.getFileContext()
 Return the file context for the HFile this writer belongs to

 HFileContext
 HFile.Reader.getFileContext()
 Return the file context of the HFile this reader belongs to

+HFileContext
+HFileWriterImpl.getFileContext()
+
 HFileContext
 HFileReaderImpl.getFileContext()
@@ -323,35 +323,35 @@

 HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
-
-HFileBlockDecodingContext
 NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)

 HFileBlockDecodingContext
 HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
 create an encoder specific decoding context for reading.

-HFileBlockEncodingContext
-HFileDataBlockEncoderImpl.newDataBlockEncodingContext(byte[] dummyHeader,
-    HFileContext fileContext)
-
+HFileBlockDecodingContext
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
+
 HFileBlockEncodingContext
 NoOpDataBlockEncoder.newDataBlockEncodingContext(byte[] dummyHeader,
     HFileContext meta)

 HFileBlockEncodingContext
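
All three class-use tables above orbit HFileContext, the per-file bag of settings (block size, compression, encoding) that writers, readers, and the encoder/decoder contexts must agree on. A hedged sketch of building one and handing it to an HFile writer; this follows the HBase 2.x API as I read it, and the path and sizes are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class HFileContextDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // The context carries the block-level settings every component in the
    // tables above (writer, reader, encoder contexts) needs to agree on.
    HFileContext ctx = new HFileContextBuilder()
        .withBlockSize(64 * 1024)                       // illustrative block size
        .withCompression(Compression.Algorithm.NONE)
        .build();

    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, new Path("/tmp/demo.hfile"))      // illustrative path
        .withFileContext(ctx)
        .create();
    writer.close();  // a real writer would append Cells in sorted order first
  }
}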
     

    [06/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html 
    b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
    index 41b2105..dc4b7bd 100644
    --- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
    +++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
@@ -495,15 +495,15 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 static Filter
-SingleColumnValueExcludeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-ValueFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnCountGetFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-FamilyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+RowFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
@@ -513,69 +513,69 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 static Filter
-ColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+FirstKeyOnlyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-PageFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+TimestampsFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-RowFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ValueFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-ColumnRangeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+KeyOnlyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-ColumnCountGetFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+FamilyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-MultipleColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+QualifierFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
-ColumnPaginationFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnRangeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)

 static Filter
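
Each createFilterFromArguments overload above is the hook the HBase filter language uses to build that filter from parsed string arguments; the diff itself only reorders the generated rows. For orientation, a hedged sketch of the two equivalent routes to the same filter (direct construction versus the ParseFilter string form; the prefix and the filter string are illustrative):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.ParseFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterDemo {
  public static void main(String[] args) throws Exception {
    // Direct construction: the constructor that createFilterFromArguments
    // ultimately delegates to.
    Filter direct = new ColumnPrefixFilter(Bytes.toBytes("col"));

    // Filter-language form: ParseFilter resolves the filter name and invokes
    // the matching static createFilterFromArguments(...) from the table above.
    Filter parsed = new ParseFilter().parseFilterString("ColumnPrefixFilter ('col')");

    Scan scan = new Scan().setFilter(direct);  // or `parsed`; both behave alike
    System.out.println(scan.getFilter());
  }
}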
    

    [06/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
    index 7556544..ce54cbb 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.html
    @@ -41,520 +41,530 @@
 033import org.apache.hadoop.fs.Path;
 034import org.apache.hadoop.hbase.HConstants;
 035import org.apache.hadoop.hbase.ServerName;
-036import org.apache.hadoop.hbase.exceptions.DeserializationException;
-037import org.apache.hadoop.hbase.util.Bytes;
-038import org.apache.hadoop.hbase.util.CollectionUtils;
-039import org.apache.hadoop.hbase.util.Pair;
-040import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-041import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
-042import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-043import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-044import org.apache.yetus.audience.InterfaceAudience;
-045import org.apache.zookeeper.KeeperException;
-046import org.apache.zookeeper.KeeperException.BadVersionException;
-047import org.apache.zookeeper.KeeperException.NoNodeException;
-048import org.apache.zookeeper.KeeperException.NodeExistsException;
-049import org.apache.zookeeper.KeeperException.NotEmptyException;
-050import org.apache.zookeeper.data.Stat;
-051import org.slf4j.Logger;
-052import org.slf4j.LoggerFactory;
-053
-054import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-055
-056/**
-057 * ZK based replication queue storage.
-058 * <p>
-059 * The base znode for each regionserver is the regionserver name. For example:
-060 *
-061 * <pre>
-062 * /hbase/replication/rs/hostname.example.org,6020,1234
-063 * </pre>
-064 *
-065 * Within this znode, the region server maintains a set of WAL replication queues. These queues are
-066 * represented by child znodes named using their given queue id. For example:
-067 *
-068 * <pre>
-069 * /hbase/replication/rs/hostname.example.org,6020,1234/1
-070 * /hbase/replication/rs/hostname.example.org,6020,1234/2
-071 * </pre>
-072 *
-073 * Each queue has one child znode for every WAL that still needs to be replicated. The value of
-074 * these WAL child znodes is the latest position that has been replicated. This position is updated
-075 * every time a WAL entry is replicated. For example:
-076 *
-077 * <pre>
-078 * /hbase/replication/rs/hostname.example.org,6020,1234/1/23522342.23422 [VALUE: 254]
-079 * </pre>
-080 */
-081@InterfaceAudience.Private
-082class ZKReplicationQueueStorage extends ZKReplicationStorageBase
-083    implements ReplicationQueueStorage {
-084
-085  private static final Logger LOG = LoggerFactory.getLogger(ZKReplicationQueueStorage.class);
-086
-087  public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY =
-088      "zookeeper.znode.replication.hfile.refs";
-089  public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT = "hfile-refs";
-090
-091  public static final String ZOOKEEPER_ZNODE_REPLICATION_REGIONS_KEY =
-092      "zookeeper.znode.replication.regions";
-093  public static final String ZOOKEEPER_ZNODE_REPLICATION_REGIONS_DEFAULT = "regions";
-094
-095  /**
-096   * The name of the znode that contains all replication queues
-097   */
-098  private final String queuesZNode;
-099
-100  /**
-101   * The name of the znode that contains queues of hfile references to be replicated
-102   */
-103  private final String hfileRefsZNode;
-104
-105  private final String regionsZNode;
-106
-107  public ZKReplicationQueueStorage(ZKWatcher zookeeper, Configuration conf) {
-108    super(zookeeper, conf);
-109
-110    String queuesZNodeName = conf.get("zookeeper.znode.replication.rs", "rs");
-111    String hfileRefsZNodeName = conf.get(ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY,
-112      ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT);
-113    this.queuesZNode = ZNodePaths.joinZNode(replicationZNode, queuesZNodeName);
-114    this.hfileRefsZNode = ZNodePaths.joinZNode(replicationZNode, hfileRefsZNodeName);
-115    this.regionsZNode = ZNodePaths.joinZNode(replicationZNode, conf
-116      .get(ZOOKEEPER_ZNODE_REPLICATION_REGIONS_KEY, ZOOKEEPER_ZNODE_REPLICATION_REGIONS_DEFAULT));
-117  }
-118
-119  private String getRsNode(ServerName serverName) {
-120    return ZNodePaths.joinZNode(queuesZNode, serverName.getServerName());
-121  }
-122
-123  private String getQueueNode(ServerName serverName, String queueId) {
-124    return ZNodePaths.joinZNode(getRsNode(serverName), queueId);
-125  }
-126
-127  private String getFileNode(String queueNode, String fileName) {
-128    return 
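
The class comment above fully determines the paths this storage reads and writes, and the getRsNode/getQueueNode/getFileNode helpers just join path segments. A small sketch that reproduces the documented layout with ZNodePaths.joinZNode, using the server, queue, and WAL names from the example in the comment (the base znode assumes the default zookeeper.znode.replication.rs value of "rs"):

import org.apache.hadoop.hbase.zookeeper.ZNodePaths;

public class ReplicationZNodeLayout {
  public static void main(String[] args) {
    String queuesZNode = "/hbase/replication/rs";  // base + default "rs" child
    // One child znode per regionserver, named with the full server name.
    String rsNode = ZNodePaths.joinZNode(queuesZNode, "hostname.example.org,6020,1234");
    // One child per replication queue, named by its queue id.
    String queueNode = ZNodePaths.joinZNode(rsNode, "1");
    // One child per WAL still to replicate; its value is the replicated position.
    String walNode = ZNodePaths.joinZNode(queueNode, "23522342.23422");
    System.out.println(walNode);
    // -> /hbase/replication/rs/hostname.example.org,6020,1234/1/23522342.23422
  }
}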
    

    [06/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
    index 3bc66bb..97aa79c 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html
    @@ -1435,459 +1435,460 @@
 1427   */
 1428  private void execProcedure(final RootProcedureState procStack,
 1429      final Procedure<TEnvironment> procedure) {
-1430    Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432    // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException.
-1433    // The exception is caught below and then we hurry to the exit without disturbing state. The
-1434    // idea is that the processing of this procedure will be unsuspended later by an external event
-1435    // such as the report of a region open. TODO: Currently, it's possible for two worker threads
-1436    // to be working on the same procedure concurrently (locking in procedures is NOT about
-1437    // concurrency but about tying an entity to a procedure; i.e. a region to a particular
-1438    // procedure instance). This can make for issues if both threads are changing state.
-1439    // See env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440    // in RegionTransitionProcedure#reportTransition for example of Procedure putting
-1441    // itself back on the scheduler making it possible for two threads running against
-1442    // the one Procedure. Might be ok if they are both doing different, idempotent sections.
-1443    boolean suspended = false;
-1444
-1445    // Whether to 're-' -execute; run through the loop again.
-1446    boolean reExecute = false;
-1447
-1448    Procedure<TEnvironment>[] subprocs = null;
-1449    do {
-1450      reExecute = false;
-1451      try {
-1452        subprocs = procedure.doExecute(getEnvironment());
-1453        if (subprocs != null && subprocs.length == 0) {
-1454          subprocs = null;
-1455        }
-1456      } catch (ProcedureSuspendedException e) {
-1457        if (LOG.isTraceEnabled()) {
-1458          LOG.trace("Suspend " + procedure);
-1459        }
-1460        suspended = true;
-1461      } catch (ProcedureYieldException e) {
-1462        if (LOG.isTraceEnabled()) {
-1463          LOG.trace("Yield " + procedure + ": " + e.getMessage(), e);
-1464        }
-1465        scheduler.yield(procedure);
-1466        return;
-1467      } catch (InterruptedException e) {
-1468        if (LOG.isTraceEnabled()) {
-1469          LOG.trace("Yield interrupt " + procedure + ": " + e.getMessage(), e);
-1470        }
-1471        handleInterruptedException(procedure, e);
-1472        scheduler.yield(procedure);
-1473        return;
-1474      } catch (Throwable e) {
-1475        // Catch NullPointerExceptions or similar errors...
-1476        String msg = "CODE-BUG: Uncaught runtime exception: " + procedure;
-1477        LOG.error(msg, e);
-1478        procedure.setFailure(new RemoteProcedureException(msg, e));
-1479      }
-1480
-1481      if (!procedure.isFailed()) {
-1482        if (subprocs != null) {
-1483          if (subprocs.length == 1 && subprocs[0] == procedure) {
-1484            // Procedure returned itself. Quick-shortcut for a state machine-like procedure;
-1485            // i.e. we go around this loop again rather than go back out on the scheduler queue.
-1486            subprocs = null;
-1487            reExecute = true;
-1488            if (LOG.isTraceEnabled()) {
-1489              LOG.trace("Short-circuit to next step on pid=" + procedure.getProcId());
-1490            }
-1491          } else {
-1492            // Yield the current procedure, and make the subprocedure runnable
-1493            // subprocs may come back 'null'.
-1494            subprocs = initializeChildren(procStack, procedure, subprocs);
-1495            LOG.info("Initialized subprocedures=" +
-1496              (subprocs == null? null:
-1497                Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-1498                collect(Collectors.toList()).toString()));
-1499          }
-1500        } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
-1501          if (LOG.isTraceEnabled()) {
-1502            LOG.trace("Added to timeoutExecutor " + procedure);
-1503          }
-1504          timeoutExecutor.add(procedure);
-1505        } else if (!suspended) {
-1506          // No subtask, so we are done
-1507          

    [06/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
    index 34c552c..cd795e1 100644
    --- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
    +++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";

-public class TestAsyncRegionAdminApi
+public class TestAsyncRegionAdminApi
 extends TestAsyncAdminBase
 Class to test asynchronous region admin operations.

@@ -288,7 +288,7 @@ extends

 CLASS_RULE
-public static final HBaseClassTestRule CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE

@@ -305,7 +305,7 @@ extends

 TestAsyncRegionAdminApi
-public TestAsyncRegionAdminApi()
+public TestAsyncRegionAdminApi()

@@ -322,7 +322,7 @@ extends

 testAssignRegionAndUnassignRegion
-public void testAssignRegionAndUnassignRegion()
+public void testAssignRegionAndUnassignRegion()
     throws Exception

 Throws:
@@ -336,7 +336,7 @@ extends

 createTableAndGetOneRegion
-org.apache.hadoop.hbase.client.RegionInfo createTableAndGetOneRegion(org.apache.hadoop.hbase.TableName tableName)
+org.apache.hadoop.hbase.client.RegionInfo createTableAndGetOneRegion(org.apache.hadoop.hbase.TableName tableName)
     throws IOException,
            InterruptedException,
            ExecutionException

@@ -354,7 +354,7 @@ extends

 testGetRegionByStateOfTable
-public void testGetRegionByStateOfTable()
+public void testGetRegionByStateOfTable()
     throws Exception

 Throws:
@@ -368,7 +368,7 @@ extends

 testMoveRegion
-public void testMoveRegion()
+public void testMoveRegion()
     throws Exception

 Throws:
@@ -382,7 +382,7 @@ extends

 testGetOnlineRegions
-public void testGetOnlineRegions()
+public void testGetOnlineRegions()
     throws Exception

 Throws:
@@ -396,7 +396,7 @@ extends

 testFlushTableAndRegion
-public void testFlushTableAndRegion()
+public void testFlushTableAndRegion()
     throws Exception

 Throws:
@@ -410,7 +410,7 @@ extends

 waitUntilMobCompactionFinished
-private void waitUntilMobCompactionFinished(org.apache.hadoop.hbase.TableName tableName)
+private void waitUntilMobCompactionFinished(org.apache.hadoop.hbase.TableName tableName)
     throws ExecutionException,
            InterruptedException

@@ -426,7 +426,7 @@ extends

 testCompactMob
-public void testCompactMob()
+public void testCompactMob()
     throws Exception

 Throws:
@@ -440,7 +440,7 @@ extends

 testCompactRegionServer
-public void testCompactRegionServer()
+public void testCompactRegionServer()
     throws Exception

 Throws:
@@ -454,7 +454,7 @@ extends

 testCompact
-public void testCompact()
+public void testCompact()
     throws 
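
For context on what these tests exercise: the HBase 2.x async admin client returns a CompletableFuture from every call instead of blocking. A hedged sketch of the client-side pattern; the table name is illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncAdminDemo {
  public static void main(String[] args) throws Exception {
    try (AsyncConnection conn =
        ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
      AsyncAdmin admin = conn.getAdmin();
      // Every admin call returns a CompletableFuture; chaining replaces blocking.
      admin.flush(TableName.valueOf("demo"))
          .thenRun(() -> System.out.println("flush done"))
          .get();  // block here only so the demo waits before exiting
    }
  }
}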

    [06/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
     
    b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
    index 9771a14..136a97b 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
@@ -126,7 +126,7 @@

-abstract class RegionCoprocessorHost.BulkLoadObserverOperation
+abstract class RegionCoprocessorHost.BulkLoadObserverOperation
 extends CoprocessorHost.ObserverOperationWithoutResult<BulkLoadObserver>

@@ -205,7 +205,7 @@ extends

 BulkLoadObserverOperation
-public BulkLoadObserverOperation(User user)
+public BulkLoadObserverOperation(User user)

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
index c88b1db..067b5c4 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
@@ -126,7 +126,7 @@ var activeTableTab = "activeTableTab";

-private static class RegionCoprocessorHost.RegionEnvironment
+private static class RegionCoprocessorHost.RegionEnvironment
 extends BaseEnvironment<RegionCoprocessor>
 implements RegionCoprocessorEnvironment
 Encapsulation of the environment of each coprocessor
@@ -303,7 +303,7 @@ implements

 region
-private Region region
+private Region region

@@ -312,7 +312,7 @@ implements

 sharedData
-ConcurrentMap<String,Object> sharedData
+ConcurrentMap<String,Object> sharedData

@@ -321,7 +321,7 @@ implements

 metricRegistry
-private final MetricRegistry metricRegistry
+private final MetricRegistry metricRegistry

@@ -330,7 +330,7 @@ implements

 services
-private final RegionServerServices services
+private final RegionServerServices services

@@ -347,7 +347,7 @@ implements

 RegionEnvironment
-public RegionEnvironment(RegionCoprocessor impl,
+public RegionEnvironment(RegionCoprocessor impl,
     int priority,
     int seq,
     org.apache.hadoop.conf.Configuration conf,
@@ -376,7 +376,7 @@ implements

 getRegion
-public Region getRegion()
+public Region getRegion()

 Specified by:
 getRegion in interface RegionCoprocessorEnvironment

@@ -391,7 +391,7 @@ implements

 getOnlineRegions
-public OnlineRegions getOnlineRegions()
+public OnlineRegions getOnlineRegions()

 Specified by:
 getOnlineRegions in interface RegionCoprocessorEnvironment

@@ -406,7 +406,7 @@ implements

 getConnection
-public Connection getConnection()
+public Connection getConnection()
 Description copied from interface: RegionCoprocessorEnvironment
 Returns the hosts' Connection to the Cluster. Do not close! This is a shared connection
 with the hosting server. Throws UnsupportedOperationException if you try to close
@@ -445,7 +445,7 @@ implements

 createConnection
-public Connection createConnection(org.apache.hadoop.conf.Configuration conf)
+public Connection createConnection(org.apache.hadoop.conf.Configuration conf)
     throws IOException
 Description copied from interface: RegionCoprocessorEnvironment
 Creates a cluster 
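
The getConnection() contract quoted above (a shared, never-to-be-closed Connection, with createConnection() as the opt-in way to get a private one) shapes how coprocessors are written. A hedged sketch of a coprocessor that follows it, using the HBase 2.x RegionCoprocessor/RegionObserver interfaces; the class name and the commented lookup are illustrative:

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class ConnectionAwareObserver implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);  // expose the observer half of this coprocessor
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx, Get get,
      List<Cell> results) throws IOException {
    // Shared with the hosting RegionServer: use it, never close it,
    // per the getConnection() contract quoted above.
    Connection shared = ctx.getEnvironment().getConnection();
    // e.g. consult side data via shared.getTable(...), then let the Get proceed.
  }
}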

    [06/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
    index d572923..adfea3a 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.RegionStateNode.html
    @@ -246,7 +246,7 @@
     
     
 protected void
-AssignProcedure.finishTransition(MasterProcedureEnv env,
+UnassignProcedure.finishTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode)

@@ -256,7 +256,7 @@

 protected void
-UnassignProcedure.finishTransition(MasterProcedureEnv env,
+AssignProcedure.finishTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode)

@@ -307,7 +307,7 @@

 protected boolean
-AssignProcedure.remoteCallFailed(MasterProcedureEnv env,
+UnassignProcedure.remoteCallFailed(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode,
     IOException exception)

@@ -319,7 +319,7 @@

 protected boolean
-UnassignProcedure.remoteCallFailed(MasterProcedureEnv env,
+AssignProcedure.remoteCallFailed(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode,
     IOException exception)

@@ -344,10 +344,10 @@

 protected void
-AssignProcedure.reportTransition(MasterProcedureEnv env,
+UnassignProcedure.reportTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode,
     org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode code,
-    long openSeqNum)
+    long seqId)

 protected abstract void
@@ -358,10 +358,10 @@

 protected void
-UnassignProcedure.reportTransition(MasterProcedureEnv env,
+AssignProcedure.reportTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode,
     org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode code,
-    long seqId)
+    long openSeqNum)

 private boolean
@@ -371,47 +371,52 @@
     long seqId)

+private void
+UnassignProcedure.reportTransitionCLOSED(MasterProcedureEnv env,
+    RegionStates.RegionStateNode regionNode)
+
 protected boolean
-AssignProcedure.startTransition(MasterProcedureEnv env,
+UnassignProcedure.startTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode)

 protected abstract boolean
 RegionTransitionProcedure.startTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode)

 protected boolean
-UnassignProcedure.startTransition(MasterProcedureEnv env,
+AssignProcedure.startTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode)

 void
 AssignmentManager.undoRegionAsClosing(RegionStates.RegionStateNode regionNode)

 void
 AssignmentManager.undoRegionAsOpening(RegionStates.RegionStateNode regionNode)

 void
 RegionStateStore.updateRegionLocation(RegionStates.RegionStateNode regionStateNode)

 protected boolean
-AssignProcedure.updateTransition(MasterProcedureEnv env,
+UnassignProcedure.updateTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode)

 protected abstract boolean
 RegionTransitionProcedure.updateTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode)
 Called when the Procedure is in the REGION_TRANSITION_DISPATCH state.

 protected boolean
-UnassignProcedure.updateTransition(MasterProcedureEnv env,
+AssignProcedure.updateTransition(MasterProcedureEnv env,
     RegionStates.RegionStateNode regionNode)
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html 
    b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
    index c6bba43..5d7e6ed 100644
    --- a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
    +++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-summary.html
    @@ -240,8 +240,7 @@
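
The swapped rows in this table are all overrides of one template: RegionTransitionProcedure defines startTransition, updateTransition, and finishTransition, and AssignProcedure and UnassignProcedure fill them in for the two directions. A toy, non-HBase rendering of that template-method shape; all names here are invented for illustration:

// Illustrative only, not the real HBase classes.
abstract class ToyRegionTransitionProcedure {
  // Called once to kick off the transition (e.g. pick a target server).
  protected abstract boolean startTransition(ToyEnv env, ToyRegionNode node);
  // Called while the remote call is in flight (REGION_TRANSITION_DISPATCH).
  protected abstract boolean updateTransition(ToyEnv env, ToyRegionNode node);
  // Called when the region server reports the transition finished.
  protected abstract void finishTransition(ToyEnv env, ToyRegionNode node);
}

class ToyAssignProcedure extends ToyRegionTransitionProcedure {
  @Override protected boolean startTransition(ToyEnv env, ToyRegionNode node) {
    return true;   // e.g. queue an "open region" call to a chosen server
  }
  @Override protected boolean updateTransition(ToyEnv env, ToyRegionNode node) {
    return false;  // nothing more to do until the server reports OPENED
  }
  @Override protected void finishTransition(ToyEnv env, ToyRegionNode node) {
    node.state = "OPEN";
  }
}

class ToyEnv {}
class ToyRegionNode { String state = "OFFLINE"; }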
     
     

    [06/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    index abcb738..c7d05d1 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
    @@ -143,17 +143,17 @@
     
     
 void
-NoOpDataBlockEncoder.saveMetadata(HFile.Writer writer)
+HFileDataBlockEncoderImpl.saveMetadata(HFile.Writer writer)

 void
-HFileDataBlockEncoder.saveMetadata(HFile.Writer writer)
-Save metadata in HFile which will be written to disk
-
+NoOpDataBlockEncoder.saveMetadata(HFile.Writer writer)

 void
-HFileDataBlockEncoderImpl.saveMetadata(HFile.Writer writer)
+HFileDataBlockEncoder.saveMetadata(HFile.Writer writer)
+Save metadata in HFile which will be written to disk
+

@@ -203,18 +203,18 @@

-abstract void
-BloomContext.addLastBloomKey(HFile.Writer writer)
-Adds the last bloom key to the HFile Writer as part of StorefileWriter close.
-
+void
+RowColBloomContext.addLastBloomKey(HFile.Writer writer)

 void
 RowBloomContext.addLastBloomKey(HFile.Writer writer)

-void
-RowColBloomContext.addLastBloomKey(HFile.Writer writer)
+abstract void
+BloomContext.addLastBloomKey(HFile.Writer writer)
+Adds the last bloom key to the HFile Writer as part of StorefileWriter close.
+

 static BloomFilterWriter
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    index e1139cc..b55ecd8 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
    @@ -106,15 +106,15 @@
     
     
     
    -private HFileBlock.Writer
    -HFileBlockIndex.BlockIndexWriter.blockWriter
    -
    -
     protected HFileBlock.Writer
     HFileWriterImpl.blockWriter
     block writer
     
     
    +
    +private HFileBlock.Writer
    +HFileBlockIndex.BlockIndexWriter.blockWriter
    +
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    index fabd03f..29c8b1e 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
    @@ -136,15 +136,15 @@
     
     
 HFileContext
-HFileBlockEncodingContext.getHFileContext()
+HFileBlockDecodingContext.getHFileContext()

 HFileContext
-HFileBlockDecodingContext.getHFileContext()
+HFileBlockDefaultDecodingContext.getHFileContext()

 HFileContext
-HFileBlockDefaultDecodingContext.getHFileContext()
+HFileBlockEncodingContext.getHFileContext()

 HFileContext
@@ -224,24 +224,24 @@

 private HFileContext
-HFile.WriterFactory.fileContext
-
-private HFileContext
 HFileBlock.fileContext
 Meta data that holds meta information on the hfileblock.

 private HFileContext
 HFileBlock.Writer.fileContext
 Meta data that holds information about the hfileblock

 private HFileContext
 HFileBlock.FSReaderImpl.fileContext

+private HFileContext
+HFile.WriterFactory.fileContext
+
 private HFileContext
 HFileReaderImpl.hfileContext
@@ -277,20 +277,20 @@

 HFileContext
+HFileWriterImpl.getFileContext()
+
+HFileContext
 HFile.Writer.getFileContext()
 Return the file context for the HFile this writer belongs to

 HFileContext
 HFile.Reader.getFileContext()
 Return the file context of the HFile this reader belongs to

-HFileContext
-HFileWriterImpl.getFileContext()
-
 HFileContext
 HFileReaderImpl.getFileContext()
@@ -323,35 +323,35 @@

 HFileBlockDecodingContext
-NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)

 HFileBlockDecodingContext
-HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
-create an encoder specific decoding context for reading.
-
+NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)

 HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
+HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
+create an encoder specific decoding context for reading.
+

 HFileBlockEncodingContext
    

    [06/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    index 3563b1c..0cc71bf 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
    @@ -1577,387 +1577,386 @@
 1569  }
 1570
 1571  public void markRegionAsSplit(final RegionInfo parent, final ServerName serverName,
-1572      final RegionInfo daughterA, final RegionInfo daughterB)
-1573      throws IOException {
-1574    // Update hbase:meta. Parent will be marked offline and split up in hbase:meta.
-1575    // The parent stays in regionStates until cleared when removed by CatalogJanitor.
-1576    // Update its state in regionStates so it shows as offline and split when read
-1577    // later figuring what regions are in a table and what are not: see
-1578    // regionStates#getRegionsOfTable
-1579    final RegionStateNode node = regionStates.getOrCreateRegionStateNode(parent);
-1580    node.setState(State.SPLIT);
-1581    final RegionStateNode nodeA = regionStates.getOrCreateRegionStateNode(daughterA);
-1582    nodeA.setState(State.SPLITTING_NEW);
-1583    final RegionStateNode nodeB = regionStates.getOrCreateRegionStateNode(daughterB);
-1584    nodeB.setState(State.SPLITTING_NEW);
-1585
-1586    regionStateStore.splitRegion(parent, daughterA, daughterB, serverName);
-1587    if (shouldAssignFavoredNodes(parent)) {
-1588      List<ServerName> onlineServers = this.master.getServerManager().getOnlineServersList();
-1589      ((FavoredNodesPromoter)getBalancer()).
-1590          generateFavoredNodesForDaughter(onlineServers, parent, daughterA, daughterB);
-1591    }
-1592  }
-1593
-1594  /**
-1595   * When called here, the merge has happened. The two merged regions have been
-1596   * unassigned and the above markRegionClosed has been called on each so they have been
-1597   * disassociated from a hosting Server. The merged region will be open after this call. The
-1598   * merged regions are removed from hbase:meta below. Later they are deleted from the filesystem
-1599   * by the catalog janitor running against hbase:meta. It notices when the merged region no
-1600   * longer holds references to the old regions.
-1601   */
-1602  public void markRegionAsMerged(final RegionInfo child, final ServerName serverName,
-1603      final RegionInfo mother, final RegionInfo father) throws IOException {
-1604    final RegionStateNode node = regionStates.getOrCreateRegionStateNode(child);
-1605    node.setState(State.MERGED);
-1606    regionStates.deleteRegion(mother);
-1607    regionStates.deleteRegion(father);
-1608    regionStateStore.mergeRegions(child, mother, father, serverName);
-1609    if (shouldAssignFavoredNodes(child)) {
-1610      ((FavoredNodesPromoter)getBalancer()).
-1611          generateFavoredNodesForMergedRegion(child, mother, father);
-1612    }
-1613  }
-1614
-1615  /*
-1616   * Favored nodes should be applied only when the FavoredNodes balancer is configured and the
-1617   * region belongs to a non-system table.
-1618   */
-1619  private boolean shouldAssignFavoredNodes(RegionInfo region) {
-1620    return this.shouldAssignRegionsWithFavoredNodes &&
-1621        FavoredNodesManager.isFavoredNodeApplicable(region);
-1622  }
-1623
-1624  // ============================================================================================
-1625  //  Assign Queue (Assign/Balance)
-1626  // ============================================================================================
-1627  private final ArrayList<RegionStateNode> pendingAssignQueue = new ArrayList<RegionStateNode>();
-1628  private final ReentrantLock assignQueueLock = new ReentrantLock();
-1629  private final Condition assignQueueFullCond = assignQueueLock.newCondition();
-1630
-1631  /**
-1632   * Add the assign operation to the assignment queue.
-1633   * The pending assignment operation will be processed,
-1634   * and each region will be assigned to a server chosen by the balancer.
-1635   */
-1636  protected void queueAssign(final RegionStateNode regionNode) {
-1637    regionNode.getProcedureEvent().suspend();
-1638
-1639    // TODO: quick-start for meta and the other sys-tables?
-1640    assignQueueLock.lock();
-1641    try {
-1642      pendingAssignQueue.add(regionNode);
-1643      if (regionNode.isSystemTable() ||
-1644          pendingAssignQueue.size() == 1 ||
-1645          pendingAssignQueue.size() >= assignDispatchWaitQueueMaxSize) {
-1646
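The hunk cuts off inside queueAssign, but the split bookkeeping in markRegionAsSplit is complete: the parent is marked SPLIT (it stays registered until CatalogJanitor clears it) and both daughters become SPLITTING_NEW before the transition is persisted. A self-contained analog of that pattern; every name here is illustrative, not the HBase API:

    import java.util.HashMap;
    import java.util.Map;

    // Minimal analog of the split bookkeeping above: the parent is marked
    // SPLIT but stays registered (cleanup is a later, janitor-style step),
    // while both daughters enter SPLITTING_NEW.
    public class SplitBookkeepingSketch {
      enum RegionState { OPEN, SPLIT, SPLITTING_NEW }

      private final Map<String, RegionState> regionStates = new HashMap<>();

      void markRegionAsSplit(String parent, String daughterA, String daughterB) {
        regionStates.put(parent, RegionState.SPLIT);
        regionStates.put(daughterA, RegionState.SPLITTING_NEW);
        regionStates.put(daughterB, RegionState.SPLITTING_NEW);
        // A real implementation persists this transition (hbase:meta);
        // this sketch keeps it in memory only.
      }

      public static void main(String[] args) {
        SplitBookkeepingSketch s = new SplitBookkeepingSketch();
        s.markRegionAsSplit("parent", "daughterA", "daughterB");
        System.out.println(s.regionStates); // parent=SPLIT, daughters=SPLITTING_NEW
      }
    }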

    [06/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/SnapshotDescription.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/client/SnapshotDescription.html 
    b/apidocs/org/apache/hadoop/hbase/client/SnapshotDescription.html
    index 3c8462d..1f2e0d6 100644
    --- a/apidocs/org/apache/hadoop/hbase/client/SnapshotDescription.html
    +++ b/apidocs/org/apache/hadoop/hbase/client/SnapshotDescription.html
    @@ -97,7 +97,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">java.lang.Object
    +https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">java.lang.Object
     
     
     org.apache.hadoop.hbase.client.SnapshotDescription
    @@ -111,7 +111,7 @@ var activeTableTab = "activeTableTab";
     
     @InterfaceAudience.Public
     public class SnapshotDescription
    -extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
    +extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     The POJO equivalent of HBaseProtos.SnapshotDescription
     
     
    @@ -131,19 +131,19 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     Constructor and Description
     
     
    -SnapshotDescription(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname)
    +SnapshotDescription(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname)
     
     
    -SnapshotDescription(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname,
    -   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringtable)
    +SnapshotDescription(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname,
    +   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringtable)
     Deprecated.
     Use the version with the 
    TableName instance instead
     
     
     
     
    -SnapshotDescription(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname,
    -   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringtable,
    +SnapshotDescription(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname,
    +   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringtable,
    SnapshotTypetype)
     Deprecated.
     Use the version with the 
    TableName instance instead
    @@ -151,20 +151,20 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     
    -SnapshotDescription(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname,
    -   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringtable,
    +SnapshotDescription(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname,
    +   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringtable,
    SnapshotTypetype,
    -   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringowner)
    +   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringowner)
     Deprecated.
     Use the version with the 
    TableName instance instead
     
     
     
     
    -SnapshotDescription(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname,
    -   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringtable,
    +SnapshotDescription(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">Stringname,
    +   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in 
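Every deprecation note in this hunk points to the TableName-based overloads. A hedged sketch of the preferred construction; the exact overload set varies by HBase version, so the three-argument form below is an assumption:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    // Prefer the TableName-based constructor over the deprecated
    // String-table variants listed above.
    public class SnapshotDescriptionExample {
      public static void main(String[] args) {
        SnapshotDescription snap = new SnapshotDescription(
            "mySnapshot", TableName.valueOf("myTable"), SnapshotType.FLUSH);
        System.out.println(snap.getName());
      }
    }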

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
    index 49de9ff..d98b2a6 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
    @@ -101,684 +101,684 @@
 093      "SingleColumnValueExcludeFilter");
 094    filterHashMap.put("DependentColumnFilter", ParseConstants.FILTER_PACKAGE + "." +
 095      "DependentColumnFilter");
-096
-097    // Creates the operatorPrecedenceHashMap
-098    operatorPrecedenceHashMap = new HashMap<ByteBuffer, Integer>();
-099    operatorPrecedenceHashMap.put(ParseConstants.SKIP_BUFFER, 1);
-100    operatorPrecedenceHashMap.put(ParseConstants.WHILE_BUFFER, 1);
-101    operatorPrecedenceHashMap.put(ParseConstants.AND_BUFFER, 2);
-102    operatorPrecedenceHashMap.put(ParseConstants.OR_BUFFER, 3);
-103  }
-104
-105  /**
-106   * Parses the filterString and constructs a filter using it
-107   * <p>
-108   * @param filterString filter string given by the user
-109   * @return filter object we constructed
-110   */
-111  public Filter parseFilterString (String filterString)
-112    throws CharacterCodingException {
-113    return parseFilterString(Bytes.toBytes(filterString));
-114  }
-115
-116  /**
-117   * Parses the filterString and constructs a filter using it
-118   * <p>
-119   * @param filterStringAsByteArray filter string given by the user
-120   * @return filter object we constructed
-121   */
-122  public Filter parseFilterString (byte [] filterStringAsByteArray)
-123    throws CharacterCodingException {
-124    // stack for the operators and parenthesis
-125    Stack<ByteBuffer> operatorStack = new Stack<>();
-126    // stack for the filter objects
-127    Stack<Filter> filterStack = new Stack<>();
-128
-129    Filter filter = null;
-130    for (int i = 0; i < filterStringAsByteArray.length; i++) {
-131      if (filterStringAsByteArray[i] == ParseConstants.LPAREN) {
-132        // LPAREN found
-133        operatorStack.push(ParseConstants.LPAREN_BUFFER);
-134      } else if (filterStringAsByteArray[i] == ParseConstants.WHITESPACE ||
-135          filterStringAsByteArray[i] == ParseConstants.TAB) {
-136        // WHITESPACE or TAB found
-137        continue;
-138      } else if (checkForOr(filterStringAsByteArray, i)) {
-139        // OR found
-140        i += ParseConstants.OR_ARRAY.length - 1;
-141        reduce(operatorStack, filterStack, ParseConstants.OR_BUFFER);
-142        operatorStack.push(ParseConstants.OR_BUFFER);
-143      } else if (checkForAnd(filterStringAsByteArray, i)) {
-144        // AND found
-145        i += ParseConstants.AND_ARRAY.length - 1;
-146        reduce(operatorStack, filterStack, ParseConstants.AND_BUFFER);
-147        operatorStack.push(ParseConstants.AND_BUFFER);
-148      } else if (checkForSkip(filterStringAsByteArray, i)) {
-149        // SKIP found
-150        i += ParseConstants.SKIP_ARRAY.length - 1;
-151        reduce(operatorStack, filterStack, ParseConstants.SKIP_BUFFER);
-152        operatorStack.push(ParseConstants.SKIP_BUFFER);
-153      } else if (checkForWhile(filterStringAsByteArray, i)) {
-154        // WHILE found
-155        i += ParseConstants.WHILE_ARRAY.length - 1;
-156        reduce(operatorStack, filterStack, ParseConstants.WHILE_BUFFER);
-157        operatorStack.push(ParseConstants.WHILE_BUFFER);
-158      } else if (filterStringAsByteArray[i] == ParseConstants.RPAREN) {
-159        // RPAREN found
-160        if (operatorStack.empty()) {
-161          throw new IllegalArgumentException("Mismatched parenthesis");
-162        }
-163        ByteBuffer argumentOnTopOfStack = operatorStack.peek();
-164        if (argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER)) {
-165          operatorStack.pop();
-166          continue;
-167        }
-168        while (!(argumentOnTopOfStack.equals(ParseConstants.LPAREN_BUFFER))) {
-169          filterStack.push(popArguments(operatorStack, filterStack));
-170          if (operatorStack.empty()) {
-171            throw new IllegalArgumentException("Mismatched parenthesis");
-172          }
-173          argumentOnTopOfStack = operatorStack.pop();
-174        }
-175      } else {
-176        // SimpleFilterExpression found
-177        byte [] filterSimpleExpression = extractFilterSimpleExpression(filterStringAsByteArray, i);
-178        i += (filterSimpleExpression.length - 1);
-179        filter = parseSimpleFilterExpression(filterSimpleExpression);
-180        filterStack.push(filter);
-181      }
-182    }
-183
-184    // Finished parsing filterString
-185    while (!operatorStack.empty()) {
-186
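The truncated loop above is a classic operator-precedence parse: operators and parentheses go on one stack, constructed Filters on another, and reduce() pops while the incoming operator's precedence allows. A brief usage sketch of the API it implements; the filter expression itself is illustrative:

    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.filter.ParseFilter;

    // Turn a filter-language expression into a Filter object using the
    // parser whose source appears above.
    public class ParseFilterExample {
      public static void main(String[] args) throws Exception {
        ParseFilter parser = new ParseFilter();
        Filter f = parser.parseFilterString(
            "(RowFilter (=, 'binary:row1') AND KeyOnlyFilter ()) OR FirstKeyOnlyFilter ()");
        System.out.println(f);
      }
    }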
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
    index 802b925..a3e80ab 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
    @@ -73,229 +73,229 @@
     065import 
    java.util.concurrent.TimeoutException;
     066import 
    java.util.concurrent.atomic.AtomicBoolean;
     067import 
    java.util.concurrent.atomic.AtomicInteger;
    -068import 
    java.util.concurrent.atomic.AtomicLong;
    -069import 
    java.util.concurrent.atomic.LongAdder;
    -070import java.util.concurrent.locks.Lock;
    -071import 
    java.util.concurrent.locks.ReadWriteLock;
    -072import 
    java.util.concurrent.locks.ReentrantReadWriteLock;
    -073import java.util.function.Function;
    -074import 
    org.apache.hadoop.conf.Configuration;
    -075import org.apache.hadoop.fs.FileStatus;
    -076import org.apache.hadoop.fs.FileSystem;
    -077import 
    org.apache.hadoop.fs.LocatedFileStatus;
    -078import org.apache.hadoop.fs.Path;
    -079import org.apache.hadoop.hbase.Cell;
    -080import 
    org.apache.hadoop.hbase.CellBuilderType;
    -081import 
    org.apache.hadoop.hbase.CellComparator;
    -082import 
    org.apache.hadoop.hbase.CellComparatorImpl;
    -083import 
    org.apache.hadoop.hbase.CellScanner;
    -084import 
    org.apache.hadoop.hbase.CellUtil;
    -085import 
    org.apache.hadoop.hbase.CompareOperator;
    -086import 
    org.apache.hadoop.hbase.CompoundConfiguration;
    -087import 
    org.apache.hadoop.hbase.DoNotRetryIOException;
    -088import 
    org.apache.hadoop.hbase.DroppedSnapshotException;
    -089import 
    org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
    -090import 
    org.apache.hadoop.hbase.HConstants;
    -091import 
    org.apache.hadoop.hbase.HConstants.OperationStatusCode;
    -092import 
    org.apache.hadoop.hbase.HDFSBlocksDistribution;
    -093import 
    org.apache.hadoop.hbase.HRegionInfo;
    -094import 
    org.apache.hadoop.hbase.KeyValue;
    -095import 
    org.apache.hadoop.hbase.KeyValueUtil;
    -096import 
    org.apache.hadoop.hbase.NamespaceDescriptor;
    -097import 
    org.apache.hadoop.hbase.NotServingRegionException;
    -098import 
    org.apache.hadoop.hbase.PrivateCellUtil;
    -099import 
    org.apache.hadoop.hbase.RegionTooBusyException;
    -100import 
    org.apache.hadoop.hbase.TableName;
    -101import org.apache.hadoop.hbase.Tag;
    -102import org.apache.hadoop.hbase.TagUtil;
    -103import 
    org.apache.hadoop.hbase.UnknownScannerException;
    -104import 
    org.apache.hadoop.hbase.client.Append;
    -105import 
    org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    -106import 
    org.apache.hadoop.hbase.client.CompactionState;
    -107import 
    org.apache.hadoop.hbase.client.Delete;
    -108import 
    org.apache.hadoop.hbase.client.Durability;
    -109import 
    org.apache.hadoop.hbase.client.Get;
    -110import 
    org.apache.hadoop.hbase.client.Increment;
    -111import 
    org.apache.hadoop.hbase.client.IsolationLevel;
    -112import 
    org.apache.hadoop.hbase.client.Mutation;
    -113import 
    org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
    -114import 
    org.apache.hadoop.hbase.client.Put;
    -115import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -116import 
    org.apache.hadoop.hbase.client.RegionReplicaUtil;
    -117import 
    org.apache.hadoop.hbase.client.Result;
    -118import 
    org.apache.hadoop.hbase.client.RowMutations;
    -119import 
    org.apache.hadoop.hbase.client.Scan;
    -120import 
    org.apache.hadoop.hbase.client.TableDescriptor;
    -121import 
    org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    -122import 
    org.apache.hadoop.hbase.conf.ConfigurationManager;
    -123import 
    org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
    -124import 
    org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
    -125import 
    org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
    -126import 
    org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
    -127import 
    org.apache.hadoop.hbase.exceptions.TimeoutIOException;
    -128import 
    org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
    -129import 
    org.apache.hadoop.hbase.filter.ByteArrayComparable;
    -130import 
    org.apache.hadoop.hbase.filter.FilterWrapper;
    -131import 
    org.apache.hadoop.hbase.filter.IncompatibleFilterException;
    -132import 
    org.apache.hadoop.hbase.io.HFileLink;
    -133import 
    org.apache.hadoop.hbase.io.HeapSize;
    -134import 
    org.apache.hadoop.hbase.io.TimeRange;
    -135import 
    org.apache.hadoop.hbase.io.hfile.HFile;
    -136import 
    org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
    -137import 
    org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
    -138import 
    org.apache.hadoop.hbase.ipc.RpcCall;
    -139import 
    org.apache.hadoop.hbase.ipc.RpcServer;
    -140import 
    org.apache.hadoop.hbase.monitoring.MonitoredTask;
    -141import 
    org.apache.hadoop.hbase.monitoring.TaskMonitor;
    -142import 
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/security/token/TokenUtil.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/security/token/TokenUtil.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/security/token/TokenUtil.html
    index 8dcb015..052f33b 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/security/token/TokenUtil.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/security/token/TokenUtil.html
    @@ -34,21 +34,21 @@
     026import 
    com.google.protobuf.ServiceException;
     027
     028import 
    org.apache.hadoop.hbase.zookeeper.ZKWatcher;
    -029import 
    org.apache.yetus.audience.InterfaceAudience;
    -030import 
    org.apache.hadoop.conf.Configuration;
    -031import 
    org.apache.hadoop.hbase.HConstants;
    -032import 
    org.apache.hadoop.hbase.TableName;
    -033import 
    org.apache.hadoop.hbase.client.Connection;
    -034import 
    org.apache.hadoop.hbase.client.Table;
    -035import 
    org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
    -036import 
    org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
    -037import 
    org.apache.hadoop.hbase.security.User;
    -038import 
    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -039import 
    org.apache.hadoop.hbase.zookeeper.ZKClusterId;
    -040import org.apache.hadoop.io.Text;
    -041import 
    org.apache.hadoop.mapred.JobConf;
    -042import org.apache.hadoop.mapreduce.Job;
    -043import 
    org.apache.hadoop.security.token.Token;
    +029import 
    org.apache.hadoop.conf.Configuration;
    +030import 
    org.apache.hadoop.hbase.HConstants;
    +031import 
    org.apache.hadoop.hbase.TableName;
    +032import 
    org.apache.hadoop.hbase.client.Connection;
    +033import 
    org.apache.hadoop.hbase.client.Table;
    +034import 
    org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
    +035import 
    org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
    +036import 
    org.apache.hadoop.hbase.security.User;
    +037import 
    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    +038import 
    org.apache.hadoop.hbase.zookeeper.ZKClusterId;
    +039import org.apache.hadoop.io.Text;
    +040import 
    org.apache.hadoop.mapred.JobConf;
    +041import org.apache.hadoop.mapreduce.Job;
    +042import 
    org.apache.hadoop.security.token.Token;
    +043import 
    org.apache.yetus.audience.InterfaceAudience;
     044import 
    org.apache.zookeeper.KeeperException;
     045import org.slf4j.Logger;
     046import org.slf4j.LoggerFactory;
    @@ -61,271 +61,282 @@
     053  // This class is referenced indirectly 
    by User out in common; instances are created by reflection
     054  private static final Logger LOG = 
    LoggerFactory.getLogger(TokenUtil.class);
     055
-056  /**
-057   * Obtain and return an authentication token for the current user.
-058   * @param conn The HBase cluster connection
-059   * @throws IOException if a remote error or serialization problem occurs.
-060   * @return the authentication token instance
-061   */
-062  public static Token<AuthenticationTokenIdentifier> obtainToken(
-063      Connection conn) throws IOException {
-064    Table meta = null;
-065    try {
-066      meta = conn.getTable(TableName.META_TABLE_NAME);
-067      CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
-068      AuthenticationProtos.AuthenticationService.BlockingInterface service =
-069          AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
-070      AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null,
-071          AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
-072
-073      return toToken(response.getToken());
-074    } catch (ServiceException se) {
-075      throw ProtobufUtil.handleRemoteException(se);
-076    } finally {
-077      if (meta != null) {
-078        meta.close();
-079      }
-080    }
-081  }
-082
+056  // Set in TestTokenUtil via reflection
+057  private static ServiceException injectedException;
+058
+059  private static void injectFault() throws ServiceException {
+060    if (injectedException != null) {
+061      throw injectedException;
+062    }
+063  }
+064
+065  /**
+066   * Obtain and return an authentication token for the current user.
+067   * @param conn The HBase cluster connection
+068   * @throws IOException if a remote error or serialization problem occurs.
+069   * @return the authentication token instance
+070   */
+071  public static Token<AuthenticationTokenIdentifier> obtainToken(
+072      Connection conn) throws IOException {
+073    Table meta = null;
+074    try {
+075      injectFault();
+076
+077      meta = conn.getTable(TableName.META_TABLE_NAME);
+078      CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
+079      AuthenticationProtos.AuthenticationService.BlockingInterface service =
+080          AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
+081      AuthenticationProtos.GetAuthenticationTokenResponse
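The new injectFault() hook above lets TestTokenUtil force a ServiceException through the reflection-set injectedException field. For orientation, a sketch of the normal caller side of obtainToken(Connection); running it for real requires a secure cluster with Kerberos credentials, so treat it as illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
    import org.apache.hadoop.hbase.security.token.TokenUtil;
    import org.apache.hadoop.security.token.Token;

    // Obtain a delegation token for the current (Kerberos-authenticated) user.
    public class ObtainTokenExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          Token<AuthenticationTokenIdentifier> token = TokenUtil.obtainToken(conn);
          System.out.println("token service: " + token.getService());
        }
      }
    }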

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
     
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
    index 01a50f5..733e376 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureStateSerializer.html
    @@ -120,19 +120,19 @@
     
     
     protected void
    -MoveRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +UnassignProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -GCMergedRegionsProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +MoveRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -AssignProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +GCRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -GCRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +GCMergedRegionsProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    @@ -144,23 +144,23 @@
     
     
     protected void
    -UnassignProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +AssignProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -MoveRegionProcedure.serializeStateData(ProcedureStateSerializerserializer)
    +UnassignProcedure.serializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -GCMergedRegionsProcedure.serializeStateData(ProcedureStateSerializerserializer)
    +MoveRegionProcedure.serializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -AssignProcedure.serializeStateData(ProcedureStateSerializerserializer)
    +GCRegionProcedure.serializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -GCRegionProcedure.serializeStateData(ProcedureStateSerializerserializer)
    +GCMergedRegionsProcedure.serializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    @@ -172,7 +172,7 @@
     
     
     protected void
    -UnassignProcedure.serializeStateData(ProcedureStateSerializerserializer)
    +AssignProcedure.serializeStateData(ProcedureStateSerializerserializer)
     
     
     
    @@ -212,115 +212,115 @@
     
     
     protected void
    -CloneSnapshotProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +DeleteTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -DeleteTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +DisableTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -TruncateTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +DeleteNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -DeleteNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +CreateNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -ServerCrashProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +EnableTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -DisableTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +CreateTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -RecoverMetaProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +ModifyNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -CreateNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +AbstractStateMachineRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -AbstractStateMachineRegionProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +CloneSnapshotProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -EnableTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +ServerCrashProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -CreateTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +ModifyTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -ModifyNamespaceProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +RecoverMetaProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -ModifyTableProcedure.deserializeStateData(ProcedureStateSerializerserializer)
    +RestoreSnapshotProcedure.deserializeStateData(ProcedureStateSerializerserializer)
     
     
     protected void
    -RestoreSnapshotProcedure.deserializeStateData(ProcedureStateSerializerserializer)
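Each procedure in this index persists and restores its own state through the serializeStateData/deserializeStateData pair so it can resume after a master restart. A minimal analog of that contract, with DataOutput/DataInput standing in for ProcedureStateSerializer; all names are illustrative:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    // A procedure persists just enough state to resume where it left off.
    public class StatefulProcedureSketch {
      private int step;

      protected void serializeStateData(DataOutput out) throws IOException {
        out.writeInt(step); // persist the current step of the state machine
      }

      protected void deserializeStateData(DataInput in) throws IOException {
        step = in.readInt(); // resume from the persisted step
      }
    }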
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
    index 7e70c36..1caaf50 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
    @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public static class AssignmentManager.RegionInTransitionStat
    +public static class AssignmentManager.RegionInTransitionStat
     extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     
     
    @@ -266,7 +266,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     ritThreshold
    -private finalint ritThreshold
    +private finalint ritThreshold
     
     
     
    @@ -275,7 +275,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     ritsOverThreshold
    -privatehttp://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html?is-external=true;
     title="class or interface in java.util">HashMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String,RegionState ritsOverThreshold
    +privatehttp://docs.oracle.com/javase/8/docs/api/java/util/HashMap.html?is-external=true;
     title="class or interface in java.util">HashMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String,RegionState ritsOverThreshold
     
     
     
    @@ -284,7 +284,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     statTimestamp
    -privatelong statTimestamp
    +privatelong statTimestamp
     
     
     
    @@ -293,7 +293,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     oldestRITTime
    -privatelong oldestRITTime
    +privatelong oldestRITTime
     
     
     
    @@ -302,7 +302,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     totalRITsTwiceThreshold
    -privateint totalRITsTwiceThreshold
    +privateint totalRITsTwiceThreshold
     
     
     
    @@ -311,7 +311,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     totalRITs
    -privateint totalRITs
    +privateint totalRITs
     
     
     
    @@ -328,7 +328,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     RegionInTransitionStat
    -publicRegionInTransitionStat(org.apache.hadoop.conf.Configurationconf)
    +publicRegionInTransitionStat(org.apache.hadoop.conf.Configurationconf)
     
     
     
    @@ -345,7 +345,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     getRITThreshold
    -publicintgetRITThreshold()
    +publicintgetRITThreshold()
     
     
     
    @@ -354,7 +354,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     getTimestamp
    -publiclonggetTimestamp()
    +publiclonggetTimestamp()
     
     
     
    @@ -363,7 +363,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     getTotalRITs
    -publicintgetTotalRITs()
    +publicintgetTotalRITs()
     
     
     
    @@ -372,7 +372,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     getOldestRITTime
    -publiclonggetOldestRITTime()
    +publiclonggetOldestRITTime()
     
     
     
    @@ -381,7 +381,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     getTotalRITsOverThreshold
    -publicintgetTotalRITsOverThreshold()
    +publicintgetTotalRITsOverThreshold()
     
     
     
    @@ -390,7 +390,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     hasRegionsTwiceOverThreshold
    -publicbooleanhasRegionsTwiceOverThreshold()
    +publicbooleanhasRegionsTwiceOverThreshold()
     
     
     
    @@ -399,7 +399,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     hasRegionsOverThreshold
    -publicbooleanhasRegionsOverThreshold()
    +publicbooleanhasRegionsOverThreshold()
     
     
     
    @@ -408,7 +408,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     getRegionOverThreshold
    -publichttp://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
     title="class or interface in java.util">CollectionRegionStategetRegionOverThreshold()
    +publichttp://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
     title="class or interface in java.util">CollectionRegionStategetRegionOverThreshold()
     
     
     
    @@ -417,7 +417,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     isRegionOverThreshold
    -publicbooleanisRegionOverThreshold(RegionInforegionInfo)
    +publicbooleanisRegionOverThreshold(RegionInforegionInfo)
     
     
     
    @@ -426,7 +426,7 @@ extends 
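RegionInTransitionStat above tracks counts and ages of regions in transition against ritThreshold. A self-contained analog of the over-threshold bookkeeping; the field and method names here are illustrative, not the HBase implementation:

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.stream.Collectors;

    // Record when each region entered transition and report the ones that
    // have been in transition longer than the configured threshold.
    public class RitStatSketch {
      private final long ritThresholdMs;
      private final Map<String, Long> transitionStart = new HashMap<>();

      RitStatSketch(long ritThresholdMs) {
        this.ritThresholdMs = ritThresholdMs;
      }

      void regionStartedTransition(String region) {
        transitionStart.put(region, System.currentTimeMillis());
      }

      Collection<String> regionsOverThreshold() {
        long now = System.currentTimeMillis();
        return transitionStart.entrySet().stream()
            .filter(e -> now - e.getValue() > ritThresholdMs)
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());
      }

      public static void main(String[] args) throws InterruptedException {
        RitStatSketch stat = new RitStatSketch(50);
        stat.regionStartedTransition("region-a");
        Thread.sleep(100);
        System.out.println(stat.regionsOverThreshold()); // [region-a]
      }
    }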

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html 
    b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    index 7244ce2..5f7ce59 100644
    --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    +++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
    @@ -114,15 +114,15 @@
     
     
     private PriorityFunction
    -SimpleRpcScheduler.priority
    +RpcExecutor.priority
     
     
     private PriorityFunction
    -RpcExecutor.priority
    +RpcExecutor.CallPriorityComparator.priority
     
     
     private PriorityFunction
    -RpcExecutor.CallPriorityComparator.priority
    +SimpleRpcScheduler.priority
     
     
     
    @@ -319,7 +319,7 @@
     
     
     RpcScheduler
    -RpcSchedulerFactory.create(org.apache.hadoop.conf.Configurationconf,
    +FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configurationconf,
       PriorityFunctionpriority)
     Deprecated.
     
    @@ -333,18 +333,16 @@
     
     
     RpcScheduler
    -FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configurationconf,
    +RpcSchedulerFactory.create(org.apache.hadoop.conf.Configurationconf,
       PriorityFunctionpriority)
     Deprecated.
     
     
     
     RpcScheduler
    -RpcSchedulerFactory.create(org.apache.hadoop.conf.Configurationconf,
    +FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configurationconf,
       PriorityFunctionpriority,
    -  Abortableserver)
-Constructs an RpcScheduler.
    -
    +  Abortableserver)
     
     
     RpcScheduler
    @@ -354,9 +352,11 @@
     
     
     RpcScheduler
    -FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configurationconf,
    +RpcSchedulerFactory.create(org.apache.hadoop.conf.Configurationconf,
       PriorityFunctionpriority,
    -  Abortableserver)
    +  Abortableserver)
+Constructs an RpcScheduler.
    +
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html 
    b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    index 4a25f5c..6d59fb7 100644
    --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    +++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
    @@ -123,14 +123,14 @@
     
     
     void
    -ServerCall.setCallBack(RpcCallbackcallback)
    -
    -
    -void
     RpcCallContext.setCallBack(RpcCallbackcallback)
     Sets a callback which has to be executed at the end of this 
    RPC call.
     
     
    +
    +void
    +ServerCall.setCallBack(RpcCallbackcallback)
    +
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html 
    b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    index fab4d7a..baa4e5e 100644
    --- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    +++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
    @@ -131,32 +131,24 @@
     
     
     
    -private RpcControllerFactory
    -ConnectionImplementation.rpcControllerFactory
    -
    -
    -protected RpcControllerFactory
    -ClientScanner.rpcControllerFactory
    -
    -
     protected RpcControllerFactory
     RegionAdminServiceCallable.rpcControllerFactory
     
     
    -(package private) RpcControllerFactory
    -AsyncConnectionImpl.rpcControllerFactory
    +private RpcControllerFactory
    +ConnectionImplementation.rpcControllerFactory
     
     
    -private RpcControllerFactory
    -HTable.rpcControllerFactory
    +(package private) RpcControllerFactory
    +AsyncConnectionImpl.rpcControllerFactory
     
     
     private RpcControllerFactory
    -HBaseAdmin.rpcControllerFactory
    +HTable.rpcControllerFactory
     
     
     private RpcControllerFactory
    -SecureBulkLoadClient.rpcControllerFactory
    +RpcRetryingCallerWithReadReplicas.rpcControllerFactory
     
     
     protected RpcControllerFactory
    @@ -164,7 +156,15 @@
     
     
     private RpcControllerFactory
    -RpcRetryingCallerWithReadReplicas.rpcControllerFactory
    +HBaseAdmin.rpcControllerFactory
    +
    +
    +private RpcControllerFactory
    +SecureBulkLoadClient.rpcControllerFactory
    +
    +
    +protected RpcControllerFactory
    +ClientScanner.rpcControllerFactory
     
     
     (package private) RpcControllerFactory
    @@ -181,11 +181,11 @@
     
     
     RpcControllerFactory
    -ConnectionImplementation.getRpcControllerFactory()
    +ClusterConnection.getRpcControllerFactory()
     
     
     RpcControllerFactory
    -ClusterConnection.getRpcControllerFactory()
    +ConnectionImplementation.getRpcControllerFactory()
     
     
     private RpcControllerFactory
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.Handler.html
    --
    diff --git 
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/nio/class-use/ByteBuff.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/nio/class-use/ByteBuff.html 
    b/devapidocs/org/apache/hadoop/hbase/nio/class-use/ByteBuff.html
    index 74a0687..158a6aa 100644
    --- a/devapidocs/org/apache/hadoop/hbase/nio/class-use/ByteBuff.html
    +++ b/devapidocs/org/apache/hadoop/hbase/nio/class-use/ByteBuff.html
    @@ -161,23 +161,23 @@
     
     
     Codec.Decoder
    -CellCodec.getDecoder(ByteBuffbuf)
    +KeyValueCodec.getDecoder(ByteBuffbuf)
     
     
     Codec.Decoder
    -Codec.getDecoder(ByteBuffbuf)
    +CellCodecWithTags.getDecoder(ByteBuffbuf)
     
     
     Codec.Decoder
    -KeyValueCodec.getDecoder(ByteBuffbuf)
    +Codec.getDecoder(ByteBuffbuf)
     
     
     Codec.Decoder
    -KeyValueCodecWithTags.getDecoder(ByteBuffbuf)
    +CellCodec.getDecoder(ByteBuffbuf)
     
     
     Codec.Decoder
    -CellCodecWithTags.getDecoder(ByteBuffbuf)
    +KeyValueCodecWithTags.getDecoder(ByteBuffbuf)
     
     
     Codec.Decoder
    @@ -259,20 +259,20 @@
     
     
     
    -private ByteBuff
    -RowIndexSeekerV1.currentBuffer
    +protected ByteBuff
    +BufferedDataBlockEncoder.SeekerState.currentBuffer
     
     
     protected ByteBuff
    -RowIndexSeekerV1.SeekerState.currentBuffer
    +BufferedDataBlockEncoder.BufferedEncodedSeeker.currentBuffer
     
     
    -protected ByteBuff
    -BufferedDataBlockEncoder.SeekerState.currentBuffer
    +private ByteBuff
    +RowIndexSeekerV1.currentBuffer
     
     
     protected ByteBuff
    -BufferedDataBlockEncoder.BufferedEncodedSeeker.currentBuffer
    +RowIndexSeekerV1.SeekerState.currentBuffer
     
     
     private ByteBuff
    @@ -295,23 +295,23 @@
     
     
     Cell
    -RowIndexCodecV1.getFirstKeyCellInBlock(ByteBuffblock)
    +CopyKeyDataBlockEncoder.getFirstKeyCellInBlock(ByteBuffblock)
     
     
     Cell
    -CopyKeyDataBlockEncoder.getFirstKeyCellInBlock(ByteBuffblock)
    +PrefixKeyDeltaEncoder.getFirstKeyCellInBlock(ByteBuffblock)
     
     
     Cell
    -DiffKeyDeltaEncoder.getFirstKeyCellInBlock(ByteBuffblock)
    +FastDiffDeltaEncoder.getFirstKeyCellInBlock(ByteBuffblock)
     
     
     Cell
    -FastDiffDeltaEncoder.getFirstKeyCellInBlock(ByteBuffblock)
    +DiffKeyDeltaEncoder.getFirstKeyCellInBlock(ByteBuffblock)
     
     
     Cell
    -PrefixKeyDeltaEncoder.getFirstKeyCellInBlock(ByteBuffblock)
    +RowIndexCodecV1.getFirstKeyCellInBlock(ByteBuffblock)
     
     
     void
    @@ -338,11 +338,11 @@
     
     
     void
    -RowIndexSeekerV1.setCurrentBuffer(ByteBuffbuffer)
    +BufferedDataBlockEncoder.BufferedEncodedSeeker.setCurrentBuffer(ByteBuffbuffer)
     
     
     void
    -BufferedDataBlockEncoder.BufferedEncodedSeeker.setCurrentBuffer(ByteBuffbuffer)
    +RowIndexSeekerV1.setCurrentBuffer(ByteBuffbuffer)
     
     
     
    @@ -498,21 +498,21 @@
     
     
     void
    -ByteBufferIOEngine.write(ByteBuffsrcBuffer,
    - longoffset)
    -
    -
    -void
     FileIOEngine.write(ByteBuffsrcBuffer,
      longoffset)
     
    -
    +
     void
     IOEngine.write(ByteBuffsrcBuffer,
      longoffset)
     Transfers the data from the given MultiByteBuffer to 
    IOEngine
     
     
    +
    +void
    +ByteBufferIOEngine.write(ByteBuffsrcBuffer,
    + longoffset)
    +
     
     void
     FileMmapEngine.write(ByteBuffsrcBuffer,
    @@ -812,6 +812,15 @@
      intindex)
     
     
    +MultiByteBuff
    +MultiByteBuff.put(intoffset,
    +   ByteBuffsrc,
    +   intsrcOffset,
    +   intlength)
    +Copies from a src MBB to this MBB.
    +
    +
    +
     abstract ByteBuff
     ByteBuff.put(intoffset,
    ByteBuffsrc,
    @@ -820,22 +829,13 @@
     Copies the contents from the src ByteBuff to this 
    ByteBuff.
     
     
    -
    +
     SingleByteBuff
     SingleByteBuff.put(intoffset,
    ByteBuffsrc,
    intsrcOffset,
    intlength)
     
    -
    -MultiByteBuff
    -MultiByteBuff.put(intoffset,
    -   ByteBuffsrc,
    -   intsrcOffset,
    -   intlength)
    -Copies from a src MBB to this MBB.
    -
    -
     
     static int
     ByteBuff.readCompressedInt(ByteBuffbuf)
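MultiByteBuff.put(offset, src, srcOffset, length) above copies a range between buffers at absolute positions. The same contract sketched with plain java.nio.ByteBuffer, since ByteBuff is an HBase-internal type:

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    // Absolute-position range copy: neither buffer's position/limit changes.
    public class PutRangeExample {
      public static void main(String[] args) {
        ByteBuffer src = ByteBuffer.wrap(new byte[] {1, 2, 3, 4, 5});
        ByteBuffer dst = ByteBuffer.allocate(8);
        // copy src[1..3] into dst starting at offset 2
        for (int i = 0; i < 3; i++) {
          dst.put(2 + i, src.get(1 + i));
        }
        System.out.println(Arrays.toString(dst.array())); // [0, 0, 2, 3, 4, 0, 0, 0]
      }
    }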
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/package-tree.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html 
    b/devapidocs/org/apache/hadoop/hbase/package-tree.html
    index 36170ca..7f95c2c 100644
    --- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
    +++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
    @@ -445,20 +445,20 @@
     
     java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
     title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
     title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
     title="class or interface in java.io">Serializable)
     
    -org.apache.hadoop.hbase.KeyValue.Type
    -org.apache.hadoop.hbase.Size.Unit
    +org.apache.hadoop.hbase.CompareOperator
     org.apache.hadoop.hbase.CellBuilderType
    -org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
    +org.apache.hadoop.hbase.ProcedureState
     org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
    -org.apache.hadoop.hbase.MetaTableAccessor.QueryType
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    index bd8ccff..29b9507 100644
    --- a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    +++ b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
    @@ -132,14 +132,14 @@
     
     
     http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListProcedure?
    -HMaster.getProcedures()
    -
    -
    -http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListProcedure?
     MasterServices.getProcedures()
     Get procedures
     
     
    +
    +http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
     title="class or interface in java.util">ListProcedure?
    +HMaster.getProcedures()
    +
     
     
     
    @@ -920,44 +920,44 @@
     
     
     
    +protected Procedure
    +SimpleProcedureScheduler.dequeue()
    +
    +
     protected abstract Procedure
     AbstractProcedureScheduler.dequeue()
     Fetch one Procedure from the queue
      NOTE: this method is called with the sched lock held.
     
     
    -
    -protected Procedure
    -SimpleProcedureScheduler.dequeue()
    -
     
    -protected ProcedureTEnvironment[]
    -Procedure.doExecute(TEnvironmentenv)
    -Internal method called by the ProcedureExecutor that starts 
    the user-level code execute().
    -
    -
    -
     protected Procedure[]
     SequentialProcedure.doExecute(TEnvironmentenv)
     
    -
    -protected Procedure[]
    -StateMachineProcedure.execute(TEnvironmentenv)
    -
     
    -protected ProcedureTEnvironment[]
    -ProcedureInMemoryChore.execute(TEnvironmentenv)
    +protected ProcedureTEnvironment[]
    +Procedure.doExecute(TEnvironmentenv)
    +Internal method called by the ProcedureExecutor that starts 
    the user-level code execute().
    +
     
     
     protected ProcedureTEnvironment[]
     ProcedureExecutor.FailedProcedure.execute(TEnvironmentenv)
     
     
    +protected Procedure[]
    +StateMachineProcedure.execute(TEnvironmentenv)
    +
    +
     protected abstract ProcedureTEnvironment[]
     Procedure.execute(TEnvironmentenv)
     The main code of the procedure.
     
     
    +
    +protected ProcedureTEnvironment[]
    +ProcedureInMemoryChore.execute(TEnvironmentenv)
    +
     
     Procedure?
     LockedResource.getExclusiveLockOwnerProcedure()
    @@ -1115,13 +1115,13 @@
     
     
     void
    -ProcedureScheduler.completionCleanup(Procedureproc)
    -The procedure in execution completed.
    -
    +SimpleProcedureScheduler.completionCleanup(Procedureproc)
     
     
     void
    -SimpleProcedureScheduler.completionCleanup(Procedureproc)
    +ProcedureScheduler.completionCleanup(Procedureproc)
    +The procedure in execution completed.
    +
     
     
     static 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure
    @@ -1135,17 +1135,17 @@
      Procedureprocedure)
     
     
    +protected void
    +SimpleProcedureScheduler.enqueue(Procedureprocedure,
    +   booleanaddFront)
    +
    +
     protected abstract void
     AbstractProcedureScheduler.enqueue(Procedureprocedure,
    booleanaddFront)
     Add the procedure to the queue.
     
     
    -
    -protected void
    -SimpleProcedureScheduler.enqueue(Procedureprocedure,
    -   booleanaddFront)
    -
     
     private void
     ProcedureExecutor.execCompletionCleanup(Procedureproc)
    @@ -1327,13 +1327,13 @@
     
     
     void
    -ProcedureScheduler.yield(Procedureproc)
    -The procedure can't run at the moment.
    -
    +SimpleProcedureScheduler.yield(Procedureproc)
     
     
     void
    -SimpleProcedureScheduler.yield(Procedureproc)
    +ProcedureScheduler.yield(Procedureproc)
    +The procedure can't run at the moment.
    +
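The rows above pair each ProcedureScheduler contract (enqueue, dequeue, yield, completionCleanup) with SimpleProcedureScheduler's implementation. A self-contained analog of the enqueue/dequeue pair, assuming only that addFront means head-of-queue placement; names are illustrative:

    import java.util.ArrayDeque;
    import java.util.Deque;

    // A FIFO of runnable procedures where addFront pushes to the head,
    // e.g. for procedures that must run before the backlog.
    public class SimpleSchedulerSketch<P> {
      private final Deque<P> runnables = new ArrayDeque<>();

      protected void enqueue(P procedure, boolean addFront) {
        if (addFront) {
          runnables.addFirst(procedure);
        } else {
          runnables.addLast(procedure);
        }
      }

      protected P dequeue() {
        return runnables.pollFirst(); // null when the queue is empty
      }

      public static void main(String[] args) {
        SimpleSchedulerSketch<String> s = new SimpleSchedulerSketch<>();
        s.enqueue("proc-1", false);
        s.enqueue("urgent", true);
        System.out.println(s.dequeue()); // urgent
      }
    }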
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureEvent.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureEvent.html 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureEvent.html
    index 5f35947..a9a3870 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureEvent.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureEvent.html
    @@ -141,11 +141,11 @@
     
     
     ProcedureEvent?
    -HMaster.getInitializedEvent()
    +MasterServices.getInitializedEvent()
     
     
     ProcedureEvent?
    -MasterServices.getInitializedEvent()
    +HMaster.getInitializedEvent()
     
     
     ProcedureEvent?
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
     
    b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
    index 8c7413e..9c6d034 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureExecutor.html
    +++ 
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
    index 2939a56..681e263 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/SplitLogManager.ResubmitDirective.html
    @@ -61,602 +61,608 @@
     053import 
    org.apache.hadoop.hbase.monitoring.TaskMonitor;
     054import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
     055import 
    org.apache.hadoop.hbase.util.FSUtils;
    -056import 
    org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
    -057import 
    org.apache.yetus.audience.InterfaceAudience;
    -058import org.slf4j.Logger;
    -059import org.slf4j.LoggerFactory;
    -060import 
    org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
    -061
    -062/**
    -063 * Distributes the task of log splitting 
    to the available region servers.
    -064 * Coordination happens via coordination 
    engine. For every log file that has to be split a
    -065 * task is created. SplitLogWorkers race 
    to grab a task.
    -066 *
    -067 * pSplitLogManager monitors the 
    tasks that it creates using the
    -068 * timeoutMonitor thread. If a task's 
    progress is slow then
    -069 * {@link 
    SplitLogManagerCoordination#checkTasks} will take away the
    -070 * task from the owner {@link 
    org.apache.hadoop.hbase.regionserver.SplitLogWorker}
    -071 * and the task will be up for grabs 
    again. When the task is done then it is
    -072 * deleted by SplitLogManager.
    -073 *
    -074 * pClients call {@link 
    #splitLogDistributed(Path)} to split a region server's
    -075 * log files. The caller thread waits in 
    this method until all the log files
    -076 * have been split.
    -077 *
    -078 * pAll the coordination calls 
    made by this class are asynchronous. This is mainly
    -079 * to help reduce response time seen by 
    the callers.
    -080 *
    -081 * pThere is race in this design 
    between the SplitLogManager and the
    -082 * SplitLogWorker. SplitLogManager might 
    re-queue a task that has in reality
    -083 * already been completed by a 
    SplitLogWorker. We rely on the idempotency of
    -084 * the log splitting task for 
    correctness.
    -085 *
-086 * <p>It is also assumed that every 
    log splitting task is unique and once
    -087 * completed (either with success or with 
error) it will not be submitted 
    -088 * again. If a task is resubmitted then 
    there is a risk that old "delete task"
    -089 * can delete the re-submission.
    -090 */
    -091@InterfaceAudience.Private
    -092public class SplitLogManager {
    -093  private static final Logger LOG = 
    LoggerFactory.getLogger(SplitLogManager.class);
    -094
    -095  private final MasterServices server;
    -096
    -097  private final Configuration conf;
    -098  private final ChoreService 
    choreService;
    -099
    -100  public static final int 
    DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min
    -101
    -102  private long unassignedTimeout;
    -103  private long lastTaskCreateTime = 
    Long.MAX_VALUE;
    -104
    -105  @VisibleForTesting
-106  final ConcurrentMap<String, Task> tasks = new ConcurrentHashMap<>();
    -107  private TimeoutMonitor 
    timeoutMonitor;
    -108
-109  private volatile Set<ServerName> deadWorkers = null;
    -110  private final Object deadWorkersLock = 
    new Object();
    -111
    -112  /**
    -113   * Its OK to construct this object even 
    when region-servers are not online. It does lookup the
    -114   * orphan tasks in coordination engine 
    but it doesn't block waiting for them to be done.
    -115   * @param master the master services
    -116   * @param conf the HBase 
    configuration
    -117   * @throws IOException
    -118   */
    -119  public SplitLogManager(MasterServices 
    master, Configuration conf)
    -120  throws IOException {
    -121this.server = master;
    -122this.conf = conf;
    -123this.choreService = new 
    ChoreService(master.getServerName() + "_splitLogManager_");
    -124if 
    (server.getCoordinatedStateManager() != null) {
    -125  SplitLogManagerCoordination 
    coordination = getSplitLogManagerCoordination();
-126  Set<String> failedDeletions = Collections.synchronizedSet(new HashSet<String>());
    -127  SplitLogManagerDetails details = 
    new SplitLogManagerDetails(tasks, master, failedDeletions);
    -128  coordination.setDetails(details);
    -129  coordination.init();
    -130}
    -131this.unassignedTimeout =
    -132
    conf.getInt("hbase.splitlog.manager.unassigned.timeout", 
    DEFAULT_UNASSIGNED_TIMEOUT);
    -133this.timeoutMonitor =
    -134new 
    TimeoutMonitor(conf.getInt("hbase.splitlog.manager.timeoutmonitor.period", 
    1000),
    -135master);
    -136
    choreService.scheduleChore(timeoutMonitor);
    -137  }
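
The class comment above already states the usage contract: a caller on the
master blocks in splitLogDistributed(Path) while SplitLogWorkers race to grab
the per-file tasks. A minimal sketch of such a caller, assuming masterServices,
conf and walDir are already in scope (illustrative only, not taken from this
diff):

    SplitLogManager slm = new SplitLogManager(masterServices, conf);
    // Blocks until every WAL file under walDir has been split; the returned
    // long is the total size of the log files processed. Throws IOException.
    long splitBytes = slm.splitLogDistributed(walDir);
    LOG.info("Finished splitting " + splitBytes + " bytes of WAL data");
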
    -138
    -139  private SplitLogManagerCoordination 
    getSplitLogManagerCoordination() {
    -140return 
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
    index b05cc00..c352c2f 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
    @@ -271,555 +271,561 @@
     263return Flow.HAS_MORE_STATE;
     264  }
     265
    -266  @Override
    -267  protected void rollbackState(final 
    MasterProcedureEnv env, final SplitTableRegionState state)
    -268  throws IOException, 
    InterruptedException {
    -269if (isTraceEnabled()) {
    -270  LOG.trace(this + " rollback state=" 
    + state);
    -271}
    -272
    -273try {
    -274  switch (state) {
    -275  case 
    SPLIT_TABLE_REGION_POST_OPERATION:
    -276  case 
    SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
    -277  case 
    SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
    -278  case 
    SPLIT_TABLE_REGION_UPDATE_META:
    -279// PONR
    -280throw new 
    UnsupportedOperationException(this + " unhandled state=" + state);
    -281  case 
    SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
    -282break;
    -283  case 
    SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
    -284// Doing nothing, as re-open 
    parent region would clean up daughter region directories.
    -285break;
    -286  case 
    SPLIT_TABLE_REGION_CLOSE_PARENT_REGION:
    -287openParentRegion(env);
    +266  /**
    +267   * To rollback {@link 
    SplitTableRegionProcedure}, an AssignProcedure is asynchronously
    +268   * submitted for parent region to be 
    split (rollback doesn't wait on the completion of the
+269   * AssignProcedure). This can be 
    improved by changing rollback() to support sub-procedures.
    +270   * See HBASE-19851 for details.
    +271   */
    +272  @Override
    +273  protected void rollbackState(final 
    MasterProcedureEnv env, final SplitTableRegionState state)
    +274  throws IOException, 
    InterruptedException {
    +275if (isTraceEnabled()) {
    +276  LOG.trace(this + " rollback state=" 
    + state);
    +277}
    +278
    +279try {
    +280  switch (state) {
    +281  case 
    SPLIT_TABLE_REGION_POST_OPERATION:
    +282  case 
    SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
    +283  case 
    SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
    +284  case 
    SPLIT_TABLE_REGION_UPDATE_META:
    +285// PONR
    +286throw new 
    UnsupportedOperationException(this + " unhandled state=" + state);
    +287  case 
    SPLIT_TABLE_REGION_PRE_OPERATION_BEFORE_META:
     288break;
    -289  case 
    SPLIT_TABLE_REGION_PRE_OPERATION:
    -290postRollBackSplitRegion(env);
    +289  case 
    SPLIT_TABLE_REGION_CREATE_DAUGHTER_REGIONS:
    +290// Doing nothing, as re-open 
    parent region would clean up daughter region directories.
     291break;
    -292  case SPLIT_TABLE_REGION_PREPARE:
    -293break; // nothing to do
    -294  default:
    -295throw new 
    UnsupportedOperationException(this + " unhandled state=" + state);
    -296  }
    -297} catch (IOException e) {
    -298  // This will be retried. Unless 
    there is a bug in the code,
    -299  // this should be just a "temporary 
    error" (e.g. network down)
    -300  LOG.warn("pid=" + getProcId() + " 
    failed rollback attempt step " + state +
    -301  " for splitting the region "
    -302+ 
    getParentRegion().getEncodedName() + " in table " + getTableName(), e);
    -303  throw e;
    -304}
    -305  }
    -306
    -307  /*
    -308   * Check whether we are in the state 
    that can be rollback
    -309   */
    -310  @Override
    -311  protected boolean 
    isRollbackSupported(final SplitTableRegionState state) {
    -312switch (state) {
    -313  case 
    SPLIT_TABLE_REGION_POST_OPERATION:
    -314  case 
    SPLIT_TABLE_REGION_OPEN_CHILD_REGIONS:
    -315  case 
    SPLIT_TABLE_REGION_PRE_OPERATION_AFTER_META:
    -316  case 
    SPLIT_TABLE_REGION_UPDATE_META:
    -317// It is not safe to rollback if 
    we reach to these states.
    -318return false;
    -319  default:
    -320break;
    -321}
    -322return true;
    -323  }
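
isRollbackSupported above encodes the point of no return (PONR): once
SPLIT_TABLE_REGION_UPDATE_META or any later state has run, rollback is refused,
and rollbackState throws UnsupportedOperationException if asked anyway. The
same guard shape in isolation, with invented names, purely as a sketch:

    // Hypothetical state machine, not HBase API: refuse rollback past the PONR.
    enum Step { PREPARE, BEFORE_META, UPDATE_META, OPEN_CHILDREN, DONE }

    static boolean rollbackSupported(Step step) {
      switch (step) {
        case UPDATE_META:     // hbase:meta already rewritten: point of no return
        case OPEN_CHILDREN:
        case DONE:
          return false;       // recovery must roll forward from here
        default:
          return true;        // earlier steps only touched undoable state
      }
    }
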
    -324
    -325  @Override
    -326  protected SplitTableRegionState 
    getState(final int stateId) {
    -327return 
    SplitTableRegionState.forNumber(stateId);
    -328  }
    -329
    -330  @Override
    -331  protected int getStateId(final 
    SplitTableRegionState state) {
    -332return state.getNumber();
    -333  }
    -334
    -335  @Override
    -336  protected SplitTableRegionState 
    getInitialState() {
    -337return 
    SplitTableRegionState.SPLIT_TABLE_REGION_PREPARE;
    -338  }
    -339
    -340  @Override
    -341  protected void 
    serializeStateData(ProcedureStateSerializer serializer)
    -342  throws IOException {
    -343
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.html
    index 2984d1b..fb6bc1b 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.html
    @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public class TestCloneSnapshotFromClient
    +public class TestCloneSnapshotFromClient
     extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     Test clone snapshots from the client
     
    @@ -139,46 +139,50 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     admin
     
     
    +static HBaseClassTestRule
    +CLASS_RULE
    +
    +
     protected byte[]
     emptySnapshot
     
    -
    +
     protected byte[]
     FAMILY
     
    -
    +
     private static org.slf4j.Logger
     LOG
     
    -
    +
     org.junit.rules.TestName
     name
     
    -
    +
     protected int
     snapshot0Rows
     
    -
    +
     protected int
     snapshot1Rows
     
    -
    +
     protected byte[]
     snapshotName0
     
    -
    +
     protected byte[]
     snapshotName1
     
    -
    +
     protected byte[]
     snapshotName2
     
    -
    +
     protected 
    org.apache.hadoop.hbase.TableName
     tableName
     
    -
    +
     protected static HBaseTestingUtility
     TEST_UTIL
     
    @@ -309,13 +313,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     Field Detail
    +
    +
    +
    +
    +
    +CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
    +
    +
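
The new CLASS_RULE member is the HBaseClassTestRule that HBase test classes
declare as a JUnit @ClassRule; a minimal sketch of the usual declaration (the
forClass factory is the standard pattern, shown from memory rather than from
this page):

    import org.junit.ClassRule;
    import org.apache.hadoop.hbase.HBaseClassTestRule;

    public class TestCloneSnapshotFromClient {
      // Enforces class-level test policy (e.g. the suite timeout) for this class.
      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(TestCloneSnapshotFromClient.class);
    }
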
     
     
     
     
     
     LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
     
     
     
    @@ -324,7 +337,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     TEST_UTIL
-protected static final HBaseTestingUtility TEST_UTIL
+protected static final HBaseTestingUtility TEST_UTIL
     
     
     
    @@ -333,7 +346,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     FAMILY
-protected final byte[] FAMILY
+protected final byte[] FAMILY
     
     
     
    @@ -342,7 +355,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     emptySnapshot
-protected byte[] emptySnapshot
+protected byte[] emptySnapshot
     
     
     
    @@ -351,7 +364,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     snapshotName0
-protected byte[] snapshotName0
+protected byte[] snapshotName0
     
     
     
    @@ -360,7 +373,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     snapshotName1
-protected byte[] snapshotName1
+protected byte[] snapshotName1
     
     
     
    @@ -369,7 +382,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     snapshotName2
-protected byte[] snapshotName2
+protected byte[] snapshotName2
     
     
     
    @@ -378,7 +391,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     tableName
-protected org.apache.hadoop.hbase.TableName tableName
+protected org.apache.hadoop.hbase.TableName tableName
     
     
     
    @@ -387,7 +400,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     snapshot0Rows
-protected int snapshot0Rows
+protected int snapshot0Rows
     
     
     
    @@ -396,7 +409,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     snapshot1Rows
-protected int snapshot1Rows
+protected int snapshot1Rows
     
     
     
    @@ -405,7 +418,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     admin
-protected org.apache.hadoop.hbase.client.Admin admin
+protected org.apache.hadoop.hbase.client.Admin admin
     
     
     
    @@ -414,7 +427,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
     
     
     
    @@ -431,7 +444,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     TestCloneSnapshotFromClient
-public TestCloneSnapshotFromClient()
+public TestCloneSnapshotFromClient()
     
     
     
    @@ -448,7 +461,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     setupConfiguration
-protected static void setupConfiguration()
+protected static void setupConfiguration()
     
     
     
    @@ -457,7 +470,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     setUpBeforeClass
-public static void setUpBeforeClass()
+public static void setUpBeforeClass()
      throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
     title="class or interface in java.lang">Exception
     
     Throws:
    @@ -471,7 +484,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     tearDownAfterClass
-public static void tearDownAfterClass()
+public static void tearDownAfterClass()
    throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
     title="class 

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.html
    index ef6faa1..dae573d 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.html
    @@ -32,26 +32,26 @@
     024import 
    java.util.concurrent.ThreadPoolExecutor;
     025
     026import 
    org.apache.hadoop.conf.Configuration;
    -027import 
    org.apache.hadoop.hbase.ServerName;
    -028import 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants;
    -029import 
    org.apache.hadoop.hbase.backup.impl.BackupManager;
    -030import 
    org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
    -031import 
    org.apache.yetus.audience.InterfaceAudience;
    -032import 
    org.apache.hadoop.hbase.CoordinatedStateManager;
    -033import 
    org.apache.hadoop.hbase.errorhandling.ForeignException;
    -034import 
    org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
    -035import 
    org.apache.hadoop.hbase.master.MasterServices;
    -036import 
    org.apache.hadoop.hbase.master.MetricsMaster;
    -037import 
    org.apache.hadoop.hbase.procedure.MasterProcedureManager;
    -038import 
    org.apache.hadoop.hbase.procedure.Procedure;
    -039import 
    org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
    -040import 
    org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
    -041import 
    org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
    -042import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
    -043import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
    -044import 
    org.apache.zookeeper.KeeperException;
    -045import org.slf4j.Logger;
    -046import org.slf4j.LoggerFactory;
    +027import 
    org.apache.hadoop.hbase.CoordinatedStateManager;
    +028import 
    org.apache.hadoop.hbase.ServerName;
    +029import 
    org.apache.hadoop.hbase.backup.BackupRestoreConstants;
    +030import 
    org.apache.hadoop.hbase.backup.impl.BackupManager;
    +031import 
    org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
    +032import 
    org.apache.hadoop.hbase.errorhandling.ForeignException;
    +033import 
    org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
    +034import 
    org.apache.hadoop.hbase.master.MasterServices;
    +035import 
    org.apache.hadoop.hbase.master.MetricsMaster;
    +036import 
    org.apache.hadoop.hbase.procedure.MasterProcedureManager;
    +037import 
    org.apache.hadoop.hbase.procedure.Procedure;
    +038import 
    org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
    +039import 
    org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
    +040import 
    org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
    +041import 
    org.apache.yetus.audience.InterfaceAudience;
    +042import org.slf4j.Logger;
    +043import org.slf4j.LoggerFactory;
    +044
    +045import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
    +046import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
     047
     048/**
     049 * Master procedure manager for 
    coordinated cluster-wide WAL roll operation, which is run during
    @@ -65,114 +65,114 @@
     057  public static final String 
    ROLLLOG_PROCEDURE_NAME = "rolllog";
     058  public static final String 
    BACKUP_WAKE_MILLIS_KEY = "hbase.backup.logroll.wake.millis";
     059  public static final String 
    BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.logroll.timeout.millis";
    -060  public static final String 
    BACKUP_POOL_THREAD_NUMBER_KEY = "hbase.backup.logroll.pool.thread.number";
    -061
    -062  public static final int 
    BACKUP_WAKE_MILLIS_DEFAULT = 500;
    -063  public static final int 
BACKUP_TIMEOUT_MILLIS_DEFAULT = 180000;
    -064  public static final int 
    BACKUP_POOL_THREAD_NUMBER_DEFAULT = 8;
    -065  private MasterServices master;
    -066  private ProcedureCoordinator 
    coordinator;
    -067  private boolean done;
    -068
    -069  @Override
    -070  public void stop(String why) {
    -071LOG.info("stop: " + why);
    -072  }
    -073
    -074  @Override
    -075  public boolean isStopped() {
    -076return false;
    -077  }
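
The BACKUP_* keys defined above are ordinary Configuration knobs for this
distributed log-roll procedure; a hedged sketch of overriding them before
triggering a backup (the values here are arbitrary, only the key strings come
from the source):

    Configuration conf = HBaseConfiguration.create();
    // Poll interval while the coordinator waits on the cluster-wide WAL roll.
    conf.setInt("hbase.backup.logroll.wake.millis", 1000);
    // Overall timeout before the procedure is considered failed.
    conf.setInt("hbase.backup.logroll.timeout.millis", 120000);
    // Thread-pool size used by the procedure coordinator.
    conf.setInt("hbase.backup.logroll.pool.thread.number", 4);
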
    -078
    -079  @Override
    -080  public void initialize(MasterServices 
    master, MetricsMaster metricsMaster)
    -081  throws KeeperException, 
    IOException, UnsupportedOperationException {
    -082this.master = master;
    -083this.done = false;
    -084
    -085// setup the default procedure 
    coordinator
    -086String name = 
    master.getServerName().toString();
    -087
    +060  public static final String 
    BACKUP_POOL_THREAD_NUMBER_KEY =
    +061  
    "hbase.backup.logroll.pool.thread.number";
    +062
    +063  public static final int 
    BACKUP_WAKE_MILLIS_DEFAULT = 500;
    +064  public static final int 
BACKUP_TIMEOUT_MILLIS_DEFAULT = 180000;
    +065  public static final int 
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.html
    index 387384f..52df872 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.html
    @@ -31,179 +31,185 @@
     023import java.io.IOException;
     024
     025import 
    org.apache.hadoop.conf.Configuration;
    -026import 
    org.apache.hadoop.hbase.CoordinatedStateManager;
    -027import 
    org.apache.hadoop.hbase.HBaseTestingUtility;
    -028import 
    org.apache.hadoop.hbase.HConstants;
    -029import 
    org.apache.hadoop.hbase.LocalHBaseCluster;
    -030import 
    org.apache.hadoop.hbase.ServerName;
    -031import 
    org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer;
    -032import 
    org.apache.hadoop.hbase.master.LoadBalancer;
    -033import 
    org.apache.hadoop.hbase.master.ServerManager;
    -034import 
    org.apache.hadoop.hbase.testclassification.MediumTests;
    -035import 
    org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
    -036import 
    org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
    -037import 
    org.apache.zookeeper.KeeperException;
    -038import org.junit.After;
    -039import org.junit.Before;
    -040import org.junit.Test;
    -041import 
    org.junit.experimental.categories.Category;
    -042import org.slf4j.Logger;
    -043import org.slf4j.LoggerFactory;
    -044
    -045@Category(MediumTests.class)
    -046public class 
    TestRegionServerReportForDuty {
    +026import 
    org.apache.hadoop.hbase.CategoryBasedTimeout;
    +027import 
    org.apache.hadoop.hbase.CoordinatedStateManager;
    +028import 
    org.apache.hadoop.hbase.HBaseTestingUtility;
    +029import 
    org.apache.hadoop.hbase.HConstants;
    +030import 
    org.apache.hadoop.hbase.LocalHBaseCluster;
    +031import 
    org.apache.hadoop.hbase.ServerName;
    +032import 
    org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer;
    +033import 
    org.apache.hadoop.hbase.master.LoadBalancer;
    +034import 
    org.apache.hadoop.hbase.master.ServerManager;
    +035import 
    org.apache.hadoop.hbase.testclassification.MediumTests;
    +036import 
    org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
    +037import 
    org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
    +038import 
    org.apache.zookeeper.KeeperException;
    +039import org.junit.After;
    +040import org.junit.Before;
    +041import org.junit.Rule;
    +042import org.junit.Test;
    +043import 
    org.junit.experimental.categories.Category;
    +044import org.junit.rules.TestRule;
    +045import org.slf4j.Logger;
    +046import org.slf4j.LoggerFactory;
     047
    -048  private static final Logger LOG = 
    LoggerFactory.getLogger(TestRegionServerReportForDuty.class);
    -049
    -050  private static final long 
    SLEEP_INTERVAL = 500;
    -051
    -052  private HBaseTestingUtility testUtil;
    -053  private LocalHBaseCluster cluster;
    -054  private RegionServerThread rs;
    -055  private RegionServerThread rs2;
    -056  private MasterThread master;
    -057  private MasterThread backupMaster;
    -058
    -059  @Before
    -060  public void setUp() throws Exception 
    {
    -061testUtil = new 
    HBaseTestingUtility();
    -062testUtil.startMiniDFSCluster(1);
    -063testUtil.startMiniZKCluster(1);
    -064testUtil.createRootDir();
    -065cluster = new 
    LocalHBaseCluster(testUtil.getConfiguration(), 0, 0);
    -066  }
    -067
    -068  @After
    -069  public void tearDown() throws Exception 
    {
    -070cluster.shutdown();
    -071cluster.join();
    -072testUtil.shutdownMiniZKCluster();
    -073testUtil.shutdownMiniDFSCluster();
    -074  }
    -075
    -076  /**
-077   * Tests region server reportForDuty when the backup master becomes primary master after
    -078   * the first master goes away.
    -079   */
-080  @Test (timeout=180000)
    -081  public void 
    testReportForDutyWithMasterChange() throws Exception {
    -082
    -083// Start a master and wait for it to 
    become the active/primary master.
    -084// Use a random unique port
    -085
    cluster.getConfiguration().setInt(HConstants.MASTER_PORT, 
    HBaseTestingUtility.randomFreePort());
    -086// master has a rs. defaultMinToStart 
    = 2
    -087boolean tablesOnMaster = 
    LoadBalancer.isTablesOnMaster(testUtil.getConfiguration());
    -088
    cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
     tablesOnMaster? 2: 1);
    -089
    cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART,
     tablesOnMaster? 2: 1);
    -090master = cluster.addMaster();
    -091rs = cluster.addRegionServer();
    -092LOG.debug("Starting master: " + 
    master.getMaster().getServerName());
    -093master.start();
    -094rs.start();
    -095
    -096waitForClusterOnline(master);
    -097
    -098// Add a 2nd region server
    -099
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
    index eeffbba..404e5b0 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
    @@ -1563,10 +1563,13 @@ implements 
     
     clusterConnection
-protected ClusterConnection clusterConnection
+protected ClusterConnection clusterConnection
     Cluster connection to be shared by services.
      Initialized at server startup and closed when server shuts down.
    - Clients must never close it explicitly.
    + Clients must never close it explicitly.
    + Clients hosted by this Server should make use of this clusterConnection 
    rather than create
    + their own; if they create their own, there is no way for the hosting server 
to shut down
    + ongoing client RPCs.
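
In practice this means a service or coprocessor hosted by the region server
should borrow the server's connection rather than call
ConnectionFactory.createConnection itself; a hedged sketch (the accessor name
is an assumption, not confirmed by this page):

    // Inside code hosted by the region server:
    ClusterConnection conn = regionServer.getClusterConnection();
    try (Table table = conn.getTable(TableName.valueOf("my_table"))) {
      table.get(new Get(Bytes.toBytes("row1")));
    }
    // Do NOT call conn.close(): the hosting server owns its lifecycle.
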
     
     
     
    @@ -1575,7 +1578,7 @@ implements 
     
     metaTableLocator
-protected MetaTableLocator metaTableLocator
+protected MetaTableLocator metaTableLocator
     
     
     
    @@ -1584,7 +1587,7 @@ implements 
     
     tableDescriptors
-protected TableDescriptors tableDescriptors
+protected TableDescriptors tableDescriptors
     Go here to get table descriptors.
     
     
    @@ -1594,7 +1597,7 @@ implements 
     
     replicationSourceHandler
-protected ReplicationSourceService replicationSourceHandler
+protected ReplicationSourceService replicationSourceHandler
     
     
     
    @@ -1603,7 +1606,7 @@ implements 
     
     replicationSinkHandler
-protected ReplicationSinkService replicationSinkHandler
+protected ReplicationSinkService replicationSinkHandler
     
     
     
    @@ -1612,7 +1615,7 @@ implements 
     
     compactSplitThread
-public CompactSplit compactSplitThread
+public CompactSplit compactSplitThread
     
     
     
    @@ -1621,7 +1624,7 @@ implements 
     
     onlineRegions
-protected final Map<String,HRegion> onlineRegions
+protected final Map<String,HRegion> onlineRegions
     Map of regions currently being served by this region 
    server. Key is the
      encoded region name.  All access should be synchronized.
     
    @@ -1632,7 +1635,7 @@ implements 
     
     regionFavoredNodesMap
-protected final Map<String,InetSocketAddress[]> regionFavoredNodesMap
+protected final Map<String,InetSocketAddress[]> regionFavoredNodesMap
     Map of encoded region names to the DataNode locations they 
    should be hosted on
      We store the value as InetSocketAddress since this is used only in HDFS
      API (create() that takes favored nodes as hints for placing file blocks).
    @@ -1648,7 +1651,7 @@ implements 
     
     leases
-protected Leases leases
+protected Leases leases
     
     
     
    @@ -1657,7 +1660,7 @@ implements 
     
     executorService
-protected ExecutorService executorService
+protected ExecutorService executorService
     
     
     
    @@ -1666,7 +1669,7 @@ implements 
     
     fsOk
-protected volatile boolean fsOk
+protected volatile boolean fsOk
     
     
     
    @@ -1675,7 +1678,7 @@ implements 
     
     fs
-protected HFileSystem fs
+protected HFileSystem fs
     
     
     
    @@ -1684,7 +1687,7 @@ implements 
     
     walFs
-protected HFileSystem walFs
+protected HFileSystem walFs
     
     
     
    @@ -1693,7 +1696,7 @@ implements 
     
     stopped
-private volatile boolean stopped
+private volatile boolean stopped
     
     
     
    @@ -1702,7 +1705,7 @@ implements 
     
     abortRequested
-private volatile boolean abortRequested
+private volatile boolean abortRequested
     
     
     
    @@ -1711,7 +1714,7 @@ implements 
     
     rowlocks
    -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true;
     title="class or interface in java.util.concurrent">ConcurrentMaphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in 

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.MockRemoteCall.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.MockRemoteCall.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.MockRemoteCall.html
    index f1db5ca..d8515d7 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.MockRemoteCall.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.MockRSProcedureDispatcher.MockRemoteCall.html
    @@ -32,813 +32,820 @@
     024import static org.junit.Assert.fail;
     025
     026import java.io.IOException;
    -027import java.net.SocketTimeoutException;
    -028import java.util.NavigableMap;
    -029import java.util.Random;
    -030import java.util.Set;
    -031import java.util.SortedSet;
    -032import 
    java.util.concurrent.ConcurrentSkipListMap;
    -033import 
    java.util.concurrent.ConcurrentSkipListSet;
    -034import 
    java.util.concurrent.ExecutionException;
    -035import java.util.concurrent.Executors;
    -036import java.util.concurrent.Future;
    -037import 
    java.util.concurrent.ScheduledExecutorService;
    -038import java.util.concurrent.TimeUnit;
    -039
    -040import 
    org.apache.hadoop.conf.Configuration;
    -041import 
    org.apache.hadoop.hbase.CategoryBasedTimeout;
    -042import 
    org.apache.hadoop.hbase.DoNotRetryIOException;
    -043import 
    org.apache.hadoop.hbase.HBaseTestingUtility;
    -044import 
    org.apache.hadoop.hbase.NotServingRegionException;
    -045import 
    org.apache.hadoop.hbase.ServerName;
    -046import 
    org.apache.hadoop.hbase.TableName;
    -047import 
    org.apache.hadoop.hbase.client.RegionInfo;
    -048import 
    org.apache.hadoop.hbase.client.RegionInfoBuilder;
    -049import 
    org.apache.hadoop.hbase.client.RetriesExhaustedException;
    -050import 
    org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
    -051import 
    org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
    -052import 
    org.apache.hadoop.hbase.master.MasterServices;
    -053import 
    org.apache.hadoop.hbase.master.RegionState.State;
    -054import 
    org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
    -055import 
    org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
    -056import 
    org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
    -057import 
    org.apache.hadoop.hbase.procedure2.Procedure;
    -058import 
    org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
    -059import 
    org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
    -060import 
    org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
    -061import 
    org.apache.hadoop.hbase.procedure2.util.StringUtils;
    -062import 
    org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
    -063import 
    org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
    -064import 
    org.apache.hadoop.hbase.testclassification.MasterTests;
    -065import 
    org.apache.hadoop.hbase.testclassification.MediumTests;
    -066import 
    org.apache.hadoop.hbase.util.Bytes;
    -067import 
    org.apache.hadoop.hbase.util.FSUtils;
    -068import 
    org.apache.hadoop.ipc.RemoteException;
    -069import org.junit.After;
    -070import org.junit.Before;
    -071import org.junit.Ignore;
    -072import org.junit.Rule;
    -073import org.junit.Test;
    -074import 
    org.junit.experimental.categories.Category;
    -075import 
    org.junit.rules.ExpectedException;
    -076import org.junit.rules.TestName;
    -077import org.junit.rules.TestRule;
    -078import org.slf4j.Logger;
    -079import org.slf4j.LoggerFactory;
    -080import 
    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -081import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
    -082import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
    -083import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
    -084import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
    -085import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
    -086import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
    -087import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
    -088import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
    -089import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
    -090import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
    -091import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
    -092
    -093@Category({MasterTests.class, 
    MediumTests.class})
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.html
    --
    diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.html 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.html
    index 232ef56..bc3a6d0 100644
    --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.html
    +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.html
    @@ -29,610 +29,626 @@
     021import static 
    org.junit.Assert.assertEquals;
     022import static 
    org.junit.Assert.assertFalse;
     023import static 
    org.junit.Assert.assertTrue;
    -024
    -025import java.io.ByteArrayOutputStream;
    -026import java.io.IOException;
    -027import java.math.BigDecimal;
    -028import java.nio.ByteBuffer;
    -029import java.util.ArrayList;
    -030import java.util.List;
    -031import java.util.NavigableMap;
    -032import java.util.TreeMap;
    -033import 
    org.apache.hadoop.hbase.testclassification.MiscTests;
    -034import 
    org.apache.hadoop.hbase.testclassification.SmallTests;
    -035import 
    org.apache.hadoop.hbase.util.Bytes;
    -036import org.junit.Assert;
    -037import org.junit.Test;
    -038import 
    org.junit.experimental.categories.Category;
    -039
    -040@Category({MiscTests.class, 
    SmallTests.class})
    -041public class TestCellUtil {
    -042  /**
    -043   * CellScannable used in test. Returns 
    a {@link TestCellScanner}
    -044   */
    -045  private static class TestCellScannable 
    implements CellScannable {
    -046private final int cellsCount;
    -047TestCellScannable(final int 
    cellsCount) {
    -048  this.cellsCount = cellsCount;
    -049}
    -050@Override
    -051public CellScanner cellScanner() {
    -052  return new 
    TestCellScanner(this.cellsCount);
    -053}
    -054  }
    -055
    -056  /**
    -057   * CellScanner used in test.
    -058   */
    -059  private static class TestCellScanner 
    implements CellScanner {
    -060private int count = 0;
    -061private Cell current = null;
    -062private final int cellsCount;
    -063
    -064TestCellScanner(final int cellsCount) 
    {
    -065  this.cellsCount = cellsCount;
    -066}
    -067
    -068@Override
    -069public Cell current() {
    -070  return this.current;
    -071}
    -072
    -073@Override
    -074public boolean advance() throws 
    IOException {
-075  if (this.count < cellsCount) {
    -076this.current = new 
    TestCell(this.count);
    -077this.count++;
    -078return true;
    -079  }
    -080  return false;
    -081}
    -082  }
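
TestCellScanner above exercises the CellScanner contract: advance() moves the
cursor and returns false once exhausted, and current() exposes the Cell at the
cursor. A minimal consumer under the same contract (a sketch built on the test
helpers above):

    static void drain(CellScannable source) throws IOException {
      CellScanner scanner = source.cellScanner();
      while (scanner.advance()) {       // false once the scanner is exhausted
        Cell cell = scanner.current();  // only valid until the next advance()
        System.out.println(Bytes.toInt(cell.getRowArray()));
      }
    }
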
    -083
    -084  /**
    -085   * Cell used in test. Has row only.
    -086   */
    -087  private static class TestCell 
    implements Cell {
    -088private final byte [] row;
    -089
    -090TestCell(final int i) {
    -091  this.row = Bytes.toBytes(i);
    -092}
    -093
    -094@Override
    -095public byte[] getRowArray() {
    -096  return this.row;
    -097}
    -098
    -099@Override
    -100public int getRowOffset() {
    -101  return 0;
    -102}
    -103
    -104@Override
    -105public short getRowLength() {
    -106  return (short)this.row.length;
    -107}
    -108
    -109@Override
    -110public byte[] getFamilyArray() {
    -111  // TODO Auto-generated method 
    stub
    -112  return null;
    -113}
    -114
    -115@Override
    -116public int getFamilyOffset() {
    -117  // TODO Auto-generated method 
    stub
    -118  return 0;
    -119}
    -120
    -121@Override
    -122public byte getFamilyLength() {
    -123  // TODO Auto-generated method 
    stub
    -124  return 0;
    -125}
    -126
    -127@Override
    -128public byte[] getQualifierArray() {
    -129  // TODO Auto-generated method 
    stub
    -130  return null;
    -131}
    -132
    -133@Override
    -134public int getQualifierOffset() {
    -135  // TODO Auto-generated method 
    stub
    -136  return 0;
    -137}
    -138
    -139@Override
    -140public int getQualifierLength() {
    -141  // TODO Auto-generated method 
    stub
    -142  return 0;
    -143}
    -144
    -145@Override
    -146public long getTimestamp() {
    -147  // TODO Auto-generated method 
    stub
    -148  return 0;
    -149}
    -150
    -151@Override
    -152public byte getTypeByte() {
    -153  // TODO Auto-generated method 
    stub
    -154  return 0;
    -155}
    -156
    -157@Override
    -158public byte[] getValueArray() {
    -159  // TODO Auto-generated method 
    stub
    -160  return null;
    -161}
    -162
    -163@Override
    -164public int getValueOffset() {
    -165  // TODO Auto-generated method 
    stub
    -166  return 0;
    -167}
    -168
    -169@Override
    -170public int getValueLength() {
    -171  // TODO Auto-generated method 
    stub
    -172  return 0;
    -173}
    -174
    -175@Override
    -176public byte[] getTagsArray() {
    -177  // TODO Auto-generated method 
    stub
    -178  return null;
    -179}
    -180
    -181@Override
    -182public int getTagsOffset() {
    -183  // TODO Auto-generated method 
    stub
    -184  return 0;
    -185}
    -186
    -187@Override
    -188public long getSequenceId() {
    -189  // TODO Auto-generated method 
    stub
    -190  return 0;
    -191}
    -192
    -193

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.html
    --
    diff --git 
    a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.html
     
    b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.html
    index 2cef7d6..1ec37a5 100644
    --- 
    a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.html
    +++ 
    b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestWALEntryStream.html
    @@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public class TestWALEntryStream
    +public class TestWALEntryStream
     extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     
     
    @@ -165,7 +165,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     fs
     
     
    -private static 
    org.apache.hadoop.hbase.HRegionInfo
    +private static 
    org.apache.hadoop.hbase.client.RegionInfo
     info
     
     
    @@ -362,7 +362,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     TEST_UTIL
-private static HBaseTestingUtility TEST_UTIL
+private static HBaseTestingUtility TEST_UTIL
     
     
     
    @@ -371,7 +371,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     conf
-private static org.apache.hadoop.conf.Configuration conf
+private static org.apache.hadoop.conf.Configuration conf
     
     
     
    @@ -380,7 +380,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     fs
-private static org.apache.hadoop.fs.FileSystem fs
+private static org.apache.hadoop.fs.FileSystem fs
     
     
     
    @@ -389,7 +389,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     cluster
-private static org.apache.hadoop.hdfs.MiniDFSCluster cluster
+private static org.apache.hadoop.hdfs.MiniDFSCluster cluster
     
     
     
    @@ -398,7 +398,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     tableName
-private static final org.apache.hadoop.hbase.TableName tableName
+private static final org.apache.hadoop.hbase.TableName tableName
     
     
     
    @@ -407,7 +407,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     family
-private static final byte[] family
+private static final byte[] family
     
     
     
    @@ -416,7 +416,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     qualifier
-private static final byte[] qualifier
+private static final byte[] qualifier
     
     
     
    @@ -425,7 +425,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     info
-private static final org.apache.hadoop.hbase.HRegionInfo info
+private static final org.apache.hadoop.hbase.client.RegionInfo info
     
     
     
    @@ -434,7 +434,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     scopes
-private static final NavigableMap<byte[],Integer> scopes
+private static final NavigableMap<byte[],Integer> scopes
     
     
     
    @@ -443,7 +443,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     log
-private org.apache.hadoop.hbase.wal.WAL log
+private org.apache.hadoop.hbase.wal.WAL log
     
     
     
    @@ -452,7 +452,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     walQueue
-PriorityBlockingQueue<org.apache.hadoop.fs.Path> walQueue
+PriorityBlockingQueue<org.apache.hadoop.fs.Path> walQueue
     
     
     
    @@ -461,7 +461,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     pathWatcher
-private TestWALEntryStream.PathWatcher pathWatcher
+private TestWALEntryStream.PathWatcher pathWatcher
     
     
     
    @@ -470,7 +470,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     tn
-public org.junit.rules.TestName tn
+public org.junit.rules.TestName tn
     
     
     
    @@ -479,7 +479,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     mvcc
-private final org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl mvcc
+private final org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl mvcc
     
     
     
    @@ 

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.SchemaLocking.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.SchemaLocking.html
     
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.SchemaLocking.html
    index cd145dd..ff57180 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.SchemaLocking.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.SchemaLocking.html
    @@ -18,7 +18,7 @@
     catch(err) {
     }
     //-->
    -var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
    +var methods = 
    {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10};
     var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
    Methods"],8:["t4","Concrete Methods"]};
     var altColor = "altColor";
     var rowColor = "rowColor";
    @@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -private static class MasterProcedureScheduler.SchemaLocking
    +private static class MasterProcedureScheduler.SchemaLocking
     extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
     title="class or interface in java.lang">Object
     Locks on namespaces, tables, and regions.
      Since LockAndQueue implementation is NOT thread-safe, schedLock() guards all 
    calls to these
    @@ -142,13 +142,17 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
 (package private) Map<String,LockAndQueue>
    -regionLocks
    +peerLocks
     
     
+(package private) Map<String,LockAndQueue>
    +regionLocks
    +
    +
 (package private) Map<ServerName,LockAndQueue>
     serverLocks
     
    -
    +
 (package private) Map<TableName,LockAndQueue>
     tableLocks
     
    @@ -207,25 +211,33 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     (package private) LockAndQueue
-getRegionLock(String encodedRegionName)
+getPeerLock(String peerId)
     
     
     (package private) LockAndQueue
-getServerLock(ServerName serverName)
+getRegionLock(String encodedRegionName)
     
     
     (package private) LockAndQueue
-getTableLock(TableName tableName)
+getServerLock(ServerName serverName)
     
     
     (package private) LockAndQueue
-removeRegionLock(String encodedRegionName)
+getTableLock(TableName tableName)
     
     
     (package private) LockAndQueue
    -removeTableLock(TableNametableName)
    +removePeerLock(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in 
    java.lang">StringpeerId)
     
     
    +(package private) LockAndQueue
+removeRegionLock(String encodedRegionName)
    +
    +
    +(package private) LockAndQueue
+removeTableLock(TableName tableName)
    +
    +
     http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">String
     toString()
     
    @@ -257,7 +269,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     serverLocks
-final Map<ServerName,LockAndQueue> serverLocks
+final Map<ServerName,LockAndQueue> serverLocks
     
     
     
    @@ -266,7 +278,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     namespaceLocks
    -finalhttp://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
     title="class or interface in 

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
     
    b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
    index a9b316c..b9ee03e 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.html
    @@ -255,7 +255,7 @@ implements MasterObserver
    -postAbortProcedure,
     postAddReplicationPeer,
     postAddRSGroup,
     postAssign,
     postBalance, postBalanceRSGroup,
     postBalanceSwitch,
     postClearDeadServers,
     postCloneSnapshot,
     postCompletedCreateTableAction,
     postCompletedDeleteTableAction,
     postCompletedDisableTableAction,
     postCompletedEnableTableAction,
     postCompletedMergeRegionsAction,
     postCompletedModifyTableAction,
     postCompletedSplitRegionAction,
     postCompletedTruncateTableAction, postCreateNamespace,
     postCreateTable,
     postDecommissionRegionServers,
     postDeleteNamespace,
     postDeleteSnapshot,
     postDeleteTable,
     postDisableReplicationPeer,
     postDisableTable,
     postEnableReplicationPeer, postEnableTable,
     postGetClusterStatus,
     postGetLocks,
     postGetNamespaceDescriptor,
     postGetProcedures, postGetReplicationPeerConfig,
     postGetTableDescriptors,
     postGetTableNames,
     postListDecommissionedRegionServers,
     postListNamespaceDescriptors,
     postListReplicationPeers,
     postListSnapshot,
     postLockHeartbeat,
     postMergeRegions,
     postMergeRegionsCommitAction,
     postModifyNamespace,
     postModifyTable,
     postMove, postMoveServers,
     postMoveServersAndTables,
     postMoveTables,
     postRecommissionRegionServer,
     postRegionOffline,
     postRemoveReplicationPeer,
     postRemoveRSGroup,
     postRemoveServers,
     postRequestLock, postRestoreSnapshot,
     postRollBackMergeRegionsAction,
     postRollBackSplitRegionAction,
     pos
     tSetNamespaceQuota, postSetSplitOrMergeEnabled,
     postSetTableQuota,
     postSetUserQuota,
     postSetUserQuota, postSetUserQuota,
     postSnapshot,
     postStartMaster,
     postTableFlush,
     postTruncateTable,
     postUnassign,
     postUpdateReplicationPeerConfig,
     preAbortProcedure,
     preAddReplicationPeer,
     preAddRSGroup,
     preAssign,
     preBalance,
     preBalanceRSGroup,
     preBalanceSwitch, preClearDeadServers,
     preCloneSnapshot,
     preCreateNamespace,
 preCreateTableAction, preDecommissionRegionServers, preDeleteNamespace, preDeleteSnapshot, preDeleteTable, preDeleteTableAction,
     preDisableReplicationPeer,
     preDisableTable,
     preDisableTableAction,
     preEnableReplicationPeer,
     preEnableTable,
     preEnableTableAction,
     preGetClusterStatus,
     preGetLocks,
     preGetNamespaceDescriptor,
     preGetProcedures,
     preGetReplicationPeerConfig,
     preGetTableDescriptors,
     preGetTableNames,
     preListDecommissionedRegionServers,
     preListNamespaceDescriptors,
     preListReplicationPeers,
     preListSnapshot,
     preLockHeartbeat,
     preMasterInitialization, preMergeRegions,
     preMergeRegionsAction,
     preMergeRegionsCommitAction,
     preModifyNamespace,
     preModifyTableAction,
     preMove,
     preMoveServers,
     preMoveServersAndTables,
     preMoveTables,
     preRecommissionRegionServer,
     preRegionOffline,
     preRemoveReplicationPeer,
     preRemoveRSGroup, preRemoveServers,
     preRequestLock,
     preRestoreSnapshot,
     preSetNamespaceQuota, preSetSplitOrMergeEnabled,
     preSetTableQuota,
     preSetUserQuota,
     preSetUserQuota, preSetUserQuota,
     preShutdown,
     preSnapshot,
     preSplitRegion,
     preSplitRegionAction,
     preSplitRegionAfterMETAAction,
     preSplitRegionBeforeMETAAction,
     preStopMaster,
 preTableFlush, preTruncateTable,
     preTruncateTableAction,
     preUnassign,
     preUpdateReplicationPeerConfig
    +postAbortProcedure,
     postAddReplicationPeer,
     postAddRSGroup,
     postAssign,
     postBalance, postBalanceRSGroup,
     postBalanceSwitch,
     postClearDeadServers,
     postCloneSnapshot,
     postCompletedCreateTableAction,
     postCompletedDeleteTableAction,
     postCompletedDisableTableAction,
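The two method lists above enumerate every MasterObserver hook the whitelist observer inherits. Since these hooks are default methods on the interface in the 2.0 coprocessor API, a custom observer only overrides the ones it needs. A minimal sketch under that assumption (the class name is illustrative):

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical observer overriding a single hook from the list above; every
// other pre*/post* method keeps its default no-op body.
public class LoggingMasterObserver implements MasterCoprocessor, MasterObserver {
  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx)
      throws IOException {
    System.out.println("master finished starting");
  }
}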
     

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.html
    index 8373f27..80df615 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.html
    @@ -6,294 +6,293 @@
     
     
     
    -001
    -002/**
    -003 * Copyright The Apache Software 
    Foundation
    -004 *
    -005 * Licensed to the Apache Software 
    Foundation (ASF) under one or more
    -006 * contributor license agreements. See 
    the NOTICE file distributed with this
    -007 * work for additional information 
    regarding copyright ownership. The ASF
    -008 * licenses this file to you under the 
    Apache License, Version 2.0 (the
    -009 * "License"); you may not use this file 
    except in compliance with the License.
    -010 * You may obtain a copy of the License 
    at
    -011 *
    -012 * 
    http://www.apache.org/licenses/LICENSE-2.0
    -013 *
    -014 * Unless required by applicable law or 
    agreed to in writing, software
    -015 * distributed under the License is 
    distributed on an "AS IS" BASIS, WITHOUT
    -016 * WARRANTIES OR CONDITIONS OF ANY KIND, 
    either express or implied. See the
    -017 * License for the specific language 
    governing permissions and limitations
    -018 * under the License.
    -019 */
    -020
    -021package 
    org.apache.hadoop.hbase.io.hfile;
    -022
    -023import java.io.IOException;
    -024import java.net.InetSocketAddress;
    -025import java.nio.ByteBuffer;
    -026import java.util.ArrayList;
    -027import java.util.Iterator;
    -028import java.util.List;
    -029import 
    java.util.NoSuchElementException;
    -030import 
    java.util.concurrent.ExecutionException;
    -031
    -032import 
    org.apache.hadoop.conf.Configuration;
    -033import 
    org.apache.hadoop.hbase.HConstants;
    -034import 
    org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
    -035import 
    org.apache.hadoop.hbase.nio.ByteBuff;
    -036import 
    org.apache.hadoop.hbase.nio.SingleByteBuff;
    -037import 
    org.apache.hadoop.hbase.trace.TraceUtil;
    -038import 
    org.apache.hadoop.hbase.util.Addressing;
    -039import 
    org.apache.htrace.core.TraceScope;
    -040import 
    org.apache.yetus.audience.InterfaceAudience;
    -041import org.slf4j.Logger;
    -042import org.slf4j.LoggerFactory;
    -043
    -044import net.spy.memcached.CachedData;
    -045import 
    net.spy.memcached.ConnectionFactoryBuilder;
    -046import net.spy.memcached.FailureMode;
    -047import 
    net.spy.memcached.MemcachedClient;
    -048import 
    net.spy.memcached.transcoders.Transcoder;
    -049
    -050/**
    -051 * Class to store blocks into 
    memcached.
    -052 * This should only be used on a cluster 
    of Memcached daemons that are tuned well and have a
    -053 * good network connection to the HBase 
    regionservers. Any other use will likely slow down HBase
    -054 * greatly.
    -055 */
    -056@InterfaceAudience.Private
    -057public class MemcachedBlockCache 
    implements BlockCache {
    -058  private static final Logger LOG = 
    LoggerFactory.getLogger(MemcachedBlockCache.class.getName());
    -059
    -060  // Some memcache versions won't take 
    more than 1024 * 1024. So set the limit below
    -061  // that just in case this client is 
    used with those versions.
    -062  public static final int MAX_SIZE = 1020 
    * 1024;
    -063
    -064  // Config key for what memcached 
    servers to use.
-065  // They should be specified in a comma separated list with ports.
    -066  // like:
    -067  //
    -068  // host1:11211,host3:8080,host4:11211
    -069  public static final String 
    MEMCACHED_CONFIG_KEY = "hbase.cache.memcached.servers";
    -070  public static final String 
    MEMCACHED_TIMEOUT_KEY = "hbase.cache.memcached.timeout";
    -071  public static final String 
    MEMCACHED_OPTIMEOUT_KEY = "hbase.cache.memcached.optimeout";
    -072  public static final String 
    MEMCACHED_OPTIMIZE_KEY = "hbase.cache.memcached.spy.optimze";
    -073  public static final long 
    MEMCACHED_DEFAULT_TIMEOUT = 500;
    -074  public static final boolean 
    MEMCACHED_OPTIMIZE_DEFAULT = false;
    -075
    -076  private final MemcachedClient client;
    -077  private final HFileBlockTranscoder tc = 
    new HFileBlockTranscoder();
    -078  private final CacheStats cacheStats = 
    new CacheStats("MemcachedBlockCache");
    -079
    -080  public 
    MemcachedBlockCache(Configuration c) throws IOException {
    -081LOG.info("Creating 
    MemcachedBlockCache");
    -082
    -083long opTimeout = 
    c.getLong(MEMCACHED_OPTIMEOUT_KEY, MEMCACHED_DEFAULT_TIMEOUT);
    -084long queueTimeout = 
    c.getLong(MEMCACHED_TIMEOUT_KEY, opTimeout + MEMCACHED_DEFAULT_TIMEOUT);
    -085boolean optimize = 
    c.getBoolean(MEMCACHED_OPTIMIZE_KEY, MEMCACHED_OPTIMIZE_DEFAULT);
    -086
    -087ConnectionFactoryBuilder builder = 
    new ConnectionFactoryBuilder()
    -088.setOpTimeout(opTimeout)
    -089
    .setOpQueueMaxBlockTime(queueTimeout) // Cap the max time before anything times 
    out
    -090
    .setFailureMode(FailureMode.Redistribute)
    -091  
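The config keys quoted in this source are the knobs for the memcached-backed block cache. A minimal sketch of wiring them up; the key strings are verbatim from the code above (including the "optimze" spelling of the optimize key), while the host list and values are made up for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemcachedCacheConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Comma separated host:port list, e.g. host1:11211,host3:8080,host4:11211
    conf.set("hbase.cache.memcached.servers", "cache1:11211,cache2:11211");
    conf.setLong("hbase.cache.memcached.optimeout", 500L);   // per-op timeout, ms
    conf.setLong("hbase.cache.memcached.timeout", 1000L);    // op queue timeout, ms
    conf.setBoolean("hbase.cache.memcached.spy.optimze", false); // key spelled as in source
  }
}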

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/MutationSerialization.MutationSerializer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/MutationSerialization.MutationSerializer.html
     
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/MutationSerialization.MutationSerializer.html
    index b782b93..13ffdaf 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/MutationSerialization.MutationSerializer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/MutationSerialization.MutationSerializer.html
@@ -361,6 +361,6 @@ implements org.apache.hadoop.io.serializer.Serializer
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/MutationSerialization.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/MutationSerialization.html 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/MutationSerialization.html
    index 852e6c7..b887231 100644
    --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/MutationSerialization.html
    +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/MutationSerialization.html
@@ -334,6 +334,6 @@ implements org.apache.hadoop.io.serializer.Serialization
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/PutCombiner.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/PutCombiner.html 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/PutCombiner.html
    index 5ce4d64..1203975 100644
    --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/PutCombiner.html
    +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/PutCombiner.html
@@ -352,6 +352,6 @@ extends org.apache.hadoop.mapreduce.Reducer
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/PutSortReducer.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/PutSortReducer.html 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/PutSortReducer.html
    index f8ef872..94fb4f8 100644
    --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/PutSortReducer.html
    +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/PutSortReducer.html
@@ -375,6 +375,6 @@ extends org.apache.hadoop.mapreduce.Reducer
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.html 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.html
    index 19c9963..537e12b 100644
    --- a/devapidocs/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.html
    +++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.html
    @@ -433,6 +433,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/mapreduce/ResultSerialization.Result94Deserializer.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/ResultSerialization.Result94Deserializer.html
     
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/ResultSerialization.Result94Deserializer.html
    index c69ba2c..895fd45 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/mapreduce/ResultSerialization.Result94Deserializer.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/mapreduce/ResultSerialization.Result94Deserializer.html
@@ -386,6 +386,6 @@ implements org.apache.hadoop.io.serializer.Deserializer
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyClientBackoffPolicy.html
    --
    diff --git 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyClientBackoffPolicy.html
     
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyClientBackoffPolicy.html
    index bbd91b8..4f76302 100644
    --- 
    a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyClientBackoffPolicy.html
    +++ 
    b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyClientBackoffPolicy.html
    @@ -56,1641 +56,1753 @@
     048import 
    java.util.concurrent.atomic.AtomicBoolean;
     049import 
    java.util.concurrent.atomic.AtomicInteger;
     050import 
    java.util.concurrent.atomic.AtomicLong;
    -051
    -052import 
    org.apache.hadoop.conf.Configuration;
    -053import 
    org.apache.hadoop.hbase.CallQueueTooBigException;
    -054import 
    org.apache.hadoop.hbase.CategoryBasedTimeout;
    -055import org.apache.hadoop.hbase.Cell;
    -056import 
    org.apache.hadoop.hbase.HConstants;
    -057import 
    org.apache.hadoop.hbase.HRegionInfo;
    -058import 
    org.apache.hadoop.hbase.HRegionLocation;
    -059import 
    org.apache.hadoop.hbase.RegionLocations;
    -060import 
    org.apache.hadoop.hbase.ServerName;
    -061import 
    org.apache.hadoop.hbase.TableName;
    -062import 
    org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
    -063import 
    org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
    -064import 
    org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
    -065import 
    org.apache.hadoop.hbase.client.backoff.ServerStatistics;
    -066import 
    org.apache.hadoop.hbase.client.coprocessor.Batch;
    -067import 
    org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    -068import 
    org.apache.hadoop.hbase.testclassification.ClientTests;
    -069import 
    org.apache.hadoop.hbase.testclassification.MediumTests;
    -070import 
    org.apache.hadoop.hbase.util.Bytes;
    -071import 
    org.apache.hadoop.hbase.util.Threads;
    -072import org.junit.Assert;
    -073import org.junit.BeforeClass;
    -074import org.junit.Ignore;
    -075import org.junit.Rule;
    -076import org.junit.Test;
    -077import 
    org.junit.experimental.categories.Category;
    -078import org.junit.rules.TestRule;
    -079import org.mockito.Mockito;
    -080import org.slf4j.Logger;
    -081import org.slf4j.LoggerFactory;
    -082
    -083@Category({ClientTests.class, 
    MediumTests.class})
    -084public class TestAsyncProcess {
    -085  @Rule public final TestRule timeout = 
    CategoryBasedTimeout.builder().withTimeout(this.getClass()).
    -086  
    withLookingForStuckThread(true).build();
    -087  private static final Logger LOG = 
    LoggerFactory.getLogger(TestAsyncProcess.class);
    -088  private static final TableName 
    DUMMY_TABLE =
    -089  TableName.valueOf("DUMMY_TABLE");
    -090  private static final byte[] 
    DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
    -091  private static final byte[] 
    DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
    -092  private static final byte[] 
    DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
    -093  private static final byte[] FAILS = 
    Bytes.toBytes("FAILS");
    -094  private static final Configuration CONF 
    = new Configuration();
    -095  private static final 
    ConnectionConfiguration CONNECTION_CONFIG =
    -096  new 
    ConnectionConfiguration(CONF);
    -097  private static final ServerName sn = 
    ServerName.valueOf("s1,1,1");
    -098  private static final ServerName sn2 = 
    ServerName.valueOf("s2,2,2");
    -099  private static final ServerName sn3 = 
    ServerName.valueOf("s3,3,3");
    -100  private static final HRegionInfo hri1 
    =
    -101  new HRegionInfo(DUMMY_TABLE, 
    DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
    -102  private static final HRegionInfo hri2 
    =
    -103  new HRegionInfo(DUMMY_TABLE, 
    DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
    -104  private static final HRegionInfo hri3 
    =
    -105  new HRegionInfo(DUMMY_TABLE, 
    DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
    -106  private static final HRegionLocation 
    loc1 = new HRegionLocation(hri1, sn);
    -107  private static final HRegionLocation 
    loc2 = new HRegionLocation(hri2, sn);
    -108  private static final HRegionLocation 
    loc3 = new HRegionLocation(hri3, sn2);
    -109
    -110  // Replica stuff
    -111  private static final RegionInfo hri1r1 
    = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
    -112  private static final RegionInfo hri1r2 
    = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
    -113  private static final RegionInfo hri2r1 
    = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
    -114  private static final RegionLocations 
    hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
    -115  new HRegionLocation(hri1r1, sn2), 
    new HRegionLocation(hri1r2, sn3));
    -116  private static final RegionLocations 
    hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
    -117  new HRegionLocation(hri2r1, 
    sn3));
    -118  private static final RegionLocations 
    hrls3 =
    -119  new RegionLocations(new 
    HRegionLocation(hri3, sn3), null);
    -120
    -121  

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
     
    b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
    index 411b5f0..4379dfd 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.IndexType.html
    @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -public static enum CompactingMemStore.IndexType
    +public static enum CompactingMemStore.IndexType
extends Enum<CompactingMemStore.IndexType>
     Types of indexes (part of immutable segments) to be used 
    after flattening,
      compaction, or merge are applied.
    @@ -215,7 +215,7 @@ the order they are declared.
     
     
     CSLM_MAP
-public static final CompactingMemStore.IndexType CSLM_MAP
+public static final CompactingMemStore.IndexType CSLM_MAP
     
     
     
    @@ -224,7 +224,7 @@ the order they are declared.
     
     
     ARRAY_MAP
-public static final CompactingMemStore.IndexType ARRAY_MAP
+public static final CompactingMemStore.IndexType ARRAY_MAP
     
     
     
    @@ -233,7 +233,7 @@ the order they are declared.
     
     
     CHUNK_MAP
-public static final CompactingMemStore.IndexType CHUNK_MAP
+public static final CompactingMemStore.IndexType CHUNK_MAP
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html 
    b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
    index 66ce1a3..718f4d8 100644
    --- a/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
    +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/CompactingMemStore.html
    @@ -182,14 +182,6 @@ extends 
 static String
-COMPACTING_MEMSTORE_INDEX_DEFAULT
-
-
-static String
-COMPACTING_MEMSTORE_INDEX_KEY
-
-
-static String
 COMPACTING_MEMSTORE_TYPE_DEFAULT
     
     
@@ -427,8 +419,8 @@ extends
 setCompositeSnapshot(boolean useCompositeSnapshot)
     
     
    -void
    -setIndexType()
    +(package private) void
+setIndexType(CompactingMemStore.IndexType type)
     
     
     protected boolean
@@ -531,35 +523,13 @@ extends
 String COMPACTING_MEMSTORE_TYPE_DEFAULT
     
     
    -
    -
    -
    -
    -
    -COMPACTING_MEMSTORE_INDEX_KEY
-public static final String COMPACTING_MEMSTORE_INDEX_KEY
    -
    -See Also:
    -Constant
     Field Values
    -
    -
    -
    -
    -
    -
    -
    -
    -COMPACTING_MEMSTORE_INDEX_DEFAULT
-public static final String COMPACTING_MEMSTORE_INDEX_DEFAULT
    -
    -
     
     
     
     
     
     IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY
-public static final String IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY
+public static final String IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY
     
     See Also:
     Constant
     Field Values
    @@ -572,7 +542,7 @@ extends 
     
     IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT
-private static final double IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT
+private static final double IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT
     
     See Also:
     Constant
     Field Values
    @@ -585,7 +555,7 @@ extends 
     
     LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
     
     
     
    @@ -594,7 +564,7 @@ extends 
     
     store
-private HStore store
+private HStore store
     
     
     
    @@ -603,7 +573,7 @@ extends 
     
     regionServices
-private RegionServicesForStores regionServices
+private RegionServicesForStores regionServices
     
     
     
    @@ -612,7 +582,7 @@ extends 
     
     pipeline
-private CompactionPipeline pipeline
+private CompactionPipeline pipeline
     
     
     
    @@ -621,7 +591,7 @@ extends 
     
     compactor
-protected MemStoreCompactor compactor
+protected MemStoreCompactor compactor
     
     
     
    @@ -630,7 +600,7 @@ extends 
     
     inmemoryFlushSize
    -privatelong inmemoryFlushSize
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html 
    b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
    index 8301da7..3b1f09e 100644
    --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
    +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
    @@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
     
     
     @InterfaceAudience.Private
    -public class BucketCache
    +public class BucketCache
extends java.lang.Object
     implements BlockCache, HeapSize
     BucketCache uses BucketAllocator to 
    allocate/free blocks, and uses
    @@ -781,7 +781,7 @@ implements 
     
     LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
     
     
     
    @@ -790,7 +790,7 @@ implements 
     
     SINGLE_FACTOR_CONFIG_NAME
-static final String SINGLE_FACTOR_CONFIG_NAME
+static final String SINGLE_FACTOR_CONFIG_NAME
     Priority buckets config
     
     See Also:
    @@ -804,7 +804,7 @@ implements 
     
     MULTI_FACTOR_CONFIG_NAME
-static final String MULTI_FACTOR_CONFIG_NAME
+static final String MULTI_FACTOR_CONFIG_NAME
     
     See Also:
     Constant
     Field Values
    @@ -817,7 +817,7 @@ implements 
     
     MEMORY_FACTOR_CONFIG_NAME
-static final String MEMORY_FACTOR_CONFIG_NAME
+static final String MEMORY_FACTOR_CONFIG_NAME
     
     See Also:
     Constant
     Field Values
    @@ -830,7 +830,7 @@ implements 
     
     EXTRA_FREE_FACTOR_CONFIG_NAME
-static final String EXTRA_FREE_FACTOR_CONFIG_NAME
+static final String EXTRA_FREE_FACTOR_CONFIG_NAME
     
     See Also:
     Constant
     Field Values
    @@ -843,7 +843,7 @@ implements 
     
     ACCEPT_FACTOR_CONFIG_NAME
-static final String ACCEPT_FACTOR_CONFIG_NAME
+static final String ACCEPT_FACTOR_CONFIG_NAME
     
     See Also:
     Constant
     Field Values
    @@ -856,7 +856,7 @@ implements 
     
     MIN_FACTOR_CONFIG_NAME
-static final String MIN_FACTOR_CONFIG_NAME
+static final String MIN_FACTOR_CONFIG_NAME
     
     See Also:
     Constant
     Field Values
    @@ -869,7 +869,7 @@ implements 
     
     DEFAULT_SINGLE_FACTOR
-static final float DEFAULT_SINGLE_FACTOR
+static final float DEFAULT_SINGLE_FACTOR
     Priority buckets
     
     See Also:
    @@ -883,7 +883,7 @@ implements 
     
     DEFAULT_MULTI_FACTOR
-static final float DEFAULT_MULTI_FACTOR
+static final float DEFAULT_MULTI_FACTOR
     
     See Also:
     Constant
     Field Values
    @@ -896,7 +896,7 @@ implements 
     
     DEFAULT_MEMORY_FACTOR
-static final float DEFAULT_MEMORY_FACTOR
+static final float DEFAULT_MEMORY_FACTOR
     
     See Also:
     Constant
     Field Values
    @@ -909,7 +909,7 @@ implements 
     
     DEFAULT_MIN_FACTOR
-static final float DEFAULT_MIN_FACTOR
+static final float DEFAULT_MIN_FACTOR
     
     See Also:
     Constant
     Field Values
    @@ -922,7 +922,7 @@ implements 
     
     DEFAULT_EXTRA_FREE_FACTOR
-private static final float DEFAULT_EXTRA_FREE_FACTOR
+private static final float DEFAULT_EXTRA_FREE_FACTOR
     
     See Also:
     Constant
     Field Values
    @@ -935,7 +935,7 @@ implements 
     
     DEFAULT_ACCEPT_FACTOR
-private static final float DEFAULT_ACCEPT_FACTOR
+private static final float DEFAULT_ACCEPT_FACTOR
     
     See Also:
     Constant
     Field Values
    @@ -948,7 +948,7 @@ implements 
     
     DEFAULT_FREE_ENTIRE_BLOCK_FACTOR
-private static final int DEFAULT_FREE_ENTIRE_BLOCK_FACTOR
+private static final int DEFAULT_FREE_ENTIRE_BLOCK_FACTOR
     
     See Also:
     Constant
     Field Values
    @@ -961,7 +961,7 @@ implements 
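The single/multi/memory factors above split the bucket cache between block priorities, and BucketCache's constructor rejects configurations where they do not sum to 1.0. A small sketch of that check; the literal defaults are assumptions, since the fragment only links to the generated Constant Field Values page:

public class BucketFactorCheck {
  public static void main(String[] args) {
    float singleFactor = 0.25f; // assumed DEFAULT_SINGLE_FACTOR
    float multiFactor = 0.50f;  // assumed DEFAULT_MULTI_FACTOR
    float memoryFactor = 0.25f; // assumed DEFAULT_MEMORY_FACTOR
    if (Math.abs(singleFactor + multiFactor + memoryFactor - 1.0f) > 1e-6f) {
      throw new IllegalArgumentException("single + multi + memory factors must sum to 1.0");
    }
    System.out.println("factors ok");
  }
}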
     
     

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSByteBufferCell.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSByteBufferCell.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSByteBufferCell.html
    index 3400507..2baa140 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSByteBufferCell.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.FirstOnRowColTSByteBufferCell.html
    @@ -28,3034 +28,2926 @@
     020import static 
    org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
     021import static 
    org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
     022
    -023import 
    com.google.common.annotations.VisibleForTesting;
    -024
    -025import java.io.DataOutput;
    -026import java.io.DataOutputStream;
    -027import java.io.IOException;
    -028import java.io.OutputStream;
    -029import java.math.BigDecimal;
    -030import java.nio.ByteBuffer;
    -031import java.util.ArrayList;
    -032import java.util.Iterator;
    -033import java.util.List;
    -034import java.util.Optional;
    -035
    -036import 
    org.apache.hadoop.hbase.KeyValue.Type;
    -037import 
    org.apache.hadoop.hbase.filter.ByteArrayComparable;
    -038import 
    org.apache.hadoop.hbase.io.HeapSize;
    -039import 
    org.apache.hadoop.hbase.io.TagCompressionContext;
    -040import 
    org.apache.hadoop.hbase.io.util.Dictionary;
    -041import 
    org.apache.hadoop.hbase.io.util.StreamUtils;
    -042import 
    org.apache.hadoop.hbase.util.ByteBufferUtils;
    -043import 
    org.apache.hadoop.hbase.util.ByteRange;
    -044import 
    org.apache.hadoop.hbase.util.Bytes;
    -045import 
    org.apache.hadoop.hbase.util.ClassSize;
    -046import 
    org.apache.yetus.audience.InterfaceAudience;
    -047
    -048
    -049/**
    -050 * Utility methods helpful slinging 
    {@link Cell} instances. It has more powerful and
    -051 * rich set of APIs than those in {@link 
    CellUtil} for internal usage.
    -052 */
    -053@InterfaceAudience.Private
    -054public final class PrivateCellUtil {
    -055
    -056  /**
    -057   * Private constructor to keep this 
    class from being instantiated.
    -058   */
    -059  private PrivateCellUtil() {
    -060  }
    +023import java.io.DataOutput;
    +024import java.io.DataOutputStream;
    +025import java.io.IOException;
    +026import java.io.OutputStream;
    +027import java.math.BigDecimal;
    +028import java.nio.ByteBuffer;
    +029import java.util.ArrayList;
    +030import java.util.Iterator;
    +031import java.util.List;
    +032import java.util.Optional;
    +033import 
    org.apache.hadoop.hbase.KeyValue.Type;
    +034import 
    org.apache.hadoop.hbase.filter.ByteArrayComparable;
    +035import 
    org.apache.hadoop.hbase.io.HeapSize;
    +036import 
    org.apache.hadoop.hbase.io.TagCompressionContext;
    +037import 
    org.apache.hadoop.hbase.io.util.Dictionary;
    +038import 
    org.apache.hadoop.hbase.io.util.StreamUtils;
    +039import 
    org.apache.hadoop.hbase.util.ByteBufferUtils;
    +040import 
    org.apache.hadoop.hbase.util.ByteRange;
    +041import 
    org.apache.hadoop.hbase.util.Bytes;
    +042import 
    org.apache.hadoop.hbase.util.ClassSize;
    +043import 
    org.apache.yetus.audience.InterfaceAudience;
    +044
    +045import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    +046
    +047/**
    +048 * Utility methods helpful slinging 
    {@link Cell} instances. It has more powerful and
    +049 * rich set of APIs than those in {@link 
    CellUtil} for internal usage.
    +050 */
    +051@InterfaceAudience.Private
    +052public final class PrivateCellUtil {
    +053
    +054  /**
    +055   * Private constructor to keep this 
    class from being instantiated.
    +056   */
    +057  private PrivateCellUtil() {
    +058  }
    +059
    +060  /*** ByteRange 
    ***/
     061
    -062  /*** ByteRange 
    ***/
    -063
    -064  public static ByteRange 
    fillRowRange(Cell cell, ByteRange range) {
    -065return range.set(cell.getRowArray(), 
    cell.getRowOffset(), cell.getRowLength());
    -066  }
    -067
    -068  public static ByteRange 
    fillFamilyRange(Cell cell, ByteRange range) {
    -069return 
    range.set(cell.getFamilyArray(), cell.getFamilyOffset(), 
    cell.getFamilyLength());
    -070  }
    -071
    -072  public static ByteRange 
    fillQualifierRange(Cell cell, ByteRange range) {
    -073return 
    range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
    -074  cell.getQualifierLength());
    -075  }
    -076
    -077  public static ByteRange 
    fillValueRange(Cell cell, ByteRange range) {
    -078return 
    range.set(cell.getValueArray(), cell.getValueOffset(), 
    cell.getValueLength());
    -079  }
    -080
    -081  public static ByteRange 
    fillTagRange(Cell cell, ByteRange range) {
    -082return range.set(cell.getTagsArray(), 
    cell.getTagsOffset(), cell.getTagsLength());
    -083  }
    +062  public static ByteRange 
    fillRowRange(Cell cell, ByteRange range) {
    +063return range.set(cell.getRowArray(), 
    cell.getRowOffset(), cell.getRowLength());
    +064  }
    +065
    +066  public static ByteRange 
    fillFamilyRange(Cell cell, ByteRange 
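The fill*Range helpers shown above point a caller-supplied ByteRange at one component of a Cell without copying bytes. A short sketch against those signatures; note PrivateCellUtil is @InterfaceAudience.Private, so this is internal-style usage, and SimpleMutableByteRange is just one concrete ByteRange:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

public class RowRangeExample {
  public static void main(String[] args) {
    Cell cell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    // Points the range at the cell's row bytes; no copy happens here.
    ByteRange row = PrivateCellUtil.fillRowRange(cell, new SimpleMutableByteRange());
    System.out.println(Bytes.toString(row.deepCopyToNewArray()));
  }
}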

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
    --
    diff --git 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
     
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
    index 07b6abe..f51c693 100644
    --- 
    a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
    +++ 
    b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
    @@ -78,2190 +78,2184 @@
     070import 
    org.apache.hadoop.hbase.procedure2.LockType;
     071import 
    org.apache.hadoop.hbase.procedure2.LockedResource;
     072import 
    org.apache.hadoop.hbase.procedure2.Procedure;
    -073import 
    org.apache.hadoop.hbase.procedure2.ProcedureUtil;
    -074import 
    org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
    -075import 
    org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;
    -076import 
    org.apache.hadoop.hbase.quotas.MasterQuotaManager;
    -077import 
    org.apache.hadoop.hbase.quotas.QuotaObserverChore;
    -078import 
    org.apache.hadoop.hbase.quotas.QuotaUtil;
    -079import 
    org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
    -080import 
    org.apache.hadoop.hbase.regionserver.RSRpcServices;
    -081import 
    org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
    -082import 
    org.apache.hadoop.hbase.replication.ReplicationException;
    -083import 
    org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    -084import 
    org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
    -085import 
    org.apache.hadoop.hbase.security.User;
    -086import 
    org.apache.hadoop.hbase.security.access.AccessController;
    -087import 
    org.apache.hadoop.hbase.security.visibility.VisibilityController;
    -088import 
    org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
    -089import 
    org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
    -090import 
    org.apache.hadoop.hbase.util.Bytes;
    -091import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -092import 
    org.apache.hadoop.hbase.util.ForeignExceptionUtil;
    -093import 
    org.apache.hadoop.hbase.util.Pair;
    -094import 
    org.apache.yetus.audience.InterfaceAudience;
    -095import 
    org.apache.zookeeper.KeeperException;
    -096import org.slf4j.Logger;
    -097import org.slf4j.LoggerFactory;
    -098
    -099import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
    -100import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
    -101import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
    -102import 
    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -103import 
    org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
    -104import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
    -105import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
    -106import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
    -107import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
    -108import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    -109import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
    -110import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
    -111import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
    -112import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
    -113import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
    -114import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
    -115import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
    -116import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
    -117import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
    -118import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
    -119import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
    -120import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
    -121import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
    -122import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
    -123import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
    -124import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
    -125import 
    org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
    -126import 
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
    --
    diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html 
    b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
    index 6fecbc9..2accda0 100644
    --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
    +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
    @@ -34,4140 +34,4141 @@
     026import 
    java.nio.charset.StandardCharsets;
     027import java.util.ArrayList;
     028import java.util.Arrays;
    -029import java.util.Collection;
    -030import java.util.EnumSet;
    -031import java.util.HashMap;
    -032import java.util.Iterator;
    -033import java.util.LinkedList;
    -034import java.util.List;
    -035import java.util.Map;
    -036import java.util.Set;
    -037import java.util.concurrent.Callable;
    -038import 
    java.util.concurrent.ExecutionException;
    -039import java.util.concurrent.Future;
    -040import java.util.concurrent.TimeUnit;
    -041import 
    java.util.concurrent.TimeoutException;
    -042import 
    java.util.concurrent.atomic.AtomicInteger;
    -043import 
    java.util.concurrent.atomic.AtomicReference;
    -044import java.util.regex.Pattern;
    -045import java.util.stream.Collectors;
    -046import java.util.stream.Stream;
    -047import 
    org.apache.hadoop.conf.Configuration;
    -048import 
    org.apache.hadoop.hbase.Abortable;
    -049import 
    org.apache.hadoop.hbase.CacheEvictionStats;
    -050import 
    org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
    -051import 
    org.apache.hadoop.hbase.ClusterMetrics.Option;
    -052import 
    org.apache.hadoop.hbase.ClusterStatus;
    -053import 
    org.apache.hadoop.hbase.DoNotRetryIOException;
    -054import 
    org.apache.hadoop.hbase.HBaseConfiguration;
    -055import 
    org.apache.hadoop.hbase.HConstants;
    -056import 
    org.apache.hadoop.hbase.HRegionInfo;
    -057import 
    org.apache.hadoop.hbase.HRegionLocation;
    -058import 
    org.apache.hadoop.hbase.HTableDescriptor;
    -059import 
    org.apache.hadoop.hbase.MasterNotRunningException;
    -060import 
    org.apache.hadoop.hbase.MetaTableAccessor;
    -061import 
    org.apache.hadoop.hbase.NamespaceDescriptor;
    -062import 
    org.apache.hadoop.hbase.NamespaceNotFoundException;
    -063import 
    org.apache.hadoop.hbase.NotServingRegionException;
    -064import 
    org.apache.hadoop.hbase.RegionLoad;
    -065import 
    org.apache.hadoop.hbase.RegionLocations;
    -066import 
    org.apache.hadoop.hbase.ServerName;
    -067import 
    org.apache.hadoop.hbase.TableExistsException;
    -068import 
    org.apache.hadoop.hbase.TableName;
    -069import 
    org.apache.hadoop.hbase.TableNotDisabledException;
    -070import 
    org.apache.hadoop.hbase.TableNotFoundException;
    -071import 
    org.apache.hadoop.hbase.UnknownRegionException;
    -072import 
    org.apache.hadoop.hbase.ZooKeeperConnectionException;
    -073import 
    org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
    -074import 
    org.apache.hadoop.hbase.client.replication.TableCFs;
    -075import 
    org.apache.hadoop.hbase.client.security.SecurityCapability;
    -076import 
    org.apache.hadoop.hbase.exceptions.TimeoutIOException;
    -077import 
    org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
    -078import 
    org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
    -079import 
    org.apache.hadoop.hbase.ipc.HBaseRpcController;
    -080import 
    org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    -081import 
    org.apache.hadoop.hbase.quotas.QuotaFilter;
    -082import 
    org.apache.hadoop.hbase.quotas.QuotaRetriever;
    -083import 
    org.apache.hadoop.hbase.quotas.QuotaSettings;
    -084import 
    org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
    -085import 
    org.apache.hadoop.hbase.replication.ReplicationException;
    -086import 
    org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
    -087import 
    org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
    -088import 
    org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
    -089import 
    org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
    -090import 
    org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
    -091import 
    org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
    -092import 
    org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
    -093import 
    org.apache.hadoop.hbase.util.Addressing;
    -094import 
    org.apache.hadoop.hbase.util.Bytes;
    -095import 
    org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    -096import 
    org.apache.hadoop.hbase.util.ForeignExceptionUtil;
    -097import 
    org.apache.hadoop.hbase.util.Pair;
    -098import 
    org.apache.hadoop.ipc.RemoteException;
    -099import 
    org.apache.hadoop.util.StringUtils;
    -100import 
    org.apache.yetus.audience.InterfaceAudience;
    -101import 
    org.apache.yetus.audience.InterfaceStability;
    -102import org.slf4j.Logger;
    -103import org.slf4j.LoggerFactory;
    -104
    -105import 
    org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
    -106import 
    org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
    -107import 
    org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    -108import 
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/org/apache/hadoop/hbase/CompareOperator.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/CompareOperator.html 
    b/devapidocs/org/apache/hadoop/hbase/CompareOperator.html
    index c3b2946..18a2ddb 100644
    --- a/devapidocs/org/apache/hadoop/hbase/CompareOperator.html
    +++ b/devapidocs/org/apache/hadoop/hbase/CompareOperator.html
    @@ -49,7 +49,7 @@ var activeTableTab = "activeTableTab";
     
     
     
    -PrevClass
    +PrevClass
     NextClass
     
     
    @@ -383,7 +383,7 @@ not permitted.)
     
     
     
    -PrevClass
    +PrevClass
     NextClass
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html 
    b/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html
    index 426945c..39b705c 100644
    --- a/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html
    +++ b/devapidocs/org/apache/hadoop/hbase/KeepDeletedCells.html
    @@ -266,7 +266,7 @@ the order they are declared.
     
     
     values
-public static KeepDeletedCells[] values()
+public static KeepDeletedCells[] values()
     Returns an array containing the constants of this enum 
    type, in
     the order they are declared.  This method may be used to iterate
     over the constants as follows:
    @@ -286,7 +286,7 @@ for (KeepDeletedCells c : KeepDeletedCells.values())
     
     
     valueOf
-public static KeepDeletedCells valueOf(String name)
+public static KeepDeletedCells valueOf(String name)
     Returns the enum constant of this type with the specified 
    name.
     The string must match exactly an identifier used to declare an
     enum constant in this type.  (Extraneous whitespace characters are 
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html 
    b/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html
    index 73a32d0..62cca66 100644
    --- a/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html
    +++ b/devapidocs/org/apache/hadoop/hbase/MemoryCompactionPolicy.html
    @@ -279,7 +279,7 @@ the order they are declared.
     
     
     values
-public static MemoryCompactionPolicy[] values()
+public static MemoryCompactionPolicy[] values()
     Returns an array containing the constants of this enum 
    type, in
     the order they are declared.  This method may be used to iterate
     over the constants as follows:
    @@ -299,7 +299,7 @@ for (MemoryCompactionPolicy c : 
    MemoryCompactionPolicy.values())
     
     
     valueOf
-public static MemoryCompactionPolicy valueOf(String name)
+public static MemoryCompactionPolicy valueOf(String name)
     Returns the enum constant of this type with the specified 
    name.
     The string must match exactly an identifier used to declare an
     enum constant in this type.  (Extraneous whitespace characters are 
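Both enums follow the standard values()/valueOf() contract the javadoc describes; a compact, runnable illustration:

import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;

public class EnumLookupExample {
  public static void main(String[] args) {
    // Iterate the constants in declaration order.
    for (KeepDeletedCells c : KeepDeletedCells.values()) {
      System.out.println(c);
    }
    // valueOf needs the exact constant name, else IllegalArgumentException.
    MemoryCompactionPolicy policy = MemoryCompactionPolicy.valueOf("BASIC");
    System.out.println(policy);
  }
}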
    
    
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html 
    b/devapidocs/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
    index 88d66e7..3ac4b13 100644
    --- a/devapidocs/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
    +++ b/devapidocs/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
    @@ -152,7 +152,7 @@ extends Field and Description
     
     
-private static org.apache.commons.logging.Log
+private static org.slf4j.Logger
     LOG
     
     
    @@ -252,7 +252,7 @@ extends 
     
     LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.html 
    b/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.html
    index 559ab52..d89cf57 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.html
    @@ -162,7 +162,7 @@ extends Field and Description
     
     
    -static org.apache.commons.logging.Log
    +static org.slf4j.Logger
     LOG
     
     
    @@ -286,7 +286,7 @@ extends 
     
     LOG
-public static final org.apache.commons.logging.Log LOG
+public static final org.slf4j.Logger LOG
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html
     
    b/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html
    index 7b88fa4..b6707c1 100644
    --- 
    a/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html
    +++ 
    b/devapidocs/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.html
    @@ -153,7 +153,7 @@ extends Field and Description
     
     
-private static org.apache.commons.logging.Log
+private static org.slf4j.Logger
     LOG
     
     
    @@ -299,7 +299,7 @@ extends 
     
     LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.html 
    b/devapidocs/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.html
    index 1caae5c..ace49de 100644
    --- a/devapidocs/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.html
    +++ b/devapidocs/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.html
    @@ -148,7 +148,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     isOverwrite
     
     
-private static org.apache.commons.logging.Log
+private static org.slf4j.Logger
     LOG
     
     
    @@ -263,7 +263,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html 
    b/devapidocs/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html
    index 3e28de9..0de81bc 100644
    --- a/devapidocs/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html
    +++ b/devapidocs/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html
    @@ -185,7 +185,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     conn
     
     
-private static org.apache.commons.logging.Log
+private static org.slf4j.Logger
     LOG
     
     
    @@ -392,7 +392,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
     
     
     
    
    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.BackupDistCp.html
    --
    diff --git 
    a/devapidocs/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.BackupDistCp.html
     
    b/devapidocs/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.BackupDistCp.html
    index 9db6332..2488141 100644
    --- 
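Each diff in this fragment swaps a commons-logging Log field for an slf4j Logger. The resulting declaration pattern looks like this (class name illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FullTableBackupClientStyle {
  // Post-change form of the LOG fields shown above.
  private static final Logger LOG = LoggerFactory.getLogger(FullTableBackupClientStyle.class);
}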
    

    [06/51] [partial] hbase-site git commit: Published site at .

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dad9a249/devapidocs/org/apache/hadoop/hbase/util/CommonFSUtils.html
    --
    diff --git a/devapidocs/org/apache/hadoop/hbase/util/CommonFSUtils.html 
    b/devapidocs/org/apache/hadoop/hbase/util/CommonFSUtils.html
    index 82e611c..002d13f 100644
    --- a/devapidocs/org/apache/hadoop/hbase/util/CommonFSUtils.html
    +++ b/devapidocs/org/apache/hadoop/hbase/util/CommonFSUtils.html
    @@ -567,7 +567,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     warningMap
-private static final Map<org.apache.hadoop.fs.FileSystem, Boolean> warningMap
+private static final Map<org.apache.hadoop.fs.FileSystem, Boolean> warningMap
     
     
     
    @@ -987,7 +987,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     getTableDir
-public static org.apache.hadoop.fs.Path getTableDir(org.apache.hadoop.fs.Path rootdir,
+public static org.apache.hadoop.fs.Path getTableDir(org.apache.hadoop.fs.Path rootdir,
 TableName tableName)
     Returns the Path object representing the table 
    directory under
      path rootdir
    @@ -1006,7 +1006,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     getTableName
-public static TableName getTableName(org.apache.hadoop.fs.Path tablePath)
+public static TableName getTableName(org.apache.hadoop.fs.Path tablePath)
     Returns the TableName object representing
      the table directory under
      path rootdir
    @@ -1024,7 +1024,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     getNamespaceDir
-public static org.apache.hadoop.fs.Path getNamespaceDir(org.apache.hadoop.fs.Path rootdir,
+public static org.apache.hadoop.fs.Path getNamespaceDir(org.apache.hadoop.fs.Path rootdir,
 String namespace)
     Returns the Path object representing
      the namespace directory under path rootdir
    @@ -1043,7 +1043,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     setStoragePolicy
    -public staticvoidsetStoragePolicy(org.apache.hadoop.fs.FileSystemfs,
    +public staticvoidsetStoragePolicy(org.apache.hadoop.fs.FileSystemfs,
     
    org.apache.hadoop.conf.Configurationconf,
     org.apache.hadoop.fs.Pathpath,
     http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringpolicyKey,
    @@ -1074,7 +1074,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     setStoragePolicy
    -public staticvoidsetStoragePolicy(org.apache.hadoop.fs.FileSystemfs,
    +public staticvoidsetStoragePolicy(org.apache.hadoop.fs.FileSystemfs,
     org.apache.hadoop.fs.Pathpath,
     http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringstoragePolicy)
     Sets storage policy for given path.
    @@ -1101,7 +1101,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     invokeSetStoragePolicy
    -private staticvoidinvokeSetStoragePolicy(org.apache.hadoop.fs.FileSystemfs,
    +private staticvoidinvokeSetStoragePolicy(org.apache.hadoop.fs.FileSystemfs,
    org.apache.hadoop.fs.Pathpath,
    http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
     title="class or interface in java.lang">StringstoragePolicy)
     
    @@ -1112,7 +1112,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     isHDFS
    -public staticbooleanisHDFS(org.apache.hadoop.conf.Configurationconf)
    +public staticbooleanisHDFS(org.apache.hadoop.conf.Configurationconf)
       throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
     title="class or interface in java.io">IOException
     
     Parameters:
    @@ -1130,7 +1130,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
     
     
     isRecoveredEdits
    -public staticbooleanisRecoveredEdits(org.apache.hadoop.fs.Pathpath)
    +public staticbooleanisRecoveredEdits(org.apache.hadoop.fs.Pathpath)
     Checks if the given path is the one with 'recovered.edits' 
    dir.
     
     
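The path helpers in this hunk compose: a table directory sits under its namespace directory beneath the HBase root. A hedged sketch of how a caller strings them together (the getRootDir lookup and the table name are assumptions for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class LayoutProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path root = CommonFSUtils.getRootDir(conf);        // from hbase.rootdir
    TableName tn = TableName.valueOf("ns", "events");  // hypothetical table
    Path nsDir = CommonFSUtils.getNamespaceDir(root, "ns");
    Path tableDir = CommonFSUtils.getTableDir(root, tn);
    System.out.println(nsDir + " -> " + tableDir);
    // Round trip: recover the TableName from the table directory
    System.out.println(CommonFSUtils.getTableName(tableDir));
    System.out.println(CommonFSUtils.isHDFS(conf));    // true when rootdir lives on HDFS
  }
}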


    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.html
    --
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.html b/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.html
index 6646ba1..70481ce 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.html
@@ -43,1651 +43,1652 @@
 035import java.io.OutputStream;
 036import java.io.Serializable;
 037import java.io.UnsupportedEncodingException;
-038import java.util.zip.GZIPInputStream;
-039import java.util.zip.GZIPOutputStream;
-040
-041import org.apache.commons.logging.Log;
-042import org.apache.commons.logging.LogFactory;
-043import org.apache.yetus.audience.InterfaceAudience;
-044
-045/**
-046 * Encodes and decodes to and from Base64 notation.
-047 *
-048 * <p>
-049 * Homepage: <a href="http://iharder.net/base64">http://iharder.net/base64</a>.
-050 * </p>
-051 *
-052 * <p>
-053 * Change Log:
-054 * </p>
-055 * <ul>
-056 *   <li>v2.2.1 - Fixed bug using URL_SAFE and ORDERED encodings. Fixed bug
-057 * when using very small files (~&lt; 40 bytes).</li>
-058 *   <li>v2.2 - Added some helper methods for encoding/decoding directly from
-059 * one file to the next. Also added a main() method to support command
-060 * line encoding/decoding from one file to the next. Also added these
-061 * Base64 dialects:
-062 * <ol>
-063 *   <li>The default is RFC3548 format.</li>
-064 *   <li>Using Base64.URLSAFE generates URL and file name friendly format as
-065 * described in Section 4 of RFC3548.
-066 * http://www.faqs.org/rfcs/rfc3548.html</li>
-067 *   <li>Using Base64.ORDERED generates URL and file name friendly format
-068 * that preserves lexical ordering as described in
-069 * http://www.faqs.org/qa/rfcc-1940.html</li>
-070 * </ol>
-071 * <p>
-072 * Special thanks to Jim Kellerman at <a href="http://www.powerset.com/">
-073 * http://www.powerset.com/</a> for contributing the new Base64 dialects.
-074 *   </li>
-075 *
-076 *   <li>v2.1 - Cleaned up javadoc comments and unused variables and methods.
-077 * Added some convenience methods for reading and writing to and from files.
-078 *   </li>
-079 *   <li>v2.0.2 - Now specifies UTF-8 encoding in places where the code fails on
-080 * systems with other encodings (like EBCDIC).</li>
-081 *   <li>v2.0.1 - Fixed an error when decoding a single byte, that is, when the
-082 * encoded data was a single byte.</li>
-083 *   <li>v2.0 - I got rid of methods that used booleans to set options. Now
-084 * everything is more consolidated and cleaner. The code now detects when
-085 * data that's being decoded is gzip-compressed and will decompress it
-086 * automatically. Generally things are cleaner. You'll probably have to
-087 * change some method calls that you were making to support the new options
-088 * format (<tt>int</tt>s that you "OR" together).</li>
-089 *   <li>v1.5.1 - Fixed bug when decompressing and decoding to a byte[] using
-090 * <tt>decode( String s, boolean gzipCompressed )</tt>. Added the ability to
-091 * "suspend" encoding in the Output Stream so you can turn on and off the
-092 * encoding if you need to embed base64 data in an otherwise "normal" stream
-093 * (like an XML file).</li>
-094 *   <li>v1.5 - Output stream passes on flush() command but doesn't do anything
-095 * itself. This helps when using GZIP streams. Added the ability to
-096 * GZip-compress objects before encoding them.</li>
-097 *   <li>v1.4 - Added helper methods to read/write files.</li>
-098 *   <li>v1.3.6 - Fixed OutputStream.flush() so that 'position' is reset.</li>
-099 *   <li>v1.3.5 - Added flag to turn on and off line breaks. Fixed bug in input
-100 * stream where last buffer being read, if not completely full, was not
-101 * returned.</li>
-102 *   <li>v1.3.4 - Fixed when "improperly padded stream" error was thrown at the
-103 * wrong time.</li>
-104 *   <li>v1.3.3 - Fixed I/O streams which were totally messed up.</li>
-105 * </ul>
-106 *
-107 * <p>
-108 * I am placing this code in the Public Domain. Do with it as you will. This
-109 * software comes with no guarantees or warranties but with plenty of
-110 * well-wishing instead!
-111 * <p>
-112 * Please visit <a href="http://iharder.net/base64">http://iharder.net/base64</a>
-113 * periodically to check for updates or to contribute improvements.
-114 * <p>
-115 * author: Robert Harder, r...@iharder.net
-116 * <br>
-117 * version: 2.2.1
-118 */
-119@InterfaceAudience.Public
-120public class Base64 {
-121
-122  /*  P U B L I C   F I E L D S  */
-123
-124  /** No options specified. Value is zero. */
-125  public final static int NO_OPTIONS = 0;
-126
-127  /** Specify encoding. */
-128  public final static int ENCODE = 1;
-129
-130  /** Specify

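Since the page above carries the full v2.2.1 source of the bundled iharder codec, a hedged usage sketch of the option flags it documents; the method and constant names are taken from that version and may not exist on branches that have since moved to java.util.Base64:

import org.apache.hadoop.hbase.util.Base64;

public class Base64Demo {
  public static void main(String[] args) {
    byte[] raw = "hello hbase".getBytes();
    String std = Base64.encodeBytes(raw);                      // default RFC3548 alphabet
    String urlSafe = Base64.encodeBytes(raw, Base64.URL_SAFE); // Section 4 of RFC3548 alphabet
    byte[] roundTrip = Base64.decode(std);                     // gzip content is detected automatically
    System.out.println(std + " / " + urlSafe + " / " + new String(roundTrip));
  }
}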

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallCleanup.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallCleanup.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallCleanup.html
index 5e45072..34535d8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallCleanup.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallCleanup.html
@@ -50,754 +50,751 @@
 042import org.apache.hadoop.hbase.CallQueueTooBigException;
 043import org.apache.hadoop.hbase.CellScanner;
 044import org.apache.hadoop.hbase.DoNotRetryIOException;
-045import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-046import org.apache.hadoop.hbase.HConstants;
-047import org.apache.hadoop.hbase.Server;
-048import org.apache.hadoop.hbase.conf.ConfigurationObserver;
-049import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
-050import org.apache.hadoop.hbase.io.ByteBufferPool;
-051import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-052import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-053import org.apache.hadoop.hbase.nio.ByteBuff;
-054import org.apache.hadoop.hbase.nio.MultiByteBuff;
-055import org.apache.hadoop.hbase.nio.SingleByteBuff;
-056import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-057import org.apache.hadoop.hbase.security.SaslUtil;
-058import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
-059import org.apache.hadoop.hbase.security.User;
-060import org.apache.hadoop.hbase.security.UserProvider;
-061import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
-062import org.apache.hadoop.hbase.util.Pair;
-063import org.apache.hadoop.security.UserGroupInformation;
-064import org.apache.hadoop.security.authorize.AuthorizationException;
-065import org.apache.hadoop.security.authorize.PolicyProvider;
-066import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-067import org.apache.hadoop.security.token.SecretManager;
-068import org.apache.hadoop.security.token.TokenIdentifier;
-069import org.apache.yetus.audience.InterfaceAudience;
-070import org.apache.yetus.audience.InterfaceStability;
-071
-072import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-073import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-074import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-075import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-076import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-077import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-078import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-079import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-080import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader;
-081
-082/**
-083 * An RPC server that hosts protobuf described Services.
-084 *
-085 */
-086@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
-087@InterfaceStability.Evolving
-088public abstract class RpcServer implements RpcServerInterface,
-089    ConfigurationObserver {
-090  // LOG is being used in CallRunner and the log level is being changed in tests
-091  public static final Log LOG = LogFactory.getLog(RpcServer.class);
-092  protected static final CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION
-093      = new CallQueueTooBigException();
+045import org.apache.hadoop.hbase.HConstants;
+046import org.apache.hadoop.hbase.Server;
+047import org.apache.hadoop.hbase.conf.ConfigurationObserver;
+048import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
+049import org.apache.hadoop.hbase.io.ByteBufferPool;
+050import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
+051import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+052import org.apache.hadoop.hbase.nio.ByteBuff;
+053import org.apache.hadoop.hbase.nio.MultiByteBuff;
+054import org.apache.hadoop.hbase.nio.SingleByteBuff;
+055import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+056import org.apache.hadoop.hbase.security.SaslUtil;
+057import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
+058import org.apache.hadoop.hbase.security.User;
+059import org.apache.hadoop.hbase.security.UserProvider;
+060import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
+061import org.apache.hadoop.hbase.util.Pair;
+062import org.apache.hadoop.security.UserGroupInformation;
+063import org.apache.hadoop.security.authorize.AuthorizationException;
+064import org.apache.hadoop.security.authorize.PolicyProvider;
+065import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
+066import
    


    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
index 219283e..2b5d70b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
@@ -435,1198 +435,1203 @@
 427
 428    if (backingMap.containsKey(cacheKey)) {
 429      Cacheable existingBlock = getBlock(cacheKey, false, false, false);
-430      if (BlockCacheUtil.compareCacheBlock(cachedItem, existingBlock) != 0) {
-431        throw new RuntimeException("Cached block contents differ, which should not have happened."
-432            + "cacheKey:" + cacheKey);
-433      }
-434      String msg = "Caching an already cached block: " + cacheKey;
-435      msg += ". This is harmless and can happen in rare cases (see HBASE-8547)";
-436      LOG.warn(msg);
-437      return;
-438    }
-439
-440    /*
-441     * Stuff the entry into the RAM cache so it can get drained to the persistent store
-442     */
-443    RAMQueueEntry re =
-444        new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory);
-445    if (ramCache.putIfAbsent(cacheKey, re) != null) {
-446      return;
-447    }
-448    int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size();
-449    BlockingQueue<RAMQueueEntry> bq = writerQueues.get(queueNum);
-450    boolean successfulAddition = false;
-451    if (wait) {
-452      try {
-453        successfulAddition = bq.offer(re, DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS);
-454      } catch (InterruptedException e) {
-455        Thread.currentThread().interrupt();
-456      }
-457    } else {
-458      successfulAddition = bq.offer(re);
-459    }
-460    if (!successfulAddition) {
-461      ramCache.remove(cacheKey);
-462      cacheStats.failInsert();
-463    } else {
-464      this.blockNumber.increment();
-465      this.heapSize.add(cachedItem.heapSize());
-466      blocksByHFile.add(cacheKey);
-467    }
-468  }
-469
-470  /**
-471   * Get the buffer of the block with the specified key.
-472   * @param key block's cache key
-473   * @param caching true if the caller caches blocks on cache misses
-474   * @param repeat Whether this is a repeat lookup for the same block
-475   * @param updateCacheMetrics Whether we should update cache metrics or not
-476   * @return buffer of specified cache key, or null if not in cache
-477   */
-478  @Override
-479  public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
-480      boolean updateCacheMetrics) {
-481    if (!cacheEnabled) {
-482      return null;
-483    }
-484    RAMQueueEntry re = ramCache.get(key);
-485    if (re != null) {
-486      if (updateCacheMetrics) {
-487        cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
-488      }
-489      re.access(accessCount.incrementAndGet());
-490      return re.getData();
-491    }
-492    BucketEntry bucketEntry = backingMap.get(key);
-493    if (bucketEntry != null) {
-494      long start = System.nanoTime();
-495      ReentrantReadWriteLock lock = offsetLock.getLock(bucketEntry.offset());
-496      try {
-497        lock.readLock().lock();
-498        // We can not read here even if backingMap does contain the given key because its offset
-499        // maybe changed. If we lock BlockCacheKey instead of offset, then we can only check
-500        // existence here.
-501        if (bucketEntry.equals(backingMap.get(key))) {
-502          // TODO : change this area - should be removed after server cells and
-503          // 12295 are available
-504          int len = bucketEntry.getLength();
-505          if (LOG.isTraceEnabled()) {
-506            LOG.trace("Read offset=" + bucketEntry.offset() + ", len=" + len);
-507          }
-508          Cacheable cachedBlock = ioEngine.read(bucketEntry.offset(), len,
-509              bucketEntry.deserializerReference(this.deserialiserMap));
-510          long timeTaken = System.nanoTime() - start;
-511          if (updateCacheMetrics) {
-512            cacheStats.hit(caching, key.isPrimary(), key.getBlockType());
-513            cacheStats.ioHit(timeTaken);
-514          }
-515          if (cachedBlock.getMemoryType() == MemoryType.SHARED) {
-516            bucketEntry.refCount.incrementAndGet();
-517          }
-518          bucketEntry.access(accessCount.incrementAndGet());
-519          if (this.ioErrorStartTime > 0) {
-520            ioErrorStartTime = -1;
-521          }
-522          return cachedBlock;
-523        }
-524      } catch (IOException ioex) {
-525        LOG.error("Failed reading

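The cacheBlock body above is a bounded write-behind producer: an optimistic putIfAbsent into the RAM map, a plain or bounded-wait offer to a writer queue, and a rollback when the queue refuses. A standalone hedged sketch of that pattern in plain JDK types (the queue size and timeout are illustrative, not BucketCache's actual constants):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

public class WriteBehindCache<K, V> {
  private final ConcurrentMap<K, V> ramCache = new ConcurrentHashMap<>();
  private final BlockingQueue<K> writerQueue = new ArrayBlockingQueue<>(64);

  /** Returns false (after rolling back) when the writer queue rejects the entry. */
  public boolean cache(K key, V value, boolean wait) throws InterruptedException {
    if (ramCache.putIfAbsent(key, value) != null) {
      return true; // another caller already queued this key
    }
    boolean added = wait
        ? writerQueue.offer(key, 100, TimeUnit.MILLISECONDS) // bounded wait
        : writerQueue.offer(key);                            // fail fast
    if (!added) {
      ramCache.remove(key); // roll back so a later caller can retry
    }
    return added;
  }
}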

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/client/Append.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Append.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/Append.html
index 4678f40..04766ab 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Append.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Append.html
@@ -32,178 +32,217 @@
 024import org.apache.hadoop.hbase.Cell;
 025import org.apache.hadoop.hbase.CellUtil;
 026import org.apache.hadoop.hbase.KeyValue;
-027import org.apache.hadoop.hbase.security.access.Permission;
-028import org.apache.hadoop.hbase.security.visibility.CellVisibility;
-029import org.apache.hadoop.hbase.util.Bytes;
-030import org.apache.yetus.audience.InterfaceAudience;
-031
-032/**
-033 * Performs Append operations on a single row.
-034 * <p>
-035 * This operation ensures atomicity to readers. Appends are done
-036 * under a single row lock, so write operations to a row are synchronized, and
-037 * readers are guaranteed to see this operation fully completed.
-038 * <p>
-039 * To append to a set of columns of a row, instantiate an Append object with the
-040 * row to append to. At least one column to append must be specified using the
-041 * {@link #addColumn(byte[], byte[], byte[])} method.
-042 */
-043@InterfaceAudience.Public
-044public class Append extends Mutation {
-045  /**
-046   * @param returnResults
-047   *          True (default) if the append operation should return the results.
-048   *          A client that is not interested in the result can save network
-049   *          bandwidth setting this to false.
-050   */
-051  public Append setReturnResults(boolean returnResults) {
-052    super.setReturnResults(returnResults);
-053    return this;
-054  }
-055
-056  /**
-057   * @return current setting for returnResults
-058   */
-059  // This method makes public the superclasses's protected method.
-060  public boolean isReturnResults() {
-061    return super.isReturnResults();
-062  }
-063
-064  /**
-065   * Create a Append operation for the specified row.
-066   * <p>
-067   * At least one column must be appended to.
-068   * @param row row key; makes a local copy of passed in array.
-069   */
-070  public Append(byte[] row) {
-071    this(row, 0, row.length);
-072  }
-073  /**
-074   * Copy constructor
-075   * @param a
-076   */
-077  public Append(Append a) {
-078    this.row = a.getRow();
-079    this.ts = a.getTimeStamp();
-080    this.familyMap.putAll(a.getFamilyCellMap());
-081    for (Map.Entry<String, byte[]> entry : a.getAttributesMap().entrySet()) {
-082      this.setAttribute(entry.getKey(), entry.getValue());
-083    }
-084    this.setPriority(a.getPriority());
-085  }
-086
-087  /** Create a Append operation for the specified row.
-088   * <p>
-089   * At least one column must be appended to.
-090   * @param rowArray Makes a copy out of this buffer.
-091   * @param rowOffset
-092   * @param rowLength
-093   */
-094  public Append(final byte [] rowArray, final int rowOffset, final int rowLength) {
-095    checkRow(rowArray, rowOffset, rowLength);
-096    this.row = Bytes.copy(rowArray, rowOffset, rowLength);
-097  }
-098
-099  /**
-100   * Add the specified column and value to this Append operation.
-101   * @param family family name
-102   * @param qualifier column qualifier
-103   * @param value value to append to specified column
-104   * @return this
-105   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
-106   *             Use {@link #addColumn(byte[], byte[], byte[])} instead
+027import org.apache.hadoop.hbase.io.TimeRange;
+028import org.apache.hadoop.hbase.security.access.Permission;
+029import org.apache.hadoop.hbase.security.visibility.CellVisibility;
+030import org.apache.hadoop.hbase.util.Bytes;
+031import org.apache.hadoop.hbase.util.ClassSize;
+032import org.apache.yetus.audience.InterfaceAudience;
+033
+034/**
+035 * Performs Append operations on a single row.
+036 * <p>
+037 * This operation ensures atomicity to readers. Appends are done
+038 * under a single row lock, so write operations to a row are synchronized, and
+039 * readers are guaranteed to see this operation fully completed.
+040 * <p>
+041 * To append to a set of columns of a row, instantiate an Append object with the
+042 * row to append to. At least one column to append must be specified using the
+043 * {@link #addColumn(byte[], byte[], byte[])} method.
+044 */
+045@InterfaceAudience.Public
+046public class Append extends Mutation {
+047  private static final long HEAP_OVERHEAD = ClassSize.REFERENCE + ClassSize.TIMERANGE;
+048  private TimeRange tr = new TimeRange();
+049
+050  /**
+051   * Sets the TimeRange to be used on the Get for this append.
+052   * <p>
+053   * This is useful for when you have counters that only

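Because this page is the client-facing Append API (now with TimeRange support), a short hedged usage sketch; the table, row, and column names are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendDemo {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      Append append = new Append(Bytes.toBytes("row1"));
      append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes("|event"));
      append.setReturnResults(false);  // save bandwidth if the new value is not needed
      Result r = table.append(append); // may be empty/null when returnResults is false
      System.out.println(r == null || r.isEmpty());
    }
  }
}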

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
    --
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
index 1cef5b4..cb7a795 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
@@ -139,11 +139,10 @@
 
 
 
-boolean
-BackupObserver.postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
+void
+BackupObserver.postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
                  List<Pair<byte[],String>> stagingFamilyPaths,
-                 Map<byte[],List<org.apache.hadoop.fs.Path>> finalPaths,
-                 boolean hasLoaded)
+                 Map<byte[],List<org.apache.hadoop.fs.Path>> finalPaths)
 
 
 void
@@ -243,11 +242,10 @@
 
 
 
-default boolean
-RegionObserver.postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
+default void
+RegionObserver.postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
                  List<Pair<byte[],String>> stagingFamilyPaths,
-                 Map<byte[],List<org.apache.hadoop.fs.Path>> finalPaths,
-                 boolean hasLoaded)
+                 Map<byte[],List<org.apache.hadoop.fs.Path>> finalPaths)
 Called after bulkLoadHFile.
 
 
@@ -492,12 +490,10 @@
 default void
 RegionObserver.postWALRestore(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
                RegionInfo info,
-               WALKey logKey,
+               WALKey logKey,
                WALEdit logEdit)
-Deprecated.
-Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced
- with something that doesn't expose InterfaceAudience.Private classes.
-
+Called after a WALEdit
+ replayed for this region.
 
 
 
@@ -823,12 +819,10 @@
 default void
 RegionObserver.preWALRestore(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
               RegionInfo info,
-              WALKey logKey,
+              WALKey logKey,
               WALEdit logEdit)
-Deprecated.
-Since hbase-2.0.0. No replacement. To be removed in hbase-3.0.0 and replaced
- with something that doesn't expose InterfaceAudience.Private classes.
-
+Called before a WALEdit
+ replayed for this region.
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/WALCoprocessorEnvironment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/WALCoprocessorEnvironment.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/WALCoprocessorEnvironment.html
index 1783cb0..dfec323 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/WALCoprocessorEnvironment.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/WALCoprocessorEnvironment.html
@@ -120,7 +120,7 @@
 default void
 WALObserver.postWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
             RegionInfo info,
-            WALKey logKey,
+            WALKey logKey,
             WALEdit logEdit)
 Deprecated.
 Since hbase-2.0.0. To be replaced with an alternative that does not expose
@@ -140,7 +140,7 @@
 default void
     
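For coprocessor authors tracking the signature change above, a hedged sketch of the new void-returning hook; the observer still needs the usual RegionCoprocessor wiring to be loaded, and the println stands in for real handling:

import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Pair;

public class BulkLoadAuditObserver implements RegionObserver {
  @Override
  public void postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
      List<Pair<byte[], String>> stagingFamilyPaths,
      Map<byte[], List<Path>> finalPaths) throws IOException {
    // The hasLoaded flag is gone: the hook now only observes, per family,
    // where the bulk-loaded HFiles ended up.
    finalPaths.forEach((family, paths) ->
        paths.forEach(p -> System.out.println("bulk loaded " + p)));
  }
}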


    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
    --
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
index 6589991..a9afb8f 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
@@ -7,7 +7,7 @@
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype Project Dependencies
 
@@ -930,216 +930,223 @@
 jar
 Apache License, Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
 
+org.apache.hbase
+hbase-zookeeper (http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper)
+3.0.0-SNAPSHOT
+tests
+test-jar
+Apache License, Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
+
 org.apache.htrace
 htrace-core (http://incubator.apache.org/projects/htrace.html)
 3.2.0-incubating
 -
 jar
 The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
-
+
 org.apache.zookeeper
 zookeeper
 3.4.6
 tests
 test-jar
 -
-
+
 org.codehaus.jackson
 jackson-core-asl (http://jackson.codehaus.org)
 1.9.13
 -
 jar
 The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
-
+
 org.codehaus.jackson
 jackson-mapper-asl (http://jackson.codehaus.org)
 1.9.13
 -
 jar
 The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
-
+
 org.eclipse.jetty
 jetty-http (http://www.eclipse.org/jetty)
 9.3.19.v20170502
 -
 jar
 Apache Software License - Version 2.0, Eclipse Public License - Version 1.0
-
+
 org.eclipse.jetty
 jetty-io (http://www.eclipse.org/jetty)
 9.3.19.v20170502
 -
 jar
 Apache Software License - Version 2.0, Eclipse Public License - Version 1.0
-
+
 org.eclipse.jetty
 jetty-security (http://www.eclipse.org/jetty)
 9.3.19.v20170502
 -
 jar
 Apache Software License - Version 2.0, Eclipse Public License - Version 1.0
-
+
 org.eclipse.jetty
 jetty-server (http://www.eclipse.org/jetty)
 9.3.19.v20170502
 -
 jar
 Apache Software License - Version 2.0, Eclipse Public License - Version 1.0
-
+
 org.eclipse.jetty
 jetty-servlet (http://www.eclipse.org/jetty)
 9.3.19.v20170502
 -
 jar
 Apache Software License - Version 2.0, Eclipse Public License - Version 1.0
-
+
 org.eclipse.jetty
 jetty-util (http://www.eclipse.org/jetty)
 9.3.19.v20170502
 -
 jar
 Apache Software License - Version 2.0, Eclipse Public License - Version 1.0
-
+
 org.eclipse.jetty
 jetty-util-ajax (http://www.eclipse.org/jetty)
 9.3.19.v20170502
 -
 jar
 Apache Software License - Version 2.0, Eclipse Public License - Version 1.0
-
+
 org.eclipse.jetty
 jetty-webapp (http://www.eclipse.org/jetty)
 9.3.19.v20170502
 -
 jar
 Apache Software License - Version 2.0, Eclipse Public License - Version 1.0
-
+
 org.eclipse.jetty
 jetty-xml (http://www.eclipse.org/jetty)
 9.3.19.v20170502
 -
 jar
 Apache Software License - Version 2.0, Eclipse Public License - Version 1.0
-
+
 org.fusesource.leveldbjni
 leveldbjni-all (http://leveldbjni.fusesource.org/leveldbjni-all)
 1.8
 -
 jar
 The BSD 3-Clause License (http://www.opensource.org/licenses/BSD-3-Clause)
-
+
 org.glassfish
 javax.el (http://uel.java.net)
 3.0.1-b08
 -
 jar
 CDDL + GPLv2 with classpath exception (https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html)
-
+
 org.glassfish.hk2
 hk2-api (https://hk2.java.net/hk2-api)
 2.5.0-b32
 -
 jar
 CDDL + GPLv2 with classpath exception (https://glassfish.java.net/nonav/public/CDDL+GPL_1_1.html)
-
+
 org.glassfish.hk2
 hk2-locator (https://hk2.java.net/hk2-locator)
 2.5.0-b32
 -
 jar
 CDDL + GPLv2 with classpath exception (https://glassfish.java.net/nonav/public/CDDL+GPL_1_1.html)
-
+
 org.glassfish.hk2
 hk2-utils (https://hk2.java.net/hk2-utils)
 2.5.0-b32
 -
 jar
 CDDL + GPLv2
    


    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-spark/dependencies.html
    --
diff --git a/hbase-build-configuration/hbase-spark/dependencies.html b/hbase-build-configuration/hbase-spark/dependencies.html
index 881ad81..778fc67 100644
--- a/hbase-build-configuration/hbase-spark/dependencies.html
+++ b/hbase-build-configuration/hbase-spark/dependencies.html
@@ -7,7 +7,7 @@
 
 
-
+
 
 Apache HBase - Spark Project Dependencies
 
@@ -1760,8 +1760,18 @@
 
 Description: All of the recipes listed on the ZooKeeper recipes doc (except two phase commit).
 URL: http://curator.apache.org/curator-recipes
-Project Licenses: The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
-org.eclipse.jetty.orbit:javax.servlet:jar:3.0.0.v201112011016 (provided)
+Project Licenses: The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+org.apache.curator:curator-framework:jar:4.0.0 (compile)
+
+Curator Framework
+
+Description: High-level API that greatly simplifies using ZooKeeper.
+URL: http://curator.apache.org/curator-framework
+Project Licenses: The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
+org.eclipse.jetty.orbit:javax.servlet:jar:3.0.0.v201112011016 (provided)
 
 
 Jetty Orbit :: Servlet API
@@ -1771,7 +1781,7 @@
 it is an osgi bundle and is signed as well.
 URL: http://www.eclipse.org/jetty/jetty-orbit/javax.servlet
 Project Licenses: Apache Software License - Version 2.0, Eclipse Public License - Version 1.0
-org.apache.commons:commons-lang3:jar:3.6 (compile)
+org.apache.commons:commons-lang3:jar:3.6 (compile)
 
 
 Apache Commons Lang
@@ -1782,7 +1792,7 @@
   standard as to justify existence in java.lang.
 URL: http://commons.apache.org/proper/commons-lang/
 Project Licenses: Apache License, Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
-org.apache.commons:commons-math3:jar:3.6.1 (compile)
+org.apache.commons:commons-math3:jar:3.6.1 (compile)
 
 
 Apache Commons Math
@@ -1791,7 +1801,7 @@
 Description: The Apache Commons Math project is a library of lightweight, self-contained mathematics and statistics components addressing the most common practical problems not immediately available in the Java programming language or commons-lang.
 URL: http://commons.apache.org/proper/commons-math/
 Project Licenses: Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
-org.slf4j:slf4j-api:jar:1.7.24 (compile)
+org.slf4j:slf4j-api:jar:1.7.24 (compile)
 
 
 SLF4J API Module
@@ -1800,7 +1810,7 @@
 Description: The slf4j API
 URL: http://www.slf4j.org
 Project Licenses: MIT License (http://www.opensource.org/licenses/mit-license.php)
-org.slf4j:jul-to-slf4j:jar:1.7.10 (provided)
+org.slf4j:jul-to-slf4j:jar:1.7.10 (provided)
 
 
 JUL to SLF4J bridge
@@ -1809,7 +1819,7 @@
 Description: JUL to SLF4J bridge
 URL: http://www.slf4j.org
 Project Licenses: MIT License (http://www.opensource.org/licenses/mit-license.php)
-org.slf4j:jcl-over-slf4j:jar:1.7.10 (provided)
+org.slf4j:jcl-over-slf4j:jar:1.7.10 (provided)
 
 
 JCL 1.1.1 implemented over SLF4J
@@ -1818,7 +1828,7 @@
 Description: JCL 1.1.1 implemented over SLF4J
 URL: http://www.slf4j.org
 Project Licenses: MIT License (http://www.opensource.org/licenses/mit-license.php)
-log4j:log4j:jar:1.2.17 (compile)
+log4j:log4j:jar:1.2.17 (compile)
 
 
 Apache Log4j
@@ -1827,7 +1837,7 @@
 Description: Apache Log4j 1.2
 URL: http://logging.apache.org/log4j/1.2/
 Project Licenses: The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
-org.slf4j:slf4j-log4j12:jar:1.7.10 (compile)
+org.slf4j:slf4j-log4j12:jar:1.7.10 (compile)
 
 
 SLF4J LOG4J-12 Binding
@@ -1836,7 +1846,7 @@
 Description: SLF4J LOG4J-12 Binding
 URL: http://www.slf4j.org
 Project Licenses: MIT License (http://www.opensource.org/licenses/mit-license.php)
-com.ning:compress-lzf:jar:1.0.3 (provided)
+com.ning:compress-lzf:jar:1.0.3 (provided)
 
 
 Compress-LZF
@@ -1847,7 +1857,7 @@ Compressor is basic Lempel-Ziv codec, without Huffman (deflate/gzip) or statistical post-encoding.
 See "http://oldhome.schmorp.de/marc/liblzf.html" for more on original LZF package.
 URL: http://github.com/ning/compress
 Project Licenses: Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0.html)
    


    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
index 25e368d..d0f781f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
@@ -25,798 +25,798 @@
 017 */
 018package org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import com.google.protobuf.CodedOutputStream;
-034
-035import org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import org.apache.commons.logging.LogFactory;
-067import org.apache.hadoop.conf.Configuration;
-068import org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import
    


    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
index d438f22..7c59e27 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
@@ -1290,8 +1290,8 @@
 1282      CompactType compactType) throws IOException {
 1283    switch (compactType) {
 1284      case MOB:
-1285        compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), major,
-1286          columnFamily);
+1285        compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName),
+1286          major, columnFamily);
 1287        break;
 1288      case NORMAL:
 1289        checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240      new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
 3241        @Override
 3242        public AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243          RegionInfo info = getMobRegionInfo(tableName);
+3243          RegionInfo info = RegionInfo.createMobRegionInfo(tableName);
 3244          GetRegionInfoRequest request =
 3245            RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246          GetRegionInfoResponse response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304        }
 3305        break;
 3306      default:
-3307        throw new IllegalArgumentException("Unknowne compactType: " + compactType);
+3307        throw new IllegalArgumentException("Unknown compactType: " + compactType);
 3308    }
 3309    if (state != null) {
 3310      return ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839    });
 3840  }
 3841
-3842  private RegionInfo getMobRegionInfo(TableName tableName) {
-3843    return RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844        .build();
-3845  }
-3846
-3847  private RpcControllerFactory getRpcControllerFactory() {
-3848    return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853      throws IOException {
-3854    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3855      @Override
-3856      protected Void rpcCall() throws Exception {
-3857        master.addReplicationPeer(getRpcController(),
-3858          RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled));
-3859        return null;
-3860      }
-3861    });
-3862  }
-3863
-3864  @Override
-3865  public void removeReplicationPeer(String peerId) throws IOException {
-3866    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3867      @Override
-3868      protected Void rpcCall() throws Exception {
-3869        master.removeReplicationPeer(getRpcController(),
-3870          RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871        return null;
-3872      }
-3873    });
-3874  }
-3875
-3876  @Override
-3877  public void enableReplicationPeer(final String peerId) throws IOException {
-3878    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3879      @Override
-3880      protected Void rpcCall() throws Exception {
-3881        master.enableReplicationPeer(getRpcController(),
-3882          RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883        return null;
-3884      }
-3885    });
-3886  }
-3887
-3888  @Override
-3889  public void disableReplicationPeer(final String peerId) throws IOException {
-3890    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3891      @Override
-3892      protected Void rpcCall() throws Exception {
-3893        master.disableReplicationPeer(getRpcController(),
-3894          RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895        return null;
-3896      }
-3897    });
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException {
-3902    return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(),
-3903        getRpcControllerFactory()) {
-3904      @Override
-3905      protected ReplicationPeerConfig rpcCall() throws Exception {
-3906        GetReplicationPeerConfigResponse response = master.getReplicationPeerConfig(
-3907          getRpcController(),
    

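The replication-peer callables being removed here are the plumbing behind the public Admin API, so a hedged sketch from the caller's side may orient readers; the peer id and cluster key are illustrative, and the enabled-flag overload from the hunk may not exist on every branch:

import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class PeerAdminDemo {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      ReplicationPeerConfig peerConfig = new ReplicationPeerConfig()
          .setClusterKey("zk1,zk2,zk3:2181:/hbase"); // peer cluster's ZK quorum
      admin.addReplicationPeer("1", peerConfig);
      admin.disableReplicationPeer("1");             // pause shipping edits
      admin.enableReplicationPeer("1");
      System.out.println(admin.getReplicationPeerConfig("1").getClusterKey());
      admin.removeReplicationPeer("1");
    }
  }
}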

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 29ea7b3..6ed75c9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308    boolean result = isAvailable() && !hasReferences();
-1309    LOG.info("ASKED IF SPLITTABLE " + result + " " + getRegionInfo().getShortNameToLog(),
-1310      new Throwable("LOGGING: REMOVE"));
-1311    // REMOVE BELOW
-1312    LOG.info("DEBUG LIST ALL FILES");
-1313    for (HStore store : this.stores.values()) {
-1314      LOG.info("store " + store.getColumnFamilyName());
-1315      for (HStoreFile sf : store.getStorefiles()) {
-1316        LOG.info(sf.toStringDetailed());
-1317      }
-1318    }
-1319    return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324    if (!isAvailable()) {
-1325      LOG.debug("Region " + this
-1326          + " is not mergeable because it is closing or closed");
-1327      return false;
-1328    }
-1329    if (hasReferences()) {
-1330      LOG.debug("Region " + this
-1331          + " is not mergeable because it has references");
-1332      return false;
-1333    }
-1334
-1335    return true;
+1308    return isAvailable() && !hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313    if (!isAvailable()) {
+1314      LOG.debug("Region " + this
+1315          + " is not mergeable because it is closing or closed");
+1316      return false;
+1317    }
+1318    if (hasReferences()) {
+1319      LOG.debug("Region " + this
+1320          + " is not mergeable because it has references");
+1321      return false;
+1322    }
+1323
+1324    return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328    synchronized(this.writestate) {
+1329      return this.writestate.writesEnabled;
+1330    }
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl getMVCC() {
+1335    return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339    synchronized(this.writestate) {
-1340      return this.writestate.writesEnabled;
-1341    }
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl getMVCC() {
-1346    return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351    return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340    return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long getReadPoint(IsolationLevel isolationLevel) {
+1347    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348      // This scan can read even uncommitted transactions
+1349      return Long.MAX_VALUE;
+1350    }
+1351    return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long getReadPoint(IsolationLevel isolationLevel) {
-1358    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359      // This scan can read even uncommitted transactions
-1360      return Long.MAX_VALUE;
-1361    }
-1362    return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean isLoadingCfsOnDemandDefault() {
-1366    return this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage files that the HRegion's component
-1377   * HStores make use of.  It's a list of all StoreFile objects. Returns empty
-1378   * vector if already closed and null if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException Thrown when replay of wal is required
-1382   * because a Snapshot was not properly persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386    return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new Object();
-1390
-1391  /** Conf key for the periodic flush interval */
-1392  public

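getReadPoint above short-circuits READ_UNCOMMITTED to Long.MAX_VALUE, meaning such a scan ignores the MVCC read point and can see in-flight writes. Clients opt in per query; a hedged sketch (the table name is illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class DirtyScanDemo {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      Scan scan = new Scan();
      // Skip MVCC waiting; results may include not-yet-committed cells
      scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
      try (ResultScanner rs = table.getScanner(scan)) {
        for (Result r : rs) {
          System.out.println(r);
        }
      }
    }
  }
}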

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyByteBufferCell.html
    --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyByteBufferCell.html b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyByteBufferCell.html
index 9098105..b05691f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyByteBufferCell.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/PrivateCellUtil.EmptyByteBufferCell.html
    @@ -37,1514 +37,1514 @@
     029import java.util.ArrayList;
     030import java.util.Iterator;
     031import java.util.List;
    -032
-033import org.apache.hadoop.hbase.KeyValue.Type;
-034import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-035import org.apache.hadoop.hbase.io.HeapSize;
-036import org.apache.hadoop.hbase.io.TagCompressionContext;
-037import org.apache.hadoop.hbase.io.util.Dictionary;
-038import org.apache.hadoop.hbase.io.util.StreamUtils;
-039import org.apache.hadoop.hbase.util.ByteBufferUtils;
-040import org.apache.hadoop.hbase.util.ByteRange;
-041import org.apache.hadoop.hbase.util.Bytes;
-042import org.apache.hadoop.hbase.util.ClassSize;
-043import org.apache.yetus.audience.InterfaceAudience;
-044
-045import com.google.common.annotations.VisibleForTesting;
-046
-047/**
-048 * Utility methods helpful slinging {@link Cell} instances. It has more powerful and
-049 * rich set of APIs than those in {@link CellUtil} for internal usage.
-050 */
-051@InterfaceAudience.Private
-052// TODO : Make Tag IA.LimitedPrivate and move some of the Util methods to CP exposed Util class
-053public class PrivateCellUtil {
+032import java.util.Optional;
+033
+034import org.apache.hadoop.hbase.KeyValue.Type;
+035import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+036import org.apache.hadoop.hbase.io.HeapSize;
+037import org.apache.hadoop.hbase.io.TagCompressionContext;
+038import org.apache.hadoop.hbase.io.util.Dictionary;
+039import org.apache.hadoop.hbase.io.util.StreamUtils;
+040import org.apache.hadoop.hbase.util.ByteBufferUtils;
+041import org.apache.hadoop.hbase.util.ByteRange;
+042import org.apache.hadoop.hbase.util.Bytes;
+043import org.apache.hadoop.hbase.util.ClassSize;
+044import org.apache.yetus.audience.InterfaceAudience;
+045
+046import com.google.common.annotations.VisibleForTesting;
+047
+048/**
+049 * Utility methods helpful slinging {@link Cell} instances. It has more powerful and
+050 * rich set of APIs than those in {@link CellUtil} for internal usage.
+051 */
    +051 */
    +052@InterfaceAudience.Private
    +053public final class PrivateCellUtil {
     054
     055  /**
 056   * Private constructor to keep this class from being instantiated.
     057   */
     058  private PrivateCellUtil() {
    -059
    -060  }
    -061
-062  /*** ByteRange ***/
-063
-064  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
-065    return range.set(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
-066  }
-067
-068  public static ByteRange fillFamilyRange(Cell cell, ByteRange range) {
-069    return range.set(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
-070  }
-071
-072  public static ByteRange fillQualifierRange(Cell cell, ByteRange range) {
-073    return range.set(cell.getQualifierArray(), cell.getQualifierOffset(),
-074      cell.getQualifierLength());
-075  }
-076
-077  public static ByteRange fillValueRange(Cell cell, ByteRange range) {
-078    return range.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
-079  }
-080
-081  public static ByteRange fillTagRange(Cell cell, ByteRange range) {
-082    return range.set(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
-083  }
-084
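The fill*Range helpers removed above all follow one pattern: re-point a caller-supplied ByteRange at a slice of the cell's backing array instead of copying bytes. A minimal sketch, assuming an on-heap cell and the stock SimpleMutableByteRange implementation (PrivateCellUtil is IA.Private, so this is illustrative server-side usage rather than public API):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.PrivateCellUtil;
    import org.apache.hadoop.hbase.util.ByteRange;
    import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

    public class CellRanges {
      // Zero-copy view of the row bytes: the returned range aliases the
      // cell's row array, so it stays valid only as long as that buffer does.
      static ByteRange rowOf(Cell cell) {
        ByteRange range = new SimpleMutableByteRange();
        return PrivateCellUtil.fillRowRange(cell, range);
      }
    }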
-085  /**
-086   * Returns tag value in a new byte array. If server-side, use {@link Tag#getValueArray()} with
-087   * appropriate {@link Tag#getValueOffset()} and {@link Tag#getValueLength()} instead to save on
-088   * allocations.
-089   * @param cell
-090   * @return tag value in a new byte array.
-091   */
-092  public static byte[] getTagsArray(Cell cell) {
-093    byte[] output = new byte[cell.getTagsLength()];
-094    copyTagsTo(cell, output, 0);
-095    return output;
-096  }
-097
-098  public static byte[] cloneTags(Cell cell) {
-099    byte[] output = new byte[cell.getTagsLength()];
-100    copyTagsTo(cell, output, 0);
-101    return output;
-102  }
-103
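getTagsArray and cloneTags above are the copying counterparts to those fillers: each allocates a byte[] of getTagsLength() and delegates to copyTagsTo. The same allocate-and-copy step written out by hand, to make explicit the per-call allocation the javadoc says server-side code should avoid:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.PrivateCellUtil;

    public class TagBytes {
      // Equivalent to PrivateCellUtil.cloneTags(cell): one fresh byte[]
      // per call, sized to the cell's tag region.
      static byte[] copyTags(Cell cell) {
        byte[] out = new byte[cell.getTagsLength()];
        PrivateCellUtil.copyTagsTo(cell, out, 0);
        return out;
      }
    }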
-104  /**
-105   * Copies the tags info into the tag portion of the cell
-106   * @param cell
-107   * @param destination
-108   * @param destinationOffset
-109   * @return position after tags
+059  }
+060
+061  /*** ByteRange ***/
+062
+063  public static ByteRange fillRowRange(Cell cell, ByteRange range) {
+064    return range.set(cell.getRowArray(), 
    


  • Packages that use DoNotRetryIOException
    Package | Description
    org.apache.hadoop.hbase.client
    -Provides HBase Client - Table of Contents - Overview - Example API Usage - Overview - To administer HBase, create and drop tables, list and alter tables, use Admin.
    +Provides HBase Client
    org.apache.hadoop.hbase.coprocessor
    -Table of Contents - Overview - Coprocessor - RegionObserver - Endpoint - Coprocessor loading - Overview - Coprocessors are code that runs in-process on each region server.
    +Table of Contents
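The class-use table above lists packages that reference DoNotRetryIOException, whose name encodes its contract: the HBase client treats it and its subclasses as permanent failures and does not retry them. A hedged sketch of caller-side retry logic respecting that contract (the wrapper itself is illustrative, not an HBase API):

    import java.io.IOException;
    import java.util.concurrent.Callable;
    import org.apache.hadoop.hbase.DoNotRetryIOException;

    public class RetryWrapper {
      // Retry transient IOExceptions up to maxAttempts, but give up at once
      // on DoNotRetryIOException: by contract, retrying cannot succeed.
      static <T> T withRetries(Callable<T> op, int maxAttempts) throws Exception {
        IOException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
          try {
            return op.call();
          } catch (DoNotRetryIOException e) {
            throw e; // permanent failure
          } catch (IOException e) {
            last = e; // possibly transient; retry
          }
        }
        throw last; // attempts exhausted
      }
    }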