[08/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.html
new file mode 100644
index 000..1fa0940
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.html
@@ -0,0 +1,1477 @@
+[javadoc source-page HTML header: DOCTYPE (http://www.w3.org/TR/html4/loose.dtd), title "Source code", stylesheet link]
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.thrift2.client;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Future;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CacheEvictionStats;
+import org.apache.hadoop.hbase.ClusterMetrics;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.NamespaceNotFoundException;
+import org.apache.hadoop.hbase.RegionMetrics;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.CompactType;
+import org.apache.hadoop.hbase.client.CompactionState;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.SnapshotType;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.replication.TableCFs;
+import org.apache.hadoop.hbase.client.security.SecurityCapability;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.quotas.QuotaFilter;
+import org.apache.hadoop.hbase.quotas.QuotaRetriever;
+import org.apache.hadoop.hbase.quotas.QuotaSettings;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.thrift2.ThriftUtilities;
+import org.apache.hadoop.hbase.thrift2.generated.TColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
+import org.apache.hadoop.hbase.thrift2.generated.TNamespaceDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.TTableDescriptor;
+import org.apache.hadoop.hbase.thrift2.generated.TTableName;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.thrift.TException;
+import org.apache.thrift.transport.TTransport;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class ThriftAdmin implements Admin {
+
+  private THBaseService.Client client;
+  private TTransport transport;
+  private int operationTimeout;
+  private Configuration conf;
+
+  public ThriftAdmin(THBaseService.Client client, TTransport tTransport, Configuration conf) {
+    this.client = client;
+    this.transport = tTransport;
+    this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+    this.conf = conf;
+  }
+
+  @Override
+  public [truncated]
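The fragment above shows the new ThriftAdmin delegating the HBase Admin API to a thrift2 THBaseService.Client. Below is a minimal sketch of wiring one up by hand; the localhost:9090 endpoint and the plain binary-protocol socket are illustrative assumptions, and only the (client, transport, conf) constructor comes from the diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.thrift2.client.ThriftAdmin;
import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ThriftAdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical endpoint of a running HBase thrift2 server.
    TTransport transport = new TSocket("localhost", 9090);
    transport.open();
    THBaseService.Client client = new THBaseService.Client(new TBinaryProtocol(transport));
    // Constructor signature exactly as in the diff above.
    ThriftAdmin admin = new ThriftAdmin(client, transport, conf);
    // ... use the Admin API; Admin extends Closeable, so close when done.
    admin.close();
  }
}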

[08/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.html b/devapidocs/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.html
index 0b85ddf..67d4f04 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/RegionSizeStoreImpl.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 [top navigation bar: only the "Next Class" link target changed; the visible text is unchanged and the hrefs were stripped by the archive]
@@ -499,7 +499,7 @@ implements
 [bottom navigation bar: same "Next Class" link change]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/quotas/RpcThrottleStorage.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/RpcThrottleStorage.html b/devapidocs/org/apache/hadoop/hbase/quotas/RpcThrottleStorage.html
new file mode 100644
index 000..6de3b41
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/RpcThrottleStorage.html
@@ -0,0 +1,387 @@
+[javadoc page header and navigation chrome omitted: DOCTYPE, title "RpcThrottleStorage (Apache HBase 3.0.0-SNAPSHOT API)", method-tab scripts, Overview/Package/Class/Use/Tree/Deprecated/Index/Help links, frames toggles]
+
+Package org.apache.hadoop.hbase.quotas
+Class RpcThrottleStorage
+
+java.lang.Object
+  org.apache.hadoop.hbase.quotas.RpcThrottleStorage
+
+@InterfaceAudience.Private
+public class RpcThrottleStorage extends Object
+
+ZK based rpc throttle storage.
+
+Field Summary
+  Modifier and Type   Field and Description
+  static String       RPC_THROTTLE_ZNODE
+  static String       RPC_THROTTLE_ZNODE_DEFAULT
+  private String      rpcThrottleZNode
+  private ZKWatcher   zookeeper
+
+Constructor Summary
+  RpcThrottleStorage(ZKWatcher zookeeper, org.apache.hadoop.conf.Configuration conf)
+
+Method Summary (All Methods / Instance Methods / Concrete Methods)
+  Modifier and Type   Method and Description
+  boolean             isRpcThrottleEnabled()
+  void                switchRpcThrottle(boolean enable)
+                      Store the rpc throttle value.
+
+Methods inherited from class java.lang.Object:
+  clone, equals, finalize, getClass, hashCode, notify, [truncated]
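For orientation, a minimal sketch of how a caller might use the new class, given only the signatures above; the surrounding ZKWatcher setup and the thrown exception types are assumptions for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.quotas.RpcThrottleStorage;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class RpcThrottleSketch {
  // Persist the cluster-wide rpc throttle flag in ZooKeeper, then read it back.
  public static boolean toggle(ZKWatcher zk, Configuration conf, boolean enable) throws Exception {
    RpcThrottleStorage storage = new RpcThrottleStorage(zk, conf);
    storage.switchRpcThrottle(enable);        // "Store the rpc throttle value."
    return storage.isRpcThrottleEnabled();
  }
}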
 

[08/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
index 736388b..197b99d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateTableProcedureBiConsumer.html
@@ -26,3624 +26,3599 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-
-import com.google.protobuf.Message;
-import com.google.protobuf.RpcChannel;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.BiConsumer;
-import java.util.function.Function;
-import java.util.function.Supplier;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-import org.apache.hadoop.hbase.CacheEvictionStats;
-import org.apache.hadoop.hbase.CacheEvictionStatsAggregator;
-import org.apache.hadoop.hbase.ClusterMetrics;
-import org.apache.hadoop.hbase.ClusterMetrics.Option;
-import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.RegionMetrics;
-import org.apache.hadoop.hbase.RegionMetricsBuilder;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.TableNotEnabledException;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder;
-import org.apache.hadoop.hbase.client.Scan.ReadType;
-import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-import org.apache.hadoop.hbase.client.replication.TableCFs;
-import org.apache.hadoop.hbase.client.security.SecurityCapability;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.quotas.QuotaFilter;
-import org.apache.hadoop.hbase.quotas.QuotaSettings;
-import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.replication.SyncReplicationState;
-import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
-import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-import [truncated]

[08/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
index 783dc34..5898688 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift2/ThriftServer.html
@@ -26,609 +26,99 @@
  */
 package org.apache.hadoop.hbase.thrift2;
 
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.security.PrivilegedAction;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.SynchronousQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import javax.security.auth.callback.Callback;
-import javax.security.auth.callback.UnsupportedCallbackException;
-import javax.security.sasl.AuthorizeCallback;
-import javax.security.sasl.SaslServer;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.filter.ParseFilter;
-import org.apache.hadoop.hbase.http.InfoServer;
-import org.apache.hadoop.hbase.security.SaslUtil;
-import org.apache.hadoop.hbase.security.SecurityUtil;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.thrift.CallQueue;
-import org.apache.hadoop.hbase.thrift.THBaseThreadPoolExecutor;
-import org.apache.hadoop.hbase.thrift.ThriftMetrics;
-import org.apache.hadoop.hbase.thrift2.generated.THBaseService;
-import org.apache.hadoop.hbase.util.DNS;
-import org.apache.hadoop.hbase.util.JvmPauseMonitor;
-import org.apache.hadoop.hbase.util.Strings;
-import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.thrift.TException;
-import org.apache.thrift.TProcessor;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.protocol.TProtocolFactory;
-import org.apache.thrift.server.THsHaServer;
-import org.apache.thrift.server.TNonblockingServer;
-import org.apache.thrift.server.TServer;
-import org.apache.thrift.server.TThreadPoolServer;
-import org.apache.thrift.server.TThreadedSelectorServer;
-import org.apache.thrift.transport.TFramedTransport;
-import org.apache.thrift.transport.TNonblockingServerSocket;
-import org.apache.thrift.transport.TNonblockingServerTransport;
-import org.apache.thrift.transport.TSaslServerTransport;
-import org.apache.thrift.transport.TServerSocket;
-import org.apache.thrift.transport.TServerTransport;
-import org.apache.thrift.transport.TTransportException;
-import org.apache.thrift.transport.TTransportFactory;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.OptionGroup;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.ParseException;
-
-/**
- * ThriftServer - this class starts up a Thrift server which implements the HBase API specified in
- * the HbaseClient.thrift IDL file.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-@SuppressWarnings({ "rawtypes", "unchecked" })
-public class ThriftServer extends Configured implements Tool {
-  private static final Logger log = LoggerFactory.getLogger(ThriftServer.class);
-
-  /**
-   * Thrift quality of protection configuration key. Valid values can be:
-   * privacy: authentication, integrity and confidentiality checking
-   * integrity: authentication and integrity checking
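The truncated javadoc above describes the SASL quality-of-protection setting for the thrift2 server. A sketch of selecting it in code; note the key name "hbase.thrift.security.qop" is recalled from the HBase codebase rather than visible in this fragment, so treat it as an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ThriftQopSketch {
  public static Configuration secureThriftConf() {
    Configuration conf = HBaseConfiguration.create();
    // "privacy"   = authentication, integrity and confidentiality checking
    // "integrity" = authentication and integrity checking (per the javadoc above)
    conf.set("hbase.thrift.security.qop", "privacy");
    return conf;
  }
}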

[08/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.html
index 603735b..84ecbfd 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.html
@@ -497,6 +497,6 @@ extends
-Copyright © 2007-2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007-2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
index bb074a9..3b84726 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
@@ -349,6 +349,6 @@ extends org.apache.hadoop.mapreduce.Reducer<Key,org.apache.hadoop.io.IntWrita[truncated]
-Copyright © 2007-2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007-2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
index ed1f34c..bb26cde 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html
@@ -528,6 +528,6 @@ implements org.apache.hadoop.util.Tool
-Copyright © 2007-2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007-2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCreator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCreator.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCreator.html
index 3e42624..6de0884 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCreator.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellCreator.html
@@ -510,6 +510,6 @@ public
-Copyright © 2007-2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007-2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellDeserializer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellDeserializer.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellDeserializer.html
index 9688902..e0f2fd6 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellDeserializer.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellDeserializer.html
@@ -359,6 +359,6 @@ implements org.apache.hadoop.io.serializer.Deserializer
-Copyright © 2007-2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007-2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellSerializer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellSerializer.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellSerializer.html
index 31aaf71..e42f20e 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellSerializer.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/CellSerialization.CellSerializer.html
@@ -359,6 +359,6 @@ implements org.apache.hadoop.io.serializer.Serializer
-Copyright © 2007-2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007-2019 The Apache Software Foundation (https://www.apache.org/). All rights [truncated]
[08/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
index 0f5a095..50bf692 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ObservedExceptionsInBatch.html
@@ -78,8712 +78,8714 @@
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.Function;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilderType;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellComparatorImpl;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.PrivateCellUtil;
-import org.apache.hadoop.hbase.RegionTooBusyException;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.TagUtil;
-import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.CompactionState;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.IsolationLevel;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.RowMutations;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.TableDescriptor;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.conf.ConfigurationManager;
-import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.FilterWrapper;
-import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-import org.apache.hadoop.hbase.ipc.RpcCall;
-import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.mob.MobFileCache;
-import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-import [truncated]
[08/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
index 79cb21b..d8d391b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
@@ -378,1508 +378,1510 @@
 
   @Override
   public void returnBlock(HFileBlock block) {
-    BlockCache blockCache = this.cacheConf.getBlockCache();
-    if (blockCache != null && block != null) {
-      BlockCacheKey cacheKey = new BlockCacheKey(this.getFileContext().getHFileName(),
-          block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType());
-      blockCache.returnBlock(cacheKey, block);
-    }
-  }
-  /**
-   * @return the first key in the file. May be null if file has no entries. Note
-   *         that this is not the first row key, but rather the byte form of the
-   *         first KeyValue.
-   */
-  @Override
-  public Optional<Cell> getFirstKey() {
-    if (dataBlockIndexReader == null) {
-      throw new BlockIndexNotLoadedException();
-    }
-    return dataBlockIndexReader.isEmpty() ? Optional.empty()
-        : Optional.of(dataBlockIndexReader.getRootBlockKey(0));
-  }
-
-  /**
-   * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's
-   * patch goes in to eliminate {@link KeyValue} here.
-   *
-   * @return the first row key, or null if the file is empty.
-   */
-  @Override
-  public Optional<byte[]> getFirstRowKey() {
-    // We have to copy the row part to form the row key alone
-    return getFirstKey().map(CellUtil::cloneRow);
-  }
-
-  /**
-   * TODO left from {@link HFile} version 1: move this to StoreFile after
-   * Ryan's patch goes in to eliminate {@link KeyValue} here.
-   *
-   * @return the last row key, or null if the file is empty.
-   */
-  @Override
-  public Optional<byte[]> getLastRowKey() {
-    // We have to copy the row part to form the row key alone
-    return getLastKey().map(CellUtil::cloneRow);
-  }
-
-  /** @return number of KV entries in this HFile */
-  @Override
-  public long getEntries() {
-    return trailer.getEntryCount();
-  }
-
-  /** @return comparator */
-  @Override
-  public CellComparator getComparator() {
-    return comparator;
-  }
-
-  /** @return compression algorithm */
-  @Override
-  public Compression.Algorithm getCompressionAlgorithm() {
-    return compressAlgo;
-  }
-
-  /**
-   * @return the total heap size of data and meta block indexes in bytes. Does
-   *         not take into account non-root blocks of a multilevel data index.
-   */
-  @Override
-  public long indexSize() {
-    return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0)
-        + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize()
-            : 0);
-  }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public HFileBlockIndex.BlockIndexReader getDataBlockIndexReader() {
-    return dataBlockIndexReader;
-  }
-
-  @Override
-  public FixedFileTrailer getTrailer() {
-    return trailer;
-  }
-
-  @Override
-  public boolean isPrimaryReplicaReader() {
-    return primaryReplicaReader;
-  }
-
-  @Override
-  public FileInfo loadFileInfo() throws IOException {
-    return fileInfo;
-  }
-
-  /**
-   * An exception thrown when an operation requiring a scanner to be seeked
-   * is invoked on a scanner that is not seeked.
-   */
-  @SuppressWarnings("serial")
-  public static class NotSeekedException extends IllegalStateException {
-    public NotSeekedException() {
-      super("Not seeked to a key/value");
-    }
-  }
-
-  protected static class HFileScannerImpl implements HFileScanner {
-    private ByteBuff blockBuffer;
-    protected final boolean cacheBlocks;
-    protected final boolean pread;
-    protected final boolean isCompaction;
-    private int currKeyLen;
-    private int currValueLen;
-    private int currMemstoreTSLen;
-    private long currMemstoreTS;
-    // Updated but never read?
-    protected AtomicInteger blockFetches = new AtomicInteger(0);
-    protected final HFile.Reader reader;
-    private int currTagsLen;
-    // [truncated]
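Since this hunk is all reader accessors, a small probe sketch may help orient. HFile.createReader's exact overload varies by HBase version, so that call is an assumption; the accessors themselves (getEntries, indexSize, getFirstRowKey) are taken from the fragment:

import java.util.Optional;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileProbe {
  public static void probe(FileSystem fs, Path path, Configuration conf) throws Exception {
    // Assumed overload: (fs, path, cacheConf, primaryReplicaReader, conf).
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), true, conf);
    System.out.println("entries:    " + reader.getEntries());   // trailer entry count
    System.out.println("index heap: " + reader.indexSize());    // root-level index heap size
    Optional<byte[]> firstRow = reader.getFirstRowKey();        // row part of the first cell
    firstRow.ifPresent(row -> System.out.println("first row: " + Bytes.toStringBinary(row)));
    reader.close();
  }
}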

[08/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.RegionRepairException.html
@@ -142,5192 +142,5186 @@
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-
-/**
- * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
- * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
- * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
- * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
- *
- * <p>
- * Region consistency checks verify that hbase:meta, region deployment on region
- * servers and the state of data in HDFS (.regioninfo files) all are in
- * accordance.
- * <p>
- * Table integrity checks verify that all possible row keys resolve to exactly
- * one region of a table.  This means there are no individual degenerate
- * or backwards regions; no holes between regions; and that there are no
- * overlapping regions.
- * <p>
- * The general repair strategy works in two phases:
- * <ol>
- * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
- * <li> Repair Region Consistency with hbase:meta and assignments
- * </ol>
- * <p>
- * For table integrity repairs, the tables' region directories are scanned
- * for .regioninfo files.  Each table's integrity is then verified.  If there
- * are any orphan regions (regions with no .regioninfo files) or holes, new
- * regions are fabricated.  Backwards regions are sidelined as well as empty
- * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
- * a new region is created and all data is merged into the new region.
- * <p>
- * Table integrity repairs deal solely with HDFS and could potentially be done
- * offline -- the hbase region servers or master do not need to be running.
- * This phase can eventually be used to completely reconstruct the hbase:meta table in
- * an offline fashion.
- * <p>
- * Region consistency requires three conditions -- 1) valid .regioninfo file
- * present in an HDFS region dir,  2) valid row with .regioninfo data in META,
- * and 3) a region is deployed only at the regionserver that was assigned to
- * with proper state in the master.
- * <p>
- * Region consistency repairs require hbase to be online so that hbck can
- * contact the HBase master and region servers.  The hbck#connect() method must
- * first be called successfully.  Much of the region consistency information
- * is transient and less risky to repair.
- * <p>
- * If hbck is run from the command line, there are a [truncated]
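The class comment above names hbck#connect() as the required first step for online checks. A hypothetical read-only driver under that reading; the onlineHbck() entry point and the constructor are recalled from the hbase-1.x codebase rather than shown here, so treat both as assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.HBaseFsck;

public class HbckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseFsck fsck = new HBaseFsck(conf);
    fsck.connect();                  // must succeed before region consistency checks
    int status = fsck.onlineHbck();  // read-only sweep; repairs are refused on hbase-2.x
    System.exit(status);
  }
}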

[08/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
index 333b785..da8def9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
@@ -34,1359 +34,1365 @@
 import java.util.List;
 import java.util.UUID;
 import java.util.regex.Pattern;
-
-import org.apache.commons.lang3.ArrayUtils;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.yetus.audience.InterfaceAudience;
-
-/**
- * HConstants holds a bunch of HBase-related constants
- */
-@InterfaceAudience.Public
-public final class HConstants {
-  // NOTICE Please do not add a constants here, unless they are referenced by a lot of classes.
-
-  //Bytes.UTF8_ENCODING should be updated if this changed
-  /** When we encode strings, we always specify UTF8 encoding */
-  public static final String UTF8_ENCODING = "UTF-8";
-
-  //Bytes.UTF8_CHARSET should be updated if this changed
-  /** When we encode strings, we always specify UTF8 encoding */
-  public static final Charset UTF8_CHARSET = Charset.forName(UTF8_ENCODING);
-  /**
-   * Default block size for an HFile.
-   */
-  public final static int DEFAULT_BLOCKSIZE = 64 * 1024;
-
-  /** Used as a magic return value while optimized index key feature enabled(HBASE-7845) */
-  public final static int INDEX_KEY_MAGIC = -2;
-  /*
-   * Name of directory that holds recovered edits written by the wal log
-   * splitting code, one per region
-   */
-  public static final String RECOVERED_EDITS_DIR = "recovered.edits";
-  /**
-   * The first four bytes of Hadoop RPC connections
-   */
-  public static final byte[] RPC_HEADER = new byte[] { 'H', 'B', 'a', 's' };
-  public static final byte RPC_CURRENT_VERSION = 0;
-
-  // HFileBlock constants. TODO THESE DEFINES BELONG IN HFILEBLOCK, NOT UP HERE.
-  // Needed down in hbase-common though by encoders but these encoders should not be dealing
-  // in the internals of hfileblocks. Fix encapsulation.
-
-  /** The size data structures with minor version is 0 */
-  public static final int HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM = MAGIC_LENGTH + 2 * Bytes.SIZEOF_INT
-      + Bytes.SIZEOF_LONG;
-  /** The size of a version 2 HFile block header, minor version 1.
-   * There is a 1 byte checksum type, followed by a 4 byte bytesPerChecksum
-   * followed by another 4 byte value to store sizeofDataOnDisk.
-   */
-  public static final int HFILEBLOCK_HEADER_SIZE = HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM +
-    Bytes.SIZEOF_BYTE + 2 * Bytes.SIZEOF_INT;
-  /** Just an array of bytes of the right size. */
-  public static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HFILEBLOCK_HEADER_SIZE];
-
-  //End HFileBlockConstants.
-
-  /**
-   * Status codes used for return values of bulk operations.
-   */
-  @InterfaceAudience.Private
-  public enum OperationStatusCode {
-    NOT_RUN,
-    SUCCESS,
-    BAD_FAMILY,
-    STORE_TOO_BUSY,
-    SANITY_CHECK_FAILURE,
-    FAILURE
-  }
-
-  /** long constant for zero */
-  public static final Long ZERO_L = Long.valueOf(0L);
-  public static final String NINES = "99";
-  public static final String ZEROES = "00";
-
-  // For migration
-
-  /** name of version file */
-  public static final String VERSION_FILE_NAME = "hbase.version";
-
-  /**
-   * Current version of file system.
-   * Version 4 supports only one kind of bloom filter.
-   * Version 5 changes versions in catalog table regions.
-   * Version 6 enables blockcaching on catalog tables.
-   * Version 7 introduces hfile -- hbase 0.19 to 0.20..
-   * Version 8 introduces namespace
-   */
-  // public static final String FILE_SYSTEM_VERSION = "6";
-  public static final String FILE_SYSTEM_VERSION = "8";
-
-  // Configuration parameters
-
-  //TODO: Is having HBase homed on port 60k OK?
-
-  /** Cluster is in distributed mode or not */
-  public static final String CLUSTER_DISTRIBUTED = "hbase.cluster.distributed";
-
-  /** Config for pluggable load balancers */
-  public static final String HBASE_MASTER_LOADBALANCER_CLASS = "hbase.master.loadbalancer.class";
-
-  /** Config for balancing the cluster by table */
-  public static final String HBASE_MASTER_LOADBALANCE_BYTABLE = "hbase.master.loadbalance.bytable";
-
-  /** Config for the max percent of regions in transition */
-  public static final String [truncated]
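As a worked check of the two block-header constants above: MAGIC_LENGTH is statically imported from outside this fragment, so its value of 8 bytes is an assumption here, while Bytes.SIZEOF_INT, SIZEOF_LONG and SIZEOF_BYTE are 4, 8 and 1:

public class HeaderSizeCheck {
  public static void main(String[] args) {
    // MAGIC_LENGTH (assumed 8) + 2 * SIZEOF_INT + SIZEOF_LONG
    int headerSizeNoChecksum = 8 + 2 * 4 + 8;            // 24 bytes
    // + SIZEOF_BYTE (checksum type) + 2 * SIZEOF_INT (bytesPerChecksum, sizeofDataOnDisk)
    int headerSize = headerSizeNoChecksum + 1 + 2 * 4;   // 33 bytes
    System.out.println(headerSizeNoChecksum + " / " + headerSize);
  }
}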

[08/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 6e82899..152a081 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -234,10 +234,10 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
+org.apache.hadoop.hbase.procedure2.TestProcedureRecovery.TestStateMachineProcedure.State
 org.apache.hadoop.hbase.procedure2.TestStateMachineProcedure.TestSMProcedureState
-org.apache.hadoop.hbase.procedure2.TestYieldProcedures.TestStateMachineProcedure.State
 org.apache.hadoop.hbase.procedure2.TestProcedureBypass.StuckStateMachineState
-org.apache.hadoop.hbase.procedure2.TestProcedureRecovery.TestStateMachineProcedure.State
+org.apache.hadoop.hbase.procedure2.TestYieldProcedures.TestStateMachineProcedure.State
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index d7c69fc..06907ed 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -701,10 +701,10 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.TestAtomicOperation.TestStep
 org.apache.hadoop.hbase.regionserver.TestMultiLogThreshold.ActionType
-org.apache.hadoop.hbase.regionserver.DataBlockEncodingTool.Manipulation
 org.apache.hadoop.hbase.regionserver.TestRegionServerReadRequestMetrics.Metric
+org.apache.hadoop.hbase.regionserver.TestAtomicOperation.TestStep
+org.apache.hadoop.hbase.regionserver.DataBlockEncodingTool.Manipulation
 org.apache.hadoop.hbase.regionserver.TestCacheOnWriteInSchema.CacheOnWriteType
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
index 79ffc82..da97f01 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/security/access/TestAccessController.MyShellBasedUnixGroupsMapping.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 [the following -/+ pairs differ only in the stripped source-link anchor; the visible text is identical]
-public static class TestAccessController.MyShellBasedUnixGroupsMapping
+public static class TestAccessController.MyShellBasedUnixGroupsMapping
 extends org.apache.hadoop.security.ShellBasedUnixGroupsMapping
 implements org.apache.hadoop.security.GroupMappingServiceProvider
 
@@ -221,7 +221,7 @@
 MyShellBasedUnixGroupsMapping
-public MyShellBasedUnixGroupsMapping()
+public MyShellBasedUnixGroupsMapping()
 
@@ -238,7 +238,7 @@
 getGroups
-public List<String> getGroups(String user)
+public List<String> getGroups(String user) [truncated]

[08/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.html
index afc4268..9ad4207 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.html
@@ -49,7 +49,7 @@ var activeTableTab = "activeTableTab";
 [top navigation bar: only the "Prev Class" link target changed; anchor text unchanged, hrefs stripped by the archive]
@@ -535,7 +535,7 @@ extends java.lang.Object
 [bottom navigation bar: same "Prev Class" link change]

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureTestingUtility.NoopProcedure.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureTestingUtility.NoopProcedure.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureTestingUtility.NoopProcedure.html
index 885d5c2..fbb062a 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureTestingUtility.NoopProcedure.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/ProcedureTestingUtility.NoopProcedure.html
@@ -156,6 +156,26 @@
 static class  TestMasterProcedureScheduler.TestTableProcedureWithEvent
+static class  TestSchedulerQueueDeadLock.TableExclusiveProcedure
+static class  TestSchedulerQueueDeadLock.TableExclusiveProcedureWithId
+static class  TestSchedulerQueueDeadLock.TableShardParentProcedure
+static class  TestSchedulerQueueDeadLock.TableSharedProcedure
+static class  TestSchedulerQueueDeadLock.TableSharedProcedureWithId

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestLockAndQueue.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestLockAndQueue.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestLockAndQueue.html
new file mode 100644
index 000..4838f1a
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/class-use/TestLockAndQueue.html
@@ -0,0 +1,125 @@
+[standard javadoc "class use" page; navigation chrome omitted. Substantive content:]
+Uses of Class org.apache.hadoop.hbase.procedure2.TestLockAndQueue (Apache HBase 3.0.0-SNAPSHOT Test API)
+No usage of org.apache.hadoop.hbase.procedure2.TestLockAndQueue
+Copyright © 2007-2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
index 2e7f2ac..238aa0d 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-frame.html
@@ -22,6 +22,7 @@
 TestChildProcedures.TestChildProcedure
 TestChildProcedures.TestProcEnv
 TestChildProcedures.TestRootProcedure
+TestLockAndQueue
 TestProcedureBypass
 TestProcedureBypass.RootProcedure
 TestProcedureBypass.StuckProcedure

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/procedure2/package-summary.html

[08/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.html
index 30963fa..257263c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.html
@@ -81,187 +81,198 @@
 073
 074  private ProcedureWALFormat() {}
 075
-076  public static void 
load(IteratorProcedureWALFile logs, ProcedureStoreTracker tracker,
-077  Loader loader) throws IOException 
{
-078ProcedureWALFormatReader reader = new 
ProcedureWALFormatReader(tracker, loader);
-079tracker.setKeepDeletes(true);
-080try {
-081  // Ignore the last log which is 
current active log.
-082  while (logs.hasNext()) {
-083ProcedureWALFile log = 
logs.next();
-084log.open();
-085try {
-086  reader.read(log);
-087} finally {
-088  log.close();
-089}
-090  }
-091  reader.finish();
-092
-093  // The tracker is now updated with 
all the procedures read from the logs
-094  if (tracker.isPartial()) {
-095tracker.setPartialFlag(false);
-096  }
-097  tracker.resetModified();
-098} finally {
-099  tracker.setKeepDeletes(false);
-100}
-101  }
-102
-103  public static void 
writeHeader(OutputStream stream, ProcedureWALHeader header)
-104  throws IOException {
-105header.writeDelimitedTo(stream);
-106  }
-107
-108  /*
-109   * +-+
-110   * | END OF WAL DATA | ---+
-111   * +-+ |
-112   * | | |
-113   * | Tracker | |
-114   * | | |
-115   * +-+ |
-116   * | version | |
-117   * +-+ |
-118   * |  TRAILER_MAGIC  | |
-119   * +-+ |
-120   * |  offset |-+
-121   * +-+
-122   */
-123  public static long 
writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker)
-124  throws IOException {
-125long offset = stream.getPos();
-126
-127// Write EOF Entry
-128ProcedureWALEntry.newBuilder()
-129  
.setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF)
-130  
.build().writeDelimitedTo(stream);
-131
-132// Write Tracker
-133
tracker.toProto().writeDelimitedTo(stream);
-134
-135stream.write(TRAILER_VERSION);
-136StreamUtils.writeLong(stream, 
TRAILER_MAGIC);
-137StreamUtils.writeLong(stream, 
offset);
-138return stream.getPos() - offset;
-139  }
-140
-141  public static ProcedureWALHeader 
readHeader(InputStream stream)
-142  throws IOException {
-143ProcedureWALHeader header;
-144try {
-145  header = 
ProcedureWALHeader.parseDelimitedFrom(stream);
-146} catch 
(InvalidProtocolBufferException e) {
-147  throw new 
InvalidWALDataException(e);
-148}
-149
-150if (header == null) {
-151  throw new 
InvalidWALDataException("No data available to read the Header");
-152}
-153
-154if (header.getVersion()  0 || 
header.getVersion() != HEADER_VERSION) {
-155  throw new 
InvalidWALDataException("Invalid Header version. got " + header.getVersion() 
+
-156  " expected " + 
HEADER_VERSION);
-157}
-158
-159if (header.getType()  0 || 
header.getType()  LOG_TYPE_MAX_VALID) {
-160  throw new 
InvalidWALDataException("Invalid header type. got " + header.getType());
-161}
-162
-163return header;
-164  }
-165
-166  public static ProcedureWALTrailer 
readTrailer(FSDataInputStream stream, long startPos, long size)
-167  throws IOException {
-168// Beginning of the Trailer Jump. 17 
= 1 byte version + 8 byte magic + 8 byte offset
-169long trailerPos = size - 17;
-170
-171if (trailerPos < startPos) {
-172  throw new 
InvalidWALDataException("Missing trailer: size=" + size + " startPos=" + 
startPos);
-173}
-174
-175stream.seek(trailerPos);
-176int version = stream.read();
-177if (version != TRAILER_VERSION) {
-178  throw new 
InvalidWALDataException("Invalid Trailer version. got " + version +
-179  " expected " + 
TRAILER_VERSION);
-180}
+076  /**
+077   * Load all the procedures in these 
ProcedureWALFiles, and rebuild the given {@code tracker} if
+078   * needed, i.e., the {@code tracker} is 
a partial one.
+079   * <p/>
+080   * The method in the given {@code 
loader} will be called at the end after we load all the
+081   * procedures and construct the 
hierarchy.
+082   * <p/>
+083   * And we will call the {@link 
ProcedureStoreTracker#resetModified()} method for the given
+084   * {@code tracker} before returning, as 
it will 
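The readHeader/readTrailer pair above pins the trailer at 17 bytes: a 1-byte version, the 8-byte TRAILER_MAGIC, and the 8-byte offset of the tracker section, written in that order after the tracker. A minimal plain-java sketch of locating and decoding such a trailer, assuming the two longs are fixed 8-byte big-endian values (which the fixed 17-byte arithmetic implies); this is an illustration, not the real ProcedureWALFormat reader:

import java.io.IOException;
import java.io.RandomAccessFile;

final class TrailerSketch {
  static final int TRAILER_SIZE = 17; // 1 version + 8 magic + 8 offset

  // Returns the file offset at which the tracker section begins.
  static long readTrackerOffset(RandomAccessFile wal, int expectedVersion,
      long expectedMagic) throws IOException {
    wal.seek(wal.length() - TRAILER_SIZE); // jump to the start of the trailer
    int version = wal.read();              // 1-byte trailer version
    if (version != expectedVersion) {
      throw new IOException("Invalid trailer version: " + version);
    }
    long magic = wal.readLong();           // 8-byte TRAILER_MAGIC
    if (magic != expectedMagic) {
      throw new IOException("Invalid trailer magic: " + magic);
    }
    return wal.readLong();                 // 8-byte offset of the tracker
  }
}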

[08/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html
index ed3db7a..156dabb 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestHRegion.FlushThread.html
@@ -5542,785 +5542,825 @@
 5534  }
 5535
 5536  @Test
-5537  public void testWriteRequestsCounter() 
throws IOException {
-5538byte[] fam = 
Bytes.toBytes("info");
-5539byte[][] families = { fam };
-5540this.region = initHRegion(tableName, 
method, CONF, families);
+5537  public void 
testReverseScanWhenPutCellsAfterOpenReverseScan() throws Exception {
+5538byte[] cf1 = Bytes.toBytes("CF1");
+5539byte[][] families = { cf1 };
+5540byte[] col = Bytes.toBytes("C");
 5541
-5542Assert.assertEquals(0L, 
region.getWriteRequestsCount());
-5543
-5544Put put = new Put(row);
-5545put.addColumn(fam, fam, fam);
-5546
-5547Assert.assertEquals(0L, 
region.getWriteRequestsCount());
-5548region.put(put);
-5549Assert.assertEquals(1L, 
region.getWriteRequestsCount());
-5550region.put(put);
-5551Assert.assertEquals(2L, 
region.getWriteRequestsCount());
-5552region.put(put);
-5553Assert.assertEquals(3L, 
region.getWriteRequestsCount());
-5554
-5555region.delete(new Delete(row));
-5556Assert.assertEquals(4L, 
region.getWriteRequestsCount());
-5557  }
-5558
-5559  @Test
-5560  public void 
testOpenRegionWrittenToWAL() throws Exception {
-5561final ServerName serverName = 
ServerName.valueOf(name.getMethodName(), 100, 42);
-5562final RegionServerServices rss = 
spy(TEST_UTIL.createMockRegionServerService(serverName));
-5563
-5564HTableDescriptor htd = new 
HTableDescriptor(TableName.valueOf(name.getMethodName()));
-5565htd.addFamily(new 
HColumnDescriptor(fam1));
-5566htd.addFamily(new 
HColumnDescriptor(fam2));
-5567
-5568HRegionInfo hri = new 
HRegionInfo(htd.getTableName(),
-5569  HConstants.EMPTY_BYTE_ARRAY, 
HConstants.EMPTY_BYTE_ARRAY);
-5570
-5571// open the region w/o rss and wal 
and flush some files
-5572region =
-5573 
HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), 
TEST_UTIL
-5574 .getConfiguration(), 
htd);
-5575assertNotNull(region);
-5576
-5577// create a file in fam1 for the 
region before opening in OpenRegionHandler
-5578region.put(new 
Put(Bytes.toBytes("a")).addColumn(fam1, fam1, fam1));
-5579region.flush(true);
-5580
-5580HBaseTestingUtility.closeRegionAndWAL(region);
+5542HBaseConfiguration conf = new 
HBaseConfiguration();
+5543this.region = initHRegion(tableName, 
method, conf, families);
+5544
+5545Put put = new 
Put(Bytes.toBytes("16"));
+5546put.addColumn(cf1, col, 
Bytes.toBytes("val"));
+5547region.put(put);
+5548Put put2 = new 
Put(Bytes.toBytes("15"));
+5549put2.addColumn(cf1, col, 
Bytes.toBytes("val"));
+5550region.put(put2);
+5551
+5552// Create a reverse scan
+5553Scan scan = new 
Scan(Bytes.toBytes("16"));
+5554scan.setReversed(true);
+5555RegionScannerImpl scanner = 
region.getScanner(scan);
+5556
+5557// Put a lot of cells that have 
sequenceIDs greater than the readPt of the reverse scan
+5558for (int i = 10; i < 20; 
i++) {
+5559  Put p = new Put(Bytes.toBytes("" + 
i));
+5560  p.addColumn(cf1, col, 
Bytes.toBytes("" + i));
+5561  region.put(p);
+5562}
+5563List<Cell> currRow = new 
ArrayList<>();
+5564boolean hasNext;
+5565do {
+5566  hasNext = scanner.next(currRow);
+5567} while (hasNext);
+5568
+5569assertEquals(2, currRow.size());
+5570assertEquals("16", 
Bytes.toString(currRow.get(0).getRowArray(),
+5571  currRow.get(0).getRowOffset(), 
currRow.get(0).getRowLength()));
+5572assertEquals("15", 
Bytes.toString(currRow.get(1).getRowArray(),
+5573  currRow.get(1).getRowOffset(), 
currRow.get(1).getRowLength()));
+5574  }
+5575
+5576  @Test
+5577  public void testWriteRequestsCounter() 
throws IOException {
+5578byte[] fam = 
Bytes.toBytes("info");
+5579byte[][] families = { fam };
+5580this.region = initHRegion(tableName, 
method, CONF, families);
 5581
-5582ArgumentCaptor<WALEdit> 
editCaptor = ArgumentCaptor.forClass(WALEdit.class);
+5582Assert.assertEquals(0L, 
region.getWriteRequestsCount());
 5583
-5584// capture append() calls
-5585WAL wal = mockWAL();
-5586when(rss.getWAL((HRegionInfo) 
any())).thenReturn(wal);
-5587
-5588region = HRegion.openHRegion(hri, 
htd, rss.getWAL(hri),
-5589  TEST_UTIL.getConfiguration(), 

[08/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/util/ByteBufferUtils.Converter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/ByteBufferUtils.Converter.html 
b/devapidocs/org/apache/hadoop/hbase/util/ByteBufferUtils.Converter.html
new file mode 100644
index 000..61475fa
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/util/ByteBufferUtils.Converter.html
@@ -0,0 +1,421 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+ByteBufferUtils.Converter (Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.util
+Class 
ByteBufferUtils.Converter
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.util.ByteBufferUtils.Converter
+
+
+
+
+
+
+
+Direct Known Subclasses:
+ByteBufferUtils.ConverterHolder.PureJavaConverter,
 ByteBufferUtils.ConverterHolder.UnsafeConverter
+
+
+Enclosing class:
+ByteBufferUtils
+
+
+
+abstract static class ByteBufferUtils.Converter
+extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+
+
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+Converter()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsAbstract Methods
+
+Modifier and Type
+Method and Description
+
+
+(package private) abstract void
+putInt(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferbuffer,
+  intval)
+
+
+(package private) abstract int
+putInt(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferbuffer,
+  intindex,
+  intval)
+
+
+(package private) abstract int
+putLong(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferbuffer,
+   intindex,
+   longval)
+
+
+(package private) abstract void
+putLong(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferbuffer,
+   longval)
+
+
+(package private) abstract int
+putShort(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferbuffer,
+intindex,
+shortval)
+
+
+(package private) abstract void
+putShort(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferbuffer,
+shortval)
+
+
+(package private) abstract int
+toInt(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in 
java.nio">ByteBufferbuffer)
+
+
+(package private) abstract int
+toInt(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferbuffer,
+ intoffset)
+
+
+(package private) abstract long
+toLong(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferbuffer,
+  intoffset)
+
+
+(package private) abstract short
+toShort(https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBufferbuffer,
+   intoffset)
+
+
+
+
+
+
+Methods inherited from classjava.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in 

[08/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestDeadServer.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestDeadServer.html 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestDeadServer.html
index 11d5ba1..35c9eee 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestDeadServer.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/TestDeadServer.html
@@ -131,60 +131,62 @@
 123
 124DeadServer d = new DeadServer();
 125
-126
-127d.add(hostname123);
-128mee.incValue(1);
-129d.add(hostname1234);
-130mee.incValue(1);
-131d.add(hostname12345);
-132
-133List<Pair<ServerName, 
Long>> copy = d.copyDeadServersSince(2L);
-134Assert.assertEquals(2, 
copy.size());
-135
-136Assert.assertEquals(hostname1234, 
copy.get(0).getFirst());
-137Assert.assertEquals(new Long(2L), 
copy.get(0).getSecond());
-138
-139Assert.assertEquals(hostname12345, 
copy.get(1).getFirst());
-140Assert.assertEquals(new Long(3L), 
copy.get(1).getSecond());
-141
-142EnvironmentEdgeManager.reset();
-143  }
-144
-145  @Test
-146  public void testClean(){
-147DeadServer d = new DeadServer();
-148d.add(hostname123);
-149
-150
d.cleanPreviousInstance(hostname12345);
-151Assert.assertFalse(d.isEmpty());
-152
-153
d.cleanPreviousInstance(hostname1234);
-154Assert.assertFalse(d.isEmpty());
-155
-156
d.cleanPreviousInstance(hostname123_2);
-157Assert.assertTrue(d.isEmpty());
-158  }
-159
-160  @Test
-161  public void testClearDeadServer(){
-162DeadServer d = new DeadServer();
-163d.add(hostname123);
-164d.add(hostname1234);
-165Assert.assertEquals(2, d.size());
-166
+126d.add(hostname123);
+127mee.incValue(1);
+128d.add(hostname1234);
+129mee.incValue(1);
+130d.add(hostname12345);
+131
+132List<Pair<ServerName, 
Long>> copy = d.copyDeadServersSince(2L);
+133Assert.assertEquals(2, 
copy.size());
+134
+135Assert.assertEquals(hostname1234, 
copy.get(0).getFirst());
+136Assert.assertEquals(new Long(2L), 
copy.get(0).getSecond());
+137
+138Assert.assertEquals(hostname12345, 
copy.get(1).getFirst());
+139Assert.assertEquals(new Long(3L), 
copy.get(1).getSecond());
+140
+141EnvironmentEdgeManager.reset();
+142  }
+143
+144  @Test
+145  public void testClean(){
+146DeadServer d = new DeadServer();
+147d.add(hostname123);
+148
+149
d.cleanPreviousInstance(hostname12345);
+150Assert.assertFalse(d.isEmpty());
+151
+152
d.cleanPreviousInstance(hostname1234);
+153Assert.assertFalse(d.isEmpty());
+154
+155
d.cleanPreviousInstance(hostname123_2);
+156Assert.assertTrue(d.isEmpty());
+157  }
+158
+159  @Test
+160  public void testClearDeadServer(){
+161DeadServer d = new DeadServer();
+162d.add(hostname123);
+163d.add(hostname1234);
+164Assert.assertEquals(2, d.size());
+165
+166d.finish(hostname123);
 167d.removeDeadServer(hostname123);
 168Assert.assertEquals(1, d.size());
-169d.removeDeadServer(hostname1234);
-170Assert.assertTrue(d.isEmpty());
-171
-172d.add(hostname1234);
-173
Assert.assertFalse(d.removeDeadServer(hostname123_2));
-174Assert.assertEquals(1, d.size());
-175
Assert.assertTrue(d.removeDeadServer(hostname1234));
-176Assert.assertTrue(d.isEmpty());
-177  }
-178}
-179
+169d.finish(hostname1234);
+170d.removeDeadServer(hostname1234);
+171Assert.assertTrue(d.isEmpty());
+172
+173d.add(hostname1234);
+174
Assert.assertFalse(d.removeDeadServer(hostname123_2));
+175Assert.assertEquals(1, d.size());
+176d.finish(hostname1234);
+177
Assert.assertTrue(d.removeDeadServer(hostname1234));
+178Assert.assertTrue(d.isEmpty());
+179  }
+180}
+181
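The extra d.finish(...) calls above encode a lifecycle assumption: a dead server is add()-ed when it dies, finish()-ed when its processing completes, and only then does removeDeadServer() actually drop it. A minimal sketch of that order, mirroring the test (the ServerName value is illustrative):

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.DeadServer;

class DeadServerLifecycleSketch {
  static void demo() {
    DeadServer d = new DeadServer();
    ServerName sn = ServerName.valueOf("127.0.0.1", 123, 3L);
    d.add(sn);              // mark dead; recovery (e.g. log splitting) pending
    d.finish(sn);           // recovery done
    d.removeDeadServer(sn); // only now is the entry really removed
  }
}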
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.html
index 72719d2..c3ecaef 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestMergeTableRegionsProcedure.html
@@ -32,20 +32,20 @@
 024import 
org.apache.hadoop.conf.Configuration;
 025import 
org.apache.hadoop.hbase.HBaseClassTestRule;
 026import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-027import 
org.apache.hadoop.hbase.HConstants;
-028import 
org.apache.hadoop.hbase.MetaTableAccessor;
-029import 
org.apache.hadoop.hbase.TableName;
-030import 

[08/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
index 2142742..be2a512 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/RegionGroupingProvider.IdentityGroupingStrategy.html
@@ -129,174 +129,171 @@
 121
 122  /** delegate provider for WAL 
creation/roll/close */
 123  public static final String 
DELEGATE_PROVIDER = "hbase.wal.regiongrouping.delegate.provider";
-124  public static final String 
DELEGATE_PROVIDER_CLASS =
-125
"hbase.wal.regiongrouping.delegate.provider.class";
-126  public static final String 
DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider
-127  .name();
+124  public static final String 
DEFAULT_DELEGATE_PROVIDER = WALFactory.Providers.defaultProvider
+125  .name();
+126
+127  private static final String 
META_WAL_GROUP_NAME = "meta";
 128
-129  private static final String 
META_WAL_GROUP_NAME = "meta";
-130
-131  /** A group-provider mapping, make sure 
one-one rather than many-one mapping */
-132  private final ConcurrentMap<String, 
WALProvider> cached = new ConcurrentHashMap<>();
+129  /** A group-provider mapping, make sure 
one-one rather than many-one mapping */
+130  private final ConcurrentMap<String, 
WALProvider> cached = new ConcurrentHashMap<>();
+131
+132  private final KeyLocker<String> 
createLock = new KeyLocker<>();
 133
-134  private final KeyLocker<String> 
createLock = new KeyLocker<>();
-135
-136  private RegionGroupingStrategy 
strategy;
-137  private WALFactory factory;
-138  private Configuration conf;
-139  private List<WALActionsListener> 
listeners = new ArrayList<>();
-140  private String providerId;
-141  private Class<? extends 
WALProvider> providerClass;
-142
-143  @Override
-144  public void init(WALFactory factory, 
Configuration conf, String providerId) throws IOException {
-145if (null != strategy) {
-146  throw new 
IllegalStateException("WALProvider.init should only be called once.");
-147}
-148this.conf = conf;
-149this.factory = factory;
-150StringBuilder sb = new 
StringBuilder().append(factory.factoryId);
-151if (providerId != null) {
-152  if 
(providerId.startsWith(WAL_FILE_NAME_DELIMITER)) {
-153sb.append(providerId);
-154  } else {
-155
sb.append(WAL_FILE_NAME_DELIMITER).append(providerId);
-156  }
-157}
-158this.providerId = sb.toString();
-159this.strategy = getStrategy(conf, 
REGION_GROUPING_STRATEGY, DEFAULT_REGION_GROUPING_STRATEGY);
-160this.providerClass = 
factory.getProviderClass(DELEGATE_PROVIDER_CLASS, DELEGATE_PROVIDER,
-161DEFAULT_DELEGATE_PROVIDER);
-162  }
-163
-164  private WALProvider 
createProvider(String group) throws IOException {
-165WALProvider provider = 
WALFactory.createProvider(providerClass);
-166provider.init(factory, conf,
-167  
META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : group);
-168provider.addWALActionsListener(new 
MetricsWAL());
-169return provider;
-170  }
-171
-172  @Override
-173  public List<WAL> getWALs() {
-174return 
cached.values().stream().flatMap(p -> 
p.getWALs().stream()).collect(Collectors.toList());
-175  }
-176
-177  private WAL getWAL(String group) throws 
IOException {
-178WALProvider provider = 
cached.get(group);
-179if (provider == null) {
-180  Lock lock = 
createLock.acquireLock(group);
-181  try {
-182provider = cached.get(group);
-183if (provider == null) {
-184  provider = 
createProvider(group);
-185  
listeners.forEach(provider::addWALActionsListener);
-186  cached.put(group, provider);
-187}
-188  } finally {
-189lock.unlock();
-190  }
-191}
-192return provider.getWAL(null);
-193  }
-194
-195  @Override
-196  public WAL getWAL(RegionInfo region) 
throws IOException {
-197String group;
-198if 
(META_WAL_PROVIDER_ID.equals(this.providerId)) {
-199  group = META_WAL_GROUP_NAME;
-200} else {
-201  byte[] id;
-202  byte[] namespace;
-203  if (region != null) {
-204id = 
region.getEncodedNameAsBytes();
-205namespace = 
region.getTable().getNamespace();
-206  } else {
-207id = 
HConstants.EMPTY_BYTE_ARRAY;
-208namespace = null;
-209  }
-210  group = strategy.group(id, 
namespace);
-211}
-212return getWAL(group);
-213  }
-214
-215  @Override
-216  public void shutdown() throws 
IOException {
-217// save the last exception and 
rethrow
-218IOException failure = 

[08/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/master/replication/class-use/TestModifyPeerProcedureRetryBackoff.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/replication/class-use/TestModifyPeerProcedureRetryBackoff.html
 
b/testdevapidocs/org/apache/hadoop/hbase/master/replication/class-use/TestModifyPeerProcedureRetryBackoff.html
new file mode 100644
index 000..bb498f0
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/replication/class-use/TestModifyPeerProcedureRetryBackoff.html
@@ -0,0 +1,125 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+Uses of Class 
org.apache.hadoop.hbase.master.replication.TestModifyPeerProcedureRetryBackoff 
(Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+
+
+
+
+
+
+
+
+Uses of 
Class org.apache.hadoop.hbase.master.replication.TestModifyPeerProcedureRetryBackoff
+
+No usage of 
org.apache.hadoop.hbase.master.replication.TestModifyPeerProcedureRetryBackoff
+
+
+
+
+
+Copyright © 2007-2018 The Apache Software Foundation (https://www.apache.org/). All rights 
reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-frame.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-frame.html 
b/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-frame.html
new file mode 100644
index 000..7ac3a9a
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-frame.html
@@ -0,0 +1,21 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+org.apache.hadoop.hbase.master.replication (Apache HBase 3.0.0-SNAPSHOT 
Test API)
+
+
+
+
+org.apache.hadoop.hbase.master.replication
+
+Classes
+
+TestModifyPeerProcedureRetryBackoff
+TestModifyPeerProcedureRetryBackoff.TestModifyPeerProcedure
+
+
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-summary.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-summary.html
 
b/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-summary.html
new file mode 100644
index 000..f735dab
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/replication/package-summary.html
@@ -0,0 +1,147 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+org.apache.hadoop.hbase.master.replication (Apache HBase 3.0.0-SNAPSHOT 
Test API)
+
+
+
+
+
+
+
+
+
+
+Package org.apache.hadoop.hbase.master.replication
+
+
+
+
+
+Class Summary
+
+Class
+Description
+
+
+
+TestModifyPeerProcedureRetryBackoff
+
+
+
+TestModifyPeerProcedureRetryBackoff.TestModifyPeerProcedure
+
+
+
+
+
+
+
+
+
+
+
+
+
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ProcedureExecutor<TEnvironment>
+public class ProcedureExecutor<TEnvironment>
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Thread Pool that executes the submitted procedures.
  The executor has a ProcedureStore associated.
@@ -429,200 +429,206 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 getActiveExecutorCount()
 
 
+https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in java.util">CollectionProcedureTEnvironment
+getActiveProceduresNoCopy()
+Should only be used when starting up, where the procedure 
workers have not been started.
+
+
+
 https://docs.oracle.com/javase/8/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">Sethttps://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long
 getActiveProcIds()
 
-
+
 int
 getCorePoolSize()
 
-
+
 TEnvironment
 getEnvironment()
 
-
+
 long
 getKeepAliveTime(https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnittimeUnit)
 
-
+
 protected long
 getLastProcId()
 
-
+
 T extends ProcedureTEnvironmentT
 getProcedure(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">ClassTclazz,
 longprocId)
 
-
+
 ProcedureTEnvironment
 getProcedure(longprocId)
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListProcedureTEnvironment
 getProcedures()
 Get procedures.
 
 
-
+
 (package private) RootProcedureStateTEnvironment
 getProcStack(longrootProcId)
 
-
+
 ProcedureTEnvironment
 getResult(longprocId)
 
-
+
 ProcedureTEnvironment
 getResultOrProcedure(longprocId)
 
-
+
 (package private) https://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true;
 title="class or interface in java.lang">Long
 getRootProcedureId(ProcedureTEnvironmentproc)
 
-
+
 (package private) ProcedureScheduler
 getScheduler()
 
-
+
 ProcedureStore
 getStore()
 
-
+
 int
 getWorkerThreadCount()
 
-
+
 private void
 handleInterruptedException(ProcedureTEnvironmentproc,
   https://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in 
java.lang">InterruptedExceptione)
 
-
+
 void
 init(intnumThreads,
 booleanabortOnCorruption)
 Initialize the procedure executor, but do not start 
workers.
 
 
-
+
 private ProcedureTEnvironment[]
 initializeChildren(RootProcedureStateTEnvironmentprocStack,
   ProcedureTEnvironmentprocedure,
   ProcedureTEnvironment[]subprocs)
 
-
+
 boolean
 isFinished(longprocId)
 Return true 

[08/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName of the directory 
used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
485if (fs.exists(dir) && 
fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for 
that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Map<byte[], Long> 
maxSeqIdInStores =
-1536  
regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayList<Cell> keptCells = 
new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || 
maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List<Entry> entries = 
buffer.entryBuffer;
-1570  if (entries.isEmpty()) {
-1571LOG.warn("got an empty buffer, 
skipping");
-1572return null;
-1573  }

[08/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
index d2d8da1..5bbbf0c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALFactory.html
@@ -90,391 +90,392 @@
 082  static final String 
DEFAULT_WAL_PROVIDER = Providers.defaultProvider.name();
 083
 084  public static final String 
META_WAL_PROVIDER = "hbase.wal.meta_provider";
-085  static final String 
DEFAULT_META_WAL_PROVIDER = Providers.defaultProvider.name();
-086
-087  final String factoryId;
-088  private final WALProvider provider;
-089  // The meta updates are written to a 
different wal. If this
-090  // regionserver holds meta regions, 
then this ref will be non-null.
-091  // lazily intialized; most 
RegionServers don't deal with META
-092  private final 
AtomicReference<WALProvider> metaProvider = new 
AtomicReference<>();
-093
-094  /**
-095   * Configuration-specified WAL Reader 
used when a custom reader is requested
-096   */
-097  private final Class<? extends 
AbstractFSWALProvider.Reader> logReaderClass;
-098
-099  /**
-100   * How long to attempt opening 
in-recovery wals
-101   */
-102  private final int timeoutMillis;
-103
-104  private final Configuration conf;
-105
-106  // Used for the singleton WALFactory, 
see below.
-107  private WALFactory(Configuration conf) 
{
-108// this code is duplicated here so we 
can keep our members final.
-109// until we've moved reader/writer 
construction down into providers, this initialization must
-110// happen prior to provider 
initialization, in case they need to instantiate a reader/writer.
-111timeoutMillis = 
conf.getInt("hbase.hlog.open.timeout", 30);
-112/* TODO Both of these are probably 
specific to the fs wal provider */
-113logReaderClass = 
conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
-114  
AbstractFSWALProvider.Reader.class);
-115this.conf = conf;
-116// end required early 
initialization
-117
-118// this instance can't create wals, 
just reader/writers.
-119provider = null;
-120factoryId = SINGLETON_ID;
-121  }
-122
-123  @VisibleForTesting
-124  public Class<? extends 
WALProvider> getProviderClass(String key, String defaultValue) {
-125try {
-126  Providers provider = 
Providers.valueOf(conf.get(key, defaultValue));
-127  if (provider != 
Providers.defaultProvider) {
-128// User gives a wal provider 
explicitly, just use that one
-129return provider.clazz;
-130  }
-131  // AsyncFSWAL has better 
performance in most cases, and also uses less resources, we will try
-132  // to use it if possible. But it 
deeply hacks into the internal of DFSClient so will be easily
-133  // broken when upgrading hadoop. If 
it is broken, then we fall back to use FSHLog.
-134  if (AsyncFSWALProvider.load()) {
-135return 
AsyncFSWALProvider.class;
-136  } else {
-137return FSHLogProvider.class;
-138  }
-139} catch (IllegalArgumentException 
exception) {
-140  // Fall back to them specifying a 
class name
-141  // Note that the passed default 
class shouldn't actually be used, since the above only fails
-142  // when there is a config value 
present.
-143  return conf.getClass(key, 
Providers.defaultProvider.clazz, WALProvider.class);
-144}
-145  }
-146
-147  static WALProvider 
createProvider(Class<? extends WALProvider> clazz) throws IOException {
-148LOG.info("Instantiating WALProvider 
of type {}", clazz);
-149try {
-150  return 
clazz.getDeclaredConstructor().newInstance();
-151} catch (Exception e) {
-152  LOG.error("couldn't set up 
WALProvider, the configured class is " + clazz);
-153  LOG.debug("Exception details for 
failure to load WALProvider.", e);
-154  throw new IOException("couldn't set 
up WALProvider", e);
-155}
-156  }
-157
-158  /**
-159   * @param conf must not be null, will 
keep a reference to read params in later reader/writer
-160   *  instances.
-161   * @param factoryId a unique identifier 
for this factory. used i.e. by filesystem implementations
-162   *  to make a directory
-163   */
-164  public WALFactory(Configuration conf, 
String factoryId) throws IOException {
-165// default 
enableSyncReplicationWALProvider is true, only disable 
SyncReplicationWALProvider
-166// for HMaster or HRegionServer which 
take system table only. See HBASE-1
-167this(conf, factoryId, true);
-168  }
-169
-170  /**
-171   * @param conf must not be null, will 
keep a reference to read params in later reader/writer
-172   *  instances.
-173   * @param factoryId a unique identifier 

[08/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 95f2a65..073d0d0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -931,7 +931,7 @@
 923InitMetaProcedure initMetaProc = 
null;
 924if 
(assignmentManager.getRegionStates().getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO)
 925  .isOffline()) {
-926  Optional<Procedure<?>> 
optProc = procedureExecutor.getProcedures().stream()
+926  
Optional<Procedure<MasterProcedureEnv>> optProc = 
procedureExecutor.getProcedures().stream()
927.filter(p -> p instanceof 
InitMetaProcedure).findAny();
 928  if (optProc.isPresent()) {
 929initMetaProc = 
(InitMetaProcedure) optProc.get();
@@ -3210,566 +3210,567 @@
 3202  cpHost.preGetProcedures();
 3203}
 3204
-3205final List<Procedure<?>> 
procList = this.procedureExecutor.getProcedures();
-3206
-3207if (cpHost != null) {
-3208  
cpHost.postGetProcedures(procList);
-3209}
-3210
-3211return procList;
-3212  }
-3213
-3214  @Override
-3215  public List<LockedResource> 
getLocks() throws IOException {
-3216if (cpHost != null) {
-3217  cpHost.preGetLocks();
-3218}
-3219
-3220MasterProcedureScheduler 
procedureScheduler =
-3221  
procedureExecutor.getEnvironment().getProcedureScheduler();
-3222
-3223final List<LockedResource> 
lockedResources = procedureScheduler.getLocks();
-3224
-3225if (cpHost != null) {
-3226  
cpHost.postGetLocks(lockedResources);
-3227}
-3228
-3229return lockedResources;
-3230  }
-3231
-3232  /**
-3233   * Returns the list of table 
descriptors that match the specified request
-3234   * @param namespace the namespace to 
query, or null if querying for all
-3235   * @param regex The regular expression 
to match against, or null if querying for all
-3236   * @param tableNameList the list of 
table names, or null if querying for all
-3237   * @param includeSysTables False to 
match only against userspace tables
-3238   * @return the list of table 
descriptors
-3239   */
-3240  public List<TableDescriptor> 
listTableDescriptors(final String namespace, final String regex,
-3241  final List<TableName> 
tableNameList, final boolean includeSysTables)
-3242  throws IOException {
-3243List<TableDescriptor> htds = 
new ArrayList<>();
-3244if (cpHost != null) {
-3245  
cpHost.preGetTableDescriptors(tableNameList, htds, regex);
-3246}
-3247htds = getTableDescriptors(htds, 
namespace, regex, tableNameList, includeSysTables);
-3248if (cpHost != null) {
-3249  
cpHost.postGetTableDescriptors(tableNameList, htds, regex);
-3250}
-3251return htds;
-3252  }
-3253
-3254  /**
-3255   * Returns the list of table names 
that match the specified request
-3256   * @param regex The regular expression 
to match against, or null if querying for all
-3257   * @param namespace the namespace to 
query, or null if querying for all
-3258   * @param includeSysTables False to 
match only against userspace tables
-3259   * @return the list of table names
-3260   */
-3261  public List<TableName> 
listTableNames(final String namespace, final String regex,
-3262  final boolean includeSysTables) 
throws IOException {
-3263List<TableDescriptor> htds = 
new ArrayList<>();
-3264if (cpHost != null) {
-3265  cpHost.preGetTableNames(htds, 
regex);
-3266}
-3267htds = getTableDescriptors(htds, 
namespace, regex, null, includeSysTables);
-3268if (cpHost != null) {
-3269  cpHost.postGetTableNames(htds, 
regex);
-3270}
-3271List<TableName> result = new 
ArrayList<>(htds.size());
-3272for (TableDescriptor htd: htds) 
result.add(htd.getTableName());
-3273return result;
-3274  }
-3275
-3276  /**
-3277   * @return list of table table 
descriptors after filtering by regex and whether to include system
-3278   *tables, etc.
-3279   * @throws IOException
-3280   */
-3281  private List<TableDescriptor> 
getTableDescriptors(final List<TableDescriptor> htds,
-3282  final String namespace, final 
String regex, final List<TableName> tableNameList,
-3283  final boolean includeSysTables)
-3284  throws IOException {
-3285if (tableNameList == null || 
tableNameList.isEmpty()) {
-3286  // request for all 
TableDescriptors
-3287  Collection<TableDescriptor> 
allHtds;
-3288  if (namespace != null && 
namespace.length() > 0) {
-3289// Do a check on the namespace 
existence. Will fail if does not exist.
-3290
this.clusterSchemaService.getNamespace(namespace);
-3291allHtds = 

[08/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
@@ -540,1205 +540,1204 @@
 532  
sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) - {
533Deque<BalancerRegionLoad> 
rLoads = oldLoads.get(Bytes.toString(regionName));
 534if (rLoads == null) {
-535  // There was nothing there
-536  rLoads = new 
ArrayDeque<>();
-537} else if (rLoads.size() >= 
numRegionLoadsToRemember) {
-538  rLoads.remove();
-539}
-540rLoads.add(new 
BalancerRegionLoad(rm));
-541
loads.put(Bytes.toString(regionName), rLoads);
-542  });
-543});
-544
-545for(CostFromRegionLoadFunction cost : 
regionLoadFunctions) {
-546  cost.setLoads(loads);
-547}
-548  }
-549
-550  protected void initCosts(Cluster 
cluster) {
-551for (CostFunction c:costFunctions) 
{
-552  c.init(cluster);
-553}
-554  }
-555
-556  protected void 
updateCostsWithAction(Cluster cluster, Action action) {
-557for (CostFunction c : costFunctions) 
{
-558  c.postAction(action);
-559}
-560  }
-561
-562  /**
-563   * Get the names of the cost 
functions
-564   */
-565  public String[] getCostFunctionNames() 
{
-566if (costFunctions == null) return 
null;
-567String[] ret = new 
String[costFunctions.length];
-568for (int i = 0; i < 
costFunctions.length; i++) {
-569  CostFunction c = 
costFunctions[i];
-570  ret[i] = 
c.getClass().getSimpleName();
-571}
-572
-573return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It 
will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be 
combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the 
cluster
-581   * @param previousCost the previous 
cost. This is used as an early out.
-582   * @return a double of a cost 
associated with the proposed cluster state.  This cost is an
-583   * aggregate of all individual 
cost functions.
-584   */
-585  protected double computeCost(Cluster 
cluster, double previousCost) {
-586double total = 0;
-587
-588for (int i = 0; i < 
costFunctions.length; i++) {
-589  CostFunction c = 
costFunctions[i];
-590  this.tempFunctionCosts[i] = 0.0;
-591
-592  if (c.getMultiplier() <= 0) {
-593continue;
-594  }
-595
-596  Float multiplier = 
c.getMultiplier();
-597  Double cost = c.cost();
-598
-599  this.tempFunctionCosts[i] = 
multiplier*cost;
-600  total += 
this.tempFunctionCosts[i];
-601
-602  if (total > previousCost) {
-603break;
-604  }
-605}
-606
-607return total;
-608  }
-609
-610  /** Generates a candidate action to be 
applied to the cluster for cost function search */
-611  abstract static class 
CandidateGenerator {
-612abstract Cluster.Action 
generate(Cluster cluster);
-613
-614/**
-615 * From a list of regions pick a 
random one. Null can be returned which
-616 * {@link 
StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region 
move
-617 * rather than swap.
-618 *
-619 * @param clusterThe state of 
the cluster
-620 * @param server index of the 
server
-621 * @param chanceOfNoSwap Chance that 
this will decide to try a move rather
-622 *   than a 
swap.
-623 * @return a random {@link 
RegionInfo} or null if an asymmetrical move is
-624 * suggested.
-625 */
-626protected int 
pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627  // Check to see if this is just a 
move.
-628  if 
(cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < 
chanceOfNoSwap) {
-629// signal a move only.
-630return -1;
-631  }
-632  int rand = 
RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633  return 
cluster.regionsPerServer[server][rand];
-634
-635}
-636protected int 
pickRandomServer(Cluster cluster) {
-637  if (cluster.numServers < 1) {
-638return -1;
-639  }
-640
-641  return 
RANDOM.nextInt(cluster.numServers);
-642}
-643
-644protected int pickRandomRack(Cluster 
cluster) {
-645  if (cluster.numRacks < 1) {
-646return -1;
-647  }
-648

[08/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/class-use/CellScanner.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/class-use/CellScanner.html 
b/apidocs/org/apache/hadoop/hbase/class-use/CellScanner.html
index dae577c..6b2e30c 100644
--- a/apidocs/org/apache/hadoop/hbase/class-use/CellScanner.html
+++ b/apidocs/org/apache/hadoop/hbase/class-use/CellScanner.html
@@ -1,10 +1,10 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
-接口 org.apache.hadoop.hbase.CellScanner的使用 (Apache HBase 
3.0.0-SNAPSHOT API)
+Uses of Interface org.apache.hadoop.hbase.CellScanner (Apache HBase 
3.0.0-SNAPSHOT API)
 
 
 
@@ -12,7 +12,7 @@
 
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
 
 
 
-

接口的使用
org.apache.hadoop.hbase.CellScanner

+

Uses of Interface
org.apache.hadoop.hbase.CellScanner

[flattened table diff: "Packages that use CellScanner" (使用CellScanner的程序包): the localized 程序包/说明 column headers are replaced by Package/Description, and the long org.apache.hadoop.hbase.client blurb ("Provides HBase Client ... To administer HBase, create and drop tables, list and alter tables, use Admin.") is shortened to "Provides HBase Client".]

Uses of CellScanner in org.apache.hadoop.hbase

[flattened table diff: the localized header 返回CellScanner的org.apache.hadoop.hbase中的方法 is replaced by "Methods in org.apache.hadoop.hbase that return CellScanner".]

    [08/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

    http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
    --
    diff --git a/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html 
    b/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
    index 503a09a..4729dfd 100644
    --- a/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
    +++ b/apidocs/org/apache/hadoop/hbase/class-use/CellBuilder.html
    @@ -1,10 +1,10 @@
     http://www.w3.org/TR/html4/loose.dtd;>
     
    -
    +
     
     
     
    -Uses of Interface org.apache.hadoop.hbase.CellBuilder (Apache HBase 
    3.0.0-SNAPSHOT API)
    +接口 org.apache.hadoop.hbase.CellBuilder的使用 (Apache HBase 
    3.0.0-SNAPSHOT API)
     
     
     
    @@ -12,7 +12,7 @@
     
     
     
    -JavaScript is disabled on your browser.
    +您的浏览器已禁用 JavaScript。
     
     
     
     
     
     
     
     
     
    -

    Uses of Interface
    org.apache.hadoop.hbase.CellBuilder

    +

    接口的使用
    org.apache.hadoop.hbase.CellBuilder

[flattened table diff: "Packages that use CellBuilder": the English Package/Description headers are replaced by the localized 程序包/说明 (this page's diff goes English to Chinese).]

Uses of CellBuilder in org.apache.hadoop.hbase (org.apache.hadoop.hbase中CellBuilder的使用)

[flattened table diff: "Methods in org.apache.hadoop.hbase that return CellBuilder" (返回CellBuilder的org.apache.hadoop.hbase中的方法): the "Modifier and Type / Method and Description" headers are swapped with the localized 限定符和类型 / 方法和说明; the first listed return type is CellBuilder.]

      [08/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.html
      index 05e032c..40ef9f4 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.html
      @@ -25,767 +25,805 @@
       017 */
       018package 
      org.apache.hadoop.hbase.io.asyncfs;
       019
      -020import static 
      org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
      -021import static 
      org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
      +020import static 
      org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
      +021import static 
      org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
       022
      -023import 
      org.apache.hbase.thirdparty.com.google.common.base.Charsets;
      -024import 
      org.apache.hbase.thirdparty.com.google.common.base.Throwables;
      -025import 
      org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
      -026import 
      org.apache.hbase.thirdparty.com.google.common.collect.Maps;
      -027import 
      com.google.protobuf.CodedOutputStream;
      -028
      -029import 
      org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
      -030import 
      org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream;
      -031import 
      org.apache.hbase.thirdparty.io.netty.buffer.CompositeByteBuf;
      -032import 
      org.apache.hbase.thirdparty.io.netty.buffer.Unpooled;
      -033import 
      org.apache.hbase.thirdparty.io.netty.channel.Channel;
      -034import 
      org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler;
      -035import 
      org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
      -036import 
      org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandlerAdapter;
      -037import 
      org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline;
      -038import 
      org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise;
      -039import 
      org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler;
      -040import 
      org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder;
      -041import 
      org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToByteEncoder;
      -042import 
      org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufDecoder;
      -043import 
      org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
      -044import 
      org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent;
      -045import 
      org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
      -046import 
      org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
      -047
      -048import java.io.IOException;
      -049import java.lang.reflect.Field;
      -050import 
      java.lang.reflect.InvocationTargetException;
      -051import java.lang.reflect.Method;
      -052import java.net.InetAddress;
      -053import java.net.InetSocketAddress;
      -054import java.nio.ByteBuffer;
      -055import 
      java.security.GeneralSecurityException;
      -056import java.util.Arrays;
      -057import java.util.Collections;
      -058import java.util.List;
      -059import java.util.Map;
      -060import java.util.Set;
      -061import java.util.concurrent.TimeUnit;
      -062import 
      java.util.concurrent.atomic.AtomicBoolean;
      -063
      -064import 
      javax.security.auth.callback.Callback;
      -065import 
      javax.security.auth.callback.CallbackHandler;
      -066import 
      javax.security.auth.callback.NameCallback;
      -067import 
      javax.security.auth.callback.PasswordCallback;
      -068import 
      javax.security.auth.callback.UnsupportedCallbackException;
      -069import 
      javax.security.sasl.RealmCallback;
      -070import 
      javax.security.sasl.RealmChoiceCallback;
      -071import javax.security.sasl.Sasl;
      -072import javax.security.sasl.SaslClient;
      -073import 
      javax.security.sasl.SaslException;
      -074
      -075import 
      org.apache.commons.codec.binary.Base64;
      -076import 
      org.apache.commons.lang3.StringUtils;
      -077import 
      org.apache.hadoop.conf.Configuration;
      -078import 
      org.apache.hadoop.crypto.CipherOption;
      -079import 
      org.apache.hadoop.crypto.CipherSuite;
      -080import 
      org.apache.hadoop.crypto.CryptoCodec;
      -081import 
      org.apache.hadoop.crypto.Decryptor;
      -082import 
      org.apache.hadoop.crypto.Encryptor;
      -083import 
      org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
      -084import 
      org.apache.hadoop.fs.FileEncryptionInfo;
      -085import 
      org.apache.yetus.audience.InterfaceAudience;
      -086import org.slf4j.Logger;
      -087import org.slf4j.LoggerFactory;
      -088
      -089import com.google.protobuf.ByteString;
      -090import 
      org.apache.hadoop.hdfs.DFSClient;
      -091import 
      org.apache.hadoop.hdfs.protocol.DatanodeInfo;
      -092import 
      org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
      -093import 
      org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
      

      [08/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
index c10cfbf..a3e2f4a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
@@ -3371,7 +3371,7 @@
 3363    private V result = null;
 3364
 3365    private final HBaseAdmin admin;
-3366    private final Long procId;
+3366    protected final Long procId;
 3367
 3368    public ProcedureFuture(final HBaseAdmin admin, final Long procId) {
 3369      this.admin = admin;
@@ -3653,653 +3653,651 @@
 3645     * @return a description of the operation
 3646     */
 3647    protected String getDescription() {
-3648      return "Operation: " + getOperationType() + ", "
-3649          + "Table Name: " + tableName.getNameWithNamespaceInclAsString();
-3650
-3651    }
-3652
-3653    protected abstract class TableWaitForStateCallable implements WaitForStateCallable {
-3654      @Override
-3655      public void throwInterruptedException() throws InterruptedIOException {
-3656        throw new InterruptedIOException("Interrupted while waiting for operation: "
-3657            + getOperationType() + " on table: " + tableName.getNameWithNamespaceInclAsString());
-3658      }
-3659
-3660      @Override
-3661      public void throwTimeoutException(long elapsedTime) throws TimeoutException {
-3662        throw new TimeoutException("The operation: " + getOperationType() + " on table: " +
-3663            tableName.getNameAsString() + " has not completed after " + elapsedTime + "ms");
-3664      }
-3665    }
-3666
-3667    @Override
-3668    protected V postOperationResult(final V result, final long deadlineTs)
-3669        throws IOException, TimeoutException {
-3670      LOG.info(getDescription() + " completed");
-3671      return super.postOperationResult(result, deadlineTs);
-3672    }
-3673
-3674    @Override
-3675    protected V postOperationFailure(final IOException exception, final long deadlineTs)
-3676        throws IOException, TimeoutException {
-3677      LOG.info(getDescription() + " failed with " + exception.getMessage());
-3678      return super.postOperationFailure(exception, deadlineTs);
-3679    }
-3680
-3681    protected void waitForTableEnabled(final long deadlineTs)
-3682        throws IOException, TimeoutException {
-3683      waitForState(deadlineTs, new TableWaitForStateCallable() {
-3684        @Override
-3685        public boolean checkState(int tries) throws IOException {
-3686          try {
-3687            if (getAdmin().isTableAvailable(tableName)) {
-3688              return true;
-3689            }
-3690          } catch (TableNotFoundException tnfe) {
-3691            LOG.debug("Table " + tableName.getNameWithNamespaceInclAsString()
-3692                + " was not enabled, sleeping. tries=" + tries);
-3693          }
-3694          return false;
-3695        }
-3696      });
-3697    }
-3698
-3699    protected void waitForTableDisabled(final long deadlineTs)
-3700        throws IOException, TimeoutException {
-3701      waitForState(deadlineTs, new TableWaitForStateCallable() {
-3702        @Override
-3703        public boolean checkState(int tries) throws IOException {
-3704          return getAdmin().isTableDisabled(tableName);
-3705        }
-3706      });
-3707    }
-3708
-3709    protected void waitTableNotFound(final long deadlineTs)
-3710        throws IOException, TimeoutException {
-3711      waitForState(deadlineTs, new TableWaitForStateCallable() {
-3712        @Override
-3713        public boolean checkState(int tries) throws IOException {
-3714          return !getAdmin().tableExists(tableName);
-3715        }
-3716      });
-3717    }
-3718
-3719    protected void waitForSchemaUpdate(final long deadlineTs)
-3720        throws IOException, TimeoutException {
-3721      waitForState(deadlineTs, new TableWaitForStateCallable() {
-3722        @Override
-3723        public boolean checkState(int tries) throws IOException {
-3724          return getAdmin().getAlterStatus(tableName).getFirst() == 0;
-3725        }
-3726      });
-3727    }
-3728
-3729    protected void waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
-3730        throws IOException, TimeoutException {
-3731      final TableDescriptor desc = getTableDescriptor();
-3732      final AtomicInteger actualRegCount = new AtomicInteger(0);
-3733      final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
-3734        @Override
-3735        public boolean visit(Result rowResult) throws IOException {
-3736
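
The hunk above shows the polling idiom these admin futures rely on: each wait helper hands a checkState callback to waitForState, which retries until a deadline and converts interruption or expiry into the callable's exceptions. A minimal standalone sketch of that pattern, with illustrative names rather than the actual HBase classes:

  import java.io.IOException;
  import java.util.concurrent.TimeoutException;

  interface WaitForStateCallable {
    boolean checkState(int tries) throws IOException;
  }

  class StatePoller {
    // Poll checkState until it returns true or deadlineTs (ms since epoch) passes.
    static void waitForState(long deadlineTs, WaitForStateCallable callable)
        throws IOException, TimeoutException, InterruptedException {
      int tries = 0;
      while (System.currentTimeMillis() < deadlineTs) {
        if (callable.checkState(tries++)) {
          return;
        }
        Thread.sleep(100); // back off between probes
      }
      throw new TimeoutException("state not reached after " + tries + " tries");
    }
  }

The real code layers TableWaitForStateCallable on top of this so every table operation shares the same interruption and timeout messages.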

      [08/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
index 39e672d..399dae0 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";

 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class MasterProcedureEnv
+public class MasterProcedureEnv
 extends Object
 implements ConfigurationObserver
       
@@ -224,46 +224,50 @@ implements getMasterCoprocessorHost()

+MasterFileSystem
+getMasterFileSystem()
+
 MasterServices
 getMasterServices()

 MasterProcedureScheduler
 getProcedureScheduler()

 RSProcedureDispatcher
 getRemoteDispatcher()

 ReplicationPeerManager
 getReplicationPeerManager()

 User
 getRequestUser()

 boolean
 isInitialized()

 boolean
 isRunning()

 void
 onConfigurationChange(org.apache.hadoop.conf.Configuration conf)
 This method would be called by the ConfigurationManager object when the Configuration object is reloaded from disk.

 void
 setEventReady(ProcedureEvent<?> event, boolean isReady)

 boolean
 waitInitialized(Procedure<?> proc)
       
@@ -295,7 +299,7 @@ implements

 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG

@@ -304,7 +308,7 @@ implements

 remoteDispatcher
-private final RSProcedureDispatcher remoteDispatcher
+private final RSProcedureDispatcher remoteDispatcher

@@ -313,7 +317,7 @@ implements

 procSched
-private final MasterProcedureScheduler procSched
+private final MasterProcedureScheduler procSched

@@ -322,7 +326,7 @@ implements

 master
-private final MasterServices master
+private final MasterServices master

@@ -339,7 +343,7 @@ implements

 MasterProcedureEnv
-public MasterProcedureEnv(MasterServices master)
+public MasterProcedureEnv(MasterServices master)

@@ -348,7 +352,7 @@ implements

 MasterProcedureEnv
-public MasterProcedureEnv(MasterServices master,
+public MasterProcedureEnv(MasterServices master,
     RSProcedureDispatcher remoteDispatcher)

@@ -366,7 +370,7 @@ implements

 getRequestUser
-public User getRequestUser()
+public User getRequestUser()

@@ -375,7 +379,7 @@ implements

 getMasterServices
-public MasterServices getMasterServices()
+public MasterServices getMasterServices()

@@ -384,7 +388,7 @@ implements

 getMasterConfiguration
-public org.apache.hadoop.conf.Configuration getMasterConfiguration()
+public org.apache.hadoop.conf.Configuration getMasterConfiguration()

@@ -393,7 +397,7 @@ implements

 getAssignmentManager
-public AssignmentManager getAssignmentManager()
+public AssignmentManager getAssignmentManager()

@@ -402,7 +406,7 @@ implements

 getMasterCoprocessorHost
-public MasterCoprocessorHost getMasterCoprocessorHost()
+public MasterCoprocessorHost getMasterCoprocessorHost()

@@ -411,7 +415,7 @@ implements

 getProcedureScheduler
-public MasterProcedureScheduler getProcedureScheduler()
+public MasterProcedureScheduler getProcedureScheduler()

@@ -420,7 +424,7 @@ implements

 getRemoteDispatcher
-public RSProcedureDispatcher getRemoteDispatcher()
+public RSProcedureDispatcher getRemoteDispatcher()

@@ -429,7 +433,16 @@ implements

 getReplicationPeerManager
-public ReplicationPeerManager getReplicationPeerManager()
+public ReplicationPeerManager getReplicationPeerManager()
+
+
+getMasterFileSystem
+public MasterFileSystem getMasterFileSystem()

@@ -438,7 +451,7 @@ implements

 isRunning
-public boolean isRunning()
+public boolean isRunning()

@@ -447,7 +460,7 @@ implements

 isInitialized
-public boolean isInitialized()
+public boolean isInitialized()

@@ -456,7 +469,7 @@ implements

 waitInitialized
-public boolean waitInitialized(Procedure<?> proc)
+public boolean waitInitialized(Procedure<?> proc)

@@ -465,7 +478,7 @@ implements

 setEventReady
-public void setEventReady(ProcedureEvent<?> event,
+public void setEventReady(ProcedureEvent<?> event,
     boolean isReady)
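
The change adds getMasterFileSystem() to MasterProcedureEnv, so master-side procedures can reach the filesystem through the same env object that already exposes the scheduler, dispatcher, and services. A hedged sketch of a procedure step using it; the step method and its surroundings are hypothetical, only the env accessors come from the page above:

  // Sketch only; not the actual HBase procedure code.
  void executeStep(MasterProcedureEnv env) throws java.io.IOException {
    if (!env.isInitialized()) {
      return; // master not fully up; a real procedure would wait on the init event
    }
    MasterFileSystem mfs = env.getMasterFileSystem(); // accessor added by this change
    org.apache.hadoop.fs.FileSystem fs = mfs.getFileSystem(); // assumption: existing MasterFileSystem getter
    // ... operate on files under the master root dir via fs ...
  }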

      [08/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
index 997342c..92ec202 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6};
+var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -129,35 +129,41 @@ public interface

 long
+getCpRequestsCount(String table)
+Get the number of CoprocessorService requests that have been issued against this table
+
+
+long
 getMemStoresSize(String table)
 Get the memory store size against this table

 long
 getReadRequestsCount(String table)
 Get the number of read requests that have been issued against this table

 long
 getStoreFilesSize(String table)
 Get the store file size against this table

 long
 getTableSize(String table)
 Get the table region size against this table

 long
 getTotalRequestsCount(String table)
 Get the total number of requests that have been issued against this table

 long
 getWriteRequestsCount(String table)
 Get the number of write requests that have been issued against this table
@@ -188,13 +194,23 @@ public interface
 Get the number of read requests that have been issued against this table

+getCpRequestsCount
+long getCpRequestsCount(String table)
+Get the number of CoprocessorService requests that have been issued against this table

 getWriteRequestsCount
-long getWriteRequestsCount(String table)
+long getWriteRequestsCount(String table)
 Get the number of write requests that have been issued against this table

@@ -204,7 +220,7 @@ public interface

 getTotalRequestsCount
-long getTotalRequestsCount(String table)
+long getTotalRequestsCount(String table)
 Get the total number of requests that have been issued against this table

@@ -214,7 +230,7 @@ public interface

 getMemStoresSize
-long getMemStoresSize(String table)
+long getMemStoresSize(String table)
 Get the memory store size against this table

@@ -224,7 +240,7 @@ public interface

 getStoreFilesSize
-long getStoreFilesSize(String table)
+long getStoreFilesSize(String table)
 Get the store file size against this table

@@ -234,7 +250,7 @@ public interface

 getTableSize
-long getTableSize(String table)
      

      [08/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 74bacd8..546d2b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -2249,1468 +2249,1484 @@
 2241  }
 2242
 2243  @Override
-2244  public long addColumn(
-2245      final TableName tableName,
-2246      final ColumnFamilyDescriptor column,
-2247      final long nonceGroup,
-2248      final long nonce)
-2249      throws IOException {
-2250    checkInitialized();
-2251    checkTableExists(tableName);
-2252
-2253    TableDescriptor old = getTableDescriptors().get(tableName);
-2254    if (old.hasColumnFamily(column.getName())) {
-2255      throw new InvalidFamilyOperationException("Column family '" + column.getNameAsString()
-2256          + "' in table '" + tableName + "' already exists so cannot be added");
-2257    }
+2244  public long addColumn(final TableName tableName, final ColumnFamilyDescriptor column,
+2245      final long nonceGroup, final long nonce) throws IOException {
+2246    checkInitialized();
+2247    checkTableExists(tableName);
+2248
+2249    return modifyTable(tableName, new TableDescriptorGetter() {
+2250
+2251      @Override
+2252      public TableDescriptor get() throws IOException {
+2253        TableDescriptor old = getTableDescriptors().get(tableName);
+2254        if (old.hasColumnFamily(column.getName())) {
+2255          throw new InvalidFamilyOperationException("Column family '" + column.getNameAsString()
+2256              + "' in table '" + tableName + "' already exists so cannot be added");
+2257        }
 2258
-2259    TableDescriptor newDesc = TableDescriptorBuilder
-2260        .newBuilder(old).setColumnFamily(column).build();
-2261    return modifyTable(tableName, newDesc, nonceGroup, nonce);
+2259        return TableDescriptorBuilder.newBuilder(old).setColumnFamily(column).build();
+2260      }
+2261    }, nonceGroup, nonce);
 2262  }
 2263
-2264  @Override
-2265  public long modifyColumn(
-2266      final TableName tableName,
-2267      final ColumnFamilyDescriptor descriptor,
-2268      final long nonceGroup,
-2269      final long nonce)
-2270      throws IOException {
-2271    checkInitialized();
-2272    checkTableExists(tableName);
-2273
-2274    TableDescriptor old = getTableDescriptors().get(tableName);
-2275    if (! old.hasColumnFamily(descriptor.getName())) {
-2276      throw new InvalidFamilyOperationException("Family '" + descriptor.getNameAsString()
-2277          + "' does not exist, so it cannot be modified");
-2278    }
-2279
-2280    TableDescriptor td = TableDescriptorBuilder
-2281        .newBuilder(old)
-2282        .modifyColumnFamily(descriptor)
-2283        .build();
-2284
-2285    return modifyTable(tableName, td, nonceGroup, nonce);
-2286  }
-2287
-2288  @Override
-2289  public long deleteColumn(
-2290      final TableName tableName,
-2291      final byte[] columnName,
-2292      final long nonceGroup,
-2293      final long nonce)
-2294      throws IOException {
-2295    checkInitialized();
-2296    checkTableExists(tableName);
-2297
-2298    TableDescriptor old = getTableDescriptors().get(tableName);
-2299
-2300    if (! old.hasColumnFamily(columnName)) {
-2301      throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName)
-2302          + "' does not exist, so it cannot be deleted");
-2303    }
-2304    if (old.getColumnFamilyCount() == 1) {
-2305      throw new InvalidFamilyOperationException("Family '" + Bytes.toString(columnName)
-2306          + "' is the only column family in the table, so it cannot be deleted");
-2307    }
-2308
-2309    TableDescriptor td = TableDescriptorBuilder
-2310        .newBuilder(old).removeColumnFamily(columnName).build();
-2311    return modifyTable(tableName, td, nonceGroup, nonce);
-2312  }
-2313
-2314  @Override
-2315  public long enableTable(final TableName tableName, final long nonceGroup, final long nonce)
-2316      throws IOException {
-2317    checkInitialized();
-2318
-2319    return MasterProcedureUtil.submitProcedure(
-2320        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-2321      @Override
-2322      protected void run() throws IOException {
-2323        getMaster().getMasterCoprocessorHost().preEnableTable(tableName);
-2324
-2325        // Normally, it would make sense for this authorization check to exist inside
-2326        // AccessController, but because the authorization check is done based on internal state
-2327        // (rather than explicit permissions) we'll do
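
The refactor visible in this hunk moves descriptor construction into a TableDescriptorGetter callback passed to modifyTable, so the new descriptor is built and validated inside the shared procedure-submission path instead of eagerly in each caller. A sketch of the shape of that pattern; the getter interface follows the hunk, everything else is a stand-in:

  import java.io.IOException;

  public class ModifyTableSketch {
    interface TableDescriptor {}

    // The callback produces the new table descriptor when the operation runs.
    interface TableDescriptorGetter {
      TableDescriptor get() throws IOException;
    }

    // Simplified driver: addColumn/modifyColumn/deleteColumn all funnel through here,
    // and get() may throw (e.g. family already exists) at submission time.
    long modifyTable(String tableName, TableDescriptorGetter getter, long nonceGroup, long nonce)
        throws IOException {
      TableDescriptor newDescriptor = getter.get();
      return submitModifyTableProcedure(tableName, newDescriptor, nonceGroup, nonce);
    }

    long submitModifyTableProcedure(String table, TableDescriptor td, long group, long nonce) {
      return 42L; // stand-in for procedure submission returning a procId
    }
  }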

      [08/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
index dd2def7..498695e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterWalManager.html
@@ -34,282 +34,308 @@
 026import java.util.Set;
 027import java.util.concurrent.locks.Lock;
 028import java.util.concurrent.locks.ReentrantLock;
-029
-030import org.apache.hadoop.conf.Configuration;
-031import org.apache.hadoop.fs.FileStatus;
-032import org.apache.hadoop.fs.FileSystem;
-033import org.apache.hadoop.fs.Path;
-034import org.apache.hadoop.fs.PathFilter;
-035import org.apache.hadoop.hbase.HConstants;
-036import org.apache.hadoop.hbase.ServerName;
-037import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-038import org.apache.hadoop.hbase.util.FSUtils;
-039import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-040import org.apache.hadoop.hbase.wal.WALSplitter;
-041import org.apache.yetus.audience.InterfaceAudience;
-042import org.slf4j.Logger;
-043import org.slf4j.LoggerFactory;
-044import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-045
-046/**
-047 * This class abstracts a bunch of operations the HMaster needs
-048 * when splitting log files e.g. finding log files, dirs etc.
-049 */
-050@InterfaceAudience.Private
-051public class MasterWalManager {
-052  private static final Logger LOG = LoggerFactory.getLogger(MasterWalManager.class);
-053
-054  final static PathFilter META_FILTER = new PathFilter() {
-055    @Override
-056    public boolean accept(Path p) {
-057      return AbstractFSWALProvider.isMetaFile(p);
-058    }
-059  };
-060
-061  final static PathFilter NON_META_FILTER = new PathFilter() {
-062    @Override
-063    public boolean accept(Path p) {
-064      return !AbstractFSWALProvider.isMetaFile(p);
-065    }
-066  };
-067
-068  // metrics for master
-069  // TODO: Rename it, since those metrics are split-manager related
-070  private final MetricsMasterFileSystem metricsMasterFilesystem = new MetricsMasterFileSystem();
-071
-072  // Keep around for convenience.
-073  private final MasterServices services;
-074  private final Configuration conf;
-075  private final FileSystem fs;
-076
-077  // The Path to the old logs dir
-078  private final Path oldLogDir;
-079  private final Path rootDir;
-080
-081  // create the split log lock
-082  private final Lock splitLogLock = new ReentrantLock();
-083  private final SplitLogManager splitLogManager;
-084
-085  // Is the fileystem ok?
-086  private volatile boolean fsOk = true;
-087
-088  public MasterWalManager(MasterServices services) throws IOException {
-089    this(services.getConfiguration(), services.getMasterFileSystem().getWALFileSystem(),
-090      services.getMasterFileSystem().getWALRootDir(), services);
-091  }
-092
-093  public MasterWalManager(Configuration conf, FileSystem fs, Path rootDir, MasterServices services)
-094      throws IOException {
-095    this.fs = fs;
-096    this.conf = conf;
-097    this.rootDir = rootDir;
-098    this.services = services;
-099    this.splitLogManager = new SplitLogManager(services, conf);
-100
-101    this.oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-102  }
-103
-104  public void stop() {
-105    if (splitLogManager != null) {
-106      splitLogManager.stop();
-107    }
-108  }
-109
-110  @VisibleForTesting
-111  SplitLogManager getSplitLogManager() {
-112    return this.splitLogManager;
-113  }
-114
-115  /**
-116   * Get the directory where old logs go
-117   * @return the dir
-118   */
-119  Path getOldLogDir() {
-120    return this.oldLogDir;
-121  }
-122
-123  public FileSystem getFileSystem() {
-124    return this.fs;
-125  }
-126
-127  /**
-128   * Checks to see if the file system is still accessible.
-129   * If not, sets closed
-130   * @return false if file system is not available
-131   */
-132  private boolean checkFileSystem() {
-133    if (this.fsOk) {
-134      try {
-135        FSUtils.checkFileSystemAvailable(this.fs);
-136        FSUtils.checkDfsSafeMode(this.conf);
-137      } catch (IOException e) {
-138        services.abort("Shutting down HBase cluster: file system not available", e);
-139        this.fsOk = false;
-140      }
-141    }
-142    return this.fsOk;
-143  }
-144
-145  /**
-146   * Inspect the log directory to find dead servers which need recovery work
-147   * @return A set of ServerNames which aren't running but still have WAL files left in file system
-148   */
-149  public Set<ServerName> getFailedServersFromLogFolders() {
-150    boolean retrySplitting =

      [08/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.NoneCompressor.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.NoneCompressor.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.NoneCompressor.html
new file mode 100644
index 000..9df0225
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.NoneCompressor.html
@@ -0,0 +1,476 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+<head>
+<title>Source code</title>
+</head>
+<body>
+<div class="sourceContainer">
+<pre>
+001/*
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 *     http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.regionserver.wal;
+019
+020import java.io.ByteArrayOutputStream;
+021import java.io.IOException;
+022import java.io.InputStream;
+023import java.io.OutputStream;
+024
+025import org.apache.hadoop.conf.Configuration;
+026import org.apache.hadoop.hbase.Cell;
+027import org.apache.hadoop.hbase.CellUtil;
+028import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+029import org.apache.hadoop.hbase.PrivateCellUtil;
+030import org.apache.hadoop.hbase.KeyValue;
+031import org.apache.hadoop.hbase.KeyValueUtil;
+032import org.apache.yetus.audience.InterfaceAudience;
+033import org.apache.hadoop.hbase.codec.BaseDecoder;
+034import org.apache.hadoop.hbase.codec.BaseEncoder;
+035import org.apache.hadoop.hbase.codec.Codec;
+036import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
+037import org.apache.hadoop.hbase.io.ByteBuffInputStream;
+038import org.apache.hadoop.hbase.io.ByteBufferWriter;
+039import org.apache.hadoop.hbase.io.ByteBufferWriterOutputStream;
+040import org.apache.hadoop.hbase.io.util.Dictionary;
+041import org.apache.hadoop.hbase.io.util.StreamUtils;
+042import org.apache.hadoop.hbase.nio.ByteBuff;
+043import org.apache.hadoop.hbase.util.ByteBufferUtils;
+044import org.apache.hadoop.hbase.util.Bytes;
+045import org.apache.hadoop.hbase.util.ReflectionUtils;
+046import org.apache.hadoop.io.IOUtils;
+047
+048import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+049import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
+050
+051
+052/**
+053 * Compression in this class is lifted off Compressor/KeyValueCompression.
+054 * This is a pure coincidence... they are independent and don't have to be compatible.
+055 *
+056 * This codec is used at server side for writing cells to WAL as well as for sending edits
+057 * as part of the distributed splitting process.
+058 */
+059@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC,
+060  HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG})
+061public class WALCellCodec implements Codec {
+062  /** Configuration key for the class to use when encoding cells in the WAL */
+063  public static final String WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec";
+064
+065  protected final CompressionContext compression;
+066
+067  /**
+068   * <b>All subclasses must implement a no argument constructor</b>
+069   */
+070  public WALCellCodec() {
+071    this.compression = null;
+072  }
+073
+074  /**
+075   * Default constructor - <b>all subclasses must implement a constructor with this signature </b>
+076   * if they are to be dynamically loaded from the {@link Configuration}.
+077   * @param conf configuration to configure <tt>this</tt>
+078   * @param compression compression the codec should support, can be <tt>null</tt> to indicate no
+079   *          compression
+080   */
+081  public WALCellCodec(Configuration conf, CompressionContext compression) {
+082    this.compression = compression;
+083  }
+084
+085  public static String getWALCellCodecClass(Configuration conf) {
+086    return conf.get(WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
+087  }
+088
+089  /**
+090   * Create and setup a {@link WALCellCodec} from the {@code cellCodecClsName} and
+091   * CompressionContext, if {@code cellCodecClsName} is specified.
+092   * Otherwise Cell Codec classname is read from {@link Configuration}.
+093
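
The class javadoc above spells out the plug-in contract: a custom codec is selected via the hbase.regionserver.wal.codec key and must expose either a no-arg constructor or a (Configuration, CompressionContext) constructor so it can be loaded reflectively. A minimal sketch of wiring one in; the codec class name is hypothetical:

  import org.apache.hadoop.conf.Configuration;

  public class CodecConfigExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Key from the source above. com.example.MyWalCellCodec is a placeholder;
      // it must subclass WALCellCodec and keep the (Configuration, CompressionContext)
      // constructor for reflective instantiation.
      conf.set("hbase.regionserver.wal.codec", "com.example.MyWalCellCodec");
      // The server side resolves it the same way getWALCellCodecClass does:
      System.out.println(conf.get("hbase.regionserver.wal.codec"));
    }
  }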

      [08/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.BufferNode.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.BufferNode.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.BufferNode.html
index 594ef24..17d5c40 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.BufferNode.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.BufferNode.html
@@ -170,241 +170,242 @@
 162  }
 163
 164  /**
-165   * Add a remote rpc. Be sure to check result for successful add.
+165   * Add a remote rpc.
 166   * @param key the node identifier
-167   * @return True if we successfully added the operation.
-168   */
-169  public boolean addOperationToNode(final TRemote key, RemoteProcedure rp) {
+167   */
+168  public void addOperationToNode(final TRemote key, RemoteProcedure rp)
+169      throws NullTargetServerDispatchException, NoServerDispatchException, NoNodeDispatchException {
 170    if (key == null) {
-171      // Key is remote server name. Be careful. It could have been nulled by a concurrent
-172      // ServerCrashProcedure shutting down outstanding RPC requests. See remoteCallFailed.
-173      return false;
-174    }
-175    assert key != null : "found null key for node";
-176    BufferNode node = nodeMap.get(key);
-177    if (node == null) {
-178      return false;
-179    }
-180    node.add(rp);
-181    // Check our node still in the map; could have been removed by #removeNode.
-182    return nodeMap.containsValue(node);
-183  }
-184
-185  /**
-186   * Remove a remote node
-187   * @param key the node identifier
-188   */
-189  public boolean removeNode(final TRemote key) {
-190    final BufferNode node = nodeMap.remove(key);
-191    if (node == null) return false;
-192    node.abortOperationsInQueue();
-193    return true;
-194  }
-195
-196  // ============================================================================
-197  //  Task Helpers
-198  // ============================================================================
-199  protected Future<Void> submitTask(Callable<Void> task) {
-200    return threadPool.submit(task);
-201  }
-202
-203  protected Future<Void> submitTask(Callable<Void> task, long delay, TimeUnit unit) {
-204    final FutureTask<Void> futureTask = new FutureTask(task);
-205    timeoutExecutor.add(new DelayedTask(futureTask, delay, unit));
-206    return futureTask;
-207  }
-208
-209  protected abstract void remoteDispatch(TRemote key, Set<RemoteProcedure> operations);
-210  protected abstract void abortPendingOperations(TRemote key, Set<RemoteProcedure> operations);
-211
-212  /**
-213   * Data structure with reference to remote operation.
-214   */
-215  public static abstract class RemoteOperation {
-216    private final RemoteProcedure remoteProcedure;
-217
-218    protected RemoteOperation(final RemoteProcedure remoteProcedure) {
-219      this.remoteProcedure = remoteProcedure;
-220    }
-221
-222    public RemoteProcedure getRemoteProcedure() {
-223      return remoteProcedure;
-224    }
-225  }
-226
-227  /**
-228   * Remote procedure reference.
-229   */
-230  public interface RemoteProcedure<TEnv, TRemote> {
-231    /**
-232     * For building the remote operation.
-233     */
-234    RemoteOperation remoteCallBuild(TEnv env, TRemote remote);
-235
-236    /**
-237     * Called when the executeProcedure call is failed.
-238     */
-239    void remoteCallFailed(TEnv env, TRemote remote, IOException exception);
-240
-241    /**
-242     * Called when RS tells the remote procedure is succeeded through the
-243     * {@code reportProcedureDone} method.
-244     */
-245    void remoteOperationCompleted(TEnv env);
-246
-247    /**
-248     * Called when RS tells the remote procedure is failed through the {@code reportProcedureDone}
-249     * method.
-250     */
-251    void remoteOperationFailed(TEnv env, RemoteProcedureException error);
-252  }
-253
-254  /**
-255   * Account of what procedures are running on remote node.
-256   * @param <TEnv>
-257   * @param <TRemote>
-258   */
-259  public interface RemoteNode<TEnv, TRemote> {
-260    TRemote getKey();
-261    void add(RemoteProcedure<TEnv, TRemote> operation);
-262    void dispatch();
-263  }
-264
-265  protected ArrayListMultimap<Class<?>, RemoteOperation> buildAndGroupRequestByType(final TEnv env,
-266      final TRemote remote, final Set<RemoteProcedure> remoteProcedures) {
-267    final ArrayListMultimap<Class<?>, RemoteOperation> requestByType = ArrayListMultimap.create();
-268    for (RemoteProcedure proc: remoteProcedures) {
-269      RemoteOperation operation = proc.remoteCallBuild(env, remote);
-270      requestByType.put(operation.getClass(),
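
The hunk changes addOperationToNode from a boolean that callers had to remember to check into a void method that throws a dedicated exception per failure mode: null target server, no such server, or the buffer node already removed. A sketch of the new calling convention; the exception names come from the hunk, while the surrounding variables (dispatcher, serverName, operation, procedure, env) and the assumption that the exceptions are IOException siblings are illustrative:

  // Before: if (!dispatcher.addOperationToNode(serverName, operation)) { /* infer why */ }
  // After: each failure mode is explicit.
  try {
    dispatcher.addOperationToNode(serverName, operation);
  } catch (NullTargetServerDispatchException e) {
    // Target server was nulled, e.g. by a concurrent ServerCrashProcedure.
    procedure.remoteCallFailed(env, serverName, e);
  } catch (NoServerDispatchException | NoNodeDispatchException e) {
    // Server unknown, or its buffer node was removed; retry or fail the procedure.
    procedure.remoteCallFailed(env, serverName, e);
  }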

      [08/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
      --
diff --git a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 3b08b86..80483ee 100644
--- a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -598,7 +598,7 @@
 590   * Start a minidfscluster.
 591   * @param servers How many DNs to start.
 592   * @throws Exception
-593   * @see {@link #shutdownMiniDFSCluster()}
+593   * @see #shutdownMiniDFSCluster()
 594   * @return The mini dfs cluster created.
 595   */
 596  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
@@ -613,7 +613,7 @@
 605   * datanodes will have the same host name.
 606   * @param hosts hostnames DNs to run on.
 607   * @throws Exception
-608   * @see {@link #shutdownMiniDFSCluster()}
+608   * @see #shutdownMiniDFSCluster()
 609   * @return The mini dfs cluster created.
 610   */
 611  public MiniDFSCluster startMiniDFSCluster(final String hosts[])
@@ -631,7 +631,7 @@
 623   * @param servers How many DNs to start.
 624   * @param hosts hostnames DNs to run on.
 625   * @throws Exception
-626   * @see {@link #shutdownMiniDFSCluster()}
+626   * @see #shutdownMiniDFSCluster()
 627   * @return The mini dfs cluster created.
 628   */
 629  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
@@ -775,7 +775,7 @@
 767   * Start up a minicluster of hbase, dfs, and zookeeper.
 768   * @throws Exception
 769   * @return Mini hbase cluster instance created.
-770   * @see {@link #shutdownMiniDFSCluster()}
+770   * @see #shutdownMiniDFSCluster()
 771   */
 772  public MiniHBaseCluster startMiniCluster() throws Exception {
 773    return startMiniCluster(1, 1);
@@ -785,7 +785,7 @@
 777   * Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.
 778   * @throws Exception
 779   * @return Mini hbase cluster instance created.
-780   * @see {@link #shutdownMiniDFSCluster()}
+780   * @see #shutdownMiniDFSCluster()
 781   */
 782  public MiniHBaseCluster startMiniCluster(boolean withWALDir) throws Exception {
 783    return startMiniCluster(1, 1, 1, null, null, null, false, withWALDir);
@@ -797,7 +797,7 @@
 789   * (will overwrite if dir already exists)
 790   * @throws Exception
 791   * @return Mini hbase cluster instance created.
-792   * @see {@link #shutdownMiniDFSCluster()}
+792   * @see #shutdownMiniDFSCluster()
 793   */
 794  public MiniHBaseCluster startMiniCluster(final int numSlaves, boolean create)
 795      throws Exception {
@@ -814,7 +814,7 @@
 806   * hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
 807   * bind errors.
 808   * @throws Exception
-809   * @see {@link #shutdownMiniCluster()}
+809   * @see #shutdownMiniCluster()
 810   * @return Mini hbase cluster instance created.
 811   */
 812  public MiniHBaseCluster startMiniCluster(final int numSlaves)
@@ -831,7 +831,7 @@
 823   * Start minicluster. Whether to create a new root or data dir path even if such a path
 824   * has been created earlier is decided based on flag <code>create</code>
 825   * @throws Exception
-826   * @see {@link #shutdownMiniCluster()}
+826   * @see #shutdownMiniCluster()
 827   * @return Mini hbase cluster instance created.
 828   */
 829  public MiniHBaseCluster startMiniCluster(final int numMasters,
@@ -843,7 +843,7 @@
 835  /**
 836   * start minicluster
 837   * @throws Exception
-838   * @see {@link #shutdownMiniCluster()}
+838   * @see #shutdownMiniCluster()
 839   * @return Mini hbase cluster instance created.
 840   */
 841  public MiniHBaseCluster startMiniCluster(final int numMasters,
@@ -880,7 +880,7 @@
 872   * If you start MiniDFSCluster without host names,
 873   * all instances of the datanodes will have the same host name.
 874   * @throws Exception
-875   * @see {@link #shutdownMiniCluster()}
+875   * @see #shutdownMiniCluster()
 876   * @return Mini hbase cluster instance created.
 877   */
 878  public MiniHBaseCluster startMiniCluster(final int numMasters,
@@ -922,7 +922,7 @@
 914   * @param regionserverClass The class to use as HRegionServer, or null for
 915   * default
 916   * @throws Exception
-917   * @see {@link #shutdownMiniCluster()}
+917   * @see #shutdownMiniCluster()
 918   * @return Mini hbase cluster instance created.
 919   */
 920  public MiniHBaseCluster startMiniCluster(final int numMasters,
@@ -1011,7 +1011,7 @@
 1003   * @return Reference to the hbase mini hbase cluster.
 1004   * @throws IOException
 1005   * @throws InterruptedException
-1006   * @see {@link #startMiniCluster()}
+1006   * @see #startMiniCluster()
 1007   */
 1008  public MiniHBaseCluster
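
These hunks replace the malformed @see {@link #...} javadoc (a tag nested inside a tag, which javadoc rejects) with plain @see #... references. As a usage aside, the start/shutdown pairing those tags cross-reference looks like this in a test; a minimal sketch using the documented methods:

  import org.apache.hadoop.hbase.HBaseTestingUtility;

  public class MiniClusterExample {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtility util = new HBaseTestingUtility();
      util.startMiniCluster();      // boots zookeeper, dfs and hbase in-process
      try {
        // ... run assertions against util.getConnection() ...
      } finally {
        util.shutdownMiniCluster(); // the counterpart the @see tags point at
      }
    }
  }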
      

      [08/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
index a242321..b2c1cc6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.ByteArrayComparator.html
@@ -1557,7 +1557,7 @@
 1549      long lw = theUnsafe.getLong(buffer1, offset1Adj + i);
 1550      long rw = theUnsafe.getLong(buffer2, offset2Adj + i);
 1551      if (lw != rw) {
-1552        if (!UnsafeAccess.littleEndian) {
+1552        if (!UnsafeAccess.LITTLE_ENDIAN) {
 1553          return ((lw + Long.MIN_VALUE) < (rw + Long.MIN_VALUE)) ? -1 : 1;
 1554        }
 1555

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.Comparer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.Comparer.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.Comparer.html
index a242321..b2c1cc6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.Comparer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.Comparer.html
@@ -1557,7 +1557,7 @@
 1549      long lw = theUnsafe.getLong(buffer1, offset1Adj + i);
 1550      long rw = theUnsafe.getLong(buffer2, offset2Adj + i);
 1551      if (lw != rw) {
-1552        if (!UnsafeAccess.littleEndian) {
+1552        if (!UnsafeAccess.LITTLE_ENDIAN) {
 1553          return ((lw + Long.MIN_VALUE) < (rw + Long.MIN_VALUE)) ? -1 : 1;
 1554        }
 1555

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.PureJavaComparer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.PureJavaComparer.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.PureJavaComparer.html
index a242321..b2c1cc6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.PureJavaComparer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.PureJavaComparer.html
@@ -1557,7 +1557,7 @@
 1549      long lw = theUnsafe.getLong(buffer1, offset1Adj + i);
 1550      long rw = theUnsafe.getLong(buffer2, offset2Adj + i);
 1551      if (lw != rw) {
-1552        if (!UnsafeAccess.littleEndian) {
+1552        if (!UnsafeAccess.LITTLE_ENDIAN) {
 1553          return ((lw + Long.MIN_VALUE) < (rw + Long.MIN_VALUE)) ? -1 : 1;
 1554        }
 1555

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
index a242321..b2c1cc6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
@@ -1557,7 +1557,7 @@
 1549      long lw = theUnsafe.getLong(buffer1, offset1Adj + i);
 1550      long rw = theUnsafe.getLong(buffer2, offset2Adj + i);
 1551      if (lw != rw) {
-1552        if (!UnsafeAccess.littleEndian) {
+1552        if (!UnsafeAccess.LITTLE_ENDIAN) {
 1553          return ((lw + Long.MIN_VALUE) < (rw + Long.MIN_VALUE)) ? -1 : 1;
 1554        }
 1555

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
index a242321..b2c1cc6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
@@ -1557,7 +1557,7 @@
 1549      long lw = theUnsafe.getLong(buffer1, offset1Adj + i);
 1550      long rw =
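
The repeated hunk simply renames UnsafeAccess.littleEndian to the constant-style UnsafeAccess.LITTLE_ENDIAN. The surrounding code is the interesting part: eight bytes are compared at a time as longs, and since Java longs are signed, the Long.MIN_VALUE bias turns a signed comparison into an unsigned one (on little-endian hosts the bytes are additionally reversed first). A small self-contained illustration of the bias trick:

  public class UnsignedCompareDemo {
    // Unsigned comparison of two longs via the Long.MIN_VALUE bias used above.
    static int compareUnsigned(long lw, long rw) {
      if (lw == rw) return 0;
      return ((lw + Long.MIN_VALUE) < (rw + Long.MIN_VALUE)) ? -1 : 1;
    }

    public static void main(String[] args) {
      // -1L is 0xFFFFFFFFFFFFFFFF: smallest as signed, largest as unsigned bytes.
      System.out.println(compareUnsigned(-1L, 1L));           // 1 (0xFF... sorts after 0x...01)
      System.out.println(Long.compareUnsigned(-1L, 1L) > 0);  // true, same ordering
    }
  }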
      

      [08/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
      --
      diff --git 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
       
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
      index 3f8844b..cdb9398 100644
      --- 
      a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
      +++ 
      b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncTableTest.html
      @@ -140,2712 +140,2713 @@
       132public class PerformanceEvaluation 
      extends Configured implements Tool {
       133  static final String RANDOM_SEEK_SCAN = 
      "randomSeekScan";
       134  static final String RANDOM_READ = 
      "randomRead";
      -135  private static final Logger LOG = 
      LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
      -136  private static final ObjectMapper 
      MAPPER = new ObjectMapper();
      -137  static {
      -138
      MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
      -139  }
      -140
      -141  public static final String TABLE_NAME = 
      "TestTable";
      -142  public static final String 
      FAMILY_NAME_BASE = "info";
      -143  public static final byte[] FAMILY_ZERO 
      = Bytes.toBytes("info0");
      -144  public static final byte[] COLUMN_ZERO 
      = Bytes.toBytes("" + 0);
      -145  public static final int 
      DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 26;
-147
-148  private static final int ONE_GB = 1024 * 1024 * 1000;
-149  private static final int DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this configurable
-151  private static final int TAG_LENGTH = 256;
-152  private static final DecimalFormat FMT = new DecimalFormat("0.##");
-153  private static final MathContext CXT = MathContext.DECIMAL64;
-154  private static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions DEFAULT_OPTS = new TestOptions();
-157
-158  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
-159  private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
-160
-161  static {
-162    addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163      "Run async random read test");
-164    addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165      "Run async random write test");
-166    addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167      "Run async sequential read test");
-168    addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169      "Run async sequential write test");
-170    addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171      "Run async scan test (read every row)");
-172    addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173      "Run random read test");
-174    addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175      "Run random seek and scan 100 test");
-176    addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177      "Run random seek scan with both start and stop row (max 10 rows)");
-178    addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179      "Run random seek scan with both start and stop row (max 100 rows)");
-180    addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181      "Run random seek scan with both start and stop row (max 1000 rows)");
-182    addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183      "Run random seek scan with both start and stop row (max 1 rows)");
-184    addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185      "Run random write test");
-186    addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187      "Run sequential read test");
-188    addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189      "Run sequential write test");
-190    addCommandDescriptor(ScanTest.class, "scan",
-191      "Run scan test (read every row)");
-192    addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193      "Run scan test using a filter to find a specific row based on its value " +
-194      "(make sure to use --rows=20)");
-195    addCommandDescriptor(IncrementTest.class, "increment",
-196      "Increment on each row; clients overlap on keyspace so some concurrent operations");
-197    addCommandDescriptor(AppendTest.class, "append",
-198      "Append on each row; clients overlap on keyspace so some concurrent operations");
-199    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200      "CheckAndMutate on each row; clients overlap on keyspace so some concurrent operations");
-201
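The static block above is just a name-to-test registry: each addCommandDescriptor call ties a command-line name to a test class and its help text, and the TreeMap keeps usage output sorted by name. A minimal sketch of the same pattern, with CmdDescriptor reduced to an illustrative value class (the real descriptor carries more wiring):

    import java.util.Map;
    import java.util.TreeMap;

    public class CommandRegistrySketch {
      // Illustrative stand-in for PerformanceEvaluation.CmdDescriptor.
      static final class CmdDescriptor {
        final Class<?> cmdClass;
        final String name;
        final String description;

        CmdDescriptor(Class<?> cmdClass, String name, String description) {
          this.cmdClass = cmdClass;
          this.name = name;
          this.description = description;
        }
      }

      // TreeMap so that iterating COMMANDS for usage output prints names in order.
      private static final Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();

      static void addCommandDescriptor(Class<?> cmdClass, String name, String description) {
        COMMANDS.put(name, new CmdDescriptor(cmdClass, name, description));
      }

      public static void main(String[] args) {
        // Object.class is a placeholder where the real registry passes a test class.
        addCommandDescriptor(Object.class, "randomRead", "Run random read test");
        CmdDescriptor cmd = COMMANDS.get("randomRead"); // lookup by CLI name
        System.out.println(cmd.name + ": " + cmd.description);
      }
    }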
      

      [08/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
index 4c42811..0bc3ddb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.html
      @@ -563,381 +563,390 @@
 555    // If this is first time we've been put off, then emit a log message.
 556    if (fqe.getRequeueCount() <= 0) {
 557      // Note: We don't impose blockingStoreFiles constraint on meta regions
-558      LOG.warn("Region " + region.getRegionInfo().getEncodedName() + " has too many " +
-559        "store files; delaying flush up to " + this.blockingWaitTime + "ms");
-560      if (!this.server.compactSplitThread.requestSplit(region)) {
-561        try {
-562          this.server.compactSplitThread.requestSystemCompaction(region,
-563            Thread.currentThread().getName());
-564        } catch (IOException e) {
-565          e = e instanceof RemoteException ?
-566            ((RemoteException)e).unwrapRemoteException() : e;
-567          LOG.error("Cache flush failed for region " +
-568            Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e);
-569        }
-570      }
-571    }
-572
-573    // Put back on the queue.  Have it come back out of the queue
-574    // after a delay of this.blockingWaitTime / 100 ms.
-575    this.flushQueue.add(fqe.requeue(this.blockingWaitTime / 100));
-576    // Tell a lie, it's not flushed but it's ok
-577    return true;
-578      }
-579    }
-580    return flushRegion(region, false, fqe.isForceFlushAllStores(), fqe.getTracker());
-581  }
-582
-583  /**
-584   * Flush a region.
-585   * @param region Region to flush.
-586   * @param emergencyFlush Set if we are being force flushed. If true the region
-587   * needs to be removed from the flush queue. If false, when we were called
-588   * from the main flusher run loop and we got the entry to flush by calling
-589   * poll on the flush queue (which removed it).
-590   * @param forceFlushAllStores whether we want to flush all stores.
-591   * @return true if the region was successfully flushed, false otherwise. If
-592   * false, there will be accompanying log messages explaining why the region was
-593   * not flushed.
-594   */
-595  private boolean flushRegion(HRegion region, boolean emergencyFlush, boolean forceFlushAllStores,
-596      FlushLifeCycleTracker tracker) {
-597    synchronized (this.regionsInQueue) {
-598      FlushRegionEntry fqe = this.regionsInQueue.remove(region);
-599      // Use the start time of the FlushRegionEntry if available
-600      if (fqe != null && emergencyFlush) {
-601        // Need to remove region from delay queue. When NOT an
-602        // emergencyFlush, then item was removed via a flushQueue.poll.
-603        flushQueue.remove(fqe);
-604      }
-605    }
-606
-607    tracker.beforeExecution();
-608    lock.readLock().lock();
-609    try {
-610      notifyFlushRequest(region, emergencyFlush);
-611      FlushResult flushResult = region.flushcache(forceFlushAllStores, false, tracker);
-612      boolean shouldCompact = flushResult.isCompactionNeeded();
-613      // We just want to check the size
-614      boolean shouldSplit = region.checkSplit() != null;
-615      if (shouldSplit) {
-616        this.server.compactSplitThread.requestSplit(region);
-617      } else if (shouldCompact) {
-618        server.compactSplitThread.requestSystemCompaction(region, Thread.currentThread().getName());
-619      }
-620    } catch (DroppedSnapshotException ex) {
-621      // Cache flush can fail in a few places. If it fails in a critical
-622      // section, we get a DroppedSnapshotException and a replay of wal
-623      // is required. Currently the only way to do this is a restart of
-624      // the server. Abort because hdfs is probably bad (HBASE-644 is a case
-625      // where hdfs was bad but passed the hdfs check).
-626      server.abort("Replay of WAL required. Forcing server shutdown", ex);
-627      return false;
-628    } catch (IOException ex) {
-629      ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
-630      LOG.error(
-631        "Cache flush failed"
-632          + (region != null ? (" for region " +
-633              Bytes.toStringBinary(region.getRegionInfo().getRegionName()))
-634            : ""), ex);
-635      if (!server.checkFileSystem()) {
-636        return false;
-637      }
-638    } finally {
-639      lock.readLock().unlock();
-640
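The requeue path above is a simple delay-queue backoff: a region with too many store files is not flushed right away; its entry goes back on the flush queue with a delay of blockingWaitTime / 100 ms and is retried until the blocking window passes. A toy model of just that mechanic using java.util.concurrent.DelayQueue (RetryEntry and the 90-second default are illustrative assumptions, not the HBase classes):

    import java.util.concurrent.DelayQueue;
    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    public class FlushBackoffSketch {
      // Assumed default of hbase.hstore.blockingWaitTime, in milliseconds.
      static final long BLOCKING_WAIT_TIME = 90_000;

      static final class RetryEntry implements Delayed {
        private long wakeUpAt;
        private int requeueCount;

        // Mirror of FlushRegionEntry.requeue: set the next wake-up time and
        // count how often this entry has been put off.
        RetryEntry requeue(long delayMs) {
          this.wakeUpAt = System.currentTimeMillis() + delayMs;
          this.requeueCount++;
          return this;
        }

        int getRequeueCount() { return requeueCount; }

        @Override public long getDelay(TimeUnit unit) {
          return unit.convert(wakeUpAt - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
        }

        @Override public int compareTo(Delayed other) {
          return Long.compare(getDelay(TimeUnit.MILLISECONDS), other.getDelay(TimeUnit.MILLISECONDS));
        }
      }

      public static void main(String[] args) throws InterruptedException {
        DelayQueue<RetryEntry> flushQueue = new DelayQueue<>();
        RetryEntry fqe = new RetryEntry();
        if (fqe.getRequeueCount() <= 0) {
          // First deferral: come back out of the queue after blockingWaitTime / 100 ms.
          flushQueue.add(fqe.requeue(BLOCKING_WAIT_TIME / 100));
        }
        // take() blocks until the delay expires, like the flusher's poll loop.
        System.out.println("entry retried after requeue #" + flushQueue.take().getRequeueCount());
      }
    }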

      [08/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
      --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
index 2510283..418c60c 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomSeekScanTest.html
      @@ -77,77 +77,77 @@
 069import org.apache.hadoop.hbase.client.RowMutations;
 070import org.apache.hadoop.hbase.client.Scan;
 071import org.apache.hadoop.hbase.client.Table;
-072import org.apache.hadoop.hbase.filter.BinaryComparator;
-073import org.apache.hadoop.hbase.filter.Filter;
-074import org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import org.apache.hadoop.hbase.filter.FilterList;
-076import org.apache.hadoop.hbase.filter.PageFilter;
-077import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import org.apache.hadoop.hbase.io.compress.Compression;
-080import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import org.apache.hadoop.hbase.regionserver.BloomType;
-084import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import org.apache.hadoop.hbase.trace.TraceUtil;
-088import org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import org.apache.hadoop.hbase.util.Bytes;
-090import org.apache.hadoop.hbase.util.Hash;
-091import org.apache.hadoop.hbase.util.MurmurHash;
-092import org.apache.hadoop.hbase.util.Pair;
-093import org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import org.apache.hadoop.mapreduce.Mapper;
-098import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import org.apache.hadoop.util.ToolRunner;
-103import org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import org.apache.htrace.core.TraceScope;
-106import org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase performance and scalability.  Runs a HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the
-121 * <a href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132  static final String RANDOM_READ = "randomRead";
-133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper MAPPER = new ObjectMapper();
-135  static {
-136    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = "TestTable";
-140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import org.apache.hadoop.hbase.filter.Filter;
+075import
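Only imports move in this hunk, but the class javadoc it surrounds describes how the tool is launched, so a rough launcher sketch may help. It assumes the Configuration-taking constructor PerformanceEvaluation has historically exposed; the --nomapred flag comes from the javadoc above, while the rest of the argument list ("randomRead", 4 clients) follows the usual PE command shape and should be checked against --help:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.PerformanceEvaluation;
    import org.apache.hadoop.util.ToolRunner;

    public class PeLauncherSketch {
      public static void main(String[] args) throws Exception {
        // Multithreaded (non-MapReduce) randomRead run with 4 client threads.
        int exit = ToolRunner.run(
            new PerformanceEvaluation(HBaseConfiguration.create()),
            new String[] { "--nomapred", "randomRead", "4" });
        System.exit(exit);
      }
    }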
      

      [08/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.TestEndpoint.html
      --
diff --git a/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.TestEndpoint.html b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.TestEndpoint.html
new file mode 100644
index 000..e1c873a
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationEndpoint.TestEndpoint.html
      @@ -0,0 +1,475 @@
+TestSerialReplicationEndpoint.TestEndpoint (Apache HBase 3.0.0-SNAPSHOT Test API)
+org.apache.hadoop.hbase.replication.regionserver
+Class TestSerialReplicationEndpoint.TestEndpoint
+
+java.lang.Object
+  org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService
+    org.apache.hadoop.hbase.replication.BaseReplicationEndpoint
+      org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint
+        org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint
+          org.apache.hadoop.hbase.replication.regionserver.TestSerialReplicationEndpoint.TestEndpoint
+
+All Implemented Interfaces:
+org.apache.hadoop.hbase.Abortable, org.apache.hadoop.hbase.replication.ReplicationEndpoint, org.apache.hadoop.hbase.replication.ReplicationPeerConfigListener, org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service
+
+Enclosing class:
+TestSerialReplicationEndpoint
+
+public static class TestSerialReplicationEndpoint.TestEndpoint
+extends org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint
+
+Nested Class Summary
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint
+org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint.PeerRegionServerListener
+
+Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.replication.ReplicationEndpoint
+org.apache.hadoop.hbase.replication.ReplicationEndpoint.Context, org.apache.hadoop.hbase.replication.ReplicationEndpoint.ReplicateContext
+
+Nested classes/interfaces inherited from interface org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service
+org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service.Listener, org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service.State
+
+Field Summary
+
+Fields
+Modifier and Type / Field and Description
+private static BlockingQueue<org.apache.hadoop.hbase.wal.WAL.Entry>
+entryQueue
+
+Fields inherited from class org.apache.hadoop.hbase.replication.BaseReplicationEndpoint
+ctx, REPLICATION_WALENTRYFILTER_CONFIG_KEY
+
+Constructor Summary
+
+Constructors
+Constructor and Description
+TestEndpoint()
+
+Method Summary
+
+All Methods / Static Methods / Instance Methods / Concrete Methods
+Modifier and Type / Method and Description
+boolean
+canReplicateToSameCluster()
+
+protected Callable<Integer>
      

      [08/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index 79bf967..c8b113b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
      @@ -115,3514 +115,3517 @@
 107import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 108import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
 109import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-110import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-111import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
-112import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
-113import org.apache.hadoop.hbase.master.locking.LockManager;
-114import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-115import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
-116import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
-117import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
-118import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerFactory;
-119import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
-120import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
-121import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
-122import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-123import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
-124import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-125import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-126import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-127import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-128import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
-129import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-130import org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure;
-131import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
-132import org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
-133import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
-134import org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
-135import org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure;
-136import org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
-137import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
-138import org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
-139import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-140import org.apache.hadoop.hbase.mob.MobConstants;
-141import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
-142import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-143import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-144import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
-145import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
-146import org.apache.hadoop.hbase.procedure2.LockedResource;
-147import org.apache.hadoop.hbase.procedure2.Procedure;
-148import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-149import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-150import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-151import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
-152import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-153import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-154import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
-155import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-156import org.apache.hadoop.hbase.quotas.QuotaUtil;
-157import org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore;
-158import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifier;
-159import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory;
-160import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
-161import org.apache.hadoop.hbase.regionserver.HRegionServer;
-162import org.apache.hadoop.hbase.regionserver.HStore;
-163import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-164import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-165import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-166import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
-167import
      

      [08/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/client/Get.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Get.html b/devapidocs/org/apache/hadoop/hbase/client/Get.html
index a142212..de5dfd4 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Get.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Get.html
      @@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":42,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":42,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":42,"i35":42,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":42,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":42,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":42,"i35":42,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":42,"i42":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -147,7 +147,7 @@ implements setTimeRange.
 
 To only retrieve columns with a specific timestamp, execute
- setTimestamp.
+ setTimestamp.
 
 To limit the number of versions of each column to be returned, execute
 setMaxVersions.
@@ -530,11 +530,20 @@ implements 
 
 Get
-setTimeStamp(long timestamp)
+setTimestamp(long timestamp)
 Get versions of columns with the specified timestamp.
 
+Get
+setTimeStamp(long timestamp)
+Deprecated.
+As of release 2.0.0, this will be removed in HBase 3.0.0.
+ Use setTimestamp(long) instead
+
 Map<String,Object>
 toMap(int maxCols)
 Compile the details beyond the scope of getFingerprint (row, columns,
@@ -862,8 +871,11 @@ public
 
 setTimeStamp
-public Get setTimeStamp(long timestamp)
-                 throws IOException
+@Deprecated
+public Get setTimeStamp(long timestamp)
+                 throws IOException
+Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0.
+ Use setTimestamp(long) instead
 Get versions of columns with the specified timestamp.
 
 Parameters:
@@ -875,13 +887,29 @@ public
 
 
+
+setTimestamp
+public Get setTimestamp(long timestamp)
+Get versions of columns with the specified timestamp.
+
+Parameters:
+timestamp - version timestamp
+Returns:
+this for invocation chaining
+
+
 
 
 setColumnFamilyTimeRange
-public Get setColumnFamilyTimeRange(byte[] cf,
+public Get setColumnFamilyTimeRange(byte[] cf,
                                     long minStamp,
                                     long maxStamp)
 Description copied from class: Query
@@ -909,7 +937,7 @@ public
 setMaxVersions
 @Deprecated
-public Get setMaxVersions()
+public Get setMaxVersions()
 Deprecated. It is easy to misunderstand with column family's max versions, so use
   readAllVersions() instead.
 Get all available versions.
@@ -926,7 +954,7 @@ public
 setMaxVersions
 @Deprecated
-public Get setMaxVersions(int maxVersions)
+public Get setMaxVersions(int maxVersions)
                    throws IOException
 Deprecated. It is easy to misunderstand with column family's max versions, so use
   readVersions(int) instead.
@@ -947,7 +975,7 @@ public
 
 readAllVersions
-public Get readAllVersions()
+public Get readAllVersions()
 Get all available versions.
 
 Returns:
@@ -961,7 +989,7 @@ public
 
 readVersions
-public Get readVersions(int versions)
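The hunks above document the 2.0.0 change: setTimeStamp(long), which also declares IOException, is deprecated in favor of setTimestamp(long), which returns this for invocation chaining and goes away along with the old name in 3.0.0. A minimal before/after using only the API shown on this page:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.util.Bytes;

    public class GetTimestampMigration {
      public static void main(String[] args) throws IOException {
        long ts = System.currentTimeMillis();

        // Before: deprecated as of 2.0.0, removed in 3.0.0, throws IOException.
        Get legacy = new Get(Bytes.toBytes("row1"));
        legacy.setTimeStamp(ts);

        // After: same semantics, chainable, no checked exception.
        Get current = new Get(Bytes.toBytes("row1")).setTimestamp(ts);
        System.out.println(current.getTimeRange());
      }
    }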
      

      [08/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
index b58c054..d30ee5e 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
      @@ -166,27 +166,27 @@
       
       
 DataBlockEncoder.EncodedSeeker
-RowIndexCodecV1.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
 
 DataBlockEncoder.EncodedSeeker
-CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
 
 DataBlockEncoder.EncodedSeeker
-DiffKeyDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+FastDiffDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
 
 DataBlockEncoder.EncodedSeeker
-FastDiffDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+DiffKeyDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
 
 DataBlockEncoder.EncodedSeeker
-PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
+RowIndexCodecV1.createSeeker(CellComparator comparator, HFileBlockDecodingContext decodingCtx)
 
@@ -198,13 +198,13 @@
 
 ByteBuffer
-RowIndexCodecV1.decodeKeyValues(DataInputStream source, HFileBlockDecodingContext decodingCtx)
+BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source, HFileBlockDecodingContext blkDecodingCtx)
 
 ByteBuffer
-BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source, HFileBlockDecodingContext blkDecodingCtx)
+RowIndexCodecV1.decodeKeyValues(DataInputStream source, HFileBlockDecodingContext decodingCtx)
 
@@ -279,18 +279,18 @@
 
 HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
-
-
-HFileBlockDecodingContext
 NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)
 
-
+
 HFileBlockDecodingContext
 HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
 create an encoder-specific decoding context for reading.
 
+
+HFileBlockDecodingContext
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
+
 
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
index 468913a..cbdb3c8 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
@@ -116,36 +116,36 @@
   HFileBlockDefaultDecodingContext decodingCtx)
 
-protected ByteBuffer
-CopyKeyDataBlockEncoder.internalDecodeKeyValues(DataInputStream source,
+protected abstract ByteBuffer
+BufferedDataBlockEncoder.internalDecodeKeyValues(DataInputStream source,
         

      [08/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
index 338b7a4..49b5557 100644
--- a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
+++ b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
      @@ -144,14 +144,16 @@
       
       
       
-static HTableDescriptor
-HTableDescriptor.parseFrom(byte[] bytes)
+static HColumnDescriptor
+HColumnDescriptor.parseFrom(byte[] bytes)
 Deprecated.
 
-static ClusterId
-ClusterId.parseFrom(byte[] bytes)
+static HTableDescriptor
+HTableDescriptor.parseFrom(byte[] bytes)
+Deprecated.
+
 
 static HRegionInfo
@@ -163,10 +165,8 @@
 
-static HColumnDescriptor
-HColumnDescriptor.parseFrom(byte[] bytes)
-Deprecated.
-
+static ClusterId
+ClusterId.parseFrom(byte[] bytes)
 
 static SplitLogTask
@@ -220,17 +220,17 @@
 TableDescriptorBuilder.ModifyableTableDescriptor.parseFrom(byte[] bytes)
 
-static RegionInfo
-RegionInfo.parseFrom(byte[] bytes)
-
-
 static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.parseFrom(byte[] pbBytes)
 
-
+
 private static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.parseFrom(byte[] bytes)
 
+
+static RegionInfo
+RegionInfo.parseFrom(byte[] bytes)
+
 
 static RegionInfo
 RegionInfo.parseFrom(byte[] bytes,
@@ -305,151 +305,151 @@
 ByteArrayComparable.parseFrom(byte[] pbBytes)
 
-static SingleColumnValueExcludeFilter
-SingleColumnValueExcludeFilter.parseFrom(byte[] pbBytes)
+static ColumnPrefixFilter
+ColumnPrefixFilter.parseFrom(byte[] pbBytes)
 
-static ValueFilter
-ValueFilter.parseFrom(byte[] pbBytes)
+static ColumnCountGetFilter
+ColumnCountGetFilter.parseFrom(byte[] pbBytes)
 
-static SkipFilter
-SkipFilter.parseFrom(byte[] pbBytes)
+static RowFilter
+RowFilter.parseFrom(byte[] pbBytes)
 
-static FamilyFilter
-FamilyFilter.parseFrom(byte[] pbBytes)
+static FuzzyRowFilter
+FuzzyRowFilter.parseFrom(byte[] pbBytes)
 
-static BinaryPrefixComparator
-BinaryPrefixComparator.parseFrom(byte[] pbBytes)
+static BinaryComparator
+BinaryComparator.parseFrom(byte[] pbBytes)
 
-static NullComparator
-NullComparator.parseFrom(byte[] pbBytes)
+static RegexStringComparator
+RegexStringComparator.parseFrom(byte[] pbBytes)
 
-static BigDecimalComparator
-BigDecimalComparator.parseFrom(byte[] pbBytes)
+static Filter
+Filter.parseFrom(byte[] pbBytes)
+Concrete implementers can signal a failure condition in their code by throwing an
+ IOException.
+
 
-static ColumnPrefixFilter
-ColumnPrefixFilter.parseFrom(byte[] pbBytes)
+static RandomRowFilter
+RandomRowFilter.parseFrom(byte[] pbBytes)
 
-static PageFilter
-PageFilter.parseFrom(byte[] pbBytes)
+static FirstKeyOnlyFilter
+FirstKeyOnlyFilter.parseFrom(byte[] pbBytes)
 
-static BitComparator
-BitComparator.parseFrom(byte[] pbBytes)
+static SkipFilter
+SkipFilter.parseFrom(byte[] pbBytes)
 
-static RowFilter
-RowFilter.parseFrom(byte[] pbBytes)
+static BinaryPrefixComparator
+BinaryPrefixComparator.parseFrom(byte[] pbBytes)
 
-static ColumnRangeFilter
-ColumnRangeFilter.parseFrom(byte[] pbBytes)
+static TimestampsFilter
+TimestampsFilter.parseFrom(byte[] pbBytes)
 
-static ColumnCountGetFilter
-ColumnCountGetFilter.parseFrom(byte[] pbBytes)
+static ValueFilter
+ValueFilter.parseFrom(byte[] pbBytes)
 
-static SubstringComparator
-SubstringComparator.parseFrom(byte[] pbBytes)
+static KeyOnlyFilter
+KeyOnlyFilter.parseFrom(byte[] pbBytes)
 
-static MultipleColumnPrefixFilter
-MultipleColumnPrefixFilter.parseFrom(byte[] pbBytes)
+static FamilyFilter
+FamilyFilter.parseFrom(byte[] pbBytes)
 
-static ColumnPaginationFilter
-ColumnPaginationFilter.parseFrom(byte[] pbBytes)
+static QualifierFilter
+QualifierFilter.parseFrom(byte[] pbBytes)
 
-static DependentColumnFilter
-DependentColumnFilter.parseFrom(byte[] pbBytes)
+static FilterList
+FilterList.parseFrom(byte[] pbBytes)
 
-static BinaryComparator
-BinaryComparator.parseFrom(byte[] pbBytes)
+static BigDecimalComparator
+BigDecimalComparator.parseFrom(byte[] pbBytes)
 
-static InclusiveStopFilter
-InclusiveStopFilter.parseFrom(byte[] pbBytes)
+static ColumnRangeFilter
+ColumnRangeFilter.parseFrom(byte[] pbBytes)
 
-static KeyOnlyFilter
-KeyOnlyFilter.parseFrom(byte[] pbBytes)
+static ColumnPaginationFilter
+ColumnPaginationFilter.parseFrom(byte[] pbBytes)
 
-static MultiRowRangeFilter
-MultiRowRangeFilter.parseFrom(byte[] pbBytes)
+static SubstringComparator
+SubstringComparator.parseFrom(byte[] pbBytes)
 
-static Filter
-Filter.parseFrom(byte[] pbBytes)
-Concrete implementers can signal a
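All of these rows document the same convention: each concrete Filter and ByteArrayComparable exposes a static parseFrom(byte[]) used to rebuild the object on the server side, signalling bad input with DeserializationException. A sketch of a custom class following that convention (MarkerFilter and its one-byte wire format are invented for illustration; a real custom filter would extend org.apache.hadoop.hbase.filter.FilterBase and implement its filtering methods, omitted here):

    import org.apache.hadoop.hbase.exceptions.DeserializationException;

    public class MarkerFilter {
      private final byte marker;

      public MarkerFilter(byte marker) {
        this.marker = marker;
      }

      // Serialized form written by the client and shipped with the Scan/Get.
      public byte[] toByteArray() {
        return new byte[] { marker };
      }

      // The convention from the table above: a static parseFrom(byte[]) the
      // server calls to rebuild the filter, throwing on malformed input.
      public static MarkerFilter parseFrom(byte[] pbBytes) throws DeserializationException {
        if (pbBytes == null || pbBytes.length != 1) {
          throw new DeserializationException("expected exactly one byte, got "
              + (pbBytes == null ? "null" : pbBytes.length));
        }
        return new MarkerFilter(pbBytes[0]);
      }
    }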

      [08/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.html
index 9971079..03c8b000 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/FSHLog.html
      @@ -49,1067 +49,1082 @@
 041import org.apache.hadoop.fs.Path;
 042import org.apache.hadoop.hbase.HConstants;
 043import org.apache.hadoop.hbase.client.RegionInfo;
-044import org.apache.hadoop.hbase.trace.TraceUtil;
-045import org.apache.hadoop.hbase.util.Bytes;
-046import org.apache.hadoop.hbase.util.ClassSize;
-047import org.apache.hadoop.hbase.util.FSUtils;
-048import org.apache.hadoop.hbase.util.HasThread;
-049import org.apache.hadoop.hbase.util.Threads;
-050import org.apache.hadoop.hbase.wal.FSHLogProvider;
-051import org.apache.hadoop.hbase.wal.WALEdit;
-052import org.apache.hadoop.hbase.wal.WALKeyImpl;
-053import org.apache.hadoop.hbase.wal.WALProvider.Writer;
-054import org.apache.hadoop.hdfs.DFSOutputStream;
-055import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-056import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-057import org.apache.htrace.core.TraceScope;
-058import org.apache.yetus.audience.InterfaceAudience;
-059import org.slf4j.Logger;
-060import org.slf4j.LoggerFactory;
-061import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-062
-063/**
-064 * The default implementation of FSWAL.
-065 */
-066@InterfaceAudience.Private
-067public class FSHLog extends AbstractFSWAL<Writer> {
-068  // IMPLEMENTATION NOTES:
-069  //
-070  // At the core is a ring buffer. Our ring buffer is the LMAX Disruptor. It tries to
-071  // minimize synchronizations and volatile writes when multiple contending threads as is the case
-072  // here appending and syncing on a single WAL. The Disruptor is configured to handle multiple
-073  // producers but it has one consumer only (the producers in HBase are IPC Handlers calling append
-074  // and then sync). The single consumer/writer pulls the appends and syncs off the ring buffer.
-075  // When a handler calls sync, it is given back a future. The producer 'blocks' on the future so
-076  // it does not return until the sync completes. The future is passed over the ring buffer from
-077  // the producer/handler to the consumer thread where it does its best to batch up the producer
-078  // syncs so one WAL sync actually spans multiple producer sync invocations. How well the
-079  // batching works depends on the write rate; i.e. we tend to batch more in times of
-080  // high writes/syncs.
-081  //
-082  // Calls to append now also wait until the append has been done on the consumer side of the
-083  // disruptor. We used to not wait but it makes the implementation easier to grok if we have
-084  // the region edit/sequence id after the append returns.
-085  //
-086  // TODO: Handlers need to coordinate appending AND syncing. Can we have the threads contend
-087  // once only? Probably hard given syncs take way longer than an append.
-088  //
-089  // The consumer threads pass the syncs off to multiple syncing threads in a round robin fashion
-090  // to ensure we keep up back-to-back FS sync calls (FS sync calls are the long poll writing the
-091  // WAL). The consumer thread passes the futures to the sync threads for it to complete
-092  // the futures when done.
-093  //
-094  // The 'sequence' in the below is the sequence of the append/sync on the ringbuffer. It
-095  // acts as a sort-of transaction id. It is always incrementing.
-096  //
-097  // The RingBufferEventHandler class hosts the ring buffer consuming code. The threads that
-098  // do the actual FS sync are implementations of SyncRunner. SafePointZigZagLatch is a
-099  // synchronization class used to halt the consumer at a safe point -- just after all outstanding
-100  // syncs and appends have completed -- so the log roller can swap the WAL out under it.
-101  //
-102  // We use ring buffer sequence as txid of FSWALEntry and SyncFuture.
-103  private static final Logger LOG = LoggerFactory.getLogger(FSHLog.class);
-104
-105  /**
-106   * The nexus at which all incoming handlers meet. Does appends and sync with an ordering. Appends
-107   * and syncs are each put on the ring which means handlers need to smash up against the ring twice
-108   * (can we make it once only? ... maybe not since time to append is so different from time to sync
-109   * and sometimes we don't want to sync or we want to async the sync). The ring is where we make
-110   * sure of our ordering and it is also where we do batching up of handler sync calls.
-111   */
-112  private final
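The implementation notes above boil down to one pattern: many producers enqueue sync requests as futures and block, a single consumer batches whatever has piled up, and one filesystem sync completes the whole batch. A toy model of that batching, substituting a plain BlockingQueue for the LMAX Disruptor and a no-op for the actual FS sync:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.CompletableFuture;

    public class SyncBatchingSketch {
      private static final BlockingQueue<CompletableFuture<Void>> RING =
          new ArrayBlockingQueue<>(1024);

      // Producer side: the equivalent of an IPC handler calling sync() and
      // blocking on the returned future until the consumer has synced.
      static void sync() throws Exception {
        CompletableFuture<Void> f = new CompletableFuture<>();
        RING.put(f);
        f.get(); // does not return until the sync completes
      }

      public static void main(String[] args) throws Exception {
        Thread consumer = new Thread(() -> {
          try {
            while (true) {
              List<CompletableFuture<Void>> batch = new ArrayList<>();
              batch.add(RING.take()); // wait for at least one request
              RING.drainTo(batch);    // then batch up everything else pending
              // one simulated "FS sync" spans every producer sync in the batch
              batch.forEach(f -> f.complete(null));
            }
          } catch (InterruptedException ignored) { }
        });
        consumer.setDaemon(true);
        consumer.start();

        for (int i = 0; i < 4; i++) {
          new Thread(() -> {
            try { sync(); System.out.println("synced"); } catch (Exception ignored) { }
          }).start();
        }
        Thread.sleep(500); // let the demo threads finish
      }
    }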
      

      [08/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
index 3bc66bb..97aa79c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.FailedProcedure.html
      @@ -1435,459 +1435,460 @@
 1427   */
 1428  private void execProcedure(final RootProcedureState procStack,
 1429      final Procedure<TEnvironment> procedure) {
-1430    Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
-1431
-1432    // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException.
-1433    // The exception is caught below and then we hurry to the exit without disturbing state. The
-1434    // idea is that the processing of this procedure will be unsuspended later by an external event
-1435    // such as the report of a region open. TODO: Currently, it's possible for two worker threads
-1436    // to be working on the same procedure concurrently (locking in procedures is NOT about
-1437    // concurrency but about tying an entity to a procedure; i.e. a region to a particular
-1438    // procedure instance). This can make for issues if both threads are changing state.
-1439    // See env.getProcedureScheduler().wakeEvent(regionNode.getProcedureEvent());
-1440    // in RegionTransitionProcedure#reportTransition for an example of a Procedure putting
-1441    // itself back on the scheduler, making it possible for two threads to run against
-1442    // the one Procedure. Might be ok if they are both doing different, idempotent sections.
-1443    boolean suspended = false;
-1444
-1445    // Whether to 're-' -execute; run through the loop again.
-1446    boolean reExecute = false;
-1447
-1448    Procedure<TEnvironment>[] subprocs = null;
-1449    do {
-1450      reExecute = false;
-1451      try {
-1452        subprocs = procedure.doExecute(getEnvironment());
-1453        if (subprocs != null && subprocs.length == 0) {
-1454          subprocs = null;
-1455        }
-1456      } catch (ProcedureSuspendedException e) {
-1457        if (LOG.isTraceEnabled()) {
-1458          LOG.trace("Suspend " + procedure);
-1459        }
-1460        suspended = true;
-1461      } catch (ProcedureYieldException e) {
-1462        if (LOG.isTraceEnabled()) {
-1463          LOG.trace("Yield " + procedure + ": " + e.getMessage(), e);
-1464        }
-1465        scheduler.yield(procedure);
-1466        return;
-1467      } catch (InterruptedException e) {
-1468        if (LOG.isTraceEnabled()) {
-1469          LOG.trace("Yield interrupt " + procedure + ": " + e.getMessage(), e);
-1470        }
-1471        handleInterruptedException(procedure, e);
-1472        scheduler.yield(procedure);
-1473        return;
-1474      } catch (Throwable e) {
-1475        // Catch NullPointerExceptions or similar errors...
-1476        String msg = "CODE-BUG: Uncaught runtime exception: " + procedure;
-1477        LOG.error(msg, e);
-1478        procedure.setFailure(new RemoteProcedureException(msg, e));
-1479      }
-1480
-1481      if (!procedure.isFailed()) {
-1482        if (subprocs != null) {
-1483          if (subprocs.length == 1 && subprocs[0] == procedure) {
-1484            // Procedure returned itself. Quick-shortcut for a state machine-like procedure;
-1485            // i.e. we go around this loop again rather than go back out on the scheduler queue.
-1486            subprocs = null;
-1487            reExecute = true;
-1488            if (LOG.isTraceEnabled()) {
-1489              LOG.trace("Short-circuit to next step on pid=" + procedure.getProcId());
-1490            }
-1491          } else {
-1492            // Yield the current procedure, and make the subprocedure runnable
-1493            // subprocs may come back 'null'.
-1494            subprocs = initializeChildren(procStack, procedure, subprocs);
-1495            LOG.info("Initialized subprocedures=" +
-1496              (subprocs == null? null:
-1497                Stream.of(subprocs).map(e -> "{" + e.toString() + "}").
-1498                collect(Collectors.toList()).toString()));
-1499          }
-1500        } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
-1501          if (LOG.isTraceEnabled()) {
-1502            LOG.trace("Added to timeoutExecutor " + procedure);
-1503          }
-1504          timeoutExecutor.add(procedure);
-1505        } else if (!suspended) {
-1506          // No subtask, so we are done
-1507          procedure.setState(ProcedureState.SUCCESS);
-1508        }
-1509      }
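The reExecute flag above gives state-machine-like procedures a fast path: a procedure that returns itself loops straight into its next step instead of going back onto the scheduler queue. A stripped-down illustration of just that loop (CountdownProcedure is invented; the real Procedure API also carries the environment, locking, suspension, and failure state shown above):

    public class ReExecuteLoopSketch {
      // Each doExecute() call is one "state" of a state-machine-like procedure.
      static final class CountdownProcedure {
        int stepsLeft;
        CountdownProcedure(int steps) { this.stepsLeft = steps; }

        // Returns itself while more steps remain (the short-circuit case),
        // null when there is no subtask left and the procedure is done.
        CountdownProcedure doExecute() {
          System.out.println("executing step, " + stepsLeft + " left");
          return --stepsLeft > 0 ? this : null;
        }
      }

      static void execProcedure(CountdownProcedure procedure) {
        boolean reExecute;
        do {
          reExecute = false;
          CountdownProcedure sub = procedure.doExecute();
          if (sub == procedure) {
            // Procedure returned itself: go around the loop again rather
            // than go back out on the scheduler queue.
            reExecute = true;
          }
        } while (reExecute);
        System.out.println("procedure complete");
      }

      public static void main(String[] args) {
        execProcedure(new CountdownProcedure(3));
      }
    }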
      

      [08/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index 5020c74..8302e28 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
      @@ -1388,7 +1388,7 @@
 1380    if (columns == null || columns.isEmpty()) return false;
 1381    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
 1382    for (String columnfamimly : columns) {
-1383      builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly));
+1383      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly));
 1384    }
 1385    fstd.createTableDescriptor(builder.build(), true);
 1386    return true;
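The hunk replaces addColumnFamily with setColumnFamily on TableDescriptorBuilder (the same one-line change repeats below across the other HBaseFsck pages rendering this source). A minimal sketch of the updated builder usage; table and family names are placeholders:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SetColumnFamilyExample {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf2"))
            .build();
        System.out.println(td.getTableName() + " has "
            + td.getColumnFamilyCount() + " column families");
      }
    }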
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
      index 5020c74..8302e28 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.ERROR_CODE.html
      @@ -1388,7 +1388,7 @@
       1380if (columns ==null || 
      columns.isEmpty()) return false;
       1381TableDescriptorBuilder builder = 
      TableDescriptorBuilder.newBuilder(tableName);
       1382for (String columnfamimly : columns) 
      {
      -1383  
      builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly));
      +1383  
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly));
       1384}
       1385
      fstd.createTableDescriptor(builder.build(), true);
       1386return true;
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html 
      b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
      index 5020c74..8302e28 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
      @@ -1388,7 +1388,7 @@
       1380if (columns ==null || 
      columns.isEmpty()) return false;
       1381TableDescriptorBuilder builder = 
      TableDescriptorBuilder.newBuilder(tableName);
       1382for (String columnfamimly : columns) 
      {
      -1383  
      builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly));
      +1383  
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly));
       1384}
       1385
      fstd.createTableDescriptor(builder.build(), true);
       1386return true;
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
      index 5020c74..8302e28 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
      @@ -1388,7 +1388,7 @@
       1380if (columns ==null || 
      columns.isEmpty()) return false;
       1381TableDescriptorBuilder builder = 
      TableDescriptorBuilder.newBuilder(tableName);
       1382for (String columnfamimly : columns) 
      {
      -1383  
      builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly));
      +1383  
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly));
       1384}
       1385
      fstd.createTableDescriptor(builder.build(), true);
       1386return true;
      
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index 5020c74..8302e28 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ 
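The hunk repeated above is a one-line rename from addColumnFamily to setColumnFamily on TableDescriptorBuilder. As a minimal self-contained sketch of the renamed call (table and family names are invented for illustration, not taken from the diff):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SetColumnFamilyExample {
      public static void main(String[] args) {
        // "example", "cf1" and "cf2" are illustrative names.
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("example"));
        for (String family : new String[] { "cf1", "cf2" }) {
          // setColumnFamily replaces the older addColumnFamily shown on the '-' line.
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
        }
        TableDescriptor descriptor = builder.build();
        System.out.println(descriptor.getTableName());
      }
    }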
      

      [08/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
index 24f6fb2..4729c8d 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";

-static interface RSRpcServices.LogDelegate
+static interface RSRpcServices.LogDelegate

@@ -151,7 +151,7 @@ var activeTableTab = "activeTableTab";

 logBatchWarning
-void logBatchWarning(String firstRegionName,
+void logBatchWarning(String firstRegionName,
                      int sum,
                      int rowSizeWarnThreshold)
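The signature above is all the page records for this package-private interface. As a hedged sketch only, an implementation shaped like it might log to stderr; the interface copy below is a stand-in mirroring that signature, not the real RSRpcServices.LogDelegate:

    // Stand-in for the package-private RSRpcServices.LogDelegate interface.
    interface LogDelegate {
      void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold);
    }

    class StdErrLogDelegate implements LogDelegate {
      @Override
      public void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold) {
        // Mirrors the intent suggested by the parameter names: warn when a batch
        // starting at firstRegionName reaches sum rows against the threshold.
        System.err.println("Large batch starting at region " + firstRegionName
            + ": " + sum + " rows (threshold " + rowSizeWarnThreshold + ")");
      }
    }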
       
      
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
index 7874951..82d92da 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";

-private static final class RSRpcServices.RegionScannerCloseCallBack
+private static final class RSRpcServices.RegionScannerCloseCallBack
 extends Object
 implements RpcCallback
 An Rpc callback for closing a RegionScanner.
@@ -209,7 +209,7 @@ implements

 scanner
-private final RegionScanner scanner
+private final RegionScanner scanner

@@ -226,7 +226,7 @@ implements

 RegionScannerCloseCallBack
-public RegionScannerCloseCallBack(RegionScanner scanner)
+public RegionScannerCloseCallBack(RegionScanner scanner)

@@ -243,7 +243,7 @@ implements

 run
-public void run()
+public void run()
   throws IOException
 Description copied from interface: RpcCallback
 Called at the end of an Rpc Call RpcCallContext
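A hedged sketch of the callback pattern this class implements: run() is invoked once the RPC finishes and releases the scanner. The RpcCallback interface below is a stand-in mirroring the run()-throws-IOException shape shown above, and AutoCloseable stands in for the held RegionScanner:

    import java.io.IOException;

    // Stand-in for org.apache.hadoop.hbase.ipc.RpcCallback.
    interface RpcCallback {
      void run() throws IOException;
    }

    final class ScannerCloseCallback implements RpcCallback {
      private final AutoCloseable scanner; // stand-in for the held RegionScanner

      ScannerCloseCallback(AutoCloseable scanner) {
        this.scanner = scanner;
      }

      @Override
      public void run() throws IOException {
        // Invoked at the end of the RPC call: release the scanner exactly once.
        try {
          scanner.close();
        } catch (Exception e) {
          throw new IOException(e);
        }
      }
    }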
      
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
index 89d371b..1a7af7a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";

-private static final class RSRpcServices.RegionScannerHolder
+private static final class RSRpcServices.RegionScannerHolder
 extends Object
 Holder class which holds the RegionScanner, nextCallSeq and RpcCallbacks together.

@@ -239,7 +239,7 @@ extends Object

 nextCallSeq
-private final AtomicLong nextCallSeq
+private final AtomicLong nextCallSeq

@@ -248,7 +248,7 @@ extends Object

 scannerName
-private final String scannerName
+private final String scannerName

@@ -257,7 +257,7 @@ extends
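A minimal sketch of the holder shape named above, assuming only the members this page lists (the scanner, nextCallSeq and scannerName); the real class also carries RpcCallbacks and lease bookkeeping that are omitted here:

    import java.util.concurrent.atomic.AtomicLong;

    final class ScannerHolder {
      private final String scannerName;
      private final AtomicLong nextCallSeq = new AtomicLong(0);
      private final Object scanner; // stand-in for the held RegionScanner

      ScannerHolder(String scannerName, Object scanner) {
        this.scannerName = scannerName;
        this.scanner = scanner;
      }

      // The client must echo the current sequence number on each scan RPC;
      // the server bumps it after every successful next() batch.
      long incNextCallSeq() {
        return nextCallSeq.incrementAndGet();
      }
    }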

      [08/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
index 2253191..58f9a7c 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
@@ -114,15 +114,15 @@

 private PriorityFunction
-RpcExecutor.priority
+SimpleRpcScheduler.priority

 private PriorityFunction
-RpcExecutor.CallPriorityComparator.priority
+RpcExecutor.priority

 private PriorityFunction
-SimpleRpcScheduler.priority
+RpcExecutor.CallPriorityComparator.priority

@@ -319,7 +319,7 @@

 RpcScheduler
-FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority)
 Deprecated.

@@ -333,16 +333,18 @@

 RpcScheduler
-RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority)
 Deprecated.

 RpcScheduler
-FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority,
-  Abortable server)
+  Abortable server)
+Constructs a RpcScheduler.
+

 RpcScheduler
@@ -352,11 +354,9 @@

 RpcScheduler
-RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority,
-  Abortable server)
-Constructs a RpcScheduler.
-
+  Abortable server)
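The two create(...) overloads come straight from the table above. As a hedged sketch only, a custom factory implementing them might delegate to the FIFO scheduler; the FifoRpcScheduler constructor arguments and the fixed handler count are assumptions for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
    import org.apache.hadoop.hbase.ipc.PriorityFunction;
    import org.apache.hadoop.hbase.ipc.RpcScheduler;
    import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;

    public class SimpleFifoSchedulerFactory implements RpcSchedulerFactory {
      @Override
      public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
        // Ignores priority/server, as a FIFO scheduler has no priority lanes.
        return new FifoRpcScheduler(conf, 10); // fixed handler count, illustrative only
      }

      @Override
      @Deprecated
      public RpcScheduler create(Configuration conf, PriorityFunction priority) {
        return create(conf, priority, null);
      }
    }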
       
       
       
      
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
index e491ef8..c3d3cc2 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
@@ -123,13 +123,13 @@

 void
-RpcCallContext.setCallBack(RpcCallback callback)
-Sets a callback which has to be executed at the end of this RPC call.
-
+ServerCall.setCallBack(RpcCallback callback)

 void
-ServerCall.setCallBack(RpcCallback callback)
+RpcCallContext.setCallBack(RpcCallback callback)
+Sets a callback which has to be executed at the end of this RPC call.
+
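As a usage sketch of the contract documented above: server-side code reaches the current call context and registers cleanup to run when the call finishes. That RpcServer.getCurrentCall() returns an Optional here is an assumption about this snapshot's API, and closeCallback is any RpcCallback such as the scanner-close sketch earlier:

    import org.apache.hadoop.hbase.ipc.RpcCallback;
    import org.apache.hadoop.hbase.ipc.RpcServer;

    public class CallbackRegistration {
      // Hedged sketch: register an end-of-call callback on the current RpcCallContext.
      static void registerCleanup(RpcCallback closeCallback) {
        RpcServer.getCurrentCall().ifPresent(call -> call.setCallBack(closeCallback));
      }
    }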
       
       
       
      
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
index c3eee11..f6ddc97 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
      @@ -131,24 +131,32 @@
       
       
       
      -protected RpcControllerFactory
      -RegionAdminServiceCallable.rpcControllerFactory
      -
      -
       private RpcControllerFactory
       ConnectionImplementation.rpcControllerFactory
       
      +
      +protected RpcControllerFactory
      +ClientScanner.rpcControllerFactory
      +
       
      +protected RpcControllerFactory
      +RegionAdminServiceCallable.rpcControllerFactory
      +
      +
       (package private) RpcControllerFactory
       AsyncConnectionImpl.rpcControllerFactory
       
      -
      +
       private RpcControllerFactory
       HTable.rpcControllerFactory
       
      +
      +private RpcControllerFactory
      +HBaseAdmin.rpcControllerFactory
      +
       
       private RpcControllerFactory
      -RpcRetryingCallerWithReadReplicas.rpcControllerFactory
      +SecureBulkLoadClient.rpcControllerFactory
       
       
       protected RpcControllerFactory
      @@ -156,15 +164,7 @@
       
       
       private RpcControllerFactory
      -HBaseAdmin.rpcControllerFactory
      -
      -
      -private RpcControllerFactory
      -SecureBulkLoadClient.rpcControllerFactory
      -
      -
      -protected RpcControllerFactory
      -ClientScanner.rpcControllerFactory
      +RpcRetryingCallerWithReadReplicas.rpcControllerFactory
       
       
       (package private) RpcControllerFactory
      @@ -181,11 +181,11 @@
       
       
       RpcControllerFactory
      -ClusterConnection.getRpcControllerFactory()
      +ConnectionImplementation.getRpcControllerFactory()
       
       
       RpcControllerFactory
      -ConnectionImplementation.getRpcControllerFactory()
      +ClusterConnection.getRpcControllerFactory()
       
       
       private RpcControllerFactory
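The rows above only show where the rpcControllerFactory fields live. As a usage sketch, client-side classes typically obtain a controller this way; the priority value is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.ipc.HBaseRpcController;
    import org.apache.hadoop.hbase.ipc.RpcControllerFactory;

    public class ControllerExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // instantiate(conf) honors any configured custom factory class.
        RpcControllerFactory factory = RpcControllerFactory.instantiate(conf);
        HBaseRpcController controller = factory.newController();
        controller.setPriority(HConstants.HIGH_QOS); // illustrative priority
      }
    }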
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.Handler.html
      --
      diff --git 
      

      [08/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
index d30ee5e..b58c054 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
@@ -166,27 +166,27 @@

 DataBlockEncoder.EncodedSeeker
-CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator,
+RowIndexCodecV1.createSeeker(CellComparator comparator,
     HFileBlockDecodingContext decodingCtx)

 DataBlockEncoder.EncodedSeeker
-PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator,
+CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator,
     HFileBlockDecodingContext decodingCtx)

 DataBlockEncoder.EncodedSeeker
-FastDiffDeltaEncoder.createSeeker(CellComparator comparator,
+DiffKeyDeltaEncoder.createSeeker(CellComparator comparator,
     HFileBlockDecodingContext decodingCtx)

 DataBlockEncoder.EncodedSeeker
-DiffKeyDeltaEncoder.createSeeker(CellComparator comparator,
+FastDiffDeltaEncoder.createSeeker(CellComparator comparator,
     HFileBlockDecodingContext decodingCtx)

 DataBlockEncoder.EncodedSeeker
-RowIndexCodecV1.createSeeker(CellComparator comparator,
+PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator,
     HFileBlockDecodingContext decodingCtx)

@@ -198,13 +198,13 @@

 ByteBuffer
-BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source,
-   HFileBlockDecodingContext blkDecodingCtx)
+RowIndexCodecV1.decodeKeyValues(DataInputStream source,
+   HFileBlockDecodingContext decodingCtx)

 ByteBuffer
-RowIndexCodecV1.decodeKeyValues(DataInputStream source,
-   HFileBlockDecodingContext decodingCtx)
+BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source,
+   HFileBlockDecodingContext blkDecodingCtx)

@@ -279,17 +279,17 @@

 HFileBlockDecodingContext
-NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)

 HFileBlockDecodingContext
-HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
-create an encoder-specific decoding context for reading.
-
+NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)

 HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
+HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
+create an encoder-specific decoding context for reading.
+
       
       
       
      
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
index cbdb3c8..468913a 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
@@ -116,36 +116,36 @@
   HFileBlockDefaultDecodingContext decodingCtx)

-protected abstract ByteBuffer
-BufferedDataBlockEncoder.internalDecodeKeyValues(DataInputStream source,
+protected ByteBuffer
      

      [08/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
index 05c0542..644bb30 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html
@@ -1062,366 +1062,375 @@
 1054    }
 1055
 1056    /**
-1057     * Returns the configured replicas per region
+1057     * Return true if there is at least one cf whose replication scope is serial.
 1058     */
 1059    @Override
-1060    public int getRegionReplication() {
-1061      return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION);
-1062    }
-1063
-1064    /**
-1065     * Sets the number of replicas per region.
-1066     *
-1067     * @param regionReplication the replication factor per region
-1068     * @return the modifyable TD
-1069     */
-1070    public ModifyableTableDescriptor setRegionReplication(int regionReplication) {
-1071      return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication));
-1072    }
-1073
-1074    /**
-1075     * @return true if the read-replicas memstore replication is enabled.
-1076     */
-1077    @Override
-1078    public boolean hasRegionMemStoreReplication() {
-1079      return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, DEFAULT_REGION_MEMSTORE_REPLICATION);
-1080    }
-1081
-1082    /**
-1083     * Enable or Disable the memstore replication from the primary region to the
-1084     * replicas. The replication will be used only for meta operations (e.g.
-1085     * flush, compaction, ...)
-1086     *
-1087     * @param memstoreReplication true if the new data written to the primary
-1088     * region should be replicated. false if the secondaries can tolerate to
-1089     * have new data only when the primary flushes the memstore.
-1090     * @return the modifyable TD
-1091     */
-1092    public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) {
-1093      setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication));
-1094      // If the memstore replication is setup, we do not have to wait for observing a flush event
-1095      // from primary before starting to serve reads, because gaps from replication is not applicable
-1096      return setValue(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY,
-1097          Boolean.toString(memstoreReplication));
-1098    }
-1099
-1100    public ModifyableTableDescriptor setPriority(int priority) {
-1101      return setValue(PRIORITY_KEY, Integer.toString(priority));
-1102    }
-1103
-1104    @Override
-1105    public int getPriority() {
-1106      return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY);
+1060    public boolean hasSerialReplicationScope() {
+1061      return families.values().stream()
+1062          .anyMatch(column -> column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL);
+1063    }
+1064
+1065    /**
+1066     * Returns the configured replicas per region
+1067     */
+1068    @Override
+1069    public int getRegionReplication() {
+1070      return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION);
+1071    }
+1072
+1073    /**
+1074     * Sets the number of replicas per region.
+1075     *
+1076     * @param regionReplication the replication factor per region
+1077     * @return the modifyable TD
+1078     */
+1079    public ModifyableTableDescriptor setRegionReplication(int regionReplication) {
+1080      return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication));
+1081    }
+1082
+1083    /**
+1084     * @return true if the read-replicas memstore replication is enabled.
+1085     */
+1086    @Override
+1087    public boolean hasRegionMemStoreReplication() {
+1088      return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, DEFAULT_REGION_MEMSTORE_REPLICATION);
+1089    }
+1090
+1091    /**
+1092     * Enable or Disable the memstore replication from the primary region to the
+1093     * replicas. The replication will be used only for meta operations (e.g.
+1094     * flush, compaction, ...)
+1095     *
+1096     * @param memstoreReplication true if the new data written to the primary
+1097     * region should be replicated. false if the secondaries can tolerate to
+1098     * have new data only when the primary flushes the memstore.
+1099     * @return the modifyable TD
+1100     */
+1101    public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) {
+1102      setValue(REGION_MEMSTORE_REPLICATION_KEY, 
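Tying the accessors above together, a hedged sketch of a descriptor that exercises setRegionReplication, setRegionMemStoreReplication and a serial-scope family; table and family names are illustrative, and HConstants.REPLICATION_SCOPE_SERIAL is assumed available as in this snapshot:

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplicationSettingsExample {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("replicated"))   // illustrative name
            .setRegionReplication(3)                        // replicas per region, per the javadoc above
            .setRegionMemStoreReplication(true)             // replicate memstore meta events to replicas
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setScope(HConstants.REPLICATION_SCOPE_SERIAL) // makes hasSerialReplicationScope() true
                .build())
            .build();
        System.out.println(td.getRegionReplication()); // prints 3
      }
    }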
      

      [08/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/RowMutations.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/RowMutations.html b/apidocs/org/apache/hadoop/hbase/client/RowMutations.html
index b50d2b7..8958b78 100644
--- a/apidocs/org/apache/hadoop/hbase/client/RowMutations.html
+++ b/apidocs/org/apache/hadoop/hbase/client/RowMutations.html
@@ -97,7 +97,7 @@ var activeTableTab = "activeTableTab";

-java.lang.Object (javadoc link: http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)
+java.lang.Object (javadoc link: https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)

 org.apache.hadoop.hbase.client.RowMutations
@@ -109,13 +109,13 @@ var activeTableTab = "activeTableTab";

 All Implemented Interfaces:
-Comparable<Row> (javadoc link: http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html), Row
+Comparable<Row> (javadoc link: https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html), Row

 @InterfaceAudience.Public
 public class RowMutations
-extends Object (javadoc link: http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)
+extends Object (javadoc link: https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)
 implements Row
 Performs multiple mutations atomically on a single row. Currently Put and Delete are supported.
@@ -192,7 +192,7 @@ implements

 RowMutations
-add(List<? extends Mutation> mutations) (List javadoc link: http://docs.oracle.com/javase/8/docs/api/java/util/List.html)
+add(List<? extends Mutation> mutations) (List javadoc link: https://docs.oracle.com/javase/8/docs/api/java/util/List.html)
 Currently only supports Put and Delete mutations.

@@ -222,7 +222,7 @@ implements

 boolean
-equals(Object obj) (Object javadoc link: http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)
+equals(Object obj) (Object javadoc link: https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)
 Deprecated.
 As of release 2.0.0, this will be removed in HBase 3.0.0.
 No replacement
@@ -234,7 +234,7 @@ implements getMaxPriority()

-List<Mutation> (javadoc link: http://docs.oracle.com/javase/8/docs/api/java/util/List.html)
+List<Mutation> (javadoc link: https://docs.oracle.com/javase/8/docs/api/java/util/List.html)
 getMutations()

@@ -252,7 +252,7 @@ implements

 static RowMutations
-of(List<? extends Mutation> mutations) (List javadoc link: http://docs.oracle.com/javase/8/docs/api/java/util/List.html)
+of(List<? extends Mutation> mutations) (List javadoc link: https://docs.oracle.com/javase/8/docs/api/java/util/List.html)
 Create a RowMutations with the specified mutations.

@@ -261,8 +261,8 @@ implements

-Methods inherited from class java.lang.Object: clone, finalize, getClass, notify, notifyAll, toString, wait, wait, wait (anchors under http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)
+Methods inherited from class java.lang.Object (javadoc link: https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)
      

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html b/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
index e5fdac5..ad7c82a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/CompareFilter.CompareOp.html
@@ -33,299 +33,303 @@
 025import org.apache.hadoop.hbase.Cell;
 026import org.apache.hadoop.hbase.CompareOperator;
 027import org.apache.hadoop.hbase.PrivateCellUtil;
-028import org.apache.yetus.audience.InterfaceAudience;
-029import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-030import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
-031import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-032import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType;
-033import org.apache.hadoop.hbase.util.Bytes;
-034
-035import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-036/**
-037 * This is a generic filter to be used to filter by comparison.  It takes an
-038 * operator (equal, greater, not equal, etc) and a byte [] comparator.
-039 * <p>
-040 * To filter by row key, use {@link RowFilter}.
+028import org.apache.hadoop.hbase.util.Bytes;
+029import org.apache.yetus.audience.InterfaceAudience;
+030
+031import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+032
+033import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+034import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
+035import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+036import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.CompareType;
+037
+038/**
+039 * This is a generic filter to be used to filter by comparison.  It takes an
+040 * operator (equal, greater, not equal, etc) and a byte [] comparator.
 041 * <p>
-042 * To filter by column qualifier, use {@link QualifierFilter}.
+042 * To filter by row key, use {@link RowFilter}.
 043 * <p>
-044 * To filter by value, use {@link SingleColumnValueFilter}.
+044 * To filter by column family, use {@link FamilyFilter}.
 045 * <p>
-046 * These filters can be wrapped with {@link SkipFilter} and {@link WhileMatchFilter}
-047 * to add more control.
-048 * <p>
-049 * Multiple filters can be combined using {@link FilterList}.
-050 */
-051@InterfaceAudience.Public
-052public abstract class CompareFilter extends FilterBase {
-053  /**
-054   * Comparison operators. For filters only!
-055   * Use {@link CompareOperator} otherwise.
-056   * It (intentionally) has at least the below enums with same names.
-057   * @deprecated  since 2.0.0. Will be removed in 3.0.0. Use {@link CompareOperator} instead.
-058   */
-059  @Deprecated
-060  @InterfaceAudience.Public
-061  public enum CompareOp {
-062    /** less than */
-063    LESS,
-064    /** less than or equal to */
-065    LESS_OR_EQUAL,
-066    /** equals */
-067    EQUAL,
-068    /** not equal */
-069    NOT_EQUAL,
-070    /** greater than or equal to */
-071    GREATER_OR_EQUAL,
-072    /** greater than */
-073    GREATER,
-074    /** no operation */
-075    NO_OP,
-076  }
-077
-078  protected CompareOperator op;
-079  protected ByteArrayComparable comparator;
-080
-081  /**
-082   * Constructor.
-083   * @param compareOp the compare op for row matching
-084   * @param comparator the comparator for row matching
-085   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use other constructor.
-086   */
-087  @Deprecated
-088  public CompareFilter(final CompareOp compareOp,
-089      final ByteArrayComparable comparator) {
-090    this(CompareOperator.valueOf(compareOp.name()), comparator);
-091  }
-092
-093  /**
-094   * Constructor.
-095   * @param op the compare op for row matching
-096   * @param comparator the comparator for row matching
-097   */
-098  public CompareFilter(final CompareOperator op,
-099      final ByteArrayComparable comparator) {
-100    this.op = op;
-101    this.comparator = comparator;
-102  }
-103
-104  /**
-105   * @return operator
-106   * @deprecated  since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} instead.
-107   */
-108  @Deprecated
-109  public CompareOp getOperator() {
-110    return CompareOp.valueOf(op.name());
-111  }
-112
-113  public CompareOperator getCompareOperator() {
-114    return op;
+046 * To filter by column qualifier, use {@link QualifierFilter}.
+047 * <p>
+048 * To filter by value, use {@link ValueFilter}.
+049 * <p>
+050 * These filters can be wrapped with {@link SkipFilter} and {@link WhileMatchFilter}
+051 * to add more control.
+052 * <p>
+053 * Multiple filters can be combined using {@link 
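The deprecation note in the source above points from CompareFilter.CompareOp to CompareOperator. A minimal sketch of a comparison filter built the non-deprecated way; the row-key bound is illustrative:

    import org.apache.hadoop.hbase.CompareOperator;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.RowFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompareOperatorExample {
      public static void main(String[] args) {
        // Build the comparison with CompareOperator rather than the
        // deprecated CompareFilter.CompareOp enum.
        RowFilter filter = new RowFilter(
            CompareOperator.LESS_OR_EQUAL,
            new BinaryComparator(Bytes.toBytes("row-100"))); // illustrative key bound
        Scan scan = new Scan().setFilter(filter);
        System.out.println(scan);
      }
    }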

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 802b925..a3e80ab 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -73,229 +73,229 @@
 065import java.util.concurrent.TimeoutException;
 066import java.util.concurrent.atomic.AtomicBoolean;
 067import java.util.concurrent.atomic.AtomicInteger;
-068import java.util.concurrent.atomic.AtomicLong;
-069import java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import java.util.concurrent.locks.ReadWriteLock;
-072import java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import org.apache.hadoop.hbase.CellBuilderType;
-081import org.apache.hadoop.hbase.CellComparator;
-082import org.apache.hadoop.hbase.CellComparatorImpl;
-083import org.apache.hadoop.hbase.CellScanner;
-084import org.apache.hadoop.hbase.CellUtil;
-085import org.apache.hadoop.hbase.CompareOperator;
-086import org.apache.hadoop.hbase.CompoundConfiguration;
-087import org.apache.hadoop.hbase.DoNotRetryIOException;
-088import org.apache.hadoop.hbase.DroppedSnapshotException;
-089import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import org.apache.hadoop.hbase.HConstants;
-091import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import org.apache.hadoop.hbase.HRegionInfo;
-094import org.apache.hadoop.hbase.KeyValue;
-095import org.apache.hadoop.hbase.KeyValueUtil;
-096import org.apache.hadoop.hbase.NamespaceDescriptor;
-097import org.apache.hadoop.hbase.NotServingRegionException;
-098import org.apache.hadoop.hbase.PrivateCellUtil;
-099import org.apache.hadoop.hbase.RegionTooBusyException;
-100import org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import org.apache.hadoop.hbase.UnknownScannerException;
-104import org.apache.hadoop.hbase.client.Append;
-105import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import org.apache.hadoop.hbase.client.CompactionState;
-107import org.apache.hadoop.hbase.client.Delete;
-108import org.apache.hadoop.hbase.client.Durability;
-109import org.apache.hadoop.hbase.client.Get;
-110import org.apache.hadoop.hbase.client.Increment;
-111import org.apache.hadoop.hbase.client.IsolationLevel;
-112import org.apache.hadoop.hbase.client.Mutation;
-113import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import org.apache.hadoop.hbase.client.Put;
-115import org.apache.hadoop.hbase.client.RegionInfo;
-116import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import org.apache.hadoop.hbase.client.Result;
-118import org.apache.hadoop.hbase.client.RowMutations;
-119import org.apache.hadoop.hbase.client.Scan;
-120import org.apache.hadoop.hbase.client.TableDescriptor;
-121import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import org.apache.hadoop.hbase.filter.FilterWrapper;
-131import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import org.apache.hadoop.hbase.io.HFileLink;
-133import org.apache.hadoop.hbase.io.HeapSize;
-134import org.apache.hadoop.hbase.io.TimeRange;
-135import org.apache.hadoop.hbase.io.hfile.HFile;
-136import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import org.apache.hadoop.hbase.ipc.RpcCall;
-139import org.apache.hadoop.hbase.ipc.RpcServer;
-140import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index bd13b53..802b925 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -900,7600 +900,7598 @@
 892    if (this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
 893      status.setStatus("Writing region info on filesystem");
 894      fs.checkRegionInfoOnFilesystem();
-895    } else {
-896      if (LOG.isDebugEnabled()) {
-897        LOG.debug("Skipping creation of .regioninfo file for " + this.getRegionInfo());
-898      }
-899    }
-900
-901    // Initialize all the HStores
-902    status.setStatus("Initializing all the Stores");
-903    long maxSeqId = initializeStores(reporter, status);
-904    this.mvcc.advanceTo(maxSeqId);
-905    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-906      Collection<HStore> stores = this.stores.values();
-907      try {
-908        // update the stores that we are replaying
-909        stores.forEach(HStore::startReplayingFromWAL);
-910        // Recover any edits if available.
-911        maxSeqId = Math.max(maxSeqId,
-912          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-913        // Make sure mvcc is up to max.
-914        this.mvcc.advanceTo(maxSeqId);
-915      } finally {
-916        // update the stores that we are done replaying
-917        stores.forEach(HStore::stopReplayingFromWAL);
-918      }
-919    }
-920    this.lastReplayedOpenRegionSeqId = maxSeqId;
+895    }
+896
+897    // Initialize all the HStores
+898    status.setStatus("Initializing all the Stores");
+899    long maxSeqId = initializeStores(reporter, status);
+900    this.mvcc.advanceTo(maxSeqId);
+901    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+902      Collection<HStore> stores = this.stores.values();
+903      try {
+904        // update the stores that we are replaying
+905        stores.forEach(HStore::startReplayingFromWAL);
+906        // Recover any edits if available.
+907        maxSeqId = Math.max(maxSeqId,
+908          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+909        // Make sure mvcc is up to max.
+910        this.mvcc.advanceTo(maxSeqId);
+911      } finally {
+912        // update the stores that we are done replaying
+913        stores.forEach(HStore::stopReplayingFromWAL);
+914      }
+915    }
+916    this.lastReplayedOpenRegionSeqId = maxSeqId;
+917
+918    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+919    this.writestate.flushRequested = false;
+920    this.writestate.compacting.set(0);
 921
-922    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-923    this.writestate.flushRequested = false;
-924    this.writestate.compacting.set(0);
-925
-926    if (this.writestate.writesEnabled) {
-927      // Remove temporary data left over from old regions
-928      status.setStatus("Cleaning up temporary data from old regions");
-929      fs.cleanupTempDir();
-930    }
-931
-932    if (this.writestate.writesEnabled) {
-933      status.setStatus("Cleaning up detritus from prior splits");
-934      // Get rid of any splits or merges that were lost in-progress.  Clean out
-935      // these directories here on open.  We may be opening a region that was
-936      // being split but we crashed in the middle of it all.
-937      fs.cleanupAnySplitDetritus();
-938      fs.cleanupMergesDir();
-939    }
-940
-941    // Initialize split policy
-942    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-943
-944    // Initialize flush policy
-945    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-946
-947    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-948    for (HStore store: stores.values()) {
-949      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-950    }
-951
-952    // Use maximum of log sequenceid or that which was found in stores
-953    // (particularly if no recovered edits, seqid will be -1).
-954    long nextSeqid = maxSeqId;
-955    if (this.writestate.writesEnabled) {
-956      nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
-957          this.fs.getRegionDir(), nextSeqid, 1);
-958    } else {
-959      nextSeqid++;
-960    }
-961
-962    LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
-963      "; next sequenceid=" + nextSeqid);
+922    if (this.writestate.writesEnabled) {
+923      // Remove temporary data left over 

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/nio/class-use/ByteBuff.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/nio/class-use/ByteBuff.html b/devapidocs/org/apache/hadoop/hbase/nio/class-use/ByteBuff.html
index 74a0687..158a6aa 100644
--- a/devapidocs/org/apache/hadoop/hbase/nio/class-use/ByteBuff.html
+++ b/devapidocs/org/apache/hadoop/hbase/nio/class-use/ByteBuff.html
@@ -161,23 +161,23 @@

 Codec.Decoder
-CellCodec.getDecoder(ByteBuff buf)
+KeyValueCodec.getDecoder(ByteBuff buf)

 Codec.Decoder
-Codec.getDecoder(ByteBuff buf)
+CellCodecWithTags.getDecoder(ByteBuff buf)

 Codec.Decoder
-KeyValueCodec.getDecoder(ByteBuff buf)
+Codec.getDecoder(ByteBuff buf)

 Codec.Decoder
-KeyValueCodecWithTags.getDecoder(ByteBuff buf)
+CellCodec.getDecoder(ByteBuff buf)

 Codec.Decoder
-CellCodecWithTags.getDecoder(ByteBuff buf)
+KeyValueCodecWithTags.getDecoder(ByteBuff buf)

 Codec.Decoder
@@ -259,20 +259,20 @@

-private ByteBuff
-RowIndexSeekerV1.currentBuffer
+protected ByteBuff
+BufferedDataBlockEncoder.SeekerState.currentBuffer

 protected ByteBuff
-RowIndexSeekerV1.SeekerState.currentBuffer
+BufferedDataBlockEncoder.BufferedEncodedSeeker.currentBuffer

-protected ByteBuff
-BufferedDataBlockEncoder.SeekerState.currentBuffer
+private ByteBuff
+RowIndexSeekerV1.currentBuffer

 protected ByteBuff
-BufferedDataBlockEncoder.BufferedEncodedSeeker.currentBuffer
+RowIndexSeekerV1.SeekerState.currentBuffer

 private ByteBuff
@@ -295,23 +295,23 @@

 Cell
-RowIndexCodecV1.getFirstKeyCellInBlock(ByteBuff block)
+CopyKeyDataBlockEncoder.getFirstKeyCellInBlock(ByteBuff block)

 Cell
-CopyKeyDataBlockEncoder.getFirstKeyCellInBlock(ByteBuff block)
+PrefixKeyDeltaEncoder.getFirstKeyCellInBlock(ByteBuff block)

 Cell
-DiffKeyDeltaEncoder.getFirstKeyCellInBlock(ByteBuff block)
+FastDiffDeltaEncoder.getFirstKeyCellInBlock(ByteBuff block)

 Cell
-FastDiffDeltaEncoder.getFirstKeyCellInBlock(ByteBuff block)
+DiffKeyDeltaEncoder.getFirstKeyCellInBlock(ByteBuff block)

 Cell
-PrefixKeyDeltaEncoder.getFirstKeyCellInBlock(ByteBuff block)
+RowIndexCodecV1.getFirstKeyCellInBlock(ByteBuff block)

 void
@@ -338,11 +338,11 @@

 void
-RowIndexSeekerV1.setCurrentBuffer(ByteBuff buffer)
+BufferedDataBlockEncoder.BufferedEncodedSeeker.setCurrentBuffer(ByteBuff buffer)

 void
-BufferedDataBlockEncoder.BufferedEncodedSeeker.setCurrentBuffer(ByteBuff buffer)
+RowIndexSeekerV1.setCurrentBuffer(ByteBuff buffer)

@@ -498,21 +498,21 @@

 void
-ByteBufferIOEngine.write(ByteBuff srcBuffer,
- long offset)
-
-
-void
 FileIOEngine.write(ByteBuff srcBuffer,
  long offset)

 void
 IOEngine.write(ByteBuff srcBuffer,
  long offset)
 Transfers the data from the given MultiByteBuffer to IOEngine

+void
+ByteBufferIOEngine.write(ByteBuff srcBuffer,
+ long offset)
+

 void
 FileMmapEngine.write(ByteBuff srcBuffer,
@@ -812,6 +812,15 @@
  int index)

+MultiByteBuff
+MultiByteBuff.put(int offset,
+   ByteBuff src,
+   int srcOffset,
+   int length)
+Copies from a src MBB to this MBB.
+

 abstract ByteBuff
 ByteBuff.put(int offset,
    ByteBuff src,
    int srcOffset,
    int length)
 Copies the contents from the src ByteBuff to this ByteBuff.

 SingleByteBuff
 SingleByteBuff.put(int offset,
    ByteBuff src,
    int srcOffset,
    int length)

-MultiByteBuff
-MultiByteBuff.put(int offset,
-   ByteBuff src,
-   int srcOffset,
-   int length)
-Copies from a src MBB to this MBB.
-

 static int
 ByteBuff.readCompressedInt(ByteBuff buf)
      
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index cc24c24..11c1c93 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -440,20 +440,20 @@

 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)

-org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
-org.apache.hadoop.hbase.ClusterMetrics.Option
-org.apache.hadoop.hbase.Size.Unit
-org.apache.hadoop.hbase.HConstants.OperationStatusCode
-org.apache.hadoop.hbase.Coprocessor.State
-org.apache.hadoop.hbase.CellBuilderType
 org.apache.hadoop.hbase.ProcedureState
-org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
      

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
index abcb738..c7d05d1 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
@@ -143,17 +143,17 @@

 void
-NoOpDataBlockEncoder.saveMetadata(HFile.Writer writer)
+HFileDataBlockEncoderImpl.saveMetadata(HFile.Writer writer)

 void
-HFileDataBlockEncoder.saveMetadata(HFile.Writer writer)
-Save metadata in HFile which will be written to disk
-
+NoOpDataBlockEncoder.saveMetadata(HFile.Writer writer)

 void
-HFileDataBlockEncoderImpl.saveMetadata(HFile.Writer writer)
+HFileDataBlockEncoder.saveMetadata(HFile.Writer writer)
+Save metadata in HFile which will be written to disk
+

@@ -203,18 +203,18 @@

-abstract void
-BloomContext.addLastBloomKey(HFile.Writer writer)
-Adds the last bloom key to the HFile Writer as part of StorefileWriter close.
-
+void
+RowColBloomContext.addLastBloomKey(HFile.Writer writer)

 void
 RowBloomContext.addLastBloomKey(HFile.Writer writer)

-void
-RowColBloomContext.addLastBloomKey(HFile.Writer writer)
+abstract void
+BloomContext.addLastBloomKey(HFile.Writer writer)
+Adds the last bloom key to the HFile Writer as part of StorefileWriter close.
+

 static BloomFilterWriter
      
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
index 274bfad..479b9d3 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
@@ -106,15 +106,15 @@

-private HFileBlock.Writer
-HFileBlockIndex.BlockIndexWriter.blockWriter
-
-
 protected HFileBlock.Writer
 HFileWriterImpl.blockWriter
 block writer

+private HFileBlock.Writer
+HFileBlockIndex.BlockIndexWriter.blockWriter
+
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html 
      b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
      index b293c97..0c892c8 100644
      --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
      +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
      @@ -136,15 +136,15 @@
       
       
       HFileContext
      -HFileBlockEncodingContext.getHFileContext()
      +HFileBlockDecodingContext.getHFileContext()
       
       
       HFileContext
      -HFileBlockDecodingContext.getHFileContext()
      +HFileBlockDefaultDecodingContext.getHFileContext()
       
       
       HFileContext
      -HFileBlockDefaultDecodingContext.getHFileContext()
      +HFileBlockEncodingContext.getHFileContext()
       
       
       HFileContext
      @@ -224,24 +224,24 @@
       
       
       private HFileContext
      -HFile.WriterFactory.fileContext
      -
      -
      -private HFileContext
       HFileBlock.fileContext
       Meta data that holds meta information on the 
      hfileblock.
       
       
      -
      +
       private HFileContext
       HFileBlock.Writer.fileContext
       Meta data that holds information about the hfileblock
       
       
      -
      +
       private HFileContext
       HFileBlock.FSReaderImpl.fileContext
       
      +
      +private HFileContext
      +HFile.WriterFactory.fileContext
      +
       
       private HFileContext
       HFileReaderImpl.hfileContext
      @@ -277,20 +277,20 @@
       
       
       HFileContext
      +HFileWriterImpl.getFileContext()
      +
      +
      +HFileContext
       HFile.Writer.getFileContext()
       Return the file context for the HFile this writer belongs 
      to
       
       
      -
      +
       HFileContext
       HFile.Reader.getFileContext()
       Return the file context of the HFile this reader belongs 
      to
       
       
      -
      -HFileContext
      -HFileWriterImpl.getFileContext()
      -
       
       HFileContext
       HFileReaderImpl.getFileContext()
      @@ -323,35 +323,35 @@
       
       
 HFileBlockDecodingContext
-NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
 
 
 HFileBlockDecodingContext
-HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
-create an encoder-specific decoding context for reading.
-
+NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)
 
 
 HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
+HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
+create an encoder-specific decoding context for reading.
+
       
       
       HFileBlockEncodingContext
      
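As context for the rows above: HFileDataBlockEncoder.newDataBlockDecodingContext takes the file-level HFileContext and returns a decoding context bound to it, and the no-op encoder's context simply carries that metadata through. A hedged sketch with simplified stand-in interfaces (the Sketch* names are assumptions, not the real signatures):

    // Hedged sketch of the encoder -> decoding-context factory relationship.
    interface SketchHFileContext {
      String compression();
    }

    interface SketchDecodingContext {
      SketchHFileContext getHFileContext();
    }

    interface SketchDataBlockEncoder {
      // Mirrors newDataBlockDecodingContext(HFileContext fileContext) above.
      SketchDecodingContext newDataBlockDecodingContext(SketchHFileContext fileContext);
    }

    class SketchNoOpEncoder implements SketchDataBlockEncoder {
      @Override
      public SketchDecodingContext newDataBlockDecodingContext(SketchHFileContext meta) {
        // No encoding: the context only carries the file metadata through.
        return () -> meta;
      }
    }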

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
      index 9c13a58..4d04e3e 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
      @@ -133,11 +133,11 @@
       
       
 ProcedureExecutor<MasterProcedureEnv>
-MasterServices.getMasterProcedureExecutor()
+HMaster.getMasterProcedureExecutor()
 
 
 ProcedureExecutor<MasterProcedureEnv>
-HMaster.getMasterProcedureExecutor()
+MasterServices.getMasterProcedureExecutor()
 
 
 private RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv,?>
      @@ -194,15 +194,15 @@
       
       
 protected Procedure.LockState
-GCRegionProcedure.acquireLock(MasterProcedureEnv env)
+RegionTransitionProcedure.acquireLock(MasterProcedureEnv env)
 
 
 protected Procedure.LockState
-MergeTableRegionsProcedure.acquireLock(MasterProcedureEnv env)
+GCRegionProcedure.acquireLock(MasterProcedureEnv env)
 
 
 protected Procedure.LockState
-RegionTransitionProcedure.acquireLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.acquireLock(MasterProcedureEnv env)
       
       
       protected boolean
      @@ -295,7 +295,7 @@
       
       
 protected void
-AssignProcedure.finishTransition(MasterProcedureEnv env,
+UnassignProcedure.finishTransition(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode)
 
 
@@ -305,7 +305,7 @@
 
 
 protected void
-UnassignProcedure.finishTransition(MasterProcedureEnv env,
+AssignProcedure.finishTransition(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode)
       
       
      @@ -314,7 +314,7 @@
       
       
 protected ProcedureMetrics
-AssignProcedure.getProcedureMetrics(MasterProcedureEnv env)
+UnassignProcedure.getProcedureMetrics(MasterProcedureEnv env)
 
 
 protected ProcedureMetrics
@@ -326,7 +326,7 @@
 
 
 protected ProcedureMetrics
-UnassignProcedure.getProcedureMetrics(MasterProcedureEnv env)
+AssignProcedure.getProcedureMetrics(MasterProcedureEnv env)
       
       
       (package private) static 
      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse
      @@ -357,7 +357,7 @@
       
       
 ServerName
-AssignProcedure.getServer(MasterProcedureEnv env)
+UnassignProcedure.getServer(MasterProcedureEnv env)
 
 
 abstract ServerName
@@ -367,7 +367,7 @@
 
 
 ServerName
-UnassignProcedure.getServer(MasterProcedureEnv env)
+AssignProcedure.getServer(MasterProcedureEnv env)
       
       
       private ServerName
      @@ -384,19 +384,19 @@
       
       
 protected boolean
-MergeTableRegionsProcedure.hasLock(MasterProcedureEnv env)
+RegionTransitionProcedure.hasLock(MasterProcedureEnv env)
 
 
 protected boolean
-RegionTransitionProcedure.hasLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.hasLock(MasterProcedureEnv env)
 
 
 protected boolean
-MergeTableRegionsProcedure.holdLock(MasterProcedureEnv env)
+RegionTransitionProcedure.holdLock(MasterProcedureEnv env)
 
 
 protected boolean
-RegionTransitionProcedure.holdLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.holdLock(MasterProcedureEnv env)
       
       
       private boolean
      @@ -510,15 +510,15 @@
       
       
 protected void
-MergeTableRegionsProcedure.releaseLock(MasterProcedureEnv env)
+RegionTransitionProcedure.releaseLock(MasterProcedureEnv env)
 
 
 protected void
-RegionTransitionProcedure.releaseLock(MasterProcedureEnv env)
+MergeTableRegionsProcedure.releaseLock(MasterProcedureEnv env)
 
 
 RemoteProcedureDispatcher.RemoteOperation
-AssignProcedure.remoteCallBuild(MasterProcedureEnv env,
+UnassignProcedure.remoteCallBuild(MasterProcedureEnv env,
 ServerName serverName)
       
       
      @@ -528,12 +528,12 @@
       
       
 RemoteProcedureDispatcher.RemoteOperation
-UnassignProcedure.remoteCallBuild(MasterProcedureEnv env,
+AssignProcedure.remoteCallBuild(MasterProcedureEnv env,
 ServerName serverName)
 
 
 protected boolean
-AssignProcedure.remoteCallFailed(MasterProcedureEnv env,
+UnassignProcedure.remoteCallFailed(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode,
 IOException exception)
 
@@ -545,7 +545,7 @@
 
 
 protected boolean
-UnassignProcedure.remoteCallFailed(MasterProcedureEnv env,
+AssignProcedure.remoteCallFailed(MasterProcedureEnv env,
 RegionStates.RegionStateNode regionNode,
 IOException exception)
       
      @@ -566,10 +566,10 @@
       
       
 protected void
-AssignProcedure.reportTransition(MasterProcedureEnv env,
+UnassignProcedure.reportTransition(MasterProcedureEnv env,
       

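The class-use listing above shows AssignProcedure and UnassignProcedure overriding the same set of RegionTransitionProcedure hooks (getServer, finishTransition, remoteCallBuild, remoteCallFailed). A hedged sketch of that template-method shape, with simplified stand-in types in place of MasterProcedureEnv and ServerName:

    // Hedged sketch: the base procedure drives the flow, variants fill in hooks.
    abstract class SketchRegionTransitionProcedure {
      abstract String getServer(Object env);           // where to dispatch
      abstract void finishTransition(Object env);      // bookkeeping on success
      abstract boolean remoteCallFailed(Object env, Exception e); // true = handled

      final void run(Object env) {
        String server = getServer(env);
        try {
          dispatchTo(server);
          finishTransition(env);
        } catch (Exception e) {
          if (!remoteCallFailed(env, e)) {
            throw new IllegalStateException("giving up on " + server, e);
          }
        }
      }

      private void dispatchTo(String server) { /* remote call elided */ }
    }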
      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html 
      b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
      index b8ce496..570fb68 100644
      --- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
      +++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
      @@ -168,27 +168,39 @@
       
       
 void
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+  Cacheable buf)
+
+
+void
 BlockCache.cacheBlock(BlockCacheKey cacheKey,
   Cacheable buf)
 Add block to cache (defaults to not in-memory).
 
 
-
+
 void
 LruBlockCache.cacheBlock(BlockCacheKey cacheKey,
   Cacheable buf)
 Cache the block with the specified name and buffer.
 
 
-
+
 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey,
   Cacheable buf)
 
+
+void
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+  Cacheable buf,
+  boolean inMemory)
+
 
 void
-MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey,
-  Cacheable buf)
+InclusiveCombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+  Cacheable buf,
+  boolean inMemory)
 
       
       void
      @@ -208,18 +220,6 @@
       
       
 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
-  Cacheable buf,
-  boolean inMemory)
-
-
-void
-InclusiveCombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
-  Cacheable buf,
-  boolean inMemory)
-
-
-void
 MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey,
   Cacheable buf,
   boolean inMemory)
      @@ -232,21 +232,21 @@
       
       
 boolean
-BlockCache.evictBlock(BlockCacheKey cacheKey)
-Evict block from cache.
-
+CombinedBlockCache.evictBlock(BlockCacheKey cacheKey)
 
 
 boolean
-LruBlockCache.evictBlock(BlockCacheKey cacheKey)
+InclusiveCombinedBlockCache.evictBlock(BlockCacheKey cacheKey)
 
 
 boolean
-CombinedBlockCache.evictBlock(BlockCacheKey cacheKey)
+BlockCache.evictBlock(BlockCacheKey cacheKey)
+Evict block from cache.
+
 
 
 boolean
-InclusiveCombinedBlockCache.evictBlock(BlockCacheKey cacheKey)
+LruBlockCache.evictBlock(BlockCacheKey cacheKey)
       
       
       boolean
      @@ -254,35 +254,35 @@
       
       
 Cacheable
-BlockCache.getBlock(BlockCacheKey cacheKey,
+CombinedBlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics)
-Fetch block from cache.
-
+boolean updateCacheMetrics)
 
 
 Cacheable
-LruBlockCache.getBlock(BlockCacheKey cacheKey,
+InclusiveCombinedBlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics)
-Get the buffer of the block with the specified name.
-
+boolean updateCacheMetrics)
 
 
 Cacheable
-CombinedBlockCache.getBlock(BlockCacheKey cacheKey,
+BlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics)
+boolean updateCacheMetrics)
+Fetch block from cache.
+
 
 
 Cacheable
-InclusiveCombinedBlockCache.getBlock(BlockCacheKey cacheKey,
+LruBlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics)
+boolean updateCacheMetrics)
+Get the buffer of the block with the specified name.
+
       
       
       Cacheable
      @@ -308,6 +308,11 @@
 CombinedBlockCache.getRefCount(BlockCacheKey cacheKey)
       
       
+void
+CombinedBlockCache.returnBlock(BlockCacheKey cacheKey,
+   Cacheable block)
+
+
 default void
 BlockCache.returnBlock(BlockCacheKey cacheKey,
 Cacheable block)
@@ -315,11 +320,6 @@
  is over.
 
 
-
-void
-CombinedBlockCache.returnBlock(BlockCacheKey cacheKey,
-   Cacheable block)
      -
       
       
       
      @@ -497,13 +497,13 @@
       
       
 void
-CachedEntryQueue.add(Map.Entry<BlockCacheKey,BucketCache.BucketEntry> entry)
-Attempt to add the specified entry to this queue.
-
+BucketCache.BucketEntryGroup.add(Map.Entry<BlockCacheKey,BucketCache.BucketEntry> block)
 
 
 void
-BucketCache.BucketEntryGroup.add(Map.Entry<BlockCacheKey,BucketCache.BucketEntry> block)
+CachedEntryQueue.add(Map.Entry<BlockCacheKey,BucketCache.BucketEntry> entry)
+Attempt to add the specified entry to this queue.
+
       
       
       
      
      
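The rows above outline the BlockCache surface: cacheBlock (defaulting to not in-memory), getBlock, and evictBlock, with Lru/Combined/Memcached variants. A hedged, map-backed sketch of that surface, not the real LRU or combined implementations:

    import java.util.concurrent.ConcurrentHashMap;

    // Hedged sketch of the cache operations listed above.
    interface SketchBlockCache<K, V> {
      default void cacheBlock(K key, V buf) {
        cacheBlock(key, buf, false);       // "defaults to not in-memory"
      }
      void cacheBlock(K key, V buf, boolean inMemory);
      V getBlock(K key, boolean caching, boolean repeat, boolean updateCacheMetrics);
      boolean evictBlock(K key);
    }

    class SketchMapBlockCache<K, V> implements SketchBlockCache<K, V> {
      private final ConcurrentHashMap<K, V> blocks = new ConcurrentHashMap<>();

      @Override public void cacheBlock(K key, V buf, boolean inMemory) {
        blocks.put(key, buf);              // in-memory priority ignored here
      }
      @Override public V getBlock(K key, boolean caching, boolean repeat,
          boolean updateCacheMetrics) {
        return blocks.get(key);            // metrics/caching flags ignored here
      }
      @Override public boolean evictBlock(K key) {
        return blocks.remove(key) != null;
      }
    }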


      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
      index bd2f966..3628d68 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/PeerProcedureInterface.PeerOperationType.html
      @@ -152,27 +152,27 @@ the order they are declared.
       
       
       PeerProcedureInterface.PeerOperationType
      -DisablePeerProcedure.getPeerOperationType()
      +RefreshPeerProcedure.getPeerOperationType()
       
       
       PeerProcedureInterface.PeerOperationType
      -RemovePeerProcedure.getPeerOperationType()
      +DisablePeerProcedure.getPeerOperationType()
       
       
       PeerProcedureInterface.PeerOperationType
      -EnablePeerProcedure.getPeerOperationType()
      +UpdatePeerConfigProcedure.getPeerOperationType()
       
       
       PeerProcedureInterface.PeerOperationType
      -RefreshPeerProcedure.getPeerOperationType()
      +AddPeerProcedure.getPeerOperationType()
       
       
       PeerProcedureInterface.PeerOperationType
      -AddPeerProcedure.getPeerOperationType()
      +EnablePeerProcedure.getPeerOperationType()
       
       
       PeerProcedureInterface.PeerOperationType
      -UpdatePeerConfigProcedure.getPeerOperationType()
      +RemovePeerProcedure.getPeerOperationType()
       
       
       private static PeerProcedureInterface.PeerOperationType
      
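Each peer procedure above identifies itself through getPeerOperationType. A hedged sketch of that dispatch shape (the enum constants mirror the procedure names in the listing; the Sketch* types are assumptions):

    // Hedged sketch: procedures self-describe so the scheduler can order them.
    enum SketchPeerOperationType { ADD, REMOVE, ENABLE, DISABLE, UPDATE_CONFIG, REFRESH }

    interface SketchPeerProcedure {
      SketchPeerOperationType getPeerOperationType();
    }

    class SketchAddPeerProcedure implements SketchPeerProcedure {
      @Override
      public SketchPeerOperationType getPeerOperationType() {
        return SketchPeerOperationType.ADD;
      }
    }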
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
      index f7a6279..269bc46 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ProcedurePrepareLatch.html
      @@ -125,11 +125,11 @@
       
       
       private ProcedurePrepareLatch
      -AbstractStateMachineTableProcedure.syncLatch
      +RecoverMetaProcedure.syncLatch
       
       
       private ProcedurePrepareLatch
      -RecoverMetaProcedure.syncLatch
      +AbstractStateMachineTableProcedure.syncLatch
       
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
      index 8b6ceb7..5e8085c 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/ServerProcedureInterface.ServerOperationType.html
      @@ -104,13 +104,13 @@
       
       
       ServerProcedureInterface.ServerOperationType
      -ServerProcedureInterface.getServerOperationType()
      -Given an operation type we can take decisions about what to 
      do with pending operations.
      -
      +ServerCrashProcedure.getServerOperationType()
       
       
       ServerProcedureInterface.ServerOperationType
      -ServerCrashProcedure.getServerOperationType()
      +ServerProcedureInterface.getServerOperationType()
      +Given an operation type we can take decisions about what to 
      do with pending operations.
      +
       
       
       static ServerProcedureInterface.ServerOperationType
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
      --
      diff --git 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
       
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
      index e736f37..046295e 100644
      --- 
      a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
      +++ 
      b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/TableProcedureInterface.TableOperationType.html
      @@ -112,19 +112,19 @@
       
       
       TableProcedureInterface.TableOperationType
      -UnassignProcedure.getTableOperationType()
      +MoveRegionProcedure.getTableOperationType()
       
       
       TableProcedureInterface.TableOperationType
      -MoveRegionProcedure.getTableOperationType()
      +GCMergedRegionsProcedure.getTableOperationType()
       
       
       TableProcedureInterface.TableOperationType
      

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
      --
      diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
      index 7515d7b..3c4825d 100644
      --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
      @@ -762,7 +762,7 @@
       754boolean wasUp = 
      this.clusterStatusTracker.isClusterUp();
       755if (!wasUp) 
      this.clusterStatusTracker.setClusterUp();
       756
      -757LOG.info("Server active/primary 
      master=" + this.serverName +
      +757LOG.info("Active/primary master=" + 
      this.serverName +
       758", sessionid=0x" +
       759
      Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
       760", setting cluster-up flag (Was=" 
      + wasUp + ")");
      @@ -1161,7 +1161,7 @@
       1153   startProcedureExecutor();
       1154
       1155   // Start log cleaner thread
      -1156   int cleanerInterval = 
      conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
      +1156   int cleanerInterval = 
      conf.getInt("hbase.master.cleaner.interval", 600 * 1000);
       1157   this.logCleaner =
       1158  new LogCleaner(cleanerInterval,
       1159 this, conf, 
      getMasterWalManager().getFileSystem(),
      @@ -1227,2368 +1227,2369 @@
 1219procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
       1220
      configurationManager.registerObserver(procEnv);
       1221
      -1222final int numThreads = 
      conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
      -1223
      Math.max(Runtime.getRuntime().availableProcessors(),
      -1224  
      MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS));
      -1225final boolean abortOnCorruption = 
      conf.getBoolean(
      -1226
      MasterProcedureConstants.EXECUTOR_ABORT_ON_CORRUPTION,
      -1227
      MasterProcedureConstants.DEFAULT_EXECUTOR_ABORT_ON_CORRUPTION);
      -1228procedureStore.start(numThreads);
      -1229procedureExecutor.start(numThreads, 
      abortOnCorruption);
      -1230
      procEnv.getRemoteDispatcher().start();
      -1231  }
      -1232
      -1233  private void stopProcedureExecutor() 
      {
      -1234if (procedureExecutor != null) {
      -1235  
      configurationManager.deregisterObserver(procedureExecutor.getEnvironment());
      -1236  
      procedureExecutor.getEnvironment().getRemoteDispatcher().stop();
      -1237  procedureExecutor.stop();
      -1238  procedureExecutor.join();
      -1239  procedureExecutor = null;
      -1240}
      -1241
      -1242if (procedureStore != null) {
      -1243  
      procedureStore.stop(isAborted());
      -1244  procedureStore = null;
      -1245}
      -1246  }
      -1247
      -1248  private void stopChores() {
      -1249if (this.expiredMobFileCleanerChore 
      != null) {
      -1250  
      this.expiredMobFileCleanerChore.cancel(true);
      -1251}
      -1252if (this.mobCompactChore != null) 
      {
      -1253  
      this.mobCompactChore.cancel(true);
      -1254}
      -1255if (this.balancerChore != null) {
      -1256  this.balancerChore.cancel(true);
      -1257}
      -1258if (this.normalizerChore != null) 
      {
      -1259  
      this.normalizerChore.cancel(true);
      -1260}
      -1261if (this.clusterStatusChore != null) 
      {
      -1262  
      this.clusterStatusChore.cancel(true);
      -1263}
      -1264if (this.catalogJanitorChore != 
      null) {
      -1265  
      this.catalogJanitorChore.cancel(true);
      -1266}
      -1267if (this.clusterStatusPublisherChore 
      != null){
      -1268  
      clusterStatusPublisherChore.cancel(true);
      -1269}
      -1270if (this.mobCompactThread != null) 
      {
      -1271  this.mobCompactThread.close();
      -1272}
      -1273
      -1274if (this.quotaObserverChore != null) 
      {
      -1275  quotaObserverChore.cancel();
      -1276}
      -1277if (this.snapshotQuotaChore != null) 
      {
      -1278  snapshotQuotaChore.cancel();
      -1279}
      -1280  }
      -1281
      -1282  /**
      -1283   * @return Get remote side's 
      InetAddress
      -1284   */
      -1285  InetAddress getRemoteInetAddress(final 
      int port,
      -1286  final long serverStartCode) throws 
      UnknownHostException {
      -1287// Do it out here in its own little 
      method so can fake an address when
      -1288// mocking up in tests.
      -1289InetAddress ia = 
      RpcServer.getRemoteIp();
      -1290
      -1291// The call could be from the local 
      regionserver,
      -1292// in which case, there is no remote 
      address.
-1293if (ia == null && serverStartCode == startcode) {
-1294  InetSocketAddress isa = rpcServices.getSocketAddress();
-1295  if (isa != null && isa.getPort() == port) {
      -1296ia = isa.getAddress();
      -1297  }
      -1298}
      -1299return ia;
      -1300  }
      -1301
      -1302  /**
      -1303   * @return Maximum time we should run 
      balancer for
      -1304   */
      -1305  private int getMaxBalancingTime() {
      -1306int maxBalancingTime = 
      getConfiguration().getInt(HConstants.HBASE_BALANCER_MAX_BALANCING, -1);
      -1307if (maxBalancingTime == -1) {
      -1308  // if max balancing time isn't 
      set, defaulting it to period time
      -1309  

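The hunk above changes the fallback for hbase.master.cleaner.interval from 60 * 1000 to 600 * 1000 milliseconds, i.e. from one minute to ten. A hedged sketch of that config read, using the Hadoop Configuration API the surrounding code already uses (SketchCleanerConfig is an illustrative wrapper, not an HBase class):

    import org.apache.hadoop.conf.Configuration;

    // Hedged sketch of the log-cleaner period lookup shown in the diff.
    class SketchCleanerConfig {
      static int cleanerIntervalMillis(Configuration conf) {
        // 600 * 1000 ms = 10 minutes, the new default in the hunk above.
        return conf.getInt("hbase.master.cleaner.interval", 600 * 1000);
      }
    }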
      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
      index b49f000..a0b28cc 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
      @@ -1903,22 +1903,30 @@
       1895
      .getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO,
       1896
      RegionInfo.DEFAULT_REPLICA_ID);
       1897RegionState regionStateNode = 
      getRegionStates().getRegionState(hri);
      -1898if 
      (!regionStateNode.getServerName().equals(serverName)) {
      -1899  return;
      -1900}
      -1901// meta has been assigned to crashed 
      server.
      -1902LOG.info("Meta assigned to crashed " 
      + serverName + "; reassigning...");
      -1903// Handle failure and wake event
      -1904RegionTransitionProcedure rtp = 
      getRegionStates().getRegionTransitionProcedure(hri);
      -1905// Do not need to consider for 
      REGION_TRANSITION_QUEUE step
-1906if (rtp != null && rtp.isMeta() &&
-1907rtp.getTransitionState() == RegionTransitionState.REGION_TRANSITION_DISPATCH) {
      -1908  LOG.debug("Failing " + 
      rtp.toString());
      -1909  
      rtp.remoteCallFailed(master.getMasterProcedureExecutor().getEnvironment(), 
      serverName,
      -1910  new 
      ServerCrashException(rtp.getProcId(), serverName));
      -1911}
      -1912  }
      -1913}
      +1898if (regionStateNode == null) {
      +1899  LOG.warn("RegionStateNode is null 
      for " + hri);
      +1900  return;
      +1901}
+1902ServerName rsnServerName = regionStateNode.getServerName();
+1903if (rsnServerName != null && !rsnServerName.equals(serverName)) {
      +1904  return;
      +1905} else if (rsnServerName == null) 
      {
      +1906  LOG.warn("Empty ServerName in 
      RegionStateNode; proceeding anyways in case latched " +
      +1907  "RecoverMetaProcedure so meta 
      latch gets cleaned up.");
      +1908}
      +1909// meta has been assigned to crashed 
      server.
      +1910LOG.info("Meta assigned to crashed " 
      + serverName + "; reassigning...");
      +1911// Handle failure and wake event
      +1912RegionTransitionProcedure rtp = 
      getRegionStates().getRegionTransitionProcedure(hri);
      +1913// Do not need to consider for 
      REGION_TRANSITION_QUEUE step
+1914if (rtp != null && rtp.isMeta() &&
+1915rtp.getTransitionState() == RegionTransitionState.REGION_TRANSITION_DISPATCH) {
      +1916  LOG.debug("Failing " + 
      rtp.toString());
      +1917  
      rtp.remoteCallFailed(master.getMasterProcedureExecutor().getEnvironment(), 
      serverName,
      +1918  new 
      ServerCrashException(rtp.getProcId(), serverName));
      +1919}
      +1920  }
      +1921}
       
       
       
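The patch above replaces an unguarded regionStateNode.getServerName().equals(serverName) with explicit null handling, so that a latched RecoverMetaProcedure can still get its meta latch cleaned up. A hedged distillation of that decision logic (stand-in types; not the actual AssignmentManager code):

    // Hedged sketch of the null-guarding the patch introduces.
    class SketchMetaGuard {
      static boolean shouldReassignMeta(Object regionStateNode, String rsnServer,
          String crashedServer) {
        if (regionStateNode == null) {
          return false;              // no state for meta yet: nothing to fail over
        }
        if (rsnServer != null && !rsnServer.equals(crashedServer)) {
          return false;              // meta is hosted elsewhere, nothing to do
        }
        // rsnServer == null: proceed anyway so a latched RecoverMetaProcedure
        // still gets cleaned up, as the added warning explains.
        return true;
      }
    }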
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
      index b49f000..a0b28cc 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
      @@ -1903,22 +1903,30 @@
       1895
      .getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO,
       1896
      RegionInfo.DEFAULT_REPLICA_ID);
       1897RegionState regionStateNode = 
      getRegionStates().getRegionState(hri);
      -1898if 
      (!regionStateNode.getServerName().equals(serverName)) {
      -1899  return;
      -1900}
      -1901// meta has been assigned to crashed 
      server.
      -1902LOG.info("Meta assigned to crashed " 
      + serverName + "; reassigning...");
      -1903// Handle failure and wake event
      -1904RegionTransitionProcedure rtp = 
      getRegionStates().getRegionTransitionProcedure(hri);
      -1905// Do not need to consider for 
      REGION_TRANSITION_QUEUE step
-1906if (rtp != null && rtp.isMeta() &&
-1907rtp.getTransitionState() == RegionTransitionState.REGION_TRANSITION_DISPATCH) {
      -1908  LOG.debug("Failing " + 
      rtp.toString());
      -1909  
      rtp.remoteCallFailed(master.getMasterProcedureExecutor().getEnvironment(), 
      serverName,
      -1910  new 
      ServerCrashException(rtp.getProcId(), serverName));
      -1911}
      -1912  }
      -1913}
      +1898if (regionStateNode == null) {
      +1899  LOG.warn("RegionStateNode is null 
      for " + hri);
      +1900  return;
      +1901}
      +1902 

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.FakeServer.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.FakeServer.html
       
      b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.FakeServer.html
      index 56d710b..7412089 100644
      --- 
      a/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.FakeServer.html
      +++ 
      b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientNoCluster.FakeServer.html
      @@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
       
       
       
-static class TestClientNoCluster.FakeServer
+static class TestClientNoCluster.FakeServer
 extends Object
 implements org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface
 Fake 'server'.
      @@ -272,7 +272,7 @@ implements 
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Client
       
       
 multiInvocationsCount
-private AtomicInteger multiInvocationsCount
+private AtomicInteger multiInvocationsCount
       
       
       
      @@ -281,7 +281,7 @@ implements 
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Client
       
       
 meta
-private final SortedMap<byte[],org.apache.hadoop.hbase.util.Pair<org.apache.hadoop.hbase.HRegionInfo,org.apache.hadoop.hbase.ServerName>> meta
+private final SortedMap<byte[],org.apache.hadoop.hbase.util.Pair<org.apache.hadoop.hbase.HRegionInfo,org.apache.hadoop.hbase.ServerName>> meta
       
       
       
      @@ -290,7 +290,7 @@ implements 
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Client
       
       
 sequenceids
-private final AtomicLong sequenceids
+private final AtomicLong sequenceids
       
       
       
      @@ -299,7 +299,7 @@ implements 
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Client
       
       
 multiPause
-private final long multiPause
+private final long multiPause
       
       
       
      @@ -308,7 +308,7 @@ implements 
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Client
       
       
 tooManyMultiRequests
-private final int tooManyMultiRequests
+private final int tooManyMultiRequests
       
       
       
      @@ -325,7 +325,7 @@ implements 
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Client
       
       
 FakeServer
-FakeServer(org.apache.hadoop.conf.Configuration c,
+FakeServer(org.apache.hadoop.conf.Configuration c,
 SortedMap<byte[],org.apache.hadoop.hbase.util.Pair<org.apache.hadoop.hbase.HRegionInfo,org.apache.hadoop.hbase.ServerName>> meta,
 AtomicLong sequenceids)
       
      @@ -344,7 +344,7 @@ implements 
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Client
       
       
 get
-public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse get(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
+public org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse get(org.apache.hbase.thirdparty.com.google.protobuf.RpcController controller,
 org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest request)
 throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException
       
      @@ -361,7 +361,7 @@ implements 
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Client
       
       
 doGetResponse
-private org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse doGetResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest request)
      

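TestClientNoCluster.FakeServer above implements the client RPC interface entirely in memory, counting multi invocations so retry behavior can be asserted without a cluster. A hedged sketch of that test-double idea (SketchClientService stands in for the generated BlockingInterface):

    import java.util.concurrent.atomic.AtomicInteger;

    // Hedged sketch: an in-memory fake of the client-facing RPC surface.
    interface SketchClientService {
      String get(String row);
    }

    class SketchFakeServer implements SketchClientService {
      private final AtomicInteger invocations = new AtomicInteger();

      @Override
      public String get(String row) {
        invocations.incrementAndGet();   // mirrors multiInvocationsCount above
        return "value-for-" + row;       // canned response, no region server
      }
    }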
      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html
      --
      diff --git 
      a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html
       
      b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html
      index add44d1..efa6d95 100644
      --- 
      a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html
      +++ 
      b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/TableBackupClient.html
      @@ -43,13 +43,13 @@
       035import 
      org.apache.hadoop.hbase.backup.BackupType;
       036import 
      org.apache.hadoop.hbase.backup.HBackupFileSystem;
       037import 
      org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
      -038import 
      org.apache.yetus.audience.InterfaceAudience;
      -039import org.slf4j.Logger;
      -040import org.slf4j.LoggerFactory;
      -041import 
      org.apache.hadoop.hbase.client.Admin;
      -042import 
      org.apache.hadoop.hbase.client.Connection;
      -043import 
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
      -044import 
      org.apache.hadoop.hbase.util.FSUtils;
      +038import 
      org.apache.hadoop.hbase.client.Admin;
      +039import 
      org.apache.hadoop.hbase.client.Connection;
      +040import 
      org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
      +041import 
      org.apache.hadoop.hbase.util.FSUtils;
      +042import 
      org.apache.yetus.audience.InterfaceAudience;
      +043import org.slf4j.Logger;
      +044import org.slf4j.LoggerFactory;
       045
       046import 
      org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
       047
      @@ -88,360 +88,355 @@
       080  }
       081
       082  public void init(final Connection conn, 
      final String backupId, BackupRequest request)
      -083  throws IOException
      -084  {
      -085if (request.getBackupType() == 
      BackupType.FULL) {
      -086  backupManager = new 
      BackupManager(conn, conn.getConfiguration());
      -087} else {
      -088  backupManager = new 
      IncrementalBackupManager(conn, conn.getConfiguration());
      -089}
      -090this.backupId = backupId;
      -091this.tableList = 
      request.getTableList();
      -092this.conn = conn;
      -093this.conf = 
      conn.getConfiguration();
      -094this.fs = 
      FSUtils.getCurrentFileSystem(conf);
      -095backupInfo =
      -096
      backupManager.createBackupInfo(backupId, request.getBackupType(), tableList,
      -097  request.getTargetRootDir(), 
      request.getTotalTasks(), request.getBandwidth());
      -098if (tableList == null || 
      tableList.isEmpty()) {
-099  this.tableList = new ArrayList<>(backupInfo.getTables());
      -100}
      -101// Start new session
      -102backupManager.startBackupSession();
      -103  }
      -104
      -105  /**
      -106   * Begin the overall backup.
      -107   * @param backupInfo backup info
      -108   * @throws IOException exception
      -109   */
      -110  protected void 
      beginBackup(BackupManager backupManager, BackupInfo backupInfo)
      -111  throws IOException {
      -112
      -113BackupSystemTable.snapshot(conn);
      -114
      backupManager.setBackupInfo(backupInfo);
      -115// set the start timestamp of the 
      overall backup
      -116long startTs = 
      EnvironmentEdgeManager.currentTime();
      -117backupInfo.setStartTs(startTs);
      -118// set overall backup status: 
      ongoing
      -119
      backupInfo.setState(BackupState.RUNNING);
      -120
      backupInfo.setPhase(BackupPhase.REQUEST);
      -121LOG.info("Backup " + 
      backupInfo.getBackupId() + " started at " + startTs + ".");
      -122
      -123
      backupManager.updateBackupInfo(backupInfo);
      -124if (LOG.isDebugEnabled()) {
      -125  LOG.debug("Backup session " + 
      backupInfo.getBackupId() + " has been started.");
      -126}
      -127  }
      -128
      -129  protected String getMessage(Exception 
      e) {
      -130String msg = e.getMessage();
      -131if (msg == null || msg.equals("")) 
      {
      -132  msg = e.getClass().getName();
      -133}
      -134return msg;
      -135  }
      -136
      -137  /**
      -138   * Delete HBase snapshot for backup.
      -139   * @param backupInfo backup info
      -140   * @throws Exception exception
      -141   */
      -142  protected static void 
      deleteSnapshots(final Connection conn, BackupInfo backupInfo, Configuration 
      conf)
      -143  throws IOException {
      -144LOG.debug("Trying to delete snapshot 
      for full backup.");
      -145for (String snapshotName : 
      backupInfo.getSnapshotNames()) {
      -146  if (snapshotName == null) {
      -147continue;
      -148  }
      -149  LOG.debug("Trying to delete 
      snapshot: " + snapshotName);
      -150
      -151  try (Admin admin = conn.getAdmin()) 
      {
      -152
      admin.deleteSnapshot(snapshotName);
      -153  }
      -154  LOG.debug("Deleting the snapshot " 
      + snapshotName + " for backup " + backupInfo.getBackupId()
      -155  + " succeeded.");
      -156}
      -157  }
      -158
      -159  /**
      -160   * Clean up directories with prefix 
      "exportSnapshot-", which are generated when exporting
      -161   * snapshots.
      -162   * @throws IOException exception
      -163   */
      -164  protected static void 
      cleanupExportSnapshotLog(Configuration conf) throws IOException {
      -165FileSystem fs = 
      FSUtils.getCurrentFileSystem(conf);
      -166Path stagingDir =
      

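deleteSnapshots() above skips null snapshot names and obtains a fresh Admin per deletion in try-with-resources. A condensed, hedged restatement using the Connection/Admin API visible in the listing (SketchSnapshotCleaner is an illustrative wrapper):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    // Hedged sketch of the snapshot cleanup loop shown above.
    class SketchSnapshotCleaner {
      static void deleteSnapshots(Connection conn, Iterable<String> names)
          throws IOException {
        for (String name : names) {
          if (name == null) {
            continue;                    // the backup client skips null names too
          }
          try (Admin admin = conn.getAdmin()) {
            admin.deleteSnapshot(name);  // same call the backup client makes
          }
        }
      }
    }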
      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestVersionModel.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestVersionModel.html 
      b/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestVersionModel.html
      index c64e82d..a60f9ae 100644
      --- a/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestVersionModel.html
      +++ b/testdevapidocs/org/apache/hadoop/hbase/rest/model/TestVersionModel.html
      @@ -100,12 +100,6 @@ var activeTableTab = "activeTableTab";
 java.lang.Object
       
       
      -junit.framework.Assert
      -
      -
      -junit.framework.TestCase
      -
      -
 org.apache.hadoop.hbase.rest.model.TestModelBase<org.apache.hadoop.hbase.rest.model.VersionModel>
       
       
      @@ -115,20 +109,12 @@ var activeTableTab = "activeTableTab";
       
       
       
      -
      -
      -
      -
       
       
       
      -
      -All Implemented Interfaces:
      -junit.framework.Test
      -
       
       
-public class TestVersionModel
+public class TestVersionModel
 extends TestModelBase<org.apache.hadoop.hbase.rest.model.VersionModel>
       
       
      @@ -224,18 +210,11 @@ extends fromJSON,
       fromPB,
       fromXML,
       testBuildModel,
       testFromJSON,
       testFromPB,
       testFromXML,
       testToJSON,
       testToXML, toJSON,
       toPB,
       toXML
       
       
      -
      -
      -
-Methods inherited from class junit.framework.TestCase
      -assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, 
      assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, 
      assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, 
      assertEquals, assertEquals, assertEquals, assertEquals, assertEquals, 
      assertFalse, assertFalse, assertNotNull, assertNotNull, assertNotSame, 
      assertNotSame, assertNull, assertNull, assertSame, assertSame, assertTrue, 
      assertTrue, countTestCases, createResult, fail, fail, failNotEquals, 
      failNotSame, failSame, format, getName, run, run, runBare, runTest, setName, 
      setUp, tearDown, toString
      -
      -
       
       
       
 Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

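The diff above drops junit.framework.Assert and junit.framework.TestCase from TestVersionModel's ancestry, i.e. the JUnit 3 inheritance style gives way to JUnit 4. A hedged sketch of the resulting shape (the class and test names here are illustrative):

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    // Hedged sketch: assertions come from static imports, not an inherited TestCase.
    public class SketchVersionModelTest {
      @Test
      public void testRoundTrip() {
        String model = "version-model";
        assertEquals("version-model", model);  // placeholder assertion
      }
    }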
      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
      --
      diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html 
      b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
      index 5e420c2..7639257 100644
      --- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
      +++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
      @@ -18,7 +18,7 @@
       catch(err) {
       }
       //-->
      -var methods = 
      {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":9,"i43":9,"i44":9,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":9,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":9,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":9,"i106":10,"i107":10,"i108":10,"i109":10
       
      ,"i110":10,"i111":10,"i112":41,"i113":41,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":9,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":42,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":9,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":9,"i177":10,"i178":10,"i179":9,"i180":9,"i181":9,"i182":9,"i183":9,"i184":9,"i185":9,"i186":9,"i187":9,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":9,"i198":10,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i209":10,"i210":10
       
      ,"i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10,"i228":9,"i229":9,"i230":10,"i231":10,"i232":10,"i233":10,"i234":10,"i235":10,"i236":10,"i237":10,"i238":10,"i239":10,"i240":10,"i241":9,"i242":10,"i243":10,"i244":10,"i245":10,"i246":10,"i247":10,"i248":10,"i249":10,"i250":10,"i251":10,"i252":10,"i253":10,"i254":10,"i255":9,"i256":10,"i257":10,"i258":10,"i259":10};
      +var methods = 
      {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":9,"i43":9,"i44":9,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":9,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":9,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":9,"i106":10,"i107":10,"i108":10,"i109":10
       
      

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSRestartExecutor.html
      --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSRestartExecutor.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSRestartExecutor.html
index f1db5ca..d8515d7 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSRestartExecutor.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSRestartExecutor.html
@@ -32,813 +32,820 @@
 024import static org.junit.Assert.fail;
 025
 026import java.io.IOException;
-027import java.net.SocketTimeoutException;
-028import java.util.NavigableMap;
-029import java.util.Random;
-030import java.util.Set;
-031import java.util.SortedSet;
-032import java.util.concurrent.ConcurrentSkipListMap;
-033import java.util.concurrent.ConcurrentSkipListSet;
-034import java.util.concurrent.ExecutionException;
-035import java.util.concurrent.Executors;
-036import java.util.concurrent.Future;
-037import java.util.concurrent.ScheduledExecutorService;
-038import java.util.concurrent.TimeUnit;
-039
-040import org.apache.hadoop.conf.Configuration;
-041import org.apache.hadoop.hbase.CategoryBasedTimeout;
-042import org.apache.hadoop.hbase.DoNotRetryIOException;
-043import org.apache.hadoop.hbase.HBaseTestingUtility;
-044import org.apache.hadoop.hbase.NotServingRegionException;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.TableName;
-047import org.apache.hadoop.hbase.client.RegionInfo;
-048import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-049import org.apache.hadoop.hbase.client.RetriesExhaustedException;
-050import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-051import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-052import org.apache.hadoop.hbase.master.MasterServices;
-053import org.apache.hadoop.hbase.master.RegionState.State;
-054import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
-055import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-056import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
-057import org.apache.hadoop.hbase.procedure2.Procedure;
-058import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-059import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-060import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
-061import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-062import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
-063import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-064import org.apache.hadoop.hbase.testclassification.MasterTests;
-065import org.apache.hadoop.hbase.testclassification.MediumTests;
-066import org.apache.hadoop.hbase.util.Bytes;
-067import org.apache.hadoop.hbase.util.FSUtils;
-068import org.apache.hadoop.ipc.RemoteException;
-069import org.junit.After;
-070import org.junit.Before;
-071import org.junit.Ignore;
-072import org.junit.Rule;
-073import org.junit.Test;
-074import org.junit.experimental.categories.Category;
-075import org.junit.rules.ExpectedException;
-076import org.junit.rules.TestName;
-077import org.junit.rules.TestRule;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-081import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
-092
-093@Category({MasterTests.class, MediumTests.class})
-094public class TestAssignmentManager {
-095  private static final Logger LOG = 
      

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScannable.html
      --
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScannable.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScannable.html
index 232ef56..bc3a6d0 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScannable.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestCellUtil.TestCellScannable.html
@@ -29,610 +29,626 @@
 021import static org.junit.Assert.assertEquals;
 022import static org.junit.Assert.assertFalse;
 023import static org.junit.Assert.assertTrue;
-024
-025import java.io.ByteArrayOutputStream;
-026import java.io.IOException;
-027import java.math.BigDecimal;
-028import java.nio.ByteBuffer;
-029import java.util.ArrayList;
-030import java.util.List;
-031import java.util.NavigableMap;
-032import java.util.TreeMap;
-033import org.apache.hadoop.hbase.testclassification.MiscTests;
-034import org.apache.hadoop.hbase.testclassification.SmallTests;
-035import org.apache.hadoop.hbase.util.Bytes;
-036import org.junit.Assert;
-037import org.junit.Test;
-038import org.junit.experimental.categories.Category;
-039
-040@Category({MiscTests.class, SmallTests.class})
-041public class TestCellUtil {
-042  /**
-043   * CellScannable used in test. Returns a {@link TestCellScanner}
-044   */
-045  private static class TestCellScannable implements CellScannable {
-046    private final int cellsCount;
-047    TestCellScannable(final int cellsCount) {
-048      this.cellsCount = cellsCount;
-049    }
-050    @Override
-051    public CellScanner cellScanner() {
-052      return new TestCellScanner(this.cellsCount);
-053    }
-054  }
-055
-056  /**
-057   * CellScanner used in test.
-058   */
-059  private static class TestCellScanner implements CellScanner {
-060    private int count = 0;
-061    private Cell current = null;
-062    private final int cellsCount;
-063
-064    TestCellScanner(final int cellsCount) {
-065      this.cellsCount = cellsCount;
-066    }
-067
-068    @Override
-069    public Cell current() {
-070      return this.current;
-071    }
-072
-073    @Override
-074    public boolean advance() throws IOException {
-075      if (this.count < cellsCount) {
-076        this.current = new TestCell(this.count);
-077        this.count++;
-078        return true;
-079      }
-080      return false;
-081    }
-082  }
-083
-084  /**
-085   * Cell used in test. Has row only.
-086   */
-087  private static class TestCell implements Cell {
-088    private final byte [] row;
-089
-090    TestCell(final int i) {
-091      this.row = Bytes.toBytes(i);
-092    }
-093
-094    @Override
-095    public byte[] getRowArray() {
-096      return this.row;
-097    }
-098
-099    @Override
-100    public int getRowOffset() {
-101      return 0;
-102    }
-103
-104    @Override
-105    public short getRowLength() {
-106      return (short)this.row.length;
-107    }
-108
-109    @Override
-110    public byte[] getFamilyArray() {
-111      // TODO Auto-generated method stub
-112      return null;
-113    }
-114
-115    @Override
-116    public int getFamilyOffset() {
-117      // TODO Auto-generated method stub
-118      return 0;
-119    }
-120
-121    @Override
-122    public byte getFamilyLength() {
-123      // TODO Auto-generated method stub
-124      return 0;
-125    }
-126
-127    @Override
-128    public byte[] getQualifierArray() {
-129      // TODO Auto-generated method stub
-130      return null;
-131    }
-132
-133    @Override
-134    public int getQualifierOffset() {
-135      // TODO Auto-generated method stub
-136      return 0;
-137    }
-138
-139    @Override
-140    public int getQualifierLength() {
-141      // TODO Auto-generated method stub
-142      return 0;
-143    }
-144
-145    @Override
-146    public long getTimestamp() {
-147      // TODO Auto-generated method stub
-148      return 0;
-149    }
-150
-151    @Override
-152    public byte getTypeByte() {
-153      // TODO Auto-generated method stub
-154      return 0;
-155    }
-156
-157    @Override
-158    public byte[] getValueArray() {
-159      // TODO Auto-generated method stub
-160      return null;
-161    }
-162
-163    @Override
-164    public int getValueOffset() {
-165      // TODO Auto-generated method stub
-166      return 0;
-167    }
-168
-169    @Override
-170    public int getValueLength() {
-171      // TODO Auto-generated method stub
-172      return 0;
-173    }
-174
-175    @Override
-176    public byte[] getTagsArray() {
-177      // TODO Auto-generated method stub
-178      return null;
-179    }
-180
-181    @Override
-182    public int getTagsOffset() {
-183      // TODO Auto-generated method stub
-184      return 0;
-185    }
-186
-187    @Override
-188    public long 
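
As a reading aid for the hunk above: the removed TestCellScannable/TestCellScanner pair implements the standard CellScanner contract (call advance(), then read current()). A minimal driver sketch using only the API shown in the removed code (the cell count of 3 is arbitrary):

    // Walks a CellScannable and counts the cells it yields.
    static int countCells(CellScannable scannable) throws IOException {
      CellScanner scanner = scannable.cellScanner();
      int seen = 0;
      while (scanner.advance()) { // returns false once cellsCount cells were produced
        Cell cell = scanner.current();
        seen++;
      }
      return seen;
    }
    // countCells(new TestCellScannable(3)) would return 3.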

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.AlwaysIncludeFilter.html
      --
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.AlwaysIncludeFilter.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.AlwaysIncludeFilter.html
new file mode 100644
index 000..b2f85ca
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.AlwaysIncludeFilter.html
@@ -0,0 +1,339 @@
+TestUserScanQueryMatcher.AlwaysIncludeFilter (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+var methods = {"i0":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+org.apache.hadoop.hbase.regionserver.querymatcher
+Class TestUserScanQueryMatcher.AlwaysIncludeFilter
+
+java.lang.Object
+  org.apache.hadoop.hbase.filter.Filter
+    org.apache.hadoop.hbase.filter.FilterBase
+      org.apache.hadoop.hbase.regionserver.querymatcher.TestUserScanQueryMatcher.AlwaysIncludeFilter
+
+Enclosing class:
+TestUserScanQueryMatcher
+
+private class TestUserScanQueryMatcher.AlwaysIncludeFilter
+extends org.apache.hadoop.hbase.filter.FilterBase
+
+Nested Class Summary
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.filter.Filter
+org.apache.hadoop.hbase.filter.Filter.ReturnCode
+
+Field Summary
+Fields inherited from class org.apache.hadoop.hbase.filter.Filter
+reversed
+
+Constructor Summary
+Constructors
+Modifier  Constructor and Description
+private   AlwaysIncludeFilter()
+
+Method Summary
+All Methods  Instance Methods  Concrete Methods
+Modifier and Type                                   Method and Description
+org.apache.hadoop.hbase.filter.Filter.ReturnCode    filterKeyValue(org.apache.hadoop.hbase.Cell c)
+
+Methods inherited from class org.apache.hadoop.hbase.filter.FilterBase
+createFilterFromArguments, filterAllRemaining, filterRow, filterRowCells, filterRowKey, filterRowKey, getNextCellHint, hasFilterRow, isFamilyEssential, reset, toByteArray, toString, transformCell
+
+Methods inherited from class org.apache.hadoop.hbase.filter.Filter
+filterCell, isReversed, parseFrom, setReversed
+
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, 

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.html b/devapidocs/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.html
index 4d445d2..57647d4 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.html
@@ -49,7 +49,7 @@ var activeTableTab = "activeTableTab";
       
       
       
      -PrevClass
      +PrevClass
       NextClass
       
       
      @@ -397,7 +397,7 @@ extends 
       
      -PrevClass
      +PrevClass
       NextClass
       
       
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html b/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html
deleted file mode 100644
index 670cadd..000
--- a/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/ReplicationZKNodeCleaner.ReplicationQueueDeletor.html
+++ /dev/null
@@ -1,165 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleaner.ReplicationQueueDeletor (Apache HBase 3.0.0-SNAPSHOT API)
-
-Uses of Class org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleaner.ReplicationQueueDeletor
-
-Packages that use ReplicationZKNodeCleaner.ReplicationQueueDeletor
-Package                                  Description
-org.apache.hadoop.hbase.master.cleaner
-
-Uses of ReplicationZKNodeCleaner.ReplicationQueueDeletor in org.apache.hadoop.hbase.master.cleaner
-Fields in org.apache.hadoop.hbase.master.cleaner declared as ReplicationZKNodeCleaner.ReplicationQueueDeletor
-Modifier and Type                                          Field and Description
-private ReplicationZKNodeCleaner.ReplicationQueueDeletor   ReplicationZKNodeCleaner.queueDeletor
-
-Copyright 2007-2018 The Apache Software Foundation. All rights reserved.
      
      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/ReplicationZKNodeCleaner.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/ReplicationZKNodeCleaner.html b/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/ReplicationZKNodeCleaner.html
deleted file mode 100644
index f10364b..000
--- a/devapidocs/org/apache/hadoop/hbase/master/cleaner/class-use/ReplicationZKNodeCleaner.html
+++ /dev/null
@@ -1,200 +0,0 @@
-Uses of Class org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleaner (Apache HBase 3.0.0-SNAPSHOT API)
-
-var methods = {"i0":9,"i1":9};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],8:["t4","Concrete Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-org.apache.hadoop.hbase
-Enum ClusterStatus.Option
-
-java.lang.Object
-  java.lang.Enum<ClusterStatus.Option>
-    org.apache.hadoop.hbase.ClusterStatus.Option
-
-All Implemented Interfaces:
-Serializable, Comparable<ClusterStatus.Option>
-
-Enclosing class:
-ClusterStatus
-
-public static enum ClusterStatus.Option
-extends Enum<ClusterStatus.Option>
-Kinds of ClusterStatus
-
-Enum Constant Summary
-Enum Constants
-Enum Constant and Description
-BACKUP_MASTERS         status about master
-BALANCER_ON            status about cluster id
-CLUSTER_ID             status about hbase version
-DEAD_SERVERS           status about live region servers
-HBASE_VERSION
-LIVE_SERVERS           status about balancer is on or not
-MASTER                 status about dead region servers
-MASTER_COPROCESSORS    status about backup masters
-MASTER_INFO_PORT       status about regions in transition
-REGIONS_IN_TRANSITION  status about master coprocessors
-
-Method Summary
-All Methods  Static Methods  Concrete Methods
-Modifier and Type              Method and Description
-static ClusterStatus.Option    valueOf(String name)
-                               Returns the enum constant of this type with the specified name.
-static ClusterStatus.Option[]  values()
-                               Returns an array containing the constants of this enum type, in the order they are declared.
-
-Methods inherited from class java.lang.Enum
-clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, 

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupManifest.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupManifest.html b/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupManifest.html
index 621b7a0..d5e6cae 100644
--- a/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupManifest.html
+++ b/devapidocs/org/apache/hadoop/hbase/backup/impl/BackupManifest.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
       
       
 @InterfaceAudience.Private
-public class BackupManifest
+public class BackupManifest
 extends Object
 Backup manifest contains all the metadata of a backup image. The manifest info will be bundled
  as a manifest file together with data, so that each backup image will contain all the info needed
@@ -161,7 +161,7 @@ extends Object
 backupImage
 
 
-private static org.apache.commons.logging.Log
+private static org.slf4j.Logger
 LOG
 
 
@@ -328,7 +328,7 @@ extends Object
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -337,7 +337,7 @@ extends Object
 
 
 MANIFEST_FILE_NAME
-public static final String MANIFEST_FILE_NAME
+public static final String MANIFEST_FILE_NAME
 
 See Also:
 Constant Field Values
@@ -350,7 +350,7 @@ extends Object
 
 
 tableBackupDir
-private String tableBackupDir
+private String tableBackupDir
 
 
 
@@ -359,7 +359,7 @@ extends Object
 
 
 backupImage
-private BackupManifest.BackupImage backupImage
+private BackupManifest.BackupImage backupImage
 
 
 
@@ -376,7 +376,7 @@ extends Object
 
 
 BackupManifest
-public BackupManifest(BackupInfo backup)
+public BackupManifest(BackupInfo backup)
 Construct manifest for an ongoing backup.
 
 Parameters:
@@ -390,7 +390,7 @@ extends Object
 
 
 BackupManifest
-public BackupManifest(BackupInfo backup,
+public BackupManifest(BackupInfo backup,
    TableName table)
 Construct a table level manifest for a backup of the named table.
 
@@ -405,7 +405,7 @@ extends Object
 
 
 BackupManifest
-public BackupManifest(org.apache.hadoop.conf.Configuration conf,
+public BackupManifest(org.apache.hadoop.conf.Configuration conf,
    org.apache.hadoop.fs.Path backupPath)
 throws IOException
 Construct manifest from a backup directory.
@@ -424,7 +424,7 @@ extends Object
 
 
 BackupManifest
-public BackupManifest(org.apache.hadoop.fs.FileSystem fs,
+public BackupManifest(org.apache.hadoop.fs.FileSystem fs,
    org.apache.hadoop.fs.Path backupPath)
 throws BackupException
 Construct manifest from a backup directory.
@@ -451,7 +451,7 @@ extends Object
 
 
 getType
-public BackupType getType()
+public BackupType getType()
 
 
 
@@ -460,7 +460,7 @@ extends Object
 
 
 getTableList
-public List<TableName> getTableList()
+public List<TableName> getTableList()
 Get the table set of this image.
 
 Returns:
@@ -474,7 +474,7 @@ extends Object
 
 
 store
-public void store(org.apache.hadoop.conf.Configuration conf)
+public void store(org.apache.hadoop.conf.Configuration conf)
 throws BackupException
 TODO: fix it. Persist the manifest file.
 
@@ -490,7 +490,7 @@ extends Object
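
A minimal usage sketch for the BackupManifest constructors and accessors documented above (the backup path is a placeholder, error handling is trimmed, and HBaseConfiguration.create() is assumed as the usual way to obtain a Configuration):

    org.apache.hadoop.conf.Configuration conf = org.apache.hadoop.hbase.HBaseConfiguration.create();
    org.apache.hadoop.fs.Path backupPath = new org.apache.hadoop.fs.Path("/backups/backup_1234"); // placeholder
    BackupManifest manifest = new BackupManifest(conf, backupPath); // throws IOException on a bad directory
    BackupType type = manifest.getType();
    java.util.List<TableName> tables = manifest.getTableList(); // tables covered by this image
    manifest.store(conf); // persist the manifest file (throws BackupException)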
       
       
       

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dad9a249/devapidocs/org/apache/hadoop/hbase/thrift/ThriftServerRunner.HBaseHandler.html
      --
diff --git a/devapidocs/org/apache/hadoop/hbase/thrift/ThriftServerRunner.HBaseHandler.html b/devapidocs/org/apache/hadoop/hbase/thrift/ThriftServerRunner.HBaseHandler.html
index ff18b00..c53e12e 100644
--- a/devapidocs/org/apache/hadoop/hbase/thrift/ThriftServerRunner.HBaseHandler.html
+++ b/devapidocs/org/apache/hadoop/hbase/thrift/ThriftServerRunner.HBaseHandler.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
       
       
       
-public static class ThriftServerRunner.HBaseHandler
+public static class ThriftServerRunner.HBaseHandler
 extends Object
 implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 The HBaseHandler is a glue object that connects Thrift RPC calls to the
@@ -721,7 +721,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 conf
-protected org.apache.hadoop.conf.Configuration conf
+protected org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -730,7 +730,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 LOG
-protected static final org.apache.commons.logging.Log LOG
+protected static final org.apache.commons.logging.Log LOG
 
 
 
@@ -739,7 +739,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 nextScannerId
-protected int nextScannerId
+protected int nextScannerId
 
 
 
@@ -748,7 +748,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 scannerMap
-protected HashMap<Integer,ThriftServerRunner.ResultScannerWrapper> scannerMap
+protected HashMap<Integer,ThriftServerRunner.ResultScannerWrapper> scannerMap
 
 
 
@@ -757,7 +757,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 metrics
-private ThriftMetrics metrics
+private ThriftMetrics metrics
 
 
 
@@ -766,7 +766,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 connectionCache
-private final ConnectionCache connectionCache
+private final ConnectionCache connectionCache
 
 
 
@@ -775,7 +775,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 coalescer
-IncrementCoalescer coalescer
+IncrementCoalescer coalescer
 
 
 
@@ -784,7 +784,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 CLEANUP_INTERVAL
-static final String CLEANUP_INTERVAL
+static final String CLEANUP_INTERVAL
 
 See Also:
 Constant Field Values
@@ -797,7 +797,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 MAX_IDLETIME
-static final String MAX_IDLETIME
+static final String MAX_IDLETIME
 
 See Also:
 Constant Field Values
@@ -818,7 +818,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 HBaseHandler
-protected HBaseHandler(org.apache.hadoop.conf.Configuration c,
+protected HBaseHandler(org.apache.hadoop.conf.Configuration c,
    UserProvider userProvider)
 throws IOException
 
@@ -841,7 +841,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 getAllColumns
-byte[][] getAllColumns(Table table)
+byte[][] getAllColumns(Table table)
 throws IOException
 Returns a list of all the column families for a given Table.
 
@@ -858,7 +858,7 @@ implements org.apache.hadoop.hbase.thrift.generated.Hbase.Iface
 
 
 getTable
-public Table getTable(byte[] tableName)
+public Table getTable(byte[] tableName)
 throws IOException

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.Base64InputStream.html
      --
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.Base64InputStream.html b/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.Base64InputStream.html
index 6646ba1..70481ce 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.Base64InputStream.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/util/Base64.Base64InputStream.html
@@ -43,1651 +43,1652 @@
 035import java.io.OutputStream;
 036import java.io.Serializable;
 037import java.io.UnsupportedEncodingException;
-038import java.util.zip.GZIPInputStream;
-039import java.util.zip.GZIPOutputStream;
-040
-041import org.apache.commons.logging.Log;
-042import org.apache.commons.logging.LogFactory;
-043import org.apache.yetus.audience.InterfaceAudience;
-044
-045/**
-046 * Encodes and decodes to and from Base64 notation.
-047 *
-048 * <p>
-049 * Homepage: <a href="http://iharder.net/base64">http://iharder.net/base64</a>.
-050 * </p>
-051 *
-052 * <p>
-053 * Change Log:
-054 * </p>
-055 * <ul>
-056 *   <li>v2.2.1 - Fixed bug using URL_SAFE and ORDERED encodings. Fixed bug
-057 *     when using very small files (~&lt; 40 bytes).</li>
-058 *   <li>v2.2 - Added some helper methods for encoding/decoding directly from
-059 *     one file to the next. Also added a main() method to support command
-060 *     line encoding/decoding from one file to the next. Also added these
-061 *     Base64 dialects:
-062 *     <ol>
-063 *       <li>The default is RFC3548 format.</li>
-064 *       <li>Using Base64.URLSAFE generates URL and file name friendly format as
-065 *         described in Section 4 of RFC3548.
-066 *         http://www.faqs.org/rfcs/rfc3548.html</li>
-067 *       <li>Using Base64.ORDERED generates URL and file name friendly format
-068 *         that preserves lexical ordering as described in
-069 *         http://www.faqs.org/qa/rfcc-1940.html</li>
-070 *     </ol>
-071 *     <p>
-072 *     Special thanks to Jim Kellerman at <a href="http://www.powerset.com/">
-073 *     http://www.powerset.com/</a> for contributing the new Base64 dialects.
-074 *   </li>
-075 *
-076 *   <li>v2.1 - Cleaned up javadoc comments and unused variables and methods.
-077 *     Added some convenience methods for reading and writing to and from files.
-078 *   </li>
-079 *   <li>v2.0.2 - Now specifies UTF-8 encoding in places where the code fails on
-080 *     systems with other encodings (like EBCDIC).</li>
-081 *   <li>v2.0.1 - Fixed an error when decoding a single byte, that is, when the
-082 *     encoded data was a single byte.</li>
-083 *   <li>v2.0 - I got rid of methods that used booleans to set options. Now
-084 *     everything is more consolidated and cleaner. The code now detects when
-085 *     data that's being decoded is gzip-compressed and will decompress it
-086 *     automatically. Generally things are cleaner. You'll probably have to
-087 *     change some method calls that you were making to support the new options
-088 *     format (<tt>int</tt>s that you "OR" together).</li>
-089 *   <li>v1.5.1 - Fixed bug when decompressing and decoding to a byte[] using
-090 *     <tt>decode( String s, boolean gzipCompressed )</tt>. Added the ability to
-091 *     "suspend" encoding in the Output Stream so you can turn on and off the
-092 *     encoding if you need to embed base64 data in an otherwise "normal" stream
-093 *     (like an XML file).</li>
-094 *   <li>v1.5 - Output stream passes on flush() command but doesn't do anything
-095 *     itself. This helps when using GZIP streams. Added the ability to
-096 *     GZip-compress objects before encoding them.</li>
-097 *   <li>v1.4 - Added helper methods to read/write files.</li>
-098 *   <li>v1.3.6 - Fixed OutputStream.flush() so that 'position' is reset.</li>
-099 *   <li>v1.3.5 - Added flag to turn on and off line breaks. Fixed bug in input
-100 *     stream where last buffer being read, if not completely full, was not
-101 *     returned.</li>
-102 *   <li>v1.3.4 - Fixed when "improperly padded stream" error was thrown at the
-103 *     wrong time.</li>
-104 *   <li>v1.3.3 - Fixed I/O streams which were totally messed up.</li>
-105 * </ul>
-106 *
-107 * <p>
-108 * I am placing this code in the Public Domain. Do with it as you will. This
-109 * software comes with no guarantees or warranties but with plenty of
-110 * well-wishing instead!
-111 * <p>
-112 * Please visit <a href="http://iharder.net/base64">http://iharder.net/base64</a>
-113 * periodically to check for updates or to contribute improvements.
-114 * <p>
-115 * author: Robert Harder, r...@iharder.net
-116 * <br>
-117 * version: 2.2.1
-118 */
-119@InterfaceAudience.Public
-120public class Base64 {
-121
-122  /*  P U B L I C   F I E L D S  */
-123
-124  /** No options specified. Value is zero. */
-125  public final static int NO_OPTIONS = 0;
-126
-127  /** 
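
The changelog above describes an encoder with OR-able option flags (NO_OPTIONS, URLSAFE, ORDERED) and automatic gzip detection on decode. For a quick comparison, the same round trip with the JDK's own codec (java.util.Base64, a known-good API; the class above is HBase's bundled copy of the iharder.net encoder):

    byte[] raw = "hello hbase".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    String encoded = java.util.Base64.getEncoder().encodeToString(raw);
    byte[] decoded = java.util.Base64.getDecoder().decode(encoded);
    // URL-safe alphabet, the analogue of the URL_SAFE dialect described above:
    String urlSafe = java.util.Base64.getUrlEncoder().encodeToString(raw);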

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
index a9c1142..12ade22 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
@@ -388,196 +388,200 @@
 380
 381  @Override
 382  public DatanodeInfo[] getPipeline() {
-383    State state = this.state;
-384    return state == State.STREAMING || state == State.CLOSING ? locations : new DatanodeInfo[0];
-385  }
-386
-387  private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf,
-388      long nextPacketOffsetInBlock, boolean syncBlock) {
-389    int dataLen = dataBuf.readableBytes();
-390    int chunkLen = summer.getBytesPerChecksum();
-391    int trailingPartialChunkLen = dataLen % chunkLen;
-392    int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
-393    int checksumLen = numChecks * summer.getChecksumSize();
-394    ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
-395    summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
-396    checksumBuf.writerIndex(checksumLen);
-397    PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock,
-398        nextPacketSeqno, false, dataLen, syncBlock);
-399    int headerLen = header.getSerializedSize();
-400    ByteBuf headerBuf = alloc.buffer(headerLen);
-401    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
-402    headerBuf.writerIndex(headerLen);
-403    Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeList);
-404    waitingAckQueue.addLast(c);
-405    // recheck again after we pushed the callback to queue
-406    if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) {
-407      future.completeExceptionally(new IOException("stream already broken"));
-408      // it's the one we have just pushed or just a no-op
-409      waitingAckQueue.removeFirst();
-410      return;
-411    }
-412    datanodeList.forEach(ch -> {
-413      ch.write(headerBuf.retainedDuplicate());
-414      ch.write(checksumBuf.retainedDuplicate());
-415      ch.writeAndFlush(dataBuf.retainedDuplicate());
-416    });
-417    checksumBuf.release();
-418    headerBuf.release();
-419    dataBuf.release();
-420    nextPacketSeqno++;
-421  }
-422
-423  private void flush0(CompletableFuture<Long> future, boolean syncBlock) {
-424    if (state != State.STREAMING) {
-425      future.completeExceptionally(new IOException("stream already broken"));
-426      return;
-427    }
-428    int dataLen = buf.readableBytes();
-429    if (dataLen == trailingPartialChunkLength) {
-430      // no new data
-431      long lengthAfterFlush = nextPacketOffsetInBlock + dataLen;
-432      Callback lastFlush = waitingAckQueue.peekLast();
-433      if (lastFlush != null) {
-434        Callback c = new Callback(future, lengthAfterFlush, Collections.emptyList());
-435        waitingAckQueue.addLast(c);
-436        // recheck here if we have already removed the previous callback from the queue
-437        if (waitingAckQueue.peekFirst() == c) {
-438          // all previous callbacks have been removed
-439          // notice that this does mean we will always win here because the background thread may
-440          // have already started to mark the future here as completed in the completed or failed
-441          // methods but haven't removed it from the queue yet. That's also why the removeFirst
-442          // call below may be a no-op.
-443          if (state != State.STREAMING) {
-444            future.completeExceptionally(new IOException("stream already broken"));
-445          } else {
-446            future.complete(lengthAfterFlush);
-447          }
-448          // it's the one we have just pushed or just a no-op
-449          waitingAckQueue.removeFirst();
-450        }
-451      } else {
-452        // we must have acked all the data so the ackedBlockLength must be same with
-453        // lengthAfterFlush
-454        future.complete(lengthAfterFlush);
-455      }
-456      return;
-457    }
-458
-459    if (encryptor != null) {
-460      ByteBuf encryptBuf = alloc.directBuffer(dataLen);
-461      buf.readBytes(encryptBuf, trailingPartialChunkLength);
-462      int toEncryptLength = dataLen - trailingPartialChunkLength;
-463      try {
-464        encryptor.encrypt(buf.nioBuffer(trailingPartialChunkLength, toEncryptLength),
-465            encryptBuf.nioBuffer(trailingPartialChunkLength, toEncryptLength));
-466      } catch (IOException e) {
-467
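
To make the packet framing in flushBuffer above concrete, a worked computation under assumed DFS defaults (512-byte checksum chunks and 4-byte checksums; both values are assumptions for illustration):

    int dataLen = 1300;                                 // bytes buffered for this packet
    int chunkLen = 512;                                 // summer.getBytesPerChecksum(), assumed
    int trailingPartialChunkLen = dataLen % chunkLen;   // 1300 % 512 = 276
    int numChecks = dataLen / chunkLen
        + (trailingPartialChunkLen != 0 ? 1 : 0);       // 2 full chunks + 1 partial = 3
    int checksumLen = numChecks * 4;                    // 3 * 4 = 12 bytes of checksums
    int pktLen = 4 + checksumLen + dataLen;             // PacketHeader length field: 1316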

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
index dd54dd2..a9c1142 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
@@ -388,195 +388,196 @@
 380
 381  @Override
 382  public DatanodeInfo[] getPipeline() {
-383    return locations;
-384  }
-385
-386  private void flushBuffer(CompletableFuture<Long> future, ByteBuf dataBuf,
-387      long nextPacketOffsetInBlock, boolean syncBlock) {
-388    int dataLen = dataBuf.readableBytes();
-389    int chunkLen = summer.getBytesPerChecksum();
-390    int trailingPartialChunkLen = dataLen % chunkLen;
-391    int numChecks = dataLen / chunkLen + (trailingPartialChunkLen != 0 ? 1 : 0);
-392    int checksumLen = numChecks * summer.getChecksumSize();
-393    ByteBuf checksumBuf = alloc.directBuffer(checksumLen);
-394    summer.calculateChunkedSums(dataBuf.nioBuffer(), checksumBuf.nioBuffer(0, checksumLen));
-395    checksumBuf.writerIndex(checksumLen);
-396    PacketHeader header = new PacketHeader(4 + checksumLen + dataLen, nextPacketOffsetInBlock,
-397        nextPacketSeqno, false, dataLen, syncBlock);
-398    int headerLen = header.getSerializedSize();
-399    ByteBuf headerBuf = alloc.buffer(headerLen);
-400    header.putInBuffer(headerBuf.nioBuffer(0, headerLen));
-401    headerBuf.writerIndex(headerLen);
-402    Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeList);
-403    waitingAckQueue.addLast(c);
-404    // recheck again after we pushed the callback to queue
-405    if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) {
-406      future.completeExceptionally(new IOException("stream already broken"));
-407      // it's the one we have just pushed or just a no-op
-408      waitingAckQueue.removeFirst();
-409      return;
-410    }
-411    datanodeList.forEach(ch -> {
-412      ch.write(headerBuf.retainedDuplicate());
-413      ch.write(checksumBuf.retainedDuplicate());
-414      ch.writeAndFlush(dataBuf.retainedDuplicate());
-415    });
-416    checksumBuf.release();
-417    headerBuf.release();
-418    dataBuf.release();
-419    nextPacketSeqno++;
-420  }
-421
-422  private void flush0(CompletableFuture<Long> future, boolean syncBlock) {
-423    if (state != State.STREAMING) {
-424      future.completeExceptionally(new IOException("stream already broken"));
-425      return;
-426    }
-427    int dataLen = buf.readableBytes();
-428    if (dataLen == trailingPartialChunkLength) {
-429      // no new data
-430      long lengthAfterFlush = nextPacketOffsetInBlock + dataLen;
-431      Callback lastFlush = waitingAckQueue.peekLast();
-432      if (lastFlush != null) {
-433        Callback c = new Callback(future, lengthAfterFlush, Collections.emptyList());
-434        waitingAckQueue.addLast(c);
-435        // recheck here if we have already removed the previous callback from the queue
-436        if (waitingAckQueue.peekFirst() == c) {
-437          // all previous callbacks have been removed
-438          // notice that this does mean we will always win here because the background thread may
-439          // have already started to mark the future here as completed in the completed or failed
-440          // methods but haven't removed it from the queue yet. That's also why the removeFirst
-441          // call below may be a no-op.
-442          if (state != State.STREAMING) {
-443            future.completeExceptionally(new IOException("stream already broken"));
-444          } else {
-445            future.complete(lengthAfterFlush);
-446          }
-447          // it's the one we have just pushed or just a no-op
-448          waitingAckQueue.removeFirst();
-449        }
-450      } else {
-451        // we must have acked all the data so the ackedBlockLength must be same with
-452        // lengthAfterFlush
-453        future.complete(lengthAfterFlush);
-454      }
-455      return;
-456    }
-457
-458    if (encryptor != null) {
-459      ByteBuf encryptBuf = alloc.directBuffer(dataLen);
-460      buf.readBytes(encryptBuf, trailingPartialChunkLength);
-461      int toEncryptLength = dataLen - trailingPartialChunkLength;
-462      try {
-463        encryptor.encrypt(buf.nioBuffer(trailingPartialChunkLength, toEncryptLength),
-464            encryptBuf.nioBuffer(trailingPartialChunkLength, toEncryptLength));
-465      } catch (IOException e) {
-466        encryptBuf.release();
-467
      

      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
      --
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
index ae9896e..cb2d47f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
@@ -41,25 +41,25 @@
 033import java.util.TreeMap;
 034import java.util.TreeSet;
 035
-036import org.apache.commons.lang3.StringUtils;
-037import org.apache.commons.logging.Log;
-038import org.apache.commons.logging.LogFactory;
-039import org.apache.hadoop.conf.Configuration;
-040import org.apache.hadoop.fs.Path;
-041import org.apache.hadoop.hbase.Cell;
-042import org.apache.hadoop.hbase.CellUtil;
-043import org.apache.hadoop.hbase.HBaseConfiguration;
-044import org.apache.hadoop.hbase.HColumnDescriptor;
-045import org.apache.hadoop.hbase.HTableDescriptor;
-046import org.apache.hadoop.hbase.NamespaceDescriptor;
-047import org.apache.hadoop.hbase.ServerName;
-048import org.apache.hadoop.hbase.TableName;
-049import org.apache.hadoop.hbase.backup.BackupInfo;
-050import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
-051import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
-052import org.apache.hadoop.hbase.backup.BackupType;
-053import org.apache.hadoop.hbase.backup.util.BackupUtils;
-054import org.apache.yetus.audience.InterfaceAudience;
+036import org.apache.commons.lang3.ArrayUtils;
+037import org.apache.commons.lang3.StringUtils;
+038import org.apache.commons.logging.Log;
+039import org.apache.commons.logging.LogFactory;
+040import org.apache.hadoop.conf.Configuration;
+041import org.apache.hadoop.fs.Path;
+042import org.apache.hadoop.hbase.Cell;
+043import org.apache.hadoop.hbase.CellUtil;
+044import org.apache.hadoop.hbase.HBaseConfiguration;
+045import org.apache.hadoop.hbase.HColumnDescriptor;
+046import org.apache.hadoop.hbase.HTableDescriptor;
+047import org.apache.hadoop.hbase.NamespaceDescriptor;
+048import org.apache.hadoop.hbase.ServerName;
+049import org.apache.hadoop.hbase.TableName;
+050import org.apache.hadoop.hbase.backup.BackupInfo;
+051import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+052import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+053import org.apache.hadoop.hbase.backup.BackupType;
+054import org.apache.hadoop.hbase.backup.util.BackupUtils;
 055import org.apache.hadoop.hbase.client.Admin;
 056import org.apache.hadoop.hbase.client.Connection;
 057import org.apache.hadoop.hbase.client.Delete;
@@ -70,12 +70,12 @@
 062import org.apache.hadoop.hbase.client.Scan;
 063import org.apache.hadoop.hbase.client.SnapshotDescription;
 064import org.apache.hadoop.hbase.client.Table;
-065import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
-066import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-067import org.apache.hadoop.hbase.util.ArrayUtils;
-068import org.apache.hadoop.hbase.util.Bytes;
-069import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-070import org.apache.hadoop.hbase.util.Pair;
+065import org.apache.hadoop.hbase.util.Bytes;
+066import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+067import org.apache.hadoop.hbase.util.Pair;
+068import org.apache.yetus.audience.InterfaceAudience;
+069import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos;
+070import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 071
 072/**
 073 * This class provides API to access backup system table<br>
@@ -585,1396 +585,1397 @@
 577    try (Table table = connection.getTable(tableName)) {
 578      Put put = createPutForStartBackupSession();
 579      // First try to put if row does not exist
-580      if (!table.checkAndPut(ACTIVE_SESSION_ROW, SESSIONS_FAMILY, ACTIVE_SESSION_COL, null, put)) {
-581        // Row exists, try to put if value == ACTIVE_SESSION_NO
-582        if (!table.checkAndPut(ACTIVE_SESSION_ROW, SESSIONS_FAMILY, ACTIVE_SESSION_COL,
-583          ACTIVE_SESSION_NO, put)) {
-584          throw new IOException("There is an active backup exclusive operation");
-585        }
-586      }
-587    }
-588  }
-589
-590  private Put createPutForStartBackupSession() {
-591    Put put = new Put(ACTIVE_SESSION_ROW);
-592    put.addColumn(SESSIONS_FAMILY, ACTIVE_SESSION_COL, ACTIVE_SESSION_YES);
-593    return put;
-594  }
-595
-596  public void finishBackupExclusiveOperation() throws IOException {
-597    LOG.debug("Finish backup exclusive operation");
-598
-599    try (Table table = connection.getTable(tableName)) {
-600      Put put = createPutForStopBackupSession();
-601      if
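
The start/finish pair above implements a cluster-wide mutex with nothing but two conditional puts. A self-contained sketch of the same idiom, under the assumption that the ACTIVE_SESSION_* constants (whose actual byte values the diff does not show) are plain string tokens; the checkAndPut overload is the one the diff itself uses (later HBase 2.x deprecates it in favor of checkAndMutate):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative stand-ins for BackupSystemTable's ACTIVE_SESSION_* fields.
public final class ExclusiveSessionSketch {
  private static final byte[] ROW = Bytes.toBytes("activesession:");
  private static final byte[] FAMILY = Bytes.toBytes("session");
  private static final byte[] COL = Bytes.toBytes("c");
  private static final byte[] YES = Bytes.toBytes("yes");
  private static final byte[] NO = Bytes.toBytes("no");

  public static void start(Connection conn, TableName table) throws IOException {
    try (Table t = conn.getTable(table)) {
      Put put = new Put(ROW);
      put.addColumn(FAMILY, COL, YES);
      // First attempt: succeeds only if the cell does not exist yet (expected value null).
      if (!t.checkAndPut(ROW, FAMILY, COL, null, put)) {
        // Row exists: succeed only if the previous session finished cleanly (value == NO).
        if (!t.checkAndPut(ROW, FAMILY, COL, NO, put)) {
          throw new IOException("There is an active backup exclusive operation");
        }
      }
    }
  }

  public static void finish(Connection conn, TableName table) throws IOException {
    try (Table t = conn.getTable(table)) {
      Put put = new Put(ROW);
      put.addColumn(FAMILY, COL, NO);
      t.put(put); // hand the lock back by flipping the flag
    }
  }
}

Passing null as the expected value makes the first checkAndPut succeed only when the cell is absent, so exactly one caller can move the flag from "missing or no" to "yes"; everyone else gets the IOException.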
      

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.html b/devapidocs/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.html
      new file mode 100644
      index 000..11482df
      --- /dev/null
      +++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/ReadOnlyConfiguration.html
      @@ -0,0 +1,1778 @@
      +ReadOnlyConfiguration (Apache HBase 3.0.0-SNAPSHOT API)
+org.apache.hadoop.hbase.coprocessor
+Class ReadOnlyConfiguration
+
+java.lang.Object
+  org.apache.hadoop.conf.Configuration
+    org.apache.hadoop.hbase.coprocessor.ReadOnlyConfiguration
+
+All Implemented Interfaces:
+Iterable<Map.Entry<String,String>>, org.apache.hadoop.io.Writable
+
+@InterfaceAudience.Private
+class ReadOnlyConfiguration
+extends org.apache.hadoop.conf.Configuration
+Wraps a Configuration to make it read-only.
+
+Nested Class Summary
+
+Nested classes/interfaces inherited from class org.apache.hadoop.conf.Configuration
+org.apache.hadoop.conf.Configuration.DeprecationDelta, org.apache.hadoop.conf.Configuration.IntegerRanges
+
+Field Summary
+
+Fields
+
+Modifier and Type                             Field and Description
+private org.apache.hadoop.conf.Configuration  conf
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+ReadOnlyConfiguration(org.apache.hadoop.conf.Configuration conf)
+
+Method Summary
+
+All Methods  Instance Methods  Concrete Methods
+
+Modifier and Type  Method and Description
+void               addResource(org.apache.hadoop.conf.Configuration conf)
+void               addResource(InputStream in)
+void               addResource(InputStream in, String name)
+void               addResource(org.apache.hadoop.fs.Path file)
+void               addResource(String name)
+void               addResource(URL url)
+void               clear()
+
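
All the mutators surfaced in the summary above (the addResource overloads, clear(), and the setters among the 70+ overridden methods) presumably reject writes while reads delegate to the wrapped instance. A minimal sketch of that wrapping idea, with a representative rather than exhaustive set of overrides (the throw-on-write behavior is an assumption; the diff shows only the method list):

import java.io.InputStream;
import java.net.URL;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

// Sketch: delegate reads, reject mutations.
class ReadOnlyConfigurationSketch extends Configuration {
  private final Configuration conf;

  ReadOnlyConfigurationSketch(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public String get(String name) {
    return conf.get(name); // reads pass through to the wrapped instance
  }

  @Override
  public void set(String name, String value) {
    throw new UnsupportedOperationException("Read-only Configuration");
  }

  @Override
  public void addResource(Path file) {
    throw new UnsupportedOperationException("Read-only Configuration");
  }

  @Override
  public void addResource(InputStream in) {
    throw new UnsupportedOperationException("Read-only Configuration");
  }

  @Override
  public void addResource(URL url) {
    throw new UnsupportedOperationException("Read-only Configuration");
  }

  @Override
  public void clear() {
    throw new UnsupportedOperationException("Read-only Configuration");
  }
}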
      

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
----------------------------------------------------------------------
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
index e273bdf..70187ae 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
 Apache HBase - Exemplar for hbase-client archetype - Reactor Dependency Convergence
       
      @@ -488,22 +488,22 @@
       3.4.10
       
       
-org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT
+-org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile
|+-org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile
||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|+-org.apache.zookeeper:zookeeper:jar:3.4.10:compile
|+-org.apache.hadoop:hadoop-common:jar:2.7.4:compile
||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|+-org.apache.hadoop:hadoop-auth:jar:2.7.4:compile
||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|\-org.apache.hadoop:hadoop-client:jar:2.7.4:compile
|\-org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.4:compile
|\-org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile
|\-org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile
|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+-org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT:compile
|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+-org.apache.hbase:hbase-mapreduce:test-jar:tests:3.0.0-SNAPSHOT:test
|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
+-org.apache.hbase:hbase-testing-util:jar:3.0.0-SNAPSHOT:test
|+-org.apache.hbase:hbase-server:test-jar:tests:3.0.0-SNAPSHOT:test
||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
|\-org.apache.hadoop:hadoop-minicluster:jar:2.7.4:test
|+-org.apache.hadoop:hadoop-common:test-jar:tests:2.7.4:test
||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
|\-org.apache.hadoop:hadoop-yarn-server-tests:test-jar:tests:2.7.4:test
|\-org.apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.4:test
|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
+-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile
|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+-org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile
|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+-org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT:compile
|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+-org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile
|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
\-org.apache.hbase:hbase-rsgroup:jar:3.0.0-SNAPSHOT:compile
\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT
+-org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile
|+-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|\-org.apache.hadoop:hadoop-auth:jar:2.7.4:compile
|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+-org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile
|+-org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile
||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|+-org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile
||\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|+-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|\-org.apache.hadoop:hadoop-client:jar:2.7.4:compile
|\-org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.4:compile
|\-org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile
|\-org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile
|\-(org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
----------------------------------------------------------------------
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
index c631df5..49740f6 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
      @@ -7,7 +7,7 @@
         
       
       
      -
      +
       
 Apache HBase - Exemplar for hbase-shaded-client archetype - Project Dependencies
       
@@ -1489,26 +1489,8 @@ The following provides more details on the included cryptographic software:
 production. Metrics provides a powerful toolkit of ways to measure the behavior of critical
 components in your production environment.
 URL: http://metrics.codahale.com/metrics-core/
-Project Licenses: Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0.html)
-org.apache.curator:curator-framework:jar:4.0.0 (compile)
-
-Curator Framework
-
-Description: High-level API that greatly simplifies using ZooKeeper.
-URL: http://curator.apache.org/curator-framework
-Project Licenses: The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
-org.apache.curator:curator-client:jar:4.0.0 (compile)
-
-Curator Client
-
-Description: Low-level API
-URL: http://curator.apache.org/curator-client
-Project Licenses: The Apache Software License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)
-org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT (test)
+Project Licenses: Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0.html)
+org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT (test)
       
       
 Apache HBase - Server
@@ -1518,7 +1500,7 @@ The following provides more details on the included cryptographic software:
 URL: http://hbase.apache.org/hbase-build-configuration/hbase-server
 Project Licenses: Apache License, Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
 
-org.apache.hbase:hbase-http:jar:3.0.0-SNAPSHOT (test)
+org.apache.hbase:hbase-http:jar:3.0.0-SNAPSHOT (test)
 
 Apache HBase - HTTP
 
@@ -1528,7 +1510,7 @@ The following provides more details on the included cryptographic software:
 URL: http://hbase.apache.org/hbase-build-configuration/hbase-http
 Project Licenses: Apache License, Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
 
-org.eclipse.jetty:jetty-util:jar:9.3.19.v20170502 (test)
+org.eclipse.jetty:jetty-util:jar:9.3.19.v20170502 (test)
 
 Jetty :: Utilities
 
@@ -1537,7 +1519,7 @@ The following provides more details on the included cryptographic software:
 Description: Utility classes for Jetty
 URL: http://www.eclipse.org/jetty
 Project Licenses: Apache Software License - Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), Eclipse Public License - Version 1.0 (http://www.eclipse.org/org/documents/epl-v10.php)
-org.eclipse.jetty:jetty-util-ajax:jar:9.3.19.v20170502 (test)
+org.eclipse.jetty:jetty-util-ajax:jar:9.3.19.v20170502 (test)
 
 Jetty :: Utilities :: Ajax(JSON)
 
@@ -1546,7 +1528,7 @@ The following provides more details on the included cryptographic software:
 Description: JSON/Ajax Utility classes for Jetty
 URL: http://www.eclipse.org/jetty
 Project Licenses: Apache Software License - Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), Eclipse Public License - Version 1.0 (http://www.eclipse.org/org/documents/epl-v10.php)
-org.eclipse.jetty:jetty-http:jar:9.3.19.v20170502 (test)
+org.eclipse.jetty:jetty-http:jar:9.3.19.v20170502 (test)
 
 Jetty :: Http Utility
 
@@ -1555,7 +1537,7 @@ The following provides more details on the included cryptographic software:
 Description: Administrative parent pom for Jetty modules
 URL: http://www.eclipse.org/jetty
 Project Licenses: Apache Software License - Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), Eclipse Public License - Version 1.0 (http://www.eclipse.org/org/documents/epl-v10.php)
-org.eclipse.jetty:jetty-security:jar:9.3.19.v20170502 (test)
+org.eclipse.jetty:jetty-security:jar:9.3.19.v20170502 (test)
 
 Jetty :: Security
 
@@ -1564,7 +1546,7 @@ The following provides more details on the included cryptographic software:
 Description: Jetty security infrastructure
 URL: http://www.eclipse.org/jetty
 Project Licenses:

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
index 25e368d..d0f781f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
      @@ -25,798 +25,798 @@
 017 */
 018package org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import com.google.protobuf.CodedOutputStream;
-034
-035import org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import org.apache.commons.logging.LogFactory;
-067import org.apache.hadoop.conf.Configuration;
-068import org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import org.apache.hadoop.fs.UnresolvedLinkException;
-075import
      

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
index d438f22..7c59e27 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
@@ -1290,8 +1290,8 @@
 1282      CompactType compactType) throws IOException {
 1283    switch (compactType) {
 1284      case MOB:
-1285        compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), major,
-1286          columnFamily);
+1285        compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName),
+1286          major, columnFamily);
 1287        break;
 1288      case NORMAL:
 1289        checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240      new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
 3241        @Override
 3242        public AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243          RegionInfo info = getMobRegionInfo(tableName);
+3243          RegionInfo info = RegionInfo.createMobRegionInfo(tableName);
 3244          GetRegionInfoRequest request =
 3245            RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246          GetRegionInfoResponse response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304        }
 3305        break;
 3306      default:
-3307        throw new IllegalArgumentException("Unknowne compactType: " + compactType);
+3307        throw new IllegalArgumentException("Unknown compactType: " + compactType);
 3308    }
 3309    if (state != null) {
 3310      return ProtobufUtil.createCompactionState(state);
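
Context for the hunks above: HBaseAdmin used to synthesize the MOB region descriptor through a private getMobRegionInfo helper (removed in the next hunk) and now calls RegionInfo.createMobRegionInfo instead. A sketch of the equivalence, with the builder chain taken from the removed helper:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: both sides of the change build the same synthetic region descriptor
// for a table's MOB data (start key ".mob", region id 0).
final class MobRegionInfoSketch {
  // What the removed private helper did, per the diff below:
  static RegionInfo oldStyle(TableName tableName) {
    return RegionInfoBuilder.newBuilder(tableName)
        .setStartKey(Bytes.toBytes(".mob"))
        .setRegionId(0)
        .build();
  }

  // What the new call sites use instead:
  static RegionInfo newStyle(TableName tableName) {
    return RegionInfo.createMobRegionInfo(tableName);
  }
}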
@@ -3847,325 +3847,320 @@
 3839    });
 3840  }
 3841
-3842  private RegionInfo getMobRegionInfo(TableName tableName) {
-3843    return RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844      .build();
-3845  }
-3846
-3847  private RpcControllerFactory getRpcControllerFactory() {
-3848    return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853      throws IOException {
-3854    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3855      @Override
-3856      protected Void rpcCall() throws Exception {
-3857        master.addReplicationPeer(getRpcController(),
-3858          RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled));
-3859        return null;
-3860      }
-3861    });
-3862  }
-3863
-3864  @Override
-3865  public void removeReplicationPeer(String peerId) throws IOException {
-3866    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3867      @Override
-3868      protected Void rpcCall() throws Exception {
-3869        master.removeReplicationPeer(getRpcController(),
-3870          RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871        return null;
-3872      }
-3873    });
-3874  }
-3875
-3876  @Override
-3877  public void enableReplicationPeer(final String peerId) throws IOException {
-3878    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3879      @Override
-3880      protected Void rpcCall() throws Exception {
-3881        master.enableReplicationPeer(getRpcController(),
-3882          RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883        return null;
-3884      }
-3885    });
-3886  }
-3887
-3888  @Override
-3889  public void disableReplicationPeer(final String peerId) throws IOException {
-3890    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3891      @Override
-3892      protected Void rpcCall() throws Exception {
-3893        master.disableReplicationPeer(getRpcController(),
-3894          RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895        return null;
-3896      }
-3897    });
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException {
-3902    return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(),
-3903        getRpcControllerFactory()) {
-3904      @Override
-3905      protected ReplicationPeerConfig rpcCall() throws Exception {
-3906        GetReplicationPeerConfigResponse response = master.getReplicationPeerConfig(
-3907          getRpcController(),
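
Every method in this hunk follows the same shape: wrap one master RPC in a MasterCallable whose rpcCall() does the actual call, then hand it to executeCallable for connection setup and retries. A generic, illustrative reduction of that pattern (RetryingExecutor is a stand-in, not HBase's real retrying caller):

import java.io.IOException;
import java.util.concurrent.Callable;

// Sketch of the executeCallable(new MasterCallable<V>() { ... }) idiom:
// the callable performs exactly one RPC, the executor owns retries.
final class RetryingExecutor {
  private final int maxAttempts;

  RetryingExecutor(int maxAttempts) {
    this.maxAttempts = maxAttempts;
  }

  <V> V execute(Callable<V> callable) throws IOException {
    IOException last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return callable.call(); // one RPC per rpcCall()-style invocation
      } catch (Exception e) {
        last = e instanceof IOException ? (IOException) e : new IOException(e);
      }
    }
    if (last == null) {
      last = new IOException("never attempted");
    }
    throw last;
  }
}

An operation like removeReplicationPeer then collapses to executor.execute(() -> { /* one master RPC */ return null; }), which is exactly the shape of the rpcCall() bodies above.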
      

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 29ea7b3..6ed75c9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
      @@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308    boolean result = isAvailable() && !hasReferences();
-1309    LOG.info("ASKED IF SPLITTABLE " + result + " " + getRegionInfo().getShortNameToLog(),
-1310      new Throwable("LOGGING: REMOVE"));
-1311    // REMOVE BELOW
-1312    LOG.info("DEBUG LIST ALL FILES");
-1313    for (HStore store : this.stores.values()) {
-1314      LOG.info("store " + store.getColumnFamilyName());
-1315      for (HStoreFile sf : store.getStorefiles()) {
-1316        LOG.info(sf.toStringDetailed());
-1317      }
-1318    }
-1319    return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324    if (!isAvailable()) {
-1325      LOG.debug("Region " + this
-1326          + " is not mergeable because it is closing or closed");
-1327      return false;
-1328    }
-1329    if (hasReferences()) {
-1330      LOG.debug("Region " + this
-1331          + " is not mergeable because it has references");
-1332      return false;
-1333    }
-1334
-1335    return true;
+1308    return isAvailable() && !hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313    if (!isAvailable()) {
+1314      LOG.debug("Region " + this
+1315          + " is not mergeable because it is closing or closed");
+1316      return false;
+1317    }
+1318    if (hasReferences()) {
+1319      LOG.debug("Region " + this
+1320          + " is not mergeable because it has references");
+1321      return false;
+1322    }
+1323
+1324    return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328    synchronized(this.writestate) {
+1329      return this.writestate.writesEnabled;
+1330    }
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl getMVCC() {
+1335    return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339    synchronized(this.writestate) {
-1340      return this.writestate.writesEnabled;
-1341    }
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl getMVCC() {
-1346    return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351    return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340    return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long getReadPoint(IsolationLevel isolationLevel) {
+1347    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348      // This scan can read even uncommitted transactions
+1349      return Long.MAX_VALUE;
+1350    }
+1351    return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long getReadPoint(IsolationLevel isolationLevel) {
-1358    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359      // This scan can read even uncommitted transactions
-1360      return Long.MAX_VALUE;
-1361    }
-1362    return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean isLoadingCfsOnDemandDefault() {
-1366    return this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage files that the HRegion's component
-1377   * HStores make use of.  It's a list of all StoreFile objects. Returns empty
-1378   * vector if already closed and null if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException Thrown when replay of wal is required
-1382   * because a Snapshot was not properly persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386    return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new Object();
-1390
-1391  /** Conf key for the periodic flush interval */
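
The getReadPoint change above is worth a gloss: a scanner only sees cell versions whose write sequence id is at or below its read point, so returning Long.MAX_VALUE for READ_UNCOMMITTED admits in-flight writes, while the default path sticks to the MVCC committed point. A toy model of that visibility rule (MvccSketch and VersionedCell are illustrative stand-ins, not HBase classes):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

// Toy model of read-point-based visibility.
final class MvccSketch {

  enum IsolationLevel { READ_COMMITTED, READ_UNCOMMITTED }

  static final class VersionedCell {
    final String value;
    final long sequenceId; // assigned by the write that produced this version

    VersionedCell(String value, long sequenceId) {
      this.value = value;
      this.sequenceId = sequenceId;
    }
  }

  // Highest sequence id whose write has fully completed.
  private final AtomicLong completedSequenceId = new AtomicLong();

  long getReadPoint(IsolationLevel level) {
    if (level == IsolationLevel.READ_UNCOMMITTED) {
      return Long.MAX_VALUE; // admit every version, even in-flight writes
    }
    return completedSequenceId.get(); // only fully committed writes are visible
  }

  List<VersionedCell> scan(List<VersionedCell> cells, IsolationLevel level) {
    long readPoint = getReadPoint(level);
    List<VersionedCell> visible = new ArrayList<>();
    for (VersionedCell c : cells) {
      if (c.sequenceId <= readPoint) { // the check the read point drives
        visible.add(c);
      }
    }
    return visible;
  }
}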
      

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
      index d98042d..d549086 100644
      --- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
      +++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
      @@ -42,2537 +42,2536 @@
 034
 035import org.apache.commons.logging.Log;
 036import org.apache.commons.logging.LogFactory;
-037import org.apache.yetus.audience.InterfaceAudience;
+037import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 038import org.apache.hadoop.hbase.util.ByteBufferUtils;
 039import org.apache.hadoop.hbase.util.Bytes;
 040import org.apache.hadoop.hbase.util.ClassSize;
 041import org.apache.hadoop.io.RawComparator;
-042
-043import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-044/**
-045 * An HBase Key/Value. This is the fundamental HBase Type.
-046 * <p>
-047 * HBase applications and users should use the Cell interface and avoid directly using KeyValue and
-048 * member functions not defined in Cell.
-049 * <p>
-050 * If being used client-side, the primary methods to access individual fields are
-051 * {@link #getRowArray()}, {@link #getFamilyArray()}, {@link #getQualifierArray()},
-052 * {@link #getTimestamp()}, and {@link #getValueArray()}. These methods allocate new byte arrays
-053 * and return copies. Avoid their use server-side.
-054 * <p>
-055 * Instances of this class are immutable. They do not implement Comparable but Comparators are
-056 * provided. Comparators change with context, whether user table or a catalog table comparison. Its
-057 * critical you use the appropriate comparator. There are Comparators for normal HFiles, Meta's
-058 * Hfiles, and bloom filter keys.
-059 * <p>
-060 * KeyValue wraps a byte array and takes offsets and lengths into passed array at where to start
-061 * interpreting the content as KeyValue. The KeyValue format inside a byte array is:
-062 * <code>&lt;keylength&gt; &lt;valuelength&gt; &lt;key&gt; &lt;value&gt;</code> Key is further
-063 * decomposed as: <code>&lt;rowlength&gt; &lt;row&gt; &lt;columnfamilylength&gt;
-064 * &lt;columnfamily&gt; &lt;columnqualifier&gt;
-065 * &lt;timestamp&gt; &lt;keytype&gt;</code> The <code>rowlength</code> maximum is
-066 * <code>Short.MAX_SIZE</code>, column family length maximum is <code>Byte.MAX_SIZE</code>, and
-067 * column qualifier + key length must be &lt; <code>Integer.MAX_SIZE</code>. The column does not
-068 * contain the family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER}<br>
-069 * KeyValue can optionally contain Tags. When it contains tags, it is added in the byte array after
-070 * the value part. The format for this part is: <code>&lt;tagslength&gt;&lt;tagsbytes&gt;</code>.
-071 * <code>tagslength</code> maximum is <code>Short.MAX_SIZE</code>. The <code>tagsbytes</code>
-072 * contain one or more tags where as each tag is of the form
-073 * <code>&lt;taglength&gt;&lt;tagtype&gt;&lt;tagbytes&gt;</code>. <code>tagtype</code> is one byte
-074 * and <code>taglength</code> maximum is <code>Short.MAX_SIZE</code> and it includes 1 byte type
-075 * length and actual tag bytes length.
-076 */
-077@InterfaceAudience.Private
-078public class KeyValue implements ExtendedCell {
-079  private static final ArrayList<Tag> EMPTY_ARRAY_LIST = new ArrayList<>();
-080
-081  private static final Log LOG = LogFactory.getLog(KeyValue.class);
-082
-083  public static final long FIXED_OVERHEAD = ClassSize.OBJECT + // the KeyValue object itself
-084      ClassSize.REFERENCE + // pointer to "bytes"
-085      2 * Bytes.SIZEOF_INT + // offset, length
-086      Bytes.SIZEOF_LONG; // memstoreTS
-087
-088  /**
-089   * Colon character in UTF-8
-090   */
-091  public static final char COLUMN_FAMILY_DELIMITER = ':';
-092
-093  public static final byte[] COLUMN_FAMILY_DELIM_ARRAY =
-094      new byte[]{COLUMN_FAMILY_DELIMITER};
-095
-096  /**
-097   * Comparator for plain key/values; i.e. non-catalog table key/values. Works on Key portion
-098   * of KeyValue only.
-099   * @deprecated Use {@link CellComparator#getInstance()} instead. Deprecated for hbase 2.0, remove for hbase 3.0.
-100   */
-101  @Deprecated
-102  public static final KVComparator COMPARATOR = new KVComparator();
-103  /**
-104   * A {@link KVComparator} for <code>hbase:meta</code> catalog table
-105   * {@link KeyValue}s.
-106   * @deprecated Use {@link CellComparatorImpl#META_COMPARATOR} instead. Deprecated for hbase 2.0, remove for hbase 3.0.
-107   */
-108  @Deprecated
-109  public static final KVComparator META_COMPARATOR = new MetaComparator();
-110
-111  /** Size of the key length field in bytes */
-112  public static final int KEY_LENGTH_SIZE = Bytes.SIZEOF_INT;
-113
-114  /** Size of the key type field in bytes */
-115  public static final int TYPE_SIZE
      [08/51] [partial] hbase-site git commit: Published site at .

      http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9118853f/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.html
      --
      diff --git 
      a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.html
       
      b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.html
      index 9147827..bced64f 100644
      --- 
      a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.html
      +++ 
      b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncReplicationAdminApi.html
      @@ -408,7 +408,7 @@ extends 
       
       testRemovePeerTableCFs
      -publicvoidtestRemovePeerTableCFs()
      +publicvoidtestRemovePeerTableCFs()
       throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
       title="class or interface in java.lang">Exception
       
       Throws:
      @@ -422,7 +422,7 @@ extends 
       
       testSetPeerNamespaces
      -publicvoidtestSetPeerNamespaces()
      +publicvoidtestSetPeerNamespaces()
      throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
       title="class or interface in java.lang">Exception
       
       Throws:
      @@ -436,7 +436,7 @@ extends 
       
       testNamespacesAndTableCfsConfigConflict
      -publicvoidtestNamespacesAndTableCfsConfigConflict()
      +publicvoidtestNamespacesAndTableCfsConfigConflict()
        throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
       title="class or interface in java.lang">Exception
       
       Throws:
      @@ -450,7 +450,7 @@ extends 
       
       testPeerBandwidth
      -publicvoidtestPeerBandwidth()
      +publicvoidtestPeerBandwidth()
      throws http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
       title="class or interface in java.lang">Exception
       
       Throws:
      
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9118853f/testdevapidocs/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.html b/testdevapidocs/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.html
index f05d5e0..51cb2b5 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":9,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10};
+var methods = {"i0":9,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -248,12 +248,20 @@ extends Object
 
 
 void
-testRemovePeerTableCFs()
+testPeerConfigConflict()
 
 
 void
+testRemovePeerTableCFs()
+
+
+void
 testSetPeerNamespaces()
 
+
+void
+testSetReplicateAllUserTables()
+
 
 
 
@@ -418,7 +426,7 @@ extends Object
 
 
 tearDownAfterClass
-public static void tearDownAfterClass()
+public static void tearDownAfterClass()
                                throws Exception
 
 Throws:
@@ -432,7 +440,7 @@ extends Object
 
 
 testAddRemovePeer
-public void testAddRemovePeer()
+public void testAddRemovePeer()
                        throws Exception
 Simple testing of adding and removing peers, basically shows that
   all interactions with ZK work
 
 Throws:
@@ -448,7 +456,7 @@ extends Object
 
 
 testAddPeerWithState
-public void testAddPeerWithState()
+public void testAddPeerWithState()
                           throws Exception
 
 Throws:
@@ -462,7 +470,7 @@ extends Object
 
 
 testPeerConfig
-public void testPeerConfig()
+public void testPeerConfig()
                     throws Exception
 Tests that the peer configuration used by ReplicationAdmin contains all
   the peer's properties.
 
 Throws:
@@ -478,7 +486,7 @@ extends Object
 
 
 testAddPeerWithUnDeletedQueues

      [08/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1a616706/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
index 02e4554..d438f22 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
@@ -111,4067 +111,4061 @@
 103import org.apache.hadoop.util.StringUtils;
 104import org.apache.yetus.audience.InterfaceAudience;
 105import org.apache.yetus.audience.InterfaceStability;
-106
-107import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-108import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-109import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-110import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-111import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-112import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-113import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-151import
