[17/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
index e763690..f66043c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
@@ -1301,610 +1301,613 @@
     }
     RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionInfo);
     // Do not need to lock on regionNode, as we can make sure that before we finish loading
-    // meta, all the related procedures can not be executed. The only exception is formeta
+    // meta, all the related procedures can not be executed. The only exception is for meta
     // region related operations, but here we do not load the informations for meta region.
     regionNode.setState(localState);
     regionNode.setLastHost(lastHost);
     regionNode.setRegionLocation(regionLocation);
     regionNode.setOpenSeqNum(openSeqNum);

-    if (localState == State.OPEN) {
-      assert regionLocation != null : "found null region location for " + regionNode;
-      regionStates.addRegionToServer(regionNode);
-    } else if (localState == State.OFFLINE || regionInfo.isOffline()) {
-      regionStates.addToOfflineRegions(regionNode);
-    }
-  }
-});
-
-// every assignment is blocked until meta is loaded.
-wakeMetaLoadedEvent();
-  }
-
-  /**
-   * Used to check if the meta loading is done.
-   *
-   * if not we throw PleaseHoldException since we are rebuilding the RegionStates
-   * @param hri region to check if it is already rebuild
-   * @throws PleaseHoldException if meta has not been loaded yet
-   */
-  private void checkMetaLoaded(RegionInfo hri) throws PleaseHoldException {
-    if (!isRunning()) {
-      throw new PleaseHoldException("AssignmentManager not running");
-    }
-    boolean meta = isMetaRegion(hri);
-    boolean metaLoaded = isMetaLoaded();
-    if (!meta && !metaLoaded) {
-      throw new PleaseHoldException(
-        "Master not fully online; hbase:meta=" + meta + ", metaLoaded=" + metaLoaded);
-    }
-  }
-
-  //
-  // TODO: Metrics
-  //
-  public int getNumRegionsOpened() {
-    // TODO: Used by TestRegionPlacement.java and assume monotonically increasing value
-    return 0;
-  }
-
-  public long submitServerCrash(ServerName serverName, boolean shouldSplitWal) {
-    boolean carryingMeta;
-    long pid;
-    ServerStateNode serverNode = regionStates.getServerNode(serverName);
-    if (serverNode == null) {
-      LOG.info("Skip to add SCP for {} since this server should be OFFLINE already", serverName);
-      return -1;
-    }
-    // we hold the write lock here for fencing on reportRegionStateTransition. Once we set the
-    // server state to CRASHED, we will no longer accept the reportRegionStateTransition call from
-    // this server. This is used to simplify the implementation for TRSP and SCP, where we can make
-    // sure that, the region list fetched by SCP will not be changed any more.
-    serverNode.writeLock().lock();
-    try {
-      ProcedureExecutor procExec = this.master.getMasterProcedureExecutor();
-      carryingMeta = isCarryingMeta(serverName);
-      if (!serverNode.isInState(ServerState.ONLINE)) {
-        LOG.info(
-          "Skip to add SCP for {} with meta= {}, " +
-            "since there should be a SCP is processing or already done for this server node",
-          serverName, carryingMeta);
-        return -1;
-      } else {
-        serverNode.setState(ServerState.CRASHED);
-        pid = procExec.submitProcedure(new ServerCrashProcedure(procExec.getEnvironment(),
-          serverName, shouldSplitWal, carryingMeta));
-        LOG.info(
-          "Added {} to dead servers which carryingMeta={}, submitted ServerCrashProcedure pid={}",
-          serverName, carryingMeta, pid);
-      }
-    } finally {
-      serverNode.writeLock().unlock(
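The comment in submitServerCrash above describes a fencing pattern that is easy to miss in the diff noise: the server state is flipped to CRASHED under the write lock, so any later reportRegionStateTransition call (which takes the read lock) is refused, and the region list the ServerCrashProcedure snapshots can no longer change. A self-contained sketch of that idiom, using hypothetical names rather than the real HBase classes:

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class ServerNodeSketch {
  enum State { ONLINE, CRASHED }
  private final ReadWriteLock lock = new ReentrantReadWriteLock();
  private State state = State.ONLINE;

  // region server -> master call path; rejected once the node is fenced
  boolean reportTransition() {
    lock.readLock().lock();
    try {
      return state == State.ONLINE;
    } finally {
      lock.readLock().unlock();
    }
  }

  // SCP submission path: flip the state and snapshot atomically
  void fenceAndSubmitScp(Runnable snapshotRegions) {
    lock.writeLock().lock();
    try {
      if (state != State.ONLINE) return; // an SCP already handled this node
      state = State.CRASHED;             // from here on, reports are refused
      snapshotRegions.run();             // region list can no longer change
    } finally {
      lock.writeLock().unlock();
    }
  }
}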


[17/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.html
new file mode 100644
index 000..5722edf
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/SplitWALProcedure.html
@@ -0,0 +1,793 @@
+SplitWALProcedure (Apache HBase 3.0.0-SNAPSHOT API)
+org.apache.hadoop.hbase.master.procedure
+Class SplitWALProcedure
+
+java.lang.Object
+  org.apache.hadoop.hbase.procedure2.Procedure
+    org.apache.hadoop.hbase.procedure2.StateMachineProcedure
+      org.apache.hadoop.hbase.master.procedure.SplitWALProcedure
+
+All Implemented Interfaces:
+  Comparable, ServerProcedureInterface
+
+@InterfaceAudience.Private
+public class SplitWALProcedure
+extends StateMachineProcedure
+implements ServerProcedureInterface
+
+The procedure is to split a WAL. It will get an available region server and
+schedule a SplitWALRemoteProcedure to actually send the request to the region
+server to split this WAL. It also checks whether the split-WAL task really
+succeeded: if the WAL still exists, it schedules another region server to
+split this WAL.
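The acquire-worker / dispatch / verify loop the description sketches can be shown in a self-contained way. The state names in the comments loosely mirror the procedure's acquire/dispatch/release phases; everything else here is hypothetical, not the real procedure framework:

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Optional;

final class SplitWalFlowSketch {
  interface WorkerPool { Optional<String> acquire(); void release(String worker); }
  interface Dispatcher { void splitOn(String worker, Path wal); }

  /** Keep re-dispatching until the WAL file is gone, i.e. the split succeeded. */
  static void run(WorkerPool pool, Dispatcher dispatcher, Path wal) throws InterruptedException {
    while (Files.exists(wal)) {                        // verify step of the loop
      Optional<String> worker = pool.acquire();        // "acquire split WAL worker"
      if (worker.isEmpty()) { Thread.sleep(1000); continue; }
      try {
        dispatcher.splitOn(worker.get(), wal);         // "dispatch WAL to worker"
      } finally {
        pool.release(worker.get());                    // "release split worker"
      }
    }
  }
}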
+Nested Class Summary
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.StateMachineProcedure:
+  StateMachineProcedure.Flow
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.Procedure:
+  Procedure.LockState
+
+Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface:
+  ServerProcedureInterface.ServerOperationType
+Field Summary
+
+Modifier and Type                Field
+private int                      attempts
+private ServerName               crashedServer
+private static org.slf4j.Logger  LOG
+private String                   walPath
+private ServerName               worker
+
+Fields inherited from class org.apache.hadoop.hbase.procedure2.StateMachineProcedure:
+  stateCount
+
+Fields inherited from class org.apache.hadoop.hbase.procedure2.Procedure:
+  NO_PROC_ID, NO_TIMEOUT
+Constructor Summary
+
+SplitWALProcedure()
+SplitWALProcedure(String walPath, ServerName crashedServer)
+Method Summary
+
+All Methods | Instance Methods | Concrete Methods
+
+Modifier and Type    Method and Description
+protected void       afterReplay(MasterProcedureEnv env)
+                     Called when the procedure is ready to be added to the queue after the
+                     loading/replay operation.
+protected void       deserializeStateData(ProcedureStateSerializer serializer)
+                     Called on store load to allow the user to decode the previously
+                     serialized state.
+protected StateMa

[17/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.html
index 2e150bc..0b315b8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.html
@@ -25,22 +25,22 @@
  */
 package org.apache.hadoop.hbase.client;

-import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
-import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
+import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;

-import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
@@ -83,432 +83,441 @@

     private RegionLocateType locateType = RegionLocateType.CURRENT;

-    public SingleRequestCallerBuilder table(TableName tableName) {
-      this.tableName = tableName;
-      return this;
-    }
-
-    public SingleRequestCallerBuilder row(byte[] row) {
-      this.row = row;
-      return this;
-    }
-
-    public SingleRequestCallerBuilder action(
-        AsyncSingleRequestRpcRetryingCaller.Callable callable) {
-      this.callable = callable;
-      return this;
-    }
-
-    public SingleRequestCallerBuilder operationTimeout(long operationTimeout, TimeUnit unit) {
-      this.operationTimeoutNs = unit.toNanos(operationTimeout);
-      return this;
-    }
-
-    public SingleRequestCallerBuilder rpcTimeout(long rpcTimeout, TimeUnit unit) {
-      this.rpcTimeoutNs = unit.toNanos(rpcTimeout);
-      return this;
-    }
-
-    public SingleRequestCallerBuilder locateType(RegionLocateType locateType) {
-      this.locateType = locateType;
-      return this;
-    }
-
-    public SingleRequestCallerBuilder pause(long pause, TimeUnit unit) {
-      this.pauseNs = unit.toNanos(pause);
-      return this;
-    }
-
-    public SingleRequestCallerBuilder maxAttempts(int maxAttempts) {
-      this.maxAttempts = maxAttempts;
-      return this;
-    }
-
-    public SingleRequestCallerBuilder startLogErrorsCnt(int startLogErrorsCnt) {
-      this.startLogErrorsCnt = startLogErrorsCnt;
-      return this;
-    }
-
-    public AsyncSingleRequestRpcRetryingCaller build() {
-      return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn,
-          checkNotNull(tableName, "tableName is null"), checkNotNull(row, "row is null"),
-          checkNotNull(locateType, "locateType is null"), checkNotNull(callable, "action is null"),
-          pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
+    private int replicaId = RegionReplicaUtil.DEFAULT_REPLICA_ID;
+
+    public SingleRequestCallerBuilder table(TableName tableName) {
+      this.tableName = tableName;
+      return this;
+    }
+
+    public
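For orientation, a usage sketch of the fluent builder being diffed above. The setter names come straight from the code; the factory and Callable arguments are assumptions, and since these types are package-private, a real caller would have to live inside org.apache.hadoop.hbase.client:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch; assumes compilation inside the org.apache.hadoop.hbase.client
// package where AsyncRpcRetryingCallerFactory and friends are visible.
class SingleCallerUsageSketch {
  static CompletableFuture<Result> exampleGet(
      AsyncRpcRetryingCallerFactory factory,
      AsyncSingleRequestRpcRetryingCaller.Callable<Result> rpc) {
    return factory.<Result> single()
        .table(TableName.valueOf("test"))            // required: checkNotNull in build()
        .row(Bytes.toBytes("row-1"))                 // required
        .locateType(RegionLocateType.CURRENT)        // the default shown in the diff
        .action(rpc)                                 // the RPC to run with retries
        .operationTimeout(30, TimeUnit.SECONDS)
        .rpcTimeout(10, TimeUnit.SECONDS)
        .pause(100, TimeUnit.MILLISECONDS)
        .maxAttempts(16)
        .startLogErrorsCnt(5)
        .build()
        .call();                                     // kicks off the retrying call
  }
}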

[17/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServer.html
index 8d94cde..ab92b9d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServer.html
@@ -26,222 +26,778 @@

 package org.apache.hadoop.hbase.thrift;

-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.http.InfoServer;
-import org.apache.hadoop.hbase.thrift.ThriftServerRunner.ImplType;
-import org.apache.hadoop.hbase.util.VersionInfo;
-import org.apache.hadoop.util.Shell.ExitCodeException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter;
-import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;
-
-/**
- * ThriftServer - this class starts up a Thrift server which implements the
- * Hbase API specified in the Hbase.thrift IDL file. The server runs in an
- * independent process.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-public class ThriftServer {
-
-  private static final Logger LOG = LoggerFactory.getLogger(ThriftServer.class);
-
-  private static final String MIN_WORKERS_OPTION = "minWorkers";
-  private static final String MAX_WORKERS_OPTION = "workers";
-  private static final String MAX_QUEUE_SIZE_OPTION = "queue";
-  private static final String KEEP_ALIVE_SEC_OPTION = "keepAliveSec";
-  static final String BIND_OPTION = "bind";
-  static final String COMPACT_OPTION = "compact";
-  static final String FRAMED_OPTION = "framed";
-  static final String PORT_OPTION = "port";
-  static final String INFOPORT_OPTION = "infoport";
-
-  private static final String DEFAULT_BIND_ADDR = "0.0.0.0";
-  private static final int DEFAULT_LISTEN_PORT = 9090;
-
-  private Configuration conf;
-  ThriftServerRunner serverRunner;
-
-  private InfoServer infoServer;
-
-  private static final String READ_TIMEOUT_OPTION = "readTimeout";
-
-  //
-  // Main program and support routines
-  //
-
-  public ThriftServer(Configuration conf) {
-    this.conf = HBaseConfiguration.create(conf);
-  }
+import static org.apache.hadoop.hbase.thrift.Constants.BACKLOG_CONF_DEAFULT;
+import static org.apache.hadoop.hbase.thrift.Constants.BACKLOG_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.BIND_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.BIND_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.COMPACT_CONF_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.COMPACT_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.COMPACT_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.DEFAULT_BIND_ADDR;
+import static org.apache.hadoop.hbase.thrift.Constants.DEFAULT_HTTP_MAX_HEADER_SIZE;
+import static org.apache.hadoop.hbase.thrift.Constants.DEFAULT_LISTEN_PORT;
+import static org.apache.hadoop.hbase.thrift.Constants.FRAMED_CONF_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.FRAMED_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.FRAMED_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.HTTP_MAX_THREADS_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.HTTP_MAX_THREADS_KEY_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.HTTP_MIN_THREADS_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.HTTP_MIN_THREADS_KEY_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.INFOPORT_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.KEEP_ALIVE_SEC_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.MAX_FRAME_SIZE_CONF_DEFAULT;
+import static org.apache.hadoop.hbase.thrift.Constants.MAX_FRAME_SIZE_CONF_KEY;
+import static org.apache.hadoop.hbase.thrift.Constants.MAX_QUEUE_SIZE_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.MAX_WORKERS_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.MIN_WORKERS_OPTION;
+import static org.apache.hadoop.hbase.thrift.Constants.PORT_
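The option constants being centralized into Constants here feed a commons-cli parser (see the old imports above). A minimal, self-contained sketch of that wiring; option names mirror the constants, and the defaults come from the removed fields (DEFAULT_BIND_ADDR = "0.0.0.0", DEFAULT_LISTEN_PORT = 9090):

import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.hbase.thirdparty.org.apache.commons.cli.DefaultParser;
import org.apache.hbase.thirdparty.org.apache.commons.cli.Options;

public class ThriftOptionsSketch {
  public static void main(String[] args) throws Exception {
    Options options = new Options();
    options.addOption("b", "bind", true, "address to bind the Thrift server to");
    options.addOption("p", "port", true, "port to bind to");
    options.addOption("f", "framed", false, "use framed transport");
    options.addOption("c", "compact", false, "use the compact protocol");
    CommandLine cmd = new DefaultParser().parse(options, args);
    String bind = cmd.getOptionValue("bind", "0.0.0.0");
    int port = Integer.parseInt(cmd.getOptionValue("port", "9090"));
    System.out.println("would start Thrift server on " + bind + ":" + port);
  }
}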

[17/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/MetricsIOWrapper.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/MetricsIOWrapper.html b/devapidocs/org/apache/hadoop/hbase/io/MetricsIOWrapper.html
index 9dd2c50..174528c 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/MetricsIOWrapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/MetricsIOWrapper.html
@@ -222,6 +222,6 @@
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.html b/devapidocs/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.html
index 3584406..47f3067 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/MetricsIOWrapperImpl.html
@@ -277,6 +277,6 @@
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/Reference.Range.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/Reference.Range.html b/devapidocs/org/apache/hadoop/hbase/io/Reference.Range.html
index 2a1495e..408a714 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/Reference.Range.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/Reference.Range.html
@@ -349,6 +349,6 @@
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/Reference.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/Reference.html b/devapidocs/org/apache/hadoop/hbase/io/Reference.html
index 30a523c..4b621ca 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/Reference.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/Reference.html
@@ -636,6 +636,6 @@
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/SizedCellScanner.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/SizedCellScanner.html b/devapidocs/org/apache/hadoop/hbase/io/SizedCellScanner.html
index 5cba171..963f4e8 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/SizedCellScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/SizedCellScanner.html
@@ -201,6 +201,6 @@
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/TagCompressionContext.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/TagCompressionContext.html b/devapidocs/org/apache/hadoop/hbase/io/TagCompressionContext.html
index f747f41..369cac1 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/TagCompressionContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/TagCompressionContext.html
@@ -480,6 +480,6 @@
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/TimeRange.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/TimeRange.html b/devapidocs/org/apache/hadoop/hbase/io/TimeRange.html
index f0e970b..813932f 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/TimeRange.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/TimeRange.html
@@ -750,6 +750,6 @@
-Copyrig

[17/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.html
index 2c6f38a..c537059 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/RegionObserver.html
@@ -28,1039 +28,1086 @@
 package org.apache.hadoop.hbase.coprocessor;

 import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
-import org.apache.hadoop.hbase.regionserver.OperationStatus;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.Region.Operation;
-import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.ScanOptions;
-import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFileReader;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
-
-/**
- * Coprocessors implement this interface to observe and mediate client actions on the region.
- *
- * Since most implementations will be interested in only a subset of hooks, this class uses
- * 'default' functions to avoid having to add unnecessary overrides. When the functions are
- * non-empty, it's simply to satisfy the compiler by returning value of expected (non-void) type.
- * It is done in a way that these default definitions act as no-op. So our suggestion to
- * implementation would be to not call these 'default' methods from overrides.
- *
- * Exception Handling:
- * For all functions, exception handling is done as follows:
- *   - Exceptions of type IOException are reported back to client.
- *   - For any other kind of exception:
- *     - If the configuration CoprocessorHost#ABORT_ON_ERROR_KEY is set to true, then the
- *       server aborts.
- *     - Otherwise, coprocessor is removed from the server and
- *       org.apache.hadoop.hbase.DoNotRetryIOException is returned to the client.
- *
- * For Split Related Hooks:
- * In hbase2/AMv2, master runs splits, so the split related hooks are moved to MasterObserver.
- *
- * Increment Column Value:
- * We do not call this hook anymore.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-@InterfaceStability.Evolving
-// TODO as method signatures need to break, update to
-// ObserverContext<? extends RegionCoprocessorEnvironment>
-// so we can use additional environment state that isn't exposed to coprocessors.
-public interface RegionObserver {
-  /** Mutation type for postMutationBeforeWAL hook */
-  enum MutationType {
-    APPEND, INCREMENT
-  }
-
-  /**
-   * Called before the reg
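A minimal observer along the lines the javadoc recommends: override a single hook, and let an IOException carry rejections back to the client (any other exception type either aborts the server or unloads the coprocessor, per the exception-handling rules above). The audit policy below is invented purely for illustration:

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class AuditingObserver implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);   // expose the observer hooks (HBase 2.x wiring)
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx,
      Get get, List<Cell> results) throws IOException {
    if (get.getAttribute("audit") == null) {          // toy policy for illustration
      throw new IOException("rejecting un-audited Get");  // reported back to client
    }
  }
}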

[17/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html
deleted file mode 100644
index f32b223..000
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.ExternalBlockCaches.html
+++ /dev/null
@@ -1,756 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
-import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
-import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
-import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
-import org.apache.hadoop.hbase.util.ReflectionUtils;
-import org.apache.hadoop.util.StringUtils;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-
-/**
- * Stores all of the cache objects and configuration for a single HFile.
- */
-@InterfaceAudience.Private
-public class CacheConfig {
-  private static final Logger LOG = LoggerFactory.getLogger(CacheConfig.class.getName());
-
-  /**
-   * Disabled cache configuration
-   */
-  public static final CacheConfig DISABLED = new CacheConfig();
-
-  /**
-   * Configuration key to cache data blocks on read. Bloom blocks and index blocks are always
-   * cached if the block cache is enabled.
-   */
-  public static final String CACHE_DATA_ON_READ_KEY = "hbase.block.data.cacheonread";
-
-  /**
-   * Configuration key to cache data blocks on write. There are separate
-   * switches for bloom blocks and non-root index blocks.
-   */
-  public static final String CACHE_BLOCKS_ON_WRITE_KEY = "hbase.rs.cacheblocksonwrite";
-
-  /**
-   * Configuration key to cache leaf and intermediate-level index blocks on write.
-   */
-  public static final String CACHE_INDEX_BLOCKS_ON_WRITE_KEY = "hfile.block.index.cacheonwrite";
-
-  /**
-   * Configuration key to cache compound bloom filter blocks on write.
-   */
-  public static final String CACHE_BLOOM_BLOCKS_ON_WRITE_KEY = "hfile.block.bloom.cacheonwrite";
-
-  /**
-   * Configuration key to cache data blocks in compressed and/or encrypted format.
-   */
-  public static final String CACHE_DATA_BLOCKS_COMPRESSED_KEY = "hbase.block.data.cachecompressed";
-
-  /**
-   * Configuration key to evict all blocks of a given file from the block cache
-   * when the file is closed.
-   */
-  public static final String EVICT_BLOCKS_ON_CLOSE_KEY = "hbase.rs.evictblocksonclose";
-
-  /**
-   * Configuration keys for Bucket cache
-   */
-
-  /**
-   * If the chosen ioengine can persist its state across restarts, the path to the file to persist
-   * to. This file is NOT the data file. It is a file into which we will serialize the map of
-   * what is in the data file. For example, if you pass the following argument as
-   * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"),
-   * file:/tmp/bucketcache.data, then we will write the bucketcache data to the file
-   * /tmp/bucketcache.data but the metadata on where the data is in the supplied file
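The keys documented above are plain Hadoop Configuration entries; a hedged sketch of setting them programmatically (key strings come straight from the source, values are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CacheConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.block.data.cacheonread", true);   // cache data blocks on read
    conf.setBoolean("hbase.rs.cacheblocksonwrite", true);    // cache data blocks on write
    conf.setBoolean("hbase.rs.evictblocksonclose", true);    // evict a file's blocks on close
    // Bucket cache: the ioengine plus its size (per the HConstants keys imported above).
    conf.set("hbase.bucketcache.ioengine", "offheap");
    conf.setInt("hbase.bucketcache.size", 4096);
    System.out.println("bucket cache engine: " + conf.get("hbase.bucketcache.ioengine"));
  }
}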

[17/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
@@ -142,5192 +142,5186 @@
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-
-/**
- * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
- * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
- * work with hbase-2.x; it can read state but is not allowed to change state, i.e. effect 'repair'.
- * See hbck2 (HBASE-19121) for an hbck tool for hbase2.
- *
- * Region consistency checks verify that hbase:meta, region deployment on region
- * servers and the state of data in HDFS (.regioninfo files) all are in accordance.
- *
- * Table integrity checks verify that all possible row keys resolve to exactly
- * one region of a table. This means there are no individual degenerate
- * or backwards regions; no holes between regions; and that there are no
- * overlapping regions.
- *
- * The general repair strategy works in two phases:
- *   1. Repair Table Integrity on HDFS (merge or fabricate regions).
- *   2. Repair Region Consistency with hbase:meta and assignments.
- *
- * For table integrity repairs, the tables' region directories are scanned
- * for .regioninfo files. Each table's integrity is then verified. If there
- * are any orphan regions (regions with no .regioninfo files) or holes, new
- * regions are fabricated. Backwards regions are sidelined, as are empty
- * degenerate (endkey==startkey) regions. If there are any overlapping regions,
- * a new region is created and all data is merged into the new region.
- *
- * Table integrity repairs deal solely with HDFS and could potentially be done
- * offline -- the hbase region servers or master do not need to be running.
- * This phase can eventually be used to completely reconstruct the hbase:meta
- * table in an offline fashion.
- *
- * Region consistency requires three conditions: 1) a valid .regioninfo file
- * present in an HDFS region dir, 2) a valid row with .regioninfo data in META,
- * and 3) a region deployed only at the regionserver it was assigned to,
- * with proper state in the master.
- *
- * Region consistency repairs require hbase to be online so that hbck can
- * contact the HBase master and region servers. The hbck#connect() method must
- * first be called successfully. Much of the region consistency information
- * is transient and less risky to repair.
- *
- * If hbck is run from the command line, there are a handful of arguments t


[17/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 39c0719..0dcbecc 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
class HRegion.RegionScannerImpl
extends java.lang.Object
implements RegionScanner, Shipper, RpcCallback

RegionScannerImpl is used to combine scanners from multiple Stores (aka column families).

Fields:
  KeyValueHeap storeHeap
  KeyValueHeap joinedHeap
      Heap of key-values that are not essential for the provided filters and are thus read
      on demand, if on-demand column family loading is enabled.
  protected Cell joinedContinuationRow
      If the joined heap data gathering is interrupted due to scan limits, this will
      contain the row for which we are populating the values.
  private boolean filterClosed
  protected final byte[] stopRow
  protected final boolean includeStopRow
  protected final HRegion region
  protected final CellComparator comparator
  private final long readPt
  private final long maxResultSize
  private final ScannerContext defaultScannerContext
  private final FilterWrapper filter

Constructors:
  RegionScannerImpl(Scan scan, List additionalScanners, HRegion region)
      throws IOException
  RegionScannerImpl(Scan scan, List additionalScanners, HRegion region,
      long nonceGroup, ...)

Methods:
  public RegionInfo getRegionInfo()
      Specified by: getRegionInfo in interface RegionScanner
  protected void initializeScanners(Scan scan, List additionalScanners)
      throws IOException
  protected void initializeKVHeap(List scanners, List joinedScanners, HRegion region)
      throws
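The storeHeap/joinedHeap split is the noteworthy design here: the essential heap drives filtering, and the joined heap is only polled for rows the filter has already accepted, which is what makes lazy column-family loading pay off. A generic-Java sketch of the idea (not HBase internals; Java 17 for records):

import java.util.ArrayDeque;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;
import java.util.function.Predicate;

final class TwoHeapSketch {
  record Cell(String row, String family, String value) {}
  static final Comparator<Cell> CMP = Comparator.comparing(Cell::row);

  final PriorityQueue<Cell> essentialHeap = new PriorityQueue<>(CMP);
  final PriorityQueue<Cell> joinedHeap = new PriorityQueue<>(CMP);

  /** Collect one row: filter on the essential heap, then join lazily. */
  List<Cell> nextRow(Predicate<Cell> filter) {
    ArrayDeque<Cell> out = new ArrayDeque<>();
    Cell head = essentialHeap.poll();
    if (head == null || !filter.test(head)) return List.of();
    out.add(head);
    // Only now touch the joined heap, and only for the accepted row key.
    while (!joinedHeap.isEmpty() && joinedHeap.peek().row().equals(head.row())) {
      out.add(joinedHeap.poll());
    }
    return List.copyOf(out);
  }
}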

[17/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TableAuthManager.PermissionCache.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TableAuthManager.PermissionCache.html b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TableAuthManager.PermissionCache.html
deleted file mode 100644
index 2e68b22..000
--- a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TableAuthManager.PermissionCache.html
+++ /dev/null
@@ -1,859 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.security.access;
-
-import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
-import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.AuthUtil;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.log.HBaseMarkers;
-import org.apache.hadoop.hbase.security.Superusers;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Performs authorization checks for a given user's assigned permissions
- */
-@InterfaceAudience.Private
-public class TableAuthManager implements Closeable {
-  private static class PermissionCache<T extends Permission> {
-    /** Cache of user permissions */
-    private ListMultimap<String, T> userCache = ArrayListMultimap.create();
-    /** Cache of group permissions */
-    private ListMultimap<String, T> groupCache = ArrayListMultimap.create();
-
-    public List<T> getUser(String user) {
-      return userCache.get(user);
-    }
-
-    public void putUser(String user, T perm) {
-      userCache.put(user, perm);
-    }
-
-    public List<T> replaceUser(String user, Iterable<T> perms) {
-      return userCache.replaceValues(user, perms);
-    }
-
-    public List<T> getGroup(String group) {
-      return groupCache.get(group);
-    }
-
-    public void putGroup(String group, T perm) {
-      groupCache.put(group, perm);
-    }
-
-    public List<T> replaceGroup(String group, Iterable<T> perms) {
-      return groupCache.replaceValues(group, perms);
-    }
-
-    /**
-     * Returns a combined map of user and group permissions, with group names
-     * distinguished according to {@link AuthUtil#isGroupPrincipal(String)}.
-     */
-    public ListMultimap<String, T> getAllPermissions() {
-      ListMultimap<String, T> tmp = ArrayListMultimap.create();
-      tmp.putAll(userCache);
-      for (String group : groupCache.keySet()) {
-        tmp.putAll(AuthUtil.toGroupEntry(group), groupCache.get(group));
-      }
-      return tmp;
-    }
-  }
-
-  private static final Logger LOG = LoggerFactory.getLogger(TableAuthManager.class);
-
-  /** Cache of global permissions */
-  private vo
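The PermissionCache pattern stands on its own well. A compilable sketch using plain Guava instead of HBase's shaded copy; the '@' group prefix is an assumption standing in for AuthUtil.toGroupEntry:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;

final class PermissionCacheSketch<T> {
  private final ListMultimap<String, T> userCache = ArrayListMultimap.create();
  private final ListMultimap<String, T> groupCache = ArrayListMultimap.create();

  void putUser(String user, T perm) { userCache.put(user, perm); }
  void putGroup(String group, T perm) { groupCache.put(group, perm); }

  /** Merged view; group keys get a marker prefix so they stay distinguishable. */
  ListMultimap<String, T> getAllPermissions() {
    ListMultimap<String, T> tmp = ArrayListMultimap.create();
    tmp.putAll(userCache);
    for (String group : groupCache.keySet()) {
      tmp.putAll("@" + group, groupCache.get(group));  // assumption: '@' marks a group
    }
    return tmp;
  }

  public static void main(String[] args) {
    PermissionCacheSketch<String> cache = new PermissionCacheSketch<>();
    cache.putUser("alice", "READ");
    cache.putGroup("admins", "ADMIN");
    System.out.println(cache.getAllPermissions()); // {alice=[READ], @admins=[ADMIN]}
  }
}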

[17/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/downloads.html
--
diff --git a/downloads.html b/downloads.html
index 966b3dc..5aaeba8 100644
--- a/downloads.html
+++ b/downloads.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase – Apache HBase Downloads
@@ -461,7 +461,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-  Last Published: 2018-10-27
+  Last Published: 2018-10-29

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/export_control.html
--
diff --git a/export_control.html b/export_control.html
index d0947c2..f952f1d 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase – Export Control
@@ -341,7 +341,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-  Last Published: 2018-10-27
+  Last Published: 2018-10-29

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/index.html
--
diff --git a/index.html b/index.html
index e0f934e..85192d6 100644
--- a/index.html
+++ b/index.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase – Apache HBase™ Home
@@ -421,7 +421,7 @@
 Apache HBase is an open-source, distributed, versioned, non-relational database
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-  Last Published: 2018-10-27
+  Last Published: 2018-10-29

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/integration.html
--
diff --git a/integration.html b/integration.html
index 4e0d0bd..3479bf8 100644
--- a/integration.html
+++ b/integration.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase – CI Management
@@ -301,7 +301,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-  Last Published: 2018-10-27
+  Last Published: 2018-10-29

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/issue-tracking.html
--
diff --git a/issue-tracking.html b/issue-tracking.html
index 2e0d74c..fd76f65 100644
--- a/issue-tracking.html
+++ b/issue-tracking.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase – Issue Management
@@ -298,7 +298,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-  Last Published: 2018-10-27
+  Last Published: 2018-10-29

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/license.html
--
diff --git a/license.html b/license.html
index a3b8dd7..afe8dbb 100644
--- a/license.html
+++ b/license.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase – Project Licenses
@@ -501,7 +501,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-  Last Published: 2018-10-27
+  Last Published: 2018-10-29

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/mail-lists.html
--
diff --git a/mail-lists.html b/mail-lists.html
index c204231..20b589b 100644
--- a/mail-lists.html
+++ b/mail-lists.html
@@ -7,7 +7,7 @@
-
+
 Apache HBase – Project Mailing Lists
@@ -351,7 +351,7 @@
 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-  Last Published: 2018-10-27
+  Last Published: 2018-10-29

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/metrics.html
--
diff --git a/metrics.html b/metrics.html
index 3af8d04..2d68844 100644
--- a/metrics.html
+++ b/m

[17/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html
index c7d99b2..9d1542c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ServerLocalityCostFunction.html
@@ -382,1357 +382,1365 @@
     for (int i = 0; i < this.curFunctionCosts.length; i++) {
       curFunctionCosts[i] = tempFunctionCosts[i];
     }
-    LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-        + functionCost());
+    double initCost = currentCost;
+    double newCost = currentCost;

-    double initCost = currentCost;
-    double newCost = currentCost;
-
-    long computedMaxSteps;
-    if (runMaxSteps) {
-      computedMaxSteps = Math.max(this.maxSteps,
-          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-    } else {
-      computedMaxSteps = Math.min(this.maxSteps,
-          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-    }
-    // Perform a stochastic walk to see if we can get a good fit.
-    long step;
-
-    for (step = 0; step < computedMaxSteps; step++) {
-      Cluster.Action action = nextAction(cluster);
-
-      if (action.type == Type.NULL) {
-        continue;
-      }
-
-      cluster.doAction(action);
-      updateCostsWithAction(cluster, action);
-
-      newCost = computeCost(cluster, currentCost);
-
-      // Should this be kept?
-      if (newCost < currentCost) {
-        currentCost = newCost;
-
-        // save for JMX
-        curOverallCost = currentCost;
-        for (int i = 0; i < this.curFunctionCosts.length; i++) {
-          curFunctionCosts[i] = tempFunctionCosts[i];
-        }
-      } else {
-        // Put things back the way they were before.
-        // TODO: undo by remembering old values
-        Action undoAction = action.undoAction();
-        cluster.doAction(undoAction);
-        updateCostsWithAction(cluster, undoAction);
-      }
-
-      if (EnvironmentEdgeManager.currentTime() - startTime > maxRunningTime) {
-        break;
-      }
-    }
-    long endTime = EnvironmentEdgeManager.currentTime();
-
-    metricsBalancer.balanceCluster(endTime - startTime);
-
-    // update costs metrics
-    updateStochasticCosts(tableName, curOverallCost, curFunctionCosts);
-    if (initCost > currentCost) {
-      plans = createRegionPlans(cluster);
-      LOG.info("Finished computing new load balance plan. Computation took {}" +
-        " to try {} different iterations.  Found a solution that moves " +
-        "{} regions; Going from a computed cost of {}" +
-        " to a new cost of {}", java.time.Duration.ofMillis(endTime - startTime),
-        step, plans.size(), initCost, currentCost);
-      return plans;
-    }
-    LOG.info("Could not find a better load balance plan.  Tried {} different configurations in " +
-      "{}, and did not find anything with a computed cost less than {}", step,
-      java.time.Duration.ofMillis(endTime - startTime), initCost);
-    return null;
-  }
-
-  /**
-   * update costs to JMX
-   */
-  private void updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) {
-    if (tableName == null) return;
-
-    // check if the metricsBalancer is MetricsStochasticBalancer before casting
-    if (metricsBalancer instanceof MetricsStochasticBalancer) {
-      MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer;
-      // overall cost
-      balancer.updateStochasticCost(tableName.getNameAsString(),
-        "Overall", "Overall cost", overall);
-
-      // each cost function
-      for (int i = 0; i < costFunctions.length; i++) {
-        CostFunction costFunction = costFunctions[i];
-        String costFunctionName = costFunction.getClass().getSimpleName();
-        Double costPercent = (overall == 0) ? 0 : (subCosts[i] / overall);
-        // TODO: cost function may need a specific description
-        balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-          "The percent of " + costFunctionName, costPercent);
-      }
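Stripped of HBase specifics, the loop above is a plain stochastic hill-climb: propose an action, recompute cost, keep the action if cost drops, otherwise undo it, all under step and time budgets. A self-contained sketch:

import java.util.function.DoubleSupplier;
import java.util.function.Supplier;

final class StochasticWalkSketch {
  interface Action { void apply(); void undo(); }

  static double walk(double currentCost, long maxSteps, long maxRunningTimeMs,
      Supplier<Action> nextAction, DoubleSupplier computeCost) {
    long start = System.currentTimeMillis();
    for (long step = 0; step < maxSteps; step++) {
      Action action = nextAction.get();
      action.apply();
      double newCost = computeCost.getAsDouble();
      if (newCost < currentCost) {
        currentCost = newCost;          // keep the improvement
      } else {
        action.undo();                  // put things back the way they were
      }
      if (System.currentTimeMillis() - start > maxRunningTimeMs) break;
    }
    return currentCost;
  }
}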

[17/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
index 0c894de..8729895 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
@@ -179,4145 +179,4146 @@
  * avoiding port contention if another local HBase instance is already running).
  * To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
  * setting it to true.
- */
-@InterfaceAudience.Public
-@SuppressWarnings("deprecation")
-public class HBaseTestingUtility extends HBaseZKTestingUtility {
-
-  /**
-   * System property key to get test directory value. Name is as it is because mini dfs has
-   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a
-   * property used in mini dfs.
-   * @deprecated can be used only with mini dfs
-   */
-  @Deprecated
-  private static final String TEST_DIRECTORY_KEY = "test.build.data";
-
-  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
-  /**
-   * The default number of regions per regionserver when creating a pre-split
-   * table.
-   */
-  public static final int DEFAULT_REGIONS_PER_SERVER = 3;
-
+ * Trigger pre commit.
+ */
+@InterfaceAudience.Public
+@SuppressWarnings("deprecation")
+public class HBaseTestingUtility extends HBaseZKTestingUtility {
+
+  /**
+   * System property key to get test directory value. Name is as it is because mini dfs has
+   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a
+   * property used in mini dfs.
+   * @deprecated can be used only with mini dfs
+   */
+  @Deprecated
+  private static final String TEST_DIRECTORY_KEY = "test.build.data";
+
+  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
+  /**
+   * The default number of regions per regionserver when creating a pre-split
+   * table.
+   */
+  public static final int DEFAULT_REGIONS_PER_SERVER = 3;

-  public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
-  public static final boolean PRESPLIT_TEST_TABLE = true;
-
-  private MiniDFSCluster dfsCluster = null;
-
-  private volatile HBaseCluster hbaseCluster = null;
-  private MiniMRCluster mrCluster = null;
-
-  /** If there is a mini cluster running for this testing utility instance. */
-  private volatile boolean miniClusterRunning;
-
-  private String hadoopLogDir;
-
-  /** Directory on test filesystem where we put the data for this instance of
-   *  HBaseTestingUtility */
-  private Path dataTestDirOnTestFS = null;
-
-  /**
-   * Shared cluster connection.
-   */
-  private volatile Connection connection;
-
-  /** Filesystem URI used for map-reduce mini-cluster setup */
-  private static String FS_URI;
-
-  /** This is for unit tests parameterized with a single boolean. */
-  public static final List MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
-
-  /**
-   * Checks to see if a specific port is available.
-   *
-   * @param port the port number to check for availability
-   * @return true if the port is available, or false if not
-   */
-  public static boolean available(int port) {
-    ServerSocket ss = null;
-    DatagramSocket ds = null;
-    try {
-      ss = new ServerSocket(port);
-      ss.setReuseAddress(true);
-      ds = new DatagramSocket(port);
-      ds.setReuseAddress(true);
-      return true;
-    } catch (IOException e) {
-      // Do nothing
-    } finally {
-      if (ds != null) {
-        ds.close();
-      }
-      if (ss != null) {
-        try {
-          ss.close();
-        } catch (IOException e) {
-          /* should not be thrown */
-        }
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Create all combinations of Bloom filters and compression algorithms for
-   * testing.
-   */
-  private static List bloomAndCompressionCombinations() {
-    List configurations = new ArrayList<>();
-    for (Compression.Algorithm comprAlgo :
-        HBaseCommonTestingUtility.COMPR


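available(int) above treats a port as usable only when both a TCP ServerSocket and a UDP DatagramSocket can bind it. A small sketch of how a test-side port allocator might use such a check; the retry loop and port range here are illustrative, not HBaseTestingUtility's actual policy:

  import java.io.IOException;
  import java.net.DatagramSocket;
  import java.net.ServerSocket;
  import java.util.Random;

  public final class PortPicker {
    private static final Random RNG = new Random();

    // Same idea as HBaseTestingUtility.available(): the port is usable only
    // if both TCP and UDP can bind it.
    static boolean available(int port) {
      try (ServerSocket ss = new ServerSocket(port);
           DatagramSocket ds = new DatagramSocket(port)) {
        ss.setReuseAddress(true);
        ds.setReuseAddress(true);
        return true;
      } catch (IOException e) {
        return false;
      }
    }

    /** Pick a random free port above the well-known range, retrying on collisions. */
    static int randomFreePort(int attempts) {
      for (int i = 0; i < attempts; i++) {
        int port = 1024 + RNG.nextInt(65535 - 1024);
        if (available(port)) {
          return port;
        }
      }
      throw new IllegalStateException("no free port found after " + attempts + " attempts");
    }
  }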
[17/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALProcedure.html
index a145e1f..169aa47 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALProcedure.html
@@ -317,7 +317,7 @@ extends Procedure
-addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, incChildrenLatch, isBypass, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, shouldWaitClientAck, skipPersistence, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
+addStackIndex, afterReplay, beforeReplay, bypass, compareTo, completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, incChildrenLatch, isBypass, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, shouldWaitClientAck, skipPersistence, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, wasExecuted
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.html
index 7d47bb7..f33b4da 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALRemoteProcedure.html
@@ -322,7 +322,7 @@ implements Procedure
-acquireLock, addStackIndex, afterReplay, beforeReplay, compareTo, completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isBypass, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, isYieldAfterExecutionStep, releaseLock, removeStackIndex, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, skipPersistence, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, toStringState, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, waitInitialized, wasExecuted
+acquireLock, addStackIndex, afterReplay, beforeReplay, bypass, compareTo, completionCleanup, doExecute, doRollback, elapsedTime, ge

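Both hunks in this commit record the same underlying change: a bypass method now appears in the inherited-member lists of Procedure subclasses. As a rough, hypothetical illustration only (the real org.apache.hadoop.hbase.procedure2.Procedure is far richer), the idea is an operator escape hatch that flags a stuck procedure so the executor treats it as having no remaining work:

  // Illustrative-only sketch of the "bypass" idea visible in the method lists
  // above; names and semantics here are simplified stand-ins.
  abstract class ToyProcedure {
    private volatile boolean bypass;

    /** Operator escape hatch: mark a stuck procedure so it finishes without running. */
    void bypass() {
      this.bypass = true;
    }

    boolean isBypass() {
      return bypass;
    }

    final ToyProcedure[] call() {
      if (isBypass()) {
        return null; // executor treats null as "no more work"; procedure completes
      }
      return execute();
    }

    /** @return follow-up procedures, or null when finished. */
    protected abstract ToyProcedure[] execute();
  }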
[17/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
index d81409c..843112f 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -205,851 +205,899 @@
-SimpleScanResultConsumer
+RestoreSnapshotFromClientAfterSplittingRegionsTestBase
+RestoreSnapshotFromClientAfterTruncateTestBase
+RestoreSnapshotFromClientCloneTestBase
+RestoreSnapshotFromClientGetCompactionStateTestBase
+RestoreSnapshotFromClientSchemaChangeTestBase
+RestoreSnapshotFromClientSimpleTestBase
+RestoreSnapshotFromClientTestBase
+    Base class for testing restore snapshot
+SimpleScanResultConsumer
 TestAdmin1
     Class to test HBaseAdmin.
 TestAdmin2
     Class to test HBaseAdmin.
 TestAdminShell
 TestAllowPartialScanResultCache
 TestAlwaysSetScannerId
     Testcase to make sure that we always set scanner id in ScanResponse.
 TestAppendFromClientSide
     Run Append tests that use the HBase clients;
 TestAsyncAdminBase
     Class to test AsyncAdmin.
 TestAsyncAdminBuilder
 TestAsyncAdminBuilder.TestMaxRetriesCoprocessor
 TestAsyncAdminBuilder.TestOperationTimeoutCoprocessor
 TestAsyncAdminBuilder.TestRpcTimeoutCoprocessor
 TestAsyncAggregationClient
 TestAsyncBufferMutator
 TestAsyncClusterAdminApi
 TestAsyncClusterAdminApi2
     Only used to test stopMaster/stopRegionServer/shutdown methods.
 TestAsyncDecommissionAdminApi
 TestAsyncMetaRegionLocator
 TestAsyncNamespaceAdminApi
     Class to test asynchronous namespace admin operations.
 TestAsyncNonMetaRegionLocator
 TestAsyncNonMetaRegionLocatorConcurrenyLimit
 TestAsyncNonMetaRegionLocatorConcurrenyLimit.CountingRegionObserver
 TestAsyncProcedureAdminApi
     Class to test asynchronous procedure admin operations.
 TestAsyncProcess
 TestAsyncProcess.AsyncProcessForThrowableCheck
 TestAsyncProcess.AsyncProcessWithFailure
 TestAsyncProcess.CallerWithFailure
 TestAsyncProcess.CountingThreadFactory
 TestAsyncProcess.MyAsyncProcess
 TestAsyncProcess.MyAsyncProcessWithReplicas
 TestAsyncProcess.MyAsyncRequestFutureImpl
 TestAsyncProcess.MyClientBackoffPolicy
     Make the backoff time always different on each call.
 TestAsyncProcess.MyConnectionImpl
     Returns our async process.
 TestAsyncProcess.MyConnectionImpl.TestRegistry
 TestAsyncProcess.MyConnectionImpl2
     Returns our async process.
 TestAsyncProcess.MyThreadPoolExecutor
 TestAsyncProcessWithRegionException
     The purpose of this test is to make sure the region exception won't corrupt the results of batch.
 TestAsyncProcessWithRegionException.MyAsyncProcess
 TestAsyncQuotaAdminApi
 TestAsyncRegionAdminApi
     Class to test asynchronous region admin operations.
 TestAsyncRegionAdminApi2
     Class to test asynchronous region admin operations.
 TestAsyncRegionLocatorTimeout
 TestAsyncRegionLocatorTimeout.SleepRegionObserver
 TestAsyncReplicationAdminApi
     Class to test asynchronous replication admin operations.
 TestAsyncReplicationAdminApiWithClusters
     Class to test asynchronous replication admin operations when more than 1 cluster
 TestAsyncResultScannerCursor
 TestAsyncSingleRequestRpcRetryingCaller
 TestAsyncSnapshotAdminApi
 TestAsyncTable
 TestAsyncTableAdminApi
     Class to test asynchronous table admin operations.
 TestAsyncTableAdminApi2
     Class to test asynchronous table admin operations
 TestAsyncTableAdminApi3
     Class to test asynchronous table admin operations.
 TestAsyncTableBatch
 TestAsyncTableBatch.ErrorInjectObserver
 TestAsyncTableGetMultiThreaded
     Will split the table, and move region randomly when testing.
 TestAsyncTableGetMultiThreadedWithBasicCompaction
 TestAsyncTableGetMultiThreadedWithEagerCompaction
 TestAsyncTableLocatePrefetch
 TestAsyncTableNoncedRetry
 TestAsyncTableScan
 TestAsyncTableScanAll
 TestAsyncTableScanMetrics
 TestAsyncTableScanner
 TestAsyncTableScannerCloseWhileSuspending
 TestAsyncTableScanRenewLease
 TestAsyncTableScanRenewLease.RenewLeaseConsumer
 TestAsyncToolAdminApi
     Test the admin operations for Balancer, Normalizer, CleanerChore, and CatalogJanitor.
 TestAttributes

[17/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
index e1b183b..b456cd2 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.LeaseRecovery.html
@@ -53,1338 +53,1354 @@
 045import org.apache.hadoop.hbase.log.HBaseMarkers;
 046import org.apache.hadoop.hbase.procedure2.Procedure;
 047import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-048import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase;
-049import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
-050import org.apache.hadoop.hbase.procedure2.util.ByteSlot;
-051import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-052import org.apache.hadoop.hbase.util.CommonFSUtils;
-053import org.apache.hadoop.hbase.util.Threads;
-054import org.apache.hadoop.ipc.RemoteException;
-055import org.apache.yetus.audience.InterfaceAudience;
-056import org.slf4j.Logger;
-057import org.slf4j.LoggerFactory;
-058
-059import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-060import org.apache.hbase.thirdparty.org.apache.commons.collections4.queue.CircularFifoQueue;
-061
-062import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureWALHeader;
-063
-064/**
-065 * WAL implementation of the ProcedureStore.
-066 * <p/>
-067 * When starting, the upper layer will first call {@link #start(int)}, then {@link #recoverLease()},
-068 * then {@link #load(ProcedureLoader)}.
-069 * <p/>
-070 * In {@link #recoverLease()}, we will get the lease by closing all the existing wal files(by
-071 * calling recoverFileLease), and creating a new wal writer. And we will also get the list of all
-072 * the old wal files.
-073 * <p/>
-074 * FIXME: notice that the current recover lease implementation is problematic, it can not deal with
-075 * the races if there are two master both wants to acquire the lease...
-076 * <p/>
-077 * In {@link #load(ProcedureLoader)} method, we will load all the active procedures. See the
-078 * comments of this method for more details.
-079 * <p/>
-080 * The actual logging way is a bit like our FileSystem based WAL implementation as RS side. There is
-081 * a {@link #slots}, which is more like the ring buffer, and in the insert, update and delete
-082 * methods we will put thing into the {@link #slots} and wait. And there is a background sync
-083 * thread(see the {@link #syncLoop()} method) which get data from the {@link #slots} and write them
-084 * to the FileSystem, and notify the caller that we have finished.
-085 * <p/>
-086 * TODO: try using disruptor to increase performance and simplify the logic?
-087 * <p/>
-088 * The {@link #storeTracker} keeps track of the modified procedures in the newest wal file, which is
-089 * also the one being written currently. And the deleted bits in it are for all the procedures, not
-090 * only the ones in the newest wal file. And when rolling a log, we will first store it in the
-091 * trailer of the current wal file, and then reset its modified bits, so that it can start to track
-092 * the modified procedures for the new wal file.
-093 * <p/>
-094 * The {@link #holdingCleanupTracker} is used to test whether we are safe to delete the oldest wal
-095 * file. When there are log rolling and there are more than 1 wal files, we will make use of it. It
-096 * will first be initialized to the oldest file's tracker(which is stored in the trailer), using the
-097 * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge it
-098 * with the tracker of every newer wal files, using the
-099 * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we find out
-100 * that all the modified procedures for the oldest wal file are modified or deleted in newer wal
-101 * files, then we can delete it.
-102 * @see ProcedureWALPrettyPrinter for printing content of a single WAL.
-103 * @see #main(String[]) to parse a directory of MasterWALProcs.
-104 */
-105@InterfaceAudience.Private
-106public class WALProcedureStore extends ProcedureStoreBase {
-107  private static final Logger LOG = LoggerFactory.getLogger(WALProcedureStore.class);
-108  public static final String LOG_PREFIX = "pv2-";
-109  /** Used to construct the name of the log directory for master procedures */
-110  public static final String MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
-111
-112
-113  public interface LeaseRecovery {
-114    v


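The class comment above describes the store's core mechanic: writers park serialized updates in a bounded set of slots, and a single background sync thread drains the slots, persists them, and wakes the waiters. A stripped-down sketch of that producer/sync-loop handshake in generic Java, not the actual WALProcedureStore code (error handling, batching limits, and log rolling are all omitted):

  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.ArrayBlockingQueue;
  import java.util.concurrent.BlockingQueue;
  import java.util.concurrent.CompletableFuture;

  // Stripped-down model of the slots + background-sync pattern described in
  // the WALProcedureStore javadoc above.
  final class SyncLoopSketch {
    private static final class Slot {
      final byte[] payload;
      final CompletableFuture<Void> synced = new CompletableFuture<>();
      Slot(byte[] payload) { this.payload = payload; }
    }

    private final BlockingQueue<Slot> slots = new ArrayBlockingQueue<>(128);
    private final Thread syncThread = new Thread(this::syncLoop, "sync-loop");
    private volatile boolean running = true;

    void start() { syncThread.start(); }

    /** Producer side: enqueue an update and block until the sync thread persisted it. */
    void insert(byte[] serializedUpdate) throws Exception {
      Slot slot = new Slot(serializedUpdate);
      slots.put(slot);
      slot.synced.get(); // wait for the background sync
    }

    private void syncLoop() {
      List<Slot> batch = new ArrayList<>();
      while (running) {
        try {
          batch.add(slots.take());      // block for the first slot...
          slots.drainTo(batch);         // ...then grab whatever else is queued
          // A real store would append every payload to the WAL and hsync here.
          for (Slot slot : batch) {
            slot.synced.complete(null); // wake up the waiting producer
          }
          batch.clear();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return;
        }
      }
    }
  }

Batching every queued slot into one sync is what makes the single writer thread keep up with many concurrent producers; the disruptor idea in the TODO is an alternative implementation of the same handoff.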
[17/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.CompatStateSerializer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.CompatStateSerializer.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.CompatStateSerializer.html
index 0d97a1c..297bc43 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.CompatStateSerializer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.CompatStateSerializer.html
@@ -29,331 +29,335 @@
 021import java.io.InputStream;
 022import java.lang.reflect.Constructor;
 023import java.lang.reflect.Modifier;
-024import org.apache.hadoop.hbase.HConstants;
-025import org.apache.yetus.audience.InterfaceAudience;
-026import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-027import org.apache.hbase.thirdparty.com.google.protobuf.Any;
-028import org.apache.hbase.thirdparty.com.google.protobuf.Internal;
-029import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-030import org.apache.hbase.thirdparty.com.google.protobuf.Message;
-031import org.apache.hbase.thirdparty.com.google.protobuf.Parser;
-032import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-033import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
-034import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-035import org.apache.hadoop.hbase.util.NonceKey;
-036
-037/**
-038 * Helper to convert to/from ProcedureProtos
-039 */
-040@InterfaceAudience.Private
-041public final class ProcedureUtil {
-042  private ProcedureUtil() { }
-043
-044  // ==========================================================================
-045  //  Reflection helpers to create/validate a Procedure object
-046  // ==========================================================================
-047  private static Procedure<?> newProcedure(String className) throws BadProcedureException {
-048    try {
-049      Class<?> clazz = Class.forName(className);
-050      if (!Modifier.isPublic(clazz.getModifiers())) {
-051        throw new Exception("the " + clazz + " class is not public");
-052      }
-053
-054      @SuppressWarnings("rawtypes")
-055      Constructor<? extends Procedure> ctor = clazz.asSubclass(Procedure.class).getConstructor();
-056      assert ctor != null : "no constructor found";
-057      if (!Modifier.isPublic(ctor.getModifiers())) {
-058        throw new Exception("the " + clazz + " constructor is not public");
-059      }
-060      return ctor.newInstance();
-061    } catch (Exception e) {
-062      throw new BadProcedureException(
-063        "The procedure class " + className + " must be accessible and have an empty constructor",
-064        e);
-065    }
-066  }
-067
-068  static void validateClass(Procedure<?> proc) throws BadProcedureException {
-069    try {
-070      Class<?> clazz = proc.getClass();
-071      if (!Modifier.isPublic(clazz.getModifiers())) {
-072        throw new Exception("the " + clazz + " class is not public");
-073      }
-074
-075      Constructor<?> ctor = clazz.getConstructor();
-076      assert ctor != null;
-077      if (!Modifier.isPublic(ctor.getModifiers())) {
-078        throw new Exception("the " + clazz + " constructor is not public");
-079      }
-080    } catch (Exception e) {
-081      throw new BadProcedureException("The procedure class " + proc.getClass().getName() +
-082        " must be accessible and have an empty constructor", e);
-083    }
-084  }
-085
-086  // ==========================================================================
-087  //  convert to and from Procedure object
-088  // ==========================================================================
-089
-090  /**
-091   * A serializer for our Procedures. Instead of the previous serializer, it
-092   * uses the stateMessage list to store the internal state of the Procedures.
-093   */
-094  private static class StateSerializer implements ProcedureStateSerializer {
-095    private final ProcedureProtos.Procedure.Builder builder;
-096    private int deserializeIndex;
-097
-098    public StateSerializer(ProcedureProtos.Procedure.Builder builder) {
-099      this.builder = builder;
-100    }
-101
-102    @Override
-103    public void serialize(Message message) throws IOException {
-104      Any packedMessage = Any.pack(message);
-105      builder.addStateMessage(packedMessage);
-106    }
-107
-108    @Override
-109    public <M extends Message> M deserialize(Class<M> clazz)
-110        throws IOException {
-111      if (deserializeIndex >= builder.getStateMessageCount()) {
-112        throw new IOException("Invalid state message index: " + deserializeIndex);
-113      }
-114
-115      try {
-116        Any pac

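StateSerializer above leans on protobuf's Any type: serialize() packs each state message into the procedure's stateMessage list, and deserialize() unpacks them back in insertion order. The round-trip looks like this with the plain com.google.protobuf API; the hbase-thirdparty relocation used above behaves the same way:

  import com.google.protobuf.Any;
  import com.google.protobuf.InvalidProtocolBufferException;
  import com.google.protobuf.StringValue;

  public final class AnyRoundTrip {
    public static void main(String[] args) throws InvalidProtocolBufferException {
      // Pack: wraps the message bytes together with its type URL.
      StringValue state = StringValue.newBuilder().setValue("region=r1,step=2").build();
      Any packed = Any.pack(state);

      // Unpack: checked against the requested type, much like
      // StateSerializer.deserialize(Class) above.
      if (packed.is(StringValue.class)) {
        StringValue restored = packed.unpack(StringValue.class);
        System.out.println(restored.getValue());
      }
    }
  }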
[17/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
index 976894f..721035e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.TableDescriptorGetter.html
@@ -3020,926 +3020,927 @@
 3012    }
 3013  }
 3014
-3015  void checkServiceStarted() throws ServerNotRunningYetException {
-3016    if (!serviceStarted) {
-3017      throw new ServerNotRunningYetException("Server is not running yet");
-3018    }
-3019  }
-3020
-3021  public static class MasterStoppedException extends DoNotRetryIOException {
-3022    MasterStoppedException() {
-3023      super();
-3024    }
-3025  }
-3026
-3027  void checkInitialized() throws PleaseHoldException, ServerNotRunningYetException,
-3028      MasterNotRunningException, MasterStoppedException {
-3029    checkServiceStarted();
-3030    if (!isInitialized()) {
-3031      throw new PleaseHoldException("Master is initializing");
-3032    }
-3033    if (isStopped()) {
-3034      throw new MasterStoppedException();
-3035    }
-3036  }
-3037
-3038  /**
-3039   * Report whether this master is currently the active master or not.
-3040   * If not active master, we are parked on ZK waiting to become active.
-3041   *
-3042   * This method is used for testing.
-3043   *
-3044   * @return true if active master, false if not.
-3045   */
-3046  @Override
-3047  public boolean isActiveMaster() {
-3048    return activeMaster;
-3049  }
-3050
-3051  /**
-3052   * Report whether this master has completed with its initialization and is
-3053   * ready.  If ready, the master is also the active master.  A standby master
-3054   * is never ready.
-3055   *
-3056   * This method is used for testing.
-3057   *
-3058   * @return true if master is ready to go, false if not.
-3059   */
-3060  @Override
-3061  public boolean isInitialized() {
-3062    return initialized.isReady();
-3063  }
-3064
-3065  /**
-3066   * Report whether this master is in maintenance mode.
-3067   *
-3068   * @return true if master is in maintenanceMode
-3069   */
-3070  @Override
-3071  public boolean isInMaintenanceMode() throws IOException {
-3072    if (!isInitialized()) {
-3073      throw new PleaseHoldException("Master is initializing");
-3074    }
-3075    return maintenanceModeTracker.isInMaintenanceMode();
-3076  }
-3077
-3078  @VisibleForTesting
-3079  public void setInitialized(boolean isInitialized) {
-3080    procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-3081  }
-3082
-3083  @Override
-3084  public ProcedureEvent<?> getInitializedEvent() {
-3085    return initialized;
-3086  }
-3087
-3088  /**
-3089   * Compute the average load across all region servers.
-3090   * Currently, this uses a very naive computation - just uses the number of
-3091   * regions being served, ignoring stats about number of requests.
-3092   * @return the average load
-3093   */
-3094  public double getAverageLoad() {
-3095    if (this.assignmentManager == null) {
-3096      return 0;
-3097    }
-3098
-3099    RegionStates regionStates = this.assignmentManager.getRegionStates();
-3100    if (regionStates == null) {
-3101      return 0;
-3102    }
-3103    return regionStates.getAverageLoad();
-3104  }
-3105
-3106  /*
-3107   * @return the count of region split plans executed
-3108   */
-3109  public long getSplitPlanCount() {
-3110    return splitPlanCount;
-3111  }
-3112
-3113  /*
-3114   * @return the count of region merge plans executed
-3115   */
-3116  public long getMergePlanCount() {
-3117    return mergePlanCount;
-3118  }
-3119
-3120  @Override
-3121  public boolean registerService(Service instance) {
-3122    /*
-3123     * No stacking of instances is allowed for a single service name
-3124     */
-3125    Descriptors.ServiceDescriptor serviceDesc = instance.getDescriptorForType();
-3126    String serviceName = CoprocessorRpcUtils.getServiceName(serviceDesc);
-3127    if (coprocessorServiceHandlers.containsKey(serviceName)) {
-3128      LOG.error("Coprocessor service " + serviceName +
-3129        " already registered, rejecting request from " + instance);
-3130      return false;
-3131    }
-3132
-3133    coprocessorServiceHandlers.put(serviceName, instance);
-3134    if (LOG.isDebugEnabled()) {
-3135      LOG.debug("Registered master coprocessor service: service=" + serviceName);
-3136    }
-3137    return true;
-3138  }
-3139
-3140  /**
-3141   * Utility for constructing an instance of the passed HMaster class.
-3142   * @param masterClass
-3143   * @return HMaster instance.
-3144   */
-3145

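checkInitialized() above layers three gates: the service must have started, initialization must have completed, and the master must not be stopped, each failing with an exception the client knows how to react to. A compact sketch of that guard layering, with the HBase exception types replaced by a generic one:

  // Simplified model of HMaster's layered readiness checks; the real code
  // throws ServerNotRunningYetException / PleaseHoldException /
  // MasterStoppedException so clients can tell retriable states apart.
  final class ReadinessGuard {
    private volatile boolean serviceStarted;
    private volatile boolean initialized;
    private volatile boolean stopped;

    void markStarted()     { serviceStarted = true; }
    void markInitialized() { initialized = true; }
    void markStopped()     { stopped = true; }

    void checkServiceStarted() {
      if (!serviceStarted) {
        throw new IllegalStateException("Server is not running yet"); // gate 1: process is up
      }
    }

    void checkInitialized() {
      checkServiceStarted();
      if (!initialized) {
        throw new IllegalStateException("Master is initializing");    // gate 2: still loading
      }
      if (stopped) {
        throw new IllegalStateException("Master stopped");            // gate 3: shutting down
      }
    }
  }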
[17/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index 8cc5add..34858d6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -2188,1428 +2188,1428 @@
 2180  }
 2181
 2182  @Override
-2183  public void postOpenDeployTasks(final PostOpenDeployContext context)
-2184      throws KeeperException, IOException {
-2185    HRegion r = context.getRegion();
-2186    long masterSystemTime = context.getMasterSystemTime();
-2187    rpcServices.checkOpen();
-2188    LOG.info("Post open deploy tasks for " + r.getRegionInfo().getRegionNameAsString());
-2189    // Do checks to see if we need to compact (references or too many files)
-2190    for (HStore s : r.stores.values()) {
-2191      if (s.hasReferences() || s.needsCompaction()) {
-2192        this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
-2193      }
-2194    }
-2195    long openSeqNum = r.getOpenSeqNum();
-2196    if (openSeqNum == HConstants.NO_SEQNUM) {
-2197      // If we opened a region, we should have read some sequence number from it.
-2198      LOG.error("No sequence number found when opening " +
-2199        r.getRegionInfo().getRegionNameAsString());
-2200      openSeqNum = 0;
-2201    }
-2202
-2203    // Notify master
-2204    if (!reportRegionStateTransition(new RegionStateTransitionContext(
-2205        TransitionCode.OPENED, openSeqNum, masterSystemTime, r.getRegionInfo()))) {
-2206      throw new IOException("Failed to report opened region to master: "
-2207        + r.getRegionInfo().getRegionNameAsString());
-2208    }
-2209
-2210    triggerFlushInPrimaryRegion(r);
-2211
-2212    LOG.debug("Finished post open deploy task for " + r.getRegionInfo().getRegionNameAsString());
-2213  }
-2214
-2215  @Override
-2216  public boolean reportRegionStateTransition(final RegionStateTransitionContext context) {
-2217    TransitionCode code = context.getCode();
-2218    long openSeqNum = context.getOpenSeqNum();
-2219    long masterSystemTime = context.getMasterSystemTime();
-2220    RegionInfo[] hris = context.getHris();
-2221
-2222    if (TEST_SKIP_REPORTING_TRANSITION) {
-2223      // This is for testing only in case there is no master
-2224      // to handle the region transition report at all.
-2225      if (code == TransitionCode.OPENED) {
-2226        Preconditions.checkArgument(hris != null && hris.length == 1);
-2227        if (hris[0].isMetaRegion()) {
-2228          try {
-2229            MetaTableLocator.setMetaLocation(getZooKeeper(), serverName,
-2230              hris[0].getReplicaId(), State.OPEN);
-2231          } catch (KeeperException e) {
-2232            LOG.info("Failed to update meta location", e);
-2233            return false;
-2234          }
-2235        } else {
-2236          try {
-2237            MetaTableAccessor.updateRegionLocation(clusterConnection,
-2238              hris[0], serverName, openSeqNum, masterSystemTime);
-2239          } catch (IOException e) {
-2240            LOG.info("Failed to update meta", e);
-2241            return false;
-2242          }
-2243        }
-2244      }
-2245      return true;
-2246    }
-2247
-2248    ReportRegionStateTransitionRequest.Builder builder =
-2249      ReportRegionStateTransitionRequest.newBuilder();
-2250    builder.setServer(ProtobufUtil.toServerName(serverName));
-2251    RegionStateTransition.Builder transition = builder.addTransitionBuilder();
-2252    transition.setTransitionCode(code);
-2253    if (code == TransitionCode.OPENED && openSeqNum >= 0) {
-2254      transition.setOpenSeqNum(openSeqNum);
-2255    }
-2256    for (RegionInfo hri: hris) {
-2257      transition.addRegionInfo(ProtobufUtil.toRegionInfo(hri));
-2258    }
-2259    ReportRegionStateTransitionRequest request = builder.build();
-2260    int tries = 0;
-2261    long pauseTime = INIT_PAUSE_TIME_MS;
-2262    // Keep looping till we get an error. We want to send reports even though server is going down.
-2263    // Only go down if clusterConnection is null. It is set to null almost as last thing as the
-2264    // HRegionServer does down.
-2265    while (this.clusterConnection != null && !this.clusterConnection.isClosed()) {
-2266      RegionServerStatusService.BlockingInterface rss = rssStub;
-2267      try {
-2268        if (rss == null) {
-2269          createRegionServerStatusStub();
-2270          continue;
-2271        }
-2272        ReportRegionStateTransitionResponse response =
-2273

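reportRegionStateTransition() above keeps retrying the report while the cluster connection is alive, recreating the master stub whenever it is missing, because the report must land even when the server is on its way down. The retry skeleton reduced to its shape; stub creation, the pause policy, and the backoff values here are placeholders, not the real RPC plumbing:

  import java.util.concurrent.TimeUnit;
  import java.util.function.Supplier;

  // Shape of the report-until-accepted loop in reportRegionStateTransition();
  // createStub/sendReport stand in for the real master-status RPC calls.
  final class ReportRetrySketch {
    interface Stub { boolean sendReport() throws Exception; }

    static boolean reportUntilAccepted(Supplier<Stub> createStub,
        Supplier<Boolean> connectionAlive, long initialPauseMs) throws InterruptedException {
      Stub stub = null;
      long pauseMs = initialPauseMs;
      while (connectionAlive.get()) {
        try {
          if (stub == null) {
            stub = createStub.get();   // (re)create the master stub, then retry
            continue;
          }
          if (stub.sendReport()) {
            return true;               // master accepted the transition report
          }
        } catch (Exception e) {
          stub = null;                 // drop the stub and rebuild it next round
        }
        TimeUnit.MILLISECONDS.sleep(pauseMs);
        pauseMs = Math.min(pauseMs * 2, 10_000); // illustrative backoff cap
      }
      return false;                    // connection closed underneath us
    }
  }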
[17/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
index b56bd67..bba0c5e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
@@ -37,2318 +37,2428 @@
 029import java.util.Map;
 030import java.util.Map.Entry;
 031import java.util.Set;
-032import java.util.stream.Collectors;
-033import org.apache.hadoop.conf.Configuration;
-034import org.apache.hadoop.hbase.ClusterMetricsBuilder;
-035import org.apache.hadoop.hbase.DoNotRetryIOException;
-036import org.apache.hadoop.hbase.HConstants;
-037import org.apache.hadoop.hbase.MetaTableAccessor;
-038import org.apache.hadoop.hbase.NamespaceDescriptor;
-039import org.apache.hadoop.hbase.Server;
-040import org.apache.hadoop.hbase.ServerMetrics;
-041import org.apache.hadoop.hbase.ServerMetricsBuilder;
-042import org.apache.hadoop.hbase.ServerName;
-043import org.apache.hadoop.hbase.TableName;
-044import org.apache.hadoop.hbase.UnknownRegionException;
-045import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-046import org.apache.hadoop.hbase.client.Connection;
-047import org.apache.hadoop.hbase.client.MasterSwitchType;
-048import org.apache.hadoop.hbase.client.RegionInfo;
-049import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-050import org.apache.hadoop.hbase.client.TableDescriptor;
-051import org.apache.hadoop.hbase.client.TableState;
-052import org.apache.hadoop.hbase.client.VersionInfoUtil;
-053import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-054import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
-055import org.apache.hadoop.hbase.errorhandling.ForeignException;
-056import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-057import org.apache.hadoop.hbase.io.hfile.HFile;
-058import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-059import org.apache.hadoop.hbase.ipc.PriorityFunction;
-060import org.apache.hadoop.hbase.ipc.QosPriority;
-061import org.apache.hadoop.hbase.ipc.RpcServer;
-062import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
-063import org.apache.hadoop.hbase.ipc.RpcServerFactory;
-064import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-065import org.apache.hadoop.hbase.ipc.ServerRpcController;
-066import org.apache.hadoop.hbase.master.assignment.RegionStates;
-067import org.apache.hadoop.hbase.master.locking.LockProcedure;
-068import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-069import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
-070import org.apache.hadoop.hbase.mob.MobUtils;
-071import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
-072import org.apache.hadoop.hbase.procedure2.LockType;
-073import org.apache.hadoop.hbase.procedure2.LockedResource;
-074import org.apache.hadoop.hbase.procedure2.Procedure;
-075import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-076import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
-077import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
-078import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
-079import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;
-080import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-081import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-082import org.apache.hadoop.hbase.quotas.QuotaUtil;
-083import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
-084import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-085import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
-086import org.apache.hadoop.hbase.replication.ReplicationException;
-087import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-088import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-089import org.apache.hadoop.hbase.security.User;
-090import org.apache.hadoop.hbase.security.access.AccessChecker;
-091import org.apache.hadoop.hbase.security.access.AccessController;
-092import org.apache.hadoop.hbase.security.access.Permission;
-093import org.apache.hadoop.hbase.security.visibility.VisibilityController;
-094import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-095import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-096import org.apache.hadoop.hbase.util.Bytes;
-097import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-098import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-099impor

[17/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.html
index b34af43..1ccb0e1 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.html
@@ -26,181 +26,204 @@
 018package org.apache.hadoop.hbase.master.assignment;
 019
 020import java.io.IOException;
-021import java.util.Collections;
-022import java.util.List;
-023import java.util.ListIterator;
-024import java.util.stream.Collectors;
-025import java.util.stream.IntStream;
-026import java.util.stream.Stream;
-027import org.apache.hadoop.hbase.HBaseIOException;
-028import org.apache.hadoop.hbase.ServerName;
-029import org.apache.hadoop.hbase.client.RegionInfo;
-030import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-031import org.apache.hadoop.hbase.favored.FavoredNodesManager;
-032import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-033import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-034import org.apache.hadoop.hbase.wal.WALSplitter;
-035import org.apache.yetus.audience.InterfaceAudience;
-036
-037import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
-038
-039import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-040import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-041import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-042import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-043import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-044
-045/**
-046 * Utility for this assignment package only.
-047 */
-048@InterfaceAudience.Private
-049final class AssignmentManagerUtil {
-050  private AssignmentManagerUtil() {
-051  }
-052
-053  /**
-054   * Raw call to remote regionserver to get info on a particular region.
-055   * @throws IOException Let it out so can report this IOE as reason for failure
-056   */
-057  static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env,
-058      final ServerName regionLocation, final RegionInfo hri) throws IOException {
-059    return getRegionInfoResponse(env, regionLocation, hri, false);
-060  }
-061
+021import java.util.ArrayList;
+022import java.util.Collections;
+023import java.util.List;
+024import java.util.ListIterator;
+025import java.util.stream.Collectors;
+026import java.util.stream.IntStream;
+027import java.util.stream.Stream;
+028
+029import org.apache.commons.lang3.ArrayUtils;
+030import org.apache.hadoop.hbase.HBaseIOException;
+031import org.apache.hadoop.hbase.ServerName;
+032import org.apache.hadoop.hbase.client.RegionInfo;
+033import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+034import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+035import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+036import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+037import org.apache.hadoop.hbase.wal.WALSplitter;
+038import org.apache.yetus.audience.InterfaceAudience;
+039
+040import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+041
+042import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+043import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+044import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+045import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+046import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+047
+048/**
+049 * Utility for this assignment package only.
+050 */
+051@InterfaceAudience.Private
+052final class AssignmentManagerUtil {
+053  private static final int DEFAULT_REGION_REPLICA = 1;
+054
+055  private AssignmentManagerUtil() {
+056  }
+057
+058  /**
+059   * Raw call to remote regionserver to get info on a particular region.
+060   * @throws IOException Let it out so can report this IOE as reason for failure
+061   */
 062  static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env,
-063      final ServerName regionLocation, final RegionInfo hri, boolean includeBestSplitRow)
-064      throws IOException {
-065    // TODO: There is no timeout on this controller. Set one!
-066    HBaseRpcController controller =
-067      env.getMasterServices().getClusterConnection().getRpcControllerFactory().newController();
-068    final AdminService.BlockingInterface admin =
-069      env.getMasterServices().getClusterConnection().getAdmin(regionLocation);
-070    GetRegionInfoReques

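The TODO in getRegionInfoResponse() above flags that the controller is created without a timeout, so a hung regionserver can stall the caller indefinitely. The real fix would set a call timeout on the controller itself; as a generic, self-contained stand-in for "bound the blocking call", one can wrap any such RPC in a future with a deadline:

  import java.util.concurrent.CompletableFuture;
  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.TimeoutException;
  import java.util.function.Supplier;

  // Generic stand-in for the missing controller timeout: bound any blocking
  // admin call so a hung server fails the caller instead of stalling it.
  final class BoundedCall {
    static <T> T callWithTimeout(Supplier<T> blockingCall, long timeoutMs) throws Exception {
      CompletableFuture<T> f = CompletableFuture.supplyAsync(blockingCall);
      try {
        return f.get(timeoutMs, TimeUnit.MILLISECONDS);
      } catch (TimeoutException e) {
        f.cancel(true); // best effort; the underlying RPC thread may still run
        throw e;
      }
    }
  }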
[17/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallCleanup.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallCleanup.html b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallCleanup.html
index 51bf304..db6ac15 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallCleanup.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.CallCleanup.html
@@ -199,614 +199,635 @@
 191  protected static final int DEFAULT_WARN_RESPONSE_TIME = 10000; // milliseconds
 192  protected static final int DEFAULT_WARN_RESPONSE_SIZE = 100 * 1024 * 1024;
 193
-194  protected static final ObjectMapper MAPPER = new ObjectMapper();
-195
-196  protected final int maxRequestSize;
-197  protected final int warnResponseTime;
-198  protected final int warnResponseSize;
+194  protected static final int DEFAULT_TRACE_LOG_MAX_LENGTH = 1000;
+195  protected static final String TRACE_LOG_MAX_LENGTH = "hbase.ipc.trace.log.max.length";
+196  protected static final String KEY_WORD_TRUNCATED = " <TRUNCATED>";
+197
+198  protected static final ObjectMapper MAPPER = new ObjectMapper();
 199
-200  protected final int minClientRequestTimeout;
-201
-202  protected final Server server;
-203  protected final List<BlockingServiceAndInterface> services;
-204
-205  protected final RpcScheduler scheduler;
-206
-207  protected UserProvider userProvider;
+200  protected final int maxRequestSize;
+201  protected final int warnResponseTime;
+202  protected final int warnResponseSize;
+203
+204  protected final int minClientRequestTimeout;
+205
+206  protected final Server server;
+207  protected final List<BlockingServiceAndInterface> services;
 208
-209  protected final ByteBufferPool reservoir;
-210  // The requests and response will use buffers from ByteBufferPool, when the size of the
-211  // request/response is at least this size.
-212  // We make this to be 1/6th of the pool buffer size.
-213  protected final int minSizeForReservoirUse;
-214
-215  protected volatile boolean allowFallbackToSimpleAuth;
-216
-217  /**
-218   * Used to get details for scan with a scanner_id
-219   * TODO try to figure out a better way and remove reference from regionserver package later.
-220   */
-221  private RSRpcServices rsRpcServices;
-222
-223  @FunctionalInterface
-224  protected static interface CallCleanup {
-225    void run();
-226  }
-227
-228  /**
-229   * Datastructure for passing a {@link BlockingService} and its associated class of
-230   * protobuf service interface. For example, a server that fielded what is defined
-231   * in the client protobuf service would pass in an implementation of the client blocking service
-232   * and then its ClientService.BlockingInterface.class. Used checking connection setup.
-233   */
-234  public static class BlockingServiceAndInterface {
-235    private final BlockingService service;
-236    private final Class<?> serviceInterface;
-237    public BlockingServiceAndInterface(final BlockingService service,
-238        final Class<?> serviceInterface) {
-239      this.service = service;
-240      this.serviceInterface = serviceInterface;
-241    }
-242    public Class<?> getServiceInterface() {
-243      return this.serviceInterface;
-244    }
-245    public BlockingService getBlockingService() {
-246      return this.service;
-247    }
-248  }
-249
-250  /**
-251   * Constructs a server listening on the named port and address.
-252   * @param server hosting instance of {@link Server}. We will do authentications if an
-253   * instance else pass null for no authentication check.
-254   * @param name Used keying this rpc servers' metrics and for naming the Listener thread.
-255   * @param services A list of services.
-256   * @param bindAddress Where to listen
-257   * @param conf
-258   * @param scheduler
-259   * @param reservoirEnabled Enable ByteBufferPool or not.
-260   */
-261  public RpcServer(final Server server, final String name,
-262      final List<BlockingServiceAndInterface> services,
-263      final InetSocketAddress bindAddress, Configuration conf,
-264      RpcScheduler scheduler, boolean reservoirEnabled) throws IOException {
-265    if (reservoirEnabled) {
-266      int poolBufSize = conf.getInt(ByteBufferPool.BUFFER_SIZE_KEY,
-267        ByteBufferPool.DEFAULT_BUFFER_SIZE);
-268      // The max number of buffers to be pooled in the ByteBufferPool. The default value been
-269      // selected based on the #handlers configured. When it is read request, 2 MB is the max size
-270      // at which we will send back one RPC request. Means max we need 2 MB for creating the
-271      // response cell block. (Well it might be much lesser than this because in 2 MB size calc, we
-272      // include the heap size overhead of each cells also.)

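The constructor above sizes the reservoir from configuration, and the field comments explain the cutoff: requests and responses borrow pooled buffers only once their size reaches one sixth of the pool's buffer size. The sizing decision in isolation; constants here mirror the comments, not the exact RpcServer fields:

  // Mirrors the sizing logic described in the comments above: pooled buffers
  // are worth the bookkeeping only for payloads >= 1/6 of the pool buffer size.
  final class ReservoirSizing {
    static final int DEFAULT_BUFFER_SIZE = 64 * 1024; // stand-in for the pool's default

    final int poolBufSize;
    final int minSizeForReservoirUse;

    ReservoirSizing(int configuredBufSize) {
      this.poolBufSize = configuredBufSize > 0 ? configuredBufSize : DEFAULT_BUFFER_SIZE;
      this.minSizeForReservoirUse = poolBufSize / 6;
    }

    /** @return true when a payload of this size should borrow from the pool. */
    boolean useReservoir(int payloadSize) {
      return payloadSize >= minSizeForReservoirUse;
    }
  }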
[17/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 4330868..1910d0a 100644
--- a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -72,4103 +72,4235 @@
 064import org.apache.hadoop.hbase.Waiter.Predicate;
 065import org.apache.hadoop.hbase.client.Admin;
 066import org.apache.hadoop.hbase.client.BufferedMutator;
-067import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-068import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-069import org.apache.hadoop.hbase.client.Connection;
-070import org.apache.hadoop.hbase.client.ConnectionFactory;
-071import org.apache.hadoop.hbase.client.Consistency;
-072import org.apache.hadoop.hbase.client.Delete;
-073import org.apache.hadoop.hbase.client.Durability;
-074import org.apache.hadoop.hbase.client.Get;
-075import org.apache.hadoop.hbase.client.HBaseAdmin;
-076import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
-077import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
-078import org.apache.hadoop.hbase.client.Put;
-079import org.apache.hadoop.hbase.client.RegionInfo;
-080import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-081import org.apache.hadoop.hbase.client.RegionLocator;
-082import org.apache.hadoop.hbase.client.Result;
-083import org.apache.hadoop.hbase.client.ResultScanner;
-084import org.apache.hadoop.hbase.client.Scan;
-085import org.apache.hadoop.hbase.client.Table;
-086import org.apache.hadoop.hbase.client.TableDescriptor;
-087import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-088import org.apache.hadoop.hbase.client.TableState;
-089import org.apache.hadoop.hbase.fs.HFileSystem;
-090import org.apache.hadoop.hbase.io.compress.Compression;
-091import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-092import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-093import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
-094import org.apache.hadoop.hbase.io.hfile.HFile;
-095import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-096import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-097import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
-098import org.apache.hadoop.hbase.master.HMaster;
-099import org.apache.hadoop.hbase.master.RegionState;
-100import org.apache.hadoop.hbase.master.ServerManager;
-101import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-102import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
-103import org.apache.hadoop.hbase.master.assignment.RegionStates;
-104import org.apache.hadoop.hbase.regionserver.BloomType;
-105import org.apache.hadoop.hbase.regionserver.ChunkCreator;
-106import org.apache.hadoop.hbase.regionserver.HRegion;
-107import org.apache.hadoop.hbase.regionserver.HRegionServer;
-108import org.apache.hadoop.hbase.regionserver.HStore;
-109import org.apache.hadoop.hbase.regionserver.InternalScanner;
-110import org.apache.hadoop.hbase.regionserver.MemStoreLABImpl;
-111import org.apache.hadoop.hbase.regionserver.Region;
-112import org.apache.hadoop.hbase.regionserver.RegionScanner;
-113import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-114import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-115import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
-116import org.apache.hadoop.hbase.security.User;
-117import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
-118import org.apache.hadoop.hbase.trace.TraceUtil;
-119import org.apache.hadoop.hbase.util.Bytes;
-120import org.apache.hadoop.hbase.util.CommonFSUtils;
-121import org.apache.hadoop.hbase.util.FSTableDescriptors;
-122import org.apache.hadoop.hbase.util.FSUtils;
-123import org.apache.hadoop.hbase.util.JVMClusterUtil;
-124import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
-125import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-126import org.apache.hadoop.hbase.util.Pair;
-127import org.apache.hadoop.hbase.util.RegionSplitter;
-128import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
-129import org.apache.hadoop.hbase.util.RetryCounter;
-130import org.apache.hadoop.hbase.util.Threads;
-131import org.apache.hadoop.hbase.wal.WAL;
-132import org.apache.hadoop.hbase.wal.WALFactory;
-133import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
-134import org.apache.hadoop.hbase.zookeeper.ZKConfig;
-135import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-136import org.apache.hadoop.hdfs.DFSClient;
-137import org.apache.hadoop.hdfs.DistributedFileSystem;
-138import org.apache.hadoop.hd

[17/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithOR.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithOR.html b/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithOR.html
index 3282c7f..ece1819 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithOR.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/FilterListWithOR.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -125,7 +125,7 @@ var activeTableTab = "activeTableTab";
 @InterfaceAudience.Private
-public class FilterListWithOR
+public class FilterListWithOR
 extends FilterListBase
 FilterListWithOR represents an ordered list of filters which will be evaluated with an OR
  operator.
@@ -224,18 +224,22 @@ extends FilterListBase
+boolean
+equals(Object obj)
+
 boolean
 filterAllRemaining()
     Filters that never filter all remaining can inherit this implementation that
     never stops the filter early.
 Filter.ReturnCode
 filterCell(Cell c)
     A way to filter based on the column family, column qualifier and/or the column value.
 boolean
 filterRow()
     Filters that never filter by rows based on previously gathered state from ...
 boolean
 filterRowKey(byte[] rowKey, int offset, ...)
 boolean
 filterRowKey(Cell firstRowCell)
     Filters a row based on the row key.
 protected String
 formatLogFilters(List logFilters)
 Cell
 getNextCellHint(Cell currentCell)
     Filters that are not sure which key must be next seeked to, can inherit
     this implementation that, by default, returns a null Cell.
+int
+hashCode()
+
 private Filter.ReturnCode
 mergeReturnCode(Filter.ReturnCode rc, Filter.ReturnCode localRC)
     FilterList with MUST_PASS_ONE choose the minimal forward step among sub-filter in filter list.
 void
 reset()
     Filters that are purely stateless and do nothing in their reset() methods can inherit
     this null/empty implementation.
 private boolean
 shouldPassCurrentCellToFilter(Cell prevCell, Cell currentCell, ...)
 private void
 updatePrevCellList(int index, Cell currentCell, Filter.ReturnCode currentRC)
 private void
 updatePrevFilterRCList(int index, Filter.ReturnCode currentRC)
@@ -331,7 +339,7 @@ extends FilterListBase
 Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
+clone,

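As context for the summary rows above: a minimal client-side sketch of assembling a MUST_PASS_ONE list, the OR evaluation path that FilterListWithOR implements per the class summary. The table, family, qualifier, and value names are placeholders, not taken from this diff.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class OrFilterExample {
  // Build a scan whose filter passes a row if either predicate matches;
  // MUST_PASS_ONE is the OR operator backed by FilterListWithOR.
  public static Scan orScan() {
    FilterList orList = new FilterList(FilterList.Operator.MUST_PASS_ONE,
        new SingleColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("q"),
            CompareOperator.EQUAL, Bytes.toBytes("a")),
        new SingleColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("q"),
            CompareOperator.EQUAL, Bytes.toBytes("b")));
    return new Scan().setFilter(orList);
  }
}
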
[17/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index db8431b..a8cb7c4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -885,7766 +885,7797 @@
 877   * @return What the next sequence 
(edit) id should be.
 878   * @throws IOException e
 879   */
-880  private long initialize(final 
CancelableProgressable reporter) throws IOException {
-881
-882//Refuse to open the region if there 
is no column family in the table
-883if 
(htableDescriptor.getColumnFamilyCount() == 0) {
-884  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
-885  " should have at least one 
column family.");
-886}
-887
-888MonitoredTask status = 
TaskMonitor.get().createStatus("Initializing region " + this);
-889long nextSeqId = -1;
-890try {
-891  nextSeqId = 
initializeRegionInternals(reporter, status);
-892  return nextSeqId;
-893} finally {
-894  // nextSeqid will be -1 if the 
initialization fails.
-895  // At least it will be 0 
otherwise.
-896  if (nextSeqId == -1) {
-897status.abort("Exception during 
region " + getRegionInfo().getRegionNameAsString() +
-898  " initialization.");
-899  }
-900}
-901  }
-902
-903  private long 
initializeRegionInternals(final CancelableProgressable reporter,
-904  final MonitoredTask status) throws 
IOException {
-905if (coprocessorHost != null) {
-906  status.setStatus("Running 
coprocessor pre-open hook");
-907  coprocessorHost.preOpen();
-908}
-909
-910// Write HRI to a file in case we 
need to recover hbase:meta
-911// Only the primary replica should 
write .regioninfo
-912if 
(this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-913  status.setStatus("Writing region 
info on filesystem");
-914  fs.checkRegionInfoOnFilesystem();
-915}
-916
-917// Initialize all the HStores
-918status.setStatus("Initializing all 
the Stores");
-919long maxSeqId = 
initializeStores(reporter, status);
-920this.mvcc.advanceTo(maxSeqId);
-921if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-922  Collection stores = 
this.stores.values();
-923  try {
-924// update the stores that we are 
replaying
-925LOG.debug("replaying wal for " + 
this.getRegionInfo().getEncodedName());
-926
stores.forEach(HStore::startReplayingFromWAL);
-927// Recover any edits if 
available.
-928maxSeqId = Math.max(maxSeqId,
-929  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-930// Make sure mvcc is up to max.
-931this.mvcc.advanceTo(maxSeqId);
-932  } finally {
-933LOG.debug("stopping wal replay 
for " + this.getRegionInfo().getEncodedName());
-934// update the stores that we are 
done replaying
-935
stores.forEach(HStore::stopReplayingFromWAL);
-936  }
-937}
-938this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-939
-940
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-941this.writestate.flushRequested = 
false;
-942this.writestate.compacting.set(0);
-943
-944if (this.writestate.writesEnabled) 
{
-945  LOG.debug("Cleaning up temporary 
data for " + this.getRegionInfo().getEncodedName());
-946  // Remove temporary data left over 
from old regions
-947  status.setStatus("Cleaning up 
temporary data from old regions");
-948  fs.cleanupTempDir();
-949}
-950
-951if (this.writestate.writesEnabled) 
{
-952  status.setStatus("Cleaning up 
detritus from prior splits");
-953  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-954  // these directories here on open.  
We may be opening a region that was
-955  // being split but we crashed in 
the middle of it all.
-956  LOG.debug("Cleaning up detritus for 
" + this.getRegionInfo().getEncodedName());
-957  fs.cleanupAnySplitDetritus();
-958  fs.cleanupMergesDir();
-959}
+880  @VisibleForTesting
+881  long initialize(final 
CancelableProgressable reporter) throws IOException {
+882
+883//Refuse to open the region if there 
is no column family in the table
+884if 
(htableDescriptor.getColumnFamilyCount() == 0) {
+885  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
+886  " should have at least one 
column family.");
+887}
+888
+889MonitoredTask status = 
Ta

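The hunk above makes initialize() package-private behind @VisibleForTesting and keeps the guard that refuses to open a region whose table has no column family. A hedged sketch of a descriptor that satisfies that getColumnFamilyCount() check; the table and family names are placeholders.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class MinimalDescriptor {
  // A table descriptor with one column family; a descriptor without any
  // would make initialize() throw DoNotRetryIOException per the guard above.
  public static TableDescriptor oneFamily() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
  }
}
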
[17/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/assignment/package-use.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-use.html
index 38275f2..b886e22 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/package-use.html
@@ -139,16 +139,6 @@
 AssignmentManager.RegionInTransitionStat 
 
 
-AssignProcedure
-Procedure that describes the assignment of a single 
region.
-
-
-
-AssignProcedure.CompareAssignProcedure
-Sort AssignProcedures such that meta and system assigns 
come before user-space assigns.
-
-
-
 GCRegionProcedure
 GC a Region that is no longer in use.
 
@@ -159,53 +149,55 @@
 
 
 
-MoveRegionProcedure
-Procedure that implements a RegionPlan.
+RegionRemoteProcedureBase
+The base class for the remote procedures used to open/close 
a region.
 
 
 
+RegionStateNode
+Current Region State.
+
+
+
 RegionStates
 RegionStates contains a set of Maps that describes the 
in-memory state of the AM, with
  the regions available in the system, the region in transition, the offline 
regions and
  the servers holding regions.
 
 
-
-RegionStates.RegionFailedOpen 
-
 
-RegionStates.RegionStateNode
-Current Region State.
-
+RegionStates.RegionFailedOpen 
 
 
 RegionStates.RegionStateStampComparator 
 
 
-RegionStates.ServerReportEvent 
+RegionStateStore
+Store Region State to hbase:meta table.
+
 
 
-RegionStates.ServerState
-Server State.
-
+RegionStateStore.RegionStateVisitor 
 
 
-RegionStates.ServerStateNode
-State of Server; list of hosted regions, etc.
+RegionTransitionProcedure
+Deprecated. 
+Do not use any 
more.
+
 
 
 
-RegionStateStore
-Store Region State to hbase:meta table.
+ServerState
+Server State.
 
 
 
-RegionStateStore.RegionStateVisitor 
+ServerStateNode
+State of Server; list of hosted regions, etc.
+
 
 
-RegionTransitionProcedure
-Base class for the Assign and Unassign Procedure.
-
+ServerStateNode.ServerReportEvent 
 
 
 SplitTableRegionProcedure
@@ -213,8 +205,8 @@
 
 
 
-UnassignProcedure
-Procedure that describes the unassignment of a single 
region.
+TransitRegionStateProcedure
+The procedure to deal with the state transition of a 
region.
 
 
 
@@ -234,11 +226,6 @@
 The AssignmentManager is the coordinator for region 
assign/unassign operations.
 
 
-
-MoveRegionProcedure
-Procedure that implements a RegionPlan.
-
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
index 1799704..abd92a9 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/balancer/package-tree.html
@@ -198,8 +198,8 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type
 org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType
+org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/class-use/AssignmentListener.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/class-use/AssignmentListener.html 
b/devapidocs/org/apache/hadoop/hbase/master/class-use/AssignmentListener.html
deleted file mode 100644
index ff66f8f..000
--- 
a/devapidocs/org/apache/hadoop/hbase/master/class-use/AssignmentListener.html
+++ /dev/null
@@ -1,188 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-
-
-
-Uses of Interface org.apache.hadoop.hbase.master.AssignmentListener 
(Apache HBase 3.0.0-SNAPSHOT API)

[17/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALPrettyPrinter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALPrettyPrinter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALPrettyPrinter.html
index f6d4321..dc69851 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALPrettyPrinter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALPrettyPrinter.html
@@ -58,379 +58,383 @@
 050import 
org.apache.hbase.thirdparty.org.apache.commons.cli.PosixParser;
 051
 052import 
com.fasterxml.jackson.databind.ObjectMapper;
-053
-054/**
-055 * WALPrettyPrinter prints the contents 
of a given WAL with a variety of
-056 * options affecting formatting and 
extent of content.
-057 *
-058 * It targets two usage cases: pretty 
printing for ease of debugging directly by
-059 * humans, and JSON output for 
consumption by monitoring and/or maintenance
-060 * scripts.
-061 *
-062 * It can filter by row, region, or 
sequence id.
+053import org.slf4j.Logger;
+054import org.slf4j.LoggerFactory;
+055
+056/**
+057 * WALPrettyPrinter prints the contents 
of a given WAL with a variety of
+058 * options affecting formatting and 
extent of content.
+059 *
+060 * It targets two usage cases: pretty 
printing for ease of debugging directly by
+061 * humans, and JSON output for 
consumption by monitoring and/or maintenance
+062 * scripts.
 063 *
-064 * It can also toggle output of values.
+064 * It can filter by row, region, or 
sequence id.
 065 *
-066 */
-067@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-068@InterfaceStability.Evolving
-069public class WALPrettyPrinter {
-070  private boolean outputValues;
-071  private boolean outputJSON;
-072  // The following enable filtering by 
sequence, region, and row, respectively
-073  private long sequence;
-074  private String region;
-075  private String row;
-076  // enable in order to output a single 
list of transactions from several files
-077  private boolean persistentOutput;
-078  private boolean firstTxn;
-079  // useful for programmatic capture of 
JSON output
-080  private PrintStream out;
-081  // for JSON encoding
-082  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-083
-084  /**
-085   * Basic constructor that simply 
initializes values to reasonable defaults.
-086   */
-087  public WALPrettyPrinter() {
-088outputValues = false;
-089outputJSON = false;
-090sequence = -1;
-091region = null;
-092row = null;
-093persistentOutput = false;
-094firstTxn = true;
-095out = System.out;
-096  }
-097
-098  /**
-099   * Fully specified constructor.
-100   *
-101   * @param outputValues
-102   *  when true, enables output 
of values along with other log
-103   *  information
-104   * @param outputJSON
-105   *  when true, enables output 
in JSON format rather than a
-106   *  "pretty string"
-107   * @param sequence
-108   *  when nonnegative, serves as 
a filter; only log entries with this
-109   *  sequence id will be 
printed
-110   * @param region
-111   *  when not null, serves as a 
filter; only log entries from this
-112   *  region will be printed
-113   * @param row
-114   *  when not null, serves as a 
filter; only log entries from this row
-115   *  will be printed
-116   * @param persistentOutput
-117   *  keeps a single list running 
for multiple files. if enabled, the
-118   *  endPersistentOutput() 
method must be used!
-119   * @param out
-120   *  Specifies an alternative to 
stdout for the destination of this
-121   *  PrettyPrinter's output.
-122   */
-123  public WALPrettyPrinter(boolean 
outputValues, boolean outputJSON,
-124  long sequence, String region, 
String row, boolean persistentOutput,
-125  PrintStream out) {
-126this.outputValues = outputValues;
-127this.outputJSON = outputJSON;
-128this.sequence = sequence;
-129this.region = region;
-130this.row = row;
-131this.persistentOutput = 
persistentOutput;
-132if (persistentOutput) {
-133  beginPersistentOutput();
-134}
-135this.out = out;
-136this.firstTxn = true;
-137  }
-138
-139  /**
-140   * turns value output on
-141   */
-142  public void enableValues() {
-143outputValues = true;
-144  }
-145
-146  /**
-147   * turns value output off
-148   */
-149  public void disableValues() {
-150outputValues = false;
-151  }
-152
-153  /**
-154   * turns JSON output on
-155   */
-156  public void enableJSON() {
-157outputJSON = true;
-158  }
-159
-160  /**
-161   * turns JSON output off, and turns on 
"pretty strings" for human consumption
-162   */
-163  public void disableJSON() {
-164outputJSON = false;
-165  }
-166
-167  /**
-168   * sets the region by which output will 
be filtered
-169   *
-17

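The constructor javadoc in this hunk documents every parameter; a sketch that wires them up exactly as described. Reading an actual WAL file is outside what this hunk shows, so only construction is illustrated here.

import org.apache.hadoop.hbase.wal.WALPrettyPrinter;

public class WalDumpSetup {
  // Matches the fully specified constructor documented above: values on,
  // JSON on, all three filters (sequence/region/row) disabled, one
  // persistent list across files, output to stdout.
  public static WALPrettyPrinter jsonPrinter() {
    return new WALPrettyPrinter(
        true,        // outputValues
        true,        // outputJSON
        -1L,         // sequence: negative disables the sequence-id filter
        null,        // region: null disables the region filter
        null,        // row: null disables the row filter
        true,        // persistentOutput: endPersistentOutput() must be called
        System.out);
  }
}
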
[17/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.SharedMemoryBucketEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.SharedMemoryBucketEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.SharedMemoryBucketEntry.html
index bd3c59e..21e240a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.SharedMemoryBucketEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.SharedMemoryBucketEntry.html
@@ -33,62 +33,62 @@
 025import java.io.FileNotFoundException;
 026import java.io.FileOutputStream;
 027import java.io.IOException;
-028import java.io.ObjectInputStream;
-029import java.io.ObjectOutputStream;
-030import java.io.Serializable;
-031import java.nio.ByteBuffer;
-032import java.util.ArrayList;
-033import java.util.Comparator;
-034import java.util.HashSet;
-035import java.util.Iterator;
-036import java.util.List;
-037import java.util.Map;
-038import java.util.NavigableSet;
-039import java.util.PriorityQueue;
-040import java.util.Set;
-041import 
java.util.concurrent.ArrayBlockingQueue;
-042import 
java.util.concurrent.BlockingQueue;
-043import 
java.util.concurrent.ConcurrentHashMap;
-044import 
java.util.concurrent.ConcurrentMap;
-045import 
java.util.concurrent.ConcurrentSkipListSet;
-046import java.util.concurrent.Executors;
-047import 
java.util.concurrent.ScheduledExecutorService;
-048import java.util.concurrent.TimeUnit;
-049import 
java.util.concurrent.atomic.AtomicInteger;
-050import 
java.util.concurrent.atomic.AtomicLong;
-051import 
java.util.concurrent.atomic.LongAdder;
-052import java.util.concurrent.locks.Lock;
-053import 
java.util.concurrent.locks.ReentrantLock;
-054import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-055import 
org.apache.hadoop.conf.Configuration;
-056import 
org.apache.hadoop.hbase.HBaseConfiguration;
-057import 
org.apache.hadoop.hbase.io.HeapSize;
-058import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-063import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-064import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-066import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-068import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-069import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-070import 
org.apache.hadoop.hbase.nio.ByteBuff;
-071import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-072import 
org.apache.hadoop.hbase.util.HasThread;
-073import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-075import 
org.apache.hadoop.hbase.util.UnsafeAvailChecker;
-076import 
org.apache.hadoop.util.StringUtils;
-077import 
org.apache.yetus.audience.InterfaceAudience;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080
-081import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-082import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-083import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+028import java.io.Serializable;
+029import java.nio.ByteBuffer;
+030import java.util.ArrayList;
+031import java.util.Comparator;
+032import java.util.HashSet;
+033import java.util.Iterator;
+034import java.util.List;
+035import java.util.Map;
+036import java.util.NavigableSet;
+037import java.util.PriorityQueue;
+038import java.util.Set;
+039import 
java.util.concurrent.ArrayBlockingQueue;
+040import 
java.util.concurrent.BlockingQueue;
+041import 
java.util.concurrent.ConcurrentHashMap;
+042import 
java.util.concurrent.ConcurrentMap;
+043import 
java.util.concurrent.ConcurrentSkipListSet;
+044import java.util.concurrent.Executors;
+045import 
java.util.concurrent.ScheduledExecutorService;
+046import java.util.concurrent.TimeUnit;
+047import 
java.util.concurrent.atomic.AtomicInteger;
+048import 
java.util.concurrent.atomic.AtomicLong;
+049import 
java.util.concurrent.atomic.LongAdder;
+050import java.util.concurrent.locks.Lock;
+051import 
java.util.concurrent.locks.ReentrantLock;
+052import 
java.util.concurrent.locks.ReentrantReadWriteLock;
+053import 
org.apache.hadoop.conf.Configuration;
+054import 
org.apache.hadoop.hbase.HBaseConfiguration;
+055import 
org.apache.hadoop.hbase.io.HeapSize;
+056import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
+057import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+058import

[17/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.html
index c0deab4..e3b7912 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.html
@@ -243,7 +243,7 @@ extends Procedure
-acquireLock,
 addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doExecute,
 doReleaseLock,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState, getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout, haveSameParent,
 holdLock,
 incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 isYieldAfterExecutionStep,
 releaseLock,
 removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure, setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId, setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 toString,
 toS
 tringClass, toStringClassDetails,
 toStringDetails,
 toStringSimpleSB,
 toStringState,
 tryRunnable,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp, wasExecuted
+acquireLock,
 addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doExecute, doReleaseLock,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes, getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent, hasTimeout,
 haveSameParent,
 holdLock,
 incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 isYieldAfterExecutionStep,
 lockedWhenLoading,
 releaseLock,
 removeStackIndex,
 restoreLock,
 setAbortFailure,
 setChildrenLatch, 
setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner,
 setParent
 ProcId, setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout,
 setTimeoutFailu
 re, shouldWaitClientAck,
 toString,
 toStringClass,
 toStringClassDetails,
 toStringDetails,
 toStringSimpleSB,
 toStringState,
 tryRunnable,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 waitInitialized,
 wasExecuted
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureUtil.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureUtil.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureUtil.html
index 2ddbe31..9e56968 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureUtil.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureUtil.html
@@ -299,7 +299,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 convertToProcedure
-public static Procedure convertToProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure proto)
+public static Procedure convertToProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure proto)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Helper to convert the protobuf procedure.
  Used by ProcedureStore implementations.
@@ -320,7 +320,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 convertToProtoResourceType
-public 
static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockedResourceType convertToProtoResourceType(LockedResourceType resourceType)
+public 
static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockedResourceType convertToProtoResourceType(LockedResourceType resourceType)
 
 
 
@@ -329,7 +329,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 convertToProtoLockType
-public 
static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType convertToProtoLockType(LockType lockType)
+public 
static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType convertTo

[17/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.
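convertToProcedure above is described as the helper ProcedureStore implementations use to rehydrate stored procedures. A minimal sketch of that call; the surrounding store-load loop is an assumption, not shown in this hunk.

import java.io.IOException;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;

public class ProcedureLoad {
  // Rehydrate one serialized procedure, as a ProcedureStore would on load.
  public static Procedure<?> restore(ProcedureProtos.Procedure proto)
      throws IOException {
    return ProcedureUtil.convertToProcedure(proto);
  }
}
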

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RandomCandidateGenerator.html
@@ -540,1205 +540,1204 @@
 532  
sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533Deque 
rLoads = oldLoads.get(Bytes.toString(regionName));
 534if (rLoads == null) {
-535  // There was nothing there
-536  rLoads = new 
ArrayDeque<>();
-537} else if (rLoads.size() >= 
numRegionLoadsToRemember) {
-538  rLoads.remove();
-539}
-540rLoads.add(new 
BalancerRegionLoad(rm));
-541
loads.put(Bytes.toString(regionName), rLoads);
-542  });
-543});
-544
-545for(CostFromRegionLoadFunction cost : 
regionLoadFunctions) {
-546  cost.setLoads(loads);
-547}
-548  }
-549
-550  protected void initCosts(Cluster 
cluster) {
-551for (CostFunction c:costFunctions) 
{
-552  c.init(cluster);
-553}
-554  }
-555
-556  protected void 
updateCostsWithAction(Cluster cluster, Action action) {
-557for (CostFunction c : costFunctions) 
{
-558  c.postAction(action);
-559}
-560  }
-561
-562  /**
-563   * Get the names of the cost 
functions
-564   */
-565  public String[] getCostFunctionNames() 
{
-566if (costFunctions == null) return 
null;
-567String[] ret = new 
String[costFunctions.length];
-568for (int i = 0; i < 
costFunctions.length; i++) {
-569  CostFunction c = 
costFunctions[i];
-570  ret[i] = 
c.getClass().getSimpleName();
-571}
-572
-573return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It 
will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be 
combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the 
cluster
-581   * @param previousCost the previous 
cost. This is used as an early out.
-582   * @return a double of a cost 
associated with the proposed cluster state.  This cost is an
-583   * aggregate of all individual 
cost functions.
-584   */
-585  protected double computeCost(Cluster 
cluster, double previousCost) {
-586double total = 0;
-587
-588for (int i = 0; i < 
costFunctions.length; i++) {
-589  CostFunction c = 
costFunctions[i];
-590  this.tempFunctionCosts[i] = 0.0;
-591
-592  if (c.getMultiplier() <= 0) {
-593continue;
-594  }
-595
-596  Float multiplier = 
c.getMultiplier();
-597  Double cost = c.cost();
-598
-599  this.tempFunctionCosts[i] = 
multiplier*cost;
-600  total += 
this.tempFunctionCosts[i];
-601
-602  if (total > previousCost) {
-603break;
-604  }
-605}
-606
-607return total;
-608  }
-609
-610  /** Generates a candidate action to be 
applied to the cluster for cost function search */
-611  abstract static class 
CandidateGenerator {
-612abstract Cluster.Action 
generate(Cluster cluster);
-613
-614/**
-615 * From a list of regions pick a 
random one. Null can be returned which
-616 * {@link 
StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region 
move
-617 * rather than swap.
-618 *
-619 * @param clusterThe state of 
the cluster
-620 * @param server index of the 
server
-621 * @param chanceOfNoSwap Chance that 
this will decide to try a move rather
-622 *   than a 
swap.
-623 * @return a random {@link 
RegionInfo} or null if an asymmetrical move is
-624 * suggested.
-625 */
-626protected int 
pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627  // Check to see if this is just a 
move.
-628  if 
(cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < 
chanceOfNoSwap) {
-629// signal a move only.
-630return -1;
-631  }
-632  int rand = 
RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633  return 
cluster.regionsPerServer[server][rand];
-634
-635}
-636protected int 
pickRandomServer(Cluster cluster) {
-637  if (cluster.numServers < 1) {
-638return -1;
-639  }
-640
-641  return 
RANDOM.nextInt(cluster.numServers);
-642}
-643
-644protected int pickRandomRack(Cluster 
cluster) {
-645  if (cluster.numRacks < 1) {
-646retu

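computeCost() above is a weighted sum with an early out: functions with a non-positive multiplier are skipped, each remaining function contributes multiplier * cost, and the loop bails as soon as the running total exceeds previousCost. A standalone restatement of that pattern; CostFn is a stand-in interface, not the real CostFunction class.

public class WeightedCostSketch {
  interface CostFn {
    float multiplier();
    double cost(); // normalized cost for the current cluster state
  }

  static double computeCost(CostFn[] fns, double previousCost) {
    double total = 0;
    for (CostFn fn : fns) {
      if (fn.multiplier() <= 0) {
        continue; // disabled cost function contributes nothing
      }
      total += fn.multiplier() * fn.cost();
      if (total > previousCost) {
        break; // already worse than the current state; stop early
      }
    }
    return total;
  }
}
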
[17/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/ServerName.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/ServerName.html 
b/apidocs/org/apache/hadoop/hbase/ServerName.html
index 40b8d8e..82a2c34 100644
--- a/apidocs/org/apache/hadoop/hbase/ServerName.html
+++ b/apidocs/org/apache/hadoop/hbase/ServerName.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd";>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":10,"i1":10,"i2":10,"i3":42,"i4":10,"i5":10,"i6":10,"i7":10,"i8":41,"i9":41,"i10":41,"i11":10,"i12":10,"i13":10,"i14":9,"i15":9,"i16":41,"i17":41,"i18":9,"i19":41,"i20":9,"i21":10,"i22":10,"i23":9,"i24":9,"i25":9};
-var tabs = 
{65535:["t0","所有方法"],1:["t1","静态方法"],2:["t2","实例方法"],8:["t4","å
…·ä½“方法"],32:["t6","已过时的方法"]};
+var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+Prev Class
+Next Class
 
 
-框架
-无框架
+Frames
+No Frames
 
 
-所有类
+All Classes
 
 
 

[17/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/ServerMetrics.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/ServerMetrics.html 
b/apidocs/org/apache/hadoop/hbase/ServerMetrics.html
index 29337c5..5e17cdd 100644
--- a/apidocs/org/apache/hadoop/hbase/ServerMetrics.html
+++ b/apidocs/org/apache/hadoop/hbase/ServerMetrics.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd";>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":6,"i6":6,"i7":6,"i8":6,"i9":6,"i10":6,"i11":6,"i12":18,"i13":18};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
+var tabs = 
{65535:["t0","所有方法"],2:["t2","实例方法"],4:["t3","抽象方法"],16:["t5","默认方法"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-Prev Class
-Next Class
+上一个类
+下一个类
 
 
-Frames
-No Frames
+框架
+无框架
 
 
-All Classes
+所有类
 
 
 

[17/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

2018-07-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.DecryptHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.DecryptHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.DecryptHandler.html
index 05e032c..40ef9f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.DecryptHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.DecryptHandler.html
@@ -25,767 +25,805 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+020import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+021import static 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
 022
-023import 
org.apache.hbase.thirdparty.com.google.common.base.Charsets;
-024import 
org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-025import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
-026import 
org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-027import 
com.google.protobuf.CodedOutputStream;
-028
-029import 
org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
-030import 
org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream;
-031import 
org.apache.hbase.thirdparty.io.netty.buffer.CompositeByteBuf;
-032import 
org.apache.hbase.thirdparty.io.netty.buffer.Unpooled;
-033import 
org.apache.hbase.thirdparty.io.netty.channel.Channel;
-034import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler;
-035import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
-036import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandlerAdapter;
-037import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline;
-038import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise;
-039import 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler;
-040import 
org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder;
-041import 
org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToByteEncoder;
-042import 
org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufDecoder;
-043import 
org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-044import 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent;
-045import 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
-046import 
org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
-047
-048import java.io.IOException;
-049import java.lang.reflect.Field;
-050import 
java.lang.reflect.InvocationTargetException;
-051import java.lang.reflect.Method;
-052import java.net.InetAddress;
-053import java.net.InetSocketAddress;
-054import java.nio.ByteBuffer;
-055import 
java.security.GeneralSecurityException;
-056import java.util.Arrays;
-057import java.util.Collections;
-058import java.util.List;
-059import java.util.Map;
-060import java.util.Set;
-061import java.util.concurrent.TimeUnit;
-062import 
java.util.concurrent.atomic.AtomicBoolean;
-063
-064import 
javax.security.auth.callback.Callback;
-065import 
javax.security.auth.callback.CallbackHandler;
-066import 
javax.security.auth.callback.NameCallback;
-067import 
javax.security.auth.callback.PasswordCallback;
-068import 
javax.security.auth.callback.UnsupportedCallbackException;
-069import 
javax.security.sasl.RealmCallback;
-070import 
javax.security.sasl.RealmChoiceCallback;
-071import javax.security.sasl.Sasl;
-072import javax.security.sasl.SaslClient;
-073import 
javax.security.sasl.SaslException;
-074
-075import 
org.apache.commons.codec.binary.Base64;
-076import 
org.apache.commons.lang3.StringUtils;
-077import 
org.apache.hadoop.conf.Configuration;
-078import 
org.apache.hadoop.crypto.CipherOption;
-079import 
org.apache.hadoop.crypto.CipherSuite;
-080import 
org.apache.hadoop.crypto.CryptoCodec;
-081import 
org.apache.hadoop.crypto.Decryptor;
-082import 
org.apache.hadoop.crypto.Encryptor;
-083import 
org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
-084import 
org.apache.hadoop.fs.FileEncryptionInfo;
-085import 
org.apache.yetus.audience.InterfaceAudience;
-086import org.slf4j.Logger;
-087import org.slf4j.LoggerFactory;
-088
-089import com.google.protobuf.ByteString;
-090import 
org.apache.hadoop.hdfs.DFSClient;
-091import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-092import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-093import 
org.

[17/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

2018-06-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
index c10cfbf..a3e2f4a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html
@@ -3371,7 +3371,7 @@
 3363private V result = null;
 3364
 3365private final HBaseAdmin admin;
-3366private final Long procId;
+3366protected final Long procId;
 3367
 3368public ProcedureFuture(final 
HBaseAdmin admin, final Long procId) {
 3369  this.admin = admin;
@@ -3653,653 +3653,651 @@
 3645 * @return a description of the 
operation
 3646 */
 3647protected String getDescription() 
{
-3648  return "Operation: " + 
getOperationType() + ", "
-3649  + "Table Name: " + 
tableName.getNameWithNamespaceInclAsString();
-3650
-3651}
-3652
-3653protected abstract class 
TableWaitForStateCallable implements WaitForStateCallable {
-3654  @Override
-3655  public void 
throwInterruptedException() throws InterruptedIOException {
-3656throw new 
InterruptedIOException("Interrupted while waiting for operation: "
-3657+ getOperationType() + " on 
table: " + tableName.getNameWithNamespaceInclAsString());
-3658  }
-3659
-3660  @Override
-3661  public void 
throwTimeoutException(long elapsedTime) throws TimeoutException {
-3662throw new TimeoutException("The 
operation: " + getOperationType() + " on table: " +
-3663tableName.getNameAsString() 
+ " has not completed after " + elapsedTime + "ms");
-3664  }
-3665}
-3666
-3667@Override
-3668protected V 
postOperationResult(final V result, final long deadlineTs)
-3669throws IOException, 
TimeoutException {
-3670  LOG.info(getDescription() + " 
completed");
-3671  return 
super.postOperationResult(result, deadlineTs);
-3672}
-3673
-3674@Override
-3675protected V 
postOperationFailure(final IOException exception, final long deadlineTs)
-3676throws IOException, 
TimeoutException {
-3677  LOG.info(getDescription() + " 
failed with " + exception.getMessage());
-3678  return 
super.postOperationFailure(exception, deadlineTs);
-3679}
-3680
-3681protected void 
waitForTableEnabled(final long deadlineTs)
-3682throws IOException, 
TimeoutException {
-3683  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3684@Override
-3685public boolean checkState(int 
tries) throws IOException {
-3686  try {
-3687if 
(getAdmin().isTableAvailable(tableName)) {
-3688  return true;
-3689}
-3690  } catch 
(TableNotFoundException tnfe) {
-3691LOG.debug("Table " + 
tableName.getNameWithNamespaceInclAsString()
-3692+ " was not enabled, 
sleeping. tries=" + tries);
-3693  }
-3694  return false;
-3695}
-3696  });
-3697}
-3698
-3699protected void 
waitForTableDisabled(final long deadlineTs)
-3700throws IOException, 
TimeoutException {
-3701  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3702@Override
-3703public boolean checkState(int 
tries) throws IOException {
-3704  return 
getAdmin().isTableDisabled(tableName);
-3705}
-3706  });
-3707}
-3708
-3709protected void 
waitTableNotFound(final long deadlineTs)
-3710throws IOException, 
TimeoutException {
-3711  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3712@Override
-3713public boolean checkState(int 
tries) throws IOException {
-3714  return 
!getAdmin().tableExists(tableName);
-3715}
-3716  });
-3717}
-3718
-3719protected void 
waitForSchemaUpdate(final long deadlineTs)
-3720throws IOException, 
TimeoutException {
-3721  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3722@Override
-3723public boolean checkState(int 
tries) throws IOException {
-3724  return 
getAdmin().getAlterStatus(tableName).getFirst() == 0;
-3725}
-3726  });
-3727}
-3728
-3729protected void 
waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
-3730throws IOException, 
TimeoutException {
-3731  final TableDescriptor desc = 
getTableDescriptor();
-3732  final AtomicInteger actualRegCount 
= new AtomicInteger(0);
-3733  final MetaTableAccessor.Visitor 
visitor = new MetaTableAccessor.Visitor() {
-3734@Override
-3735public boolean visit(Result 
rowResult) throws IOExce

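The waitForTableEnabled/waitForTableDisabled/waitTableNotFound helpers above all feed the same waitForState(deadline, callable) loop with a checkState(tries) predicate. A generic restatement of that polling pattern; StateCheck and the fixed 100 ms sleep are simplifications, and the real callable maps interrupts and timeouts to the exceptions shown in this hunk.

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.TimeoutException;

public class WaitForStateSketch {
  interface StateCheck {
    boolean checkState(int tries) throws IOException;
  }

  static void waitForState(long deadlineTs, StateCheck check)
      throws IOException, TimeoutException {
    for (int tries = 0; ; tries++) {
      if (check.checkState(tries)) {
        return; // desired state reached
      }
      if (System.currentTimeMillis() > deadlineTs) {
        throw new TimeoutException("state not reached before deadline");
      }
      try {
        Thread.sleep(100); // fixed backoff for the sketch only
      } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
      }
    }
  }
}
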
[17/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

2018-06-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
index 4fef3fb..291d5be 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/example/ExampleMasterObserverWithMetrics.html
@@ -288,7 +288,7 @@ implements MasterObserver
-postAbortProcedure,
 postAddReplicationPeer,
 postAddRSGroup,
 postAssign,
 postBalance, postBalanceRSGroup,
 postBalanceSwitch,
 postClearDeadServers,
 postCloneSnapshot,
 postCompletedCreateTableAction,
 postCompletedDeleteTableAction,
 postCompletedDisableTableAction,
 postCompletedEnableTableAction,
 postCompletedMergeRegionsAction,
 postCompletedModifyTableAction,
 postCompletedModifyTableAction,
 postCompletedSplitRegionAction,
 postCompletedTruncateTableAction,
 postCreateNamespace,
 postDecommissionRegionServers,
 postDeleteNamespace, postDeleteSnapshot,
 postDeleteTable,
 postDisableReplicationPeer,
 postDisableTable,
 postEnableReplicationPeer,
 postEnableTable,
 postGetClusterMetrics,
 postGetLocks,
 postGetNamespaceDescriptor,
 postGetProcedures,
 postGetReplicationPeerConfig,
 postGetRSGroupInfo,
 postGetRSGroupInfoOfServer,
 postGetRSGroupInfoOfTable, postGetTableDescriptors,
 postGetTableNames,
 postListDecommissionedRegionServers,
 postListNamespaceDescriptors,
 postListReplicationPeers,
 postListRSGroups,
 postListSnapshot,
 postLockHeartbeat,
 postMergeRegions,
 postMergeRegionsCommitAction,
 postModifyNamespace,
 postModifyNamespace,
 postModifyTable,
 postModifyTable,
 postMove,
 postMoveServers,
 postMoveServersAndTables,
 postMoveTables,
 postRecommissionRegionServer,
 postRegionOffline,
 postRemoveReplicationPeer,
 postRemoveRSGroup, postRemoveServers,
 postRequestLock,
 postRestoreSnapshot,
 postRollBackMergeRegionsAction, postRollBackSplitRegionAction,
 postSetNamespaceQuota,
 postSetSplitOrMergeEnabled,
 pos
 tSetTableQuota, postSetUserQuota,
 postSetUserQuota,
 postSetUserQuota,
 postSnapshot, postStartMaster,
 postTableFlush,
 postTruncateTable,
 postUnassign,
 postUpdateReplicationPeerConfig,
 preAbortProcedure,
 preAddReplicationPeer,
 preAddRSGroup,
 preAssign, preBalance,
 preBalanceRSGroup,
 preBalanceSwitch,
 preClearDeadServers,
 preCloneSnapshot
 , preCreateNamespace,
 preCreateTableAction,
 preDecommissionRegionServers,
 preDeleteNamespace,
 preDeleteSnapshot,
 preDeleteTable,
 preDeleteTableAction,
 preDisableReplicationPeer,
 preDisableTa
 bleAction, preEnableReplicationPeer,
 preEnableTable,
 preEnableTableAction,
 preGetClusterMetrics,
 preGetLocks,
  preGetNamespaceDescriptor,
 preGetProcedures,
 preGetReplicationPeerConfig,
 preGetRSGroupInfo,
 
 preGetRSGroupInfoOfServer, preGetRSGroupInfoOfTable,
 preGetTableDescriptors,
 preGetTableNames,
 preListDecommissionedRegionServers,
 preListNamespaceDescriptors,
 preListReplicationPeers,
 preListRSGroups,
 preListSnapshot,
 preLockHeartbeat,
 preMasterInitialization,
 preMergeRegions,
 preMergeRegionsAction,
 preMergeRegionsCommitAction,
 preModifyNamespace,
 preModifyNamespace,
 preModifyTable,
 preModifyTable,
 preModifyTableAction,
 preModifyTableAction,
 preMove,
 preMoveServers,
 preMoveServersAndTables,
 preMoveTables,
 preRecommissionRegionServer,
 preRegionOffline,
 preRemoveReplicationPeer, 
preRemoveRSGroup,
 preRemoveServers,
 preRequestLock,
 preRestoreSnapshot, href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preSetNamespaceQuota-org.apache.hadoop.hbase.coprocessor.ObserverContext-java.lang.String-org.apache.hadoop.hbase.quotas.GlobalQuotaSettings-">preSetNamespaceQuota,
 > href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preSetSplitOrMergeEnabled-org.apache.hadoop.hbase.coprocessor.ObserverContext-boolean-org.apache.hadoop.hbase.client.MasterSwitchType-">preSetSplitOrMergeEnabled,
 > href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preSetTableQuota-org.apache.hadoop.hbase.coprocessor.ObserverContext-org.apache.hadoop.hbase.TableName-org.apache.hadoop.hbase.quotas.GlobalQuotaSettings-">preSetTableQuota,
 > href="../../../../../../org/apache/hadoop/hbase/coprocessor/MasterObserver.html#preSetUserQuota-org.apache.hadoop.hbase.coprocessor.ObserverContext-java.lang.String-org.apache.hadoop.hbase.quotas.GlobalQuot
 aSettings-">preSetUserQuota, preSetUserQuota,
 preSetUserQuota,
 preShutdown,
 preSnapshot,
  preSplitRegion,
 preSplitRegionAction,
 preSplitRegionAfterMETAAction,
 preSplitRegionBeforeMETAAction,
 preStopMaster, preTableFlush,
 preTruncateTable,
 preTruncateTableAction,
 preUnassign,
 preUpdateReplicationPeerCon

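ExampleMasterObserverWithMetrics implements MasterObserver and inherits the long list of default hooks summarized above. A minimal observer that overrides a single hook, assuming the HBase 2.x coprocessor wiring (MasterCoprocessor plus getMasterObserver), which this hunk itself does not show.

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class NoopStartObserver implements MasterCoprocessor, MasterObserver {
  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  // Every other hook keeps its default no-op implementation, which is why
  // the inherited-method summary above lists so many entries.
  @Override
  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx)
      throws IOException {
    // react to master startup here
  }
}
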
[17/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

2018-06-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index 000c3ff..683c1f3 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":9,"i42":9,"i43":9,"i44":9,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":9,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":9,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":9,"i107":10,"i108":10,"i109":10
 
,"i110":10,"i111":10,"i112":10,"i113":41,"i114":41,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":9,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":42,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":9,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":9,"i180":10,"i181":10,"i182":9,"i183":9,"i184":9,"i185":9,"i186":9,"i187":9,"i188":9,"i189":9,"i190":9,"i191":9,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":10,"i198":10,"i199":10,"i200":10,"i201":9,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i209":10,"i210":10,
 
"i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10,"i228":10,"i229":10,"i230":10,"i231":10,"i232":9,"i233":9,"i234":10,"i235":10,"i236":10,"i237":10,"i238":10,"i239":10,"i240":10,"i241":10,"i242":10,"i243":10,"i244":10,"i245":9,"i246":10,"i247":10,"i248":10,"i249":10,"i250":10,"i251":10,"i252":10,"i253":9,"i254":10,"i255":10,"i256":10,"i257":10,"i258":10,"i259":9,"i260":10,"i261":10,"i262":10,"i263":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":9,"i42":9,"i43":9,"i44":9,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":9,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":9,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":9,"i108":10,"i109":10
 
,"i110":10,"i111":10,"i112":10,"i113":10,"i114":41,"i115":41,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":9,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":42,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":9,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":9,"i181":10,"i182":10,"i183":9,"i184":9,"i185":9,"i186":9,"i187":9,"i188":9,"i189":9,"i190":9,"i191":9,"i192":9,"i193":10,"i194":10,"i195":10,"i196":10,"i197":10,"i198":10,"i199":10,"i200":10,"i201":10,"i202":9,"i2

[17/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

2018-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
index b6e7636..592c2cc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
@@ -356,3901 +356,3924 @@
 348  public Future 
modifyTableAsync(TableDescriptor td) throws IOException {
 349ModifyTableResponse response = 
executeCallable(
 350  new 
MasterCallable(getConnection(), 
getRpcControllerFactory()) {
-351@Override
-352protected ModifyTableResponse 
rpcCall() throws Exception {
-353  
setPriority(td.getTableName());
-354  ModifyTableRequest request = 
RequestConverter.buildModifyTableRequest(
-355td.getTableName(), td, 
ng.getNonceGroup(), ng.newNonce());
-356  return 
master.modifyTable(getRpcController(), request);
-357}
-358  });
-359return new ModifyTableFuture(this, 
td.getTableName(), response);
-360  }
-361
-362  @Override
-363  public List 
listTableDescriptorsByNamespace(byte[] name) throws IOException {
-364return executeCallable(new 
MasterCallable>(getConnection(),
-365getRpcControllerFactory()) {
-366  @Override
-367  protected 
List rpcCall() throws Exception {
-368return 
master.listTableDescriptorsByNamespace(getRpcController(),
-369
ListTableDescriptorsByNamespaceRequest.newBuilder()
-370  
.setNamespaceName(Bytes.toString(name)).build())
-371.getTableSchemaList()
-372.stream()
-373
.map(ProtobufUtil::toTableDescriptor)
-374
.collect(Collectors.toList());
-375  }
-376});
-377  }
-378
-379  @Override
-380  public List 
listTableDescriptors(List tableNames) throws IOException {
-381return executeCallable(new 
MasterCallable>(getConnection(),
-382getRpcControllerFactory()) {
-383  @Override
-384  protected 
List rpcCall() throws Exception {
-385GetTableDescriptorsRequest req 
=
-386
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-387  return 
ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-388  req));
-389  }
-390});
-391  }
-392
-393  @Override
-394  public List 
getRegions(final ServerName sn) throws IOException {
-395AdminService.BlockingInterface admin 
= this.connection.getAdmin(sn);
-396// TODO: There is no timeout on this 
controller. Set one!
-397HBaseRpcController controller = 
rpcControllerFactory.newController();
-398return 
ProtobufUtil.getOnlineRegions(controller, admin);
-399  }
-400
-401  @Override
-402  public List 
getRegions(TableName tableName) throws IOException {
-403if 
(TableName.isMetaTableName(tableName)) {
-404  return 
Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-405} else {
-406  return 
MetaTableAccessor.getTableRegions(connection, tableName, true);
-407}
-408  }
-409
-410  private static class 
AbortProcedureFuture extends ProcedureFuture {
-411private boolean isAbortInProgress;
-412
-413public AbortProcedureFuture(
-414final HBaseAdmin admin,
-415final Long procId,
-416final Boolean abortProcResponse) 
{
-417  super(admin, procId);
-418  this.isAbortInProgress = 
abortProcResponse;
-419}
-420
-421@Override
-422public Boolean get(long timeout, 
TimeUnit unit)
-423throws InterruptedException, 
ExecutionException, TimeoutException {
-424  if (!this.isAbortInProgress) {
-425return false;
-426  }
-427  super.get(timeout, unit);
-428  return true;
-429}
-430  }
-431
-432  /** @return Connection used by this 
object. */
-433  @Override
-434  public Connection getConnection() {
-435return connection;
-436  }
-437
-438  @Override
-439  public boolean tableExists(final 
TableName tableName) throws IOException {
-440return executeCallable(new 
RpcRetryingCallable() {
-441  @Override
-442  protected Boolean rpcCall(int 
callTimeout) throws Exception {
-443return 
MetaTableAccessor.tableExists(connection, tableName);
-444  }
-445});
-446  }
-447
-448  @Override
-449  public HTableDescriptor[] listTables() 
throws IOException {
-450return listTables((Pattern)null, 
false);
-451  }
-452
-453  @Override
-454  public HTableDescriptor[] 
l

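modifyTableAsync above submits the alter through a MasterCallable and wraps the response in a ModifyTableFuture. A client-side sketch of consuming that future; the five-minute timeout is an arbitrary choice, not from this hunk.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class AlterTable {
  // Submit the schema change and block until the master-side procedure
  // backing the returned future completes.
  static void alterAndWait(Admin admin, TableDescriptor td) throws Exception {
    admin.modifyTableAsync(td).get(5, TimeUnit.MINUTES);
  }
}
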
[17/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

2018-06-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
index fea2b5a..c7a6cc4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.Visitor.html
@@ -1354,816 +1354,824 @@
 1346   */
 1347  public static void 
putsToMetaTable(final Connection connection, final List&lt;Put&gt; ps)
 1348  throws IOException {
-1349try (Table t = 
getMetaHTable(connection)) {
-1350  debugLogMutations(ps);
-1351  t.put(ps);
-1352}
-1353  }
-1354
-1355  /**
-1356   * Delete the passed 
d from the hbase:meta 
table.
-1357   * @param connection connection we're 
using
-1358   * @param d Delete to add to 
hbase:meta
-1359   */
-1360  private static void 
deleteFromMetaTable(final Connection connection, final Delete d)
-1361  throws IOException {
-1362List&lt;Delete&gt; dels = new 
ArrayList&lt;&gt;(1);
-1363dels.add(d);
-1364deleteFromMetaTable(connection, 
dels);
-1365  }
-1366
-1367  /**
-1368   * Delete the passed 
deletes from the hbase:meta 
table.
-1369   * @param connection connection we're 
using
-1370   * @param deletes Deletes to add to 
hbase:meta  This list should support #remove.
-1371   */
-1372  private static void 
deleteFromMetaTable(final Connection connection, final List&lt;Delete&gt; 
deletes)
-1373  throws IOException {
-1374try (Table t = 
getMetaHTable(connection)) {
-1375  debugLogMutations(deletes);
-1376  t.delete(deletes);
-1377}
-1378  }
-1379
-1380  /**
-1381   * Deletes some replica columns 
corresponding to replicas for the passed rows
-1382   * @param metaRows rows in 
hbase:meta
-1383   * @param replicaIndexToDeleteFrom the 
replica ID we would start deleting from
-1384   * @param numReplicasToRemove how many 
replicas to remove
-1385   * @param connection connection we're 
using to access meta table
-1386   */
-1387  public static void 
removeRegionReplicasFromMeta(Set&lt;byte[]&gt; metaRows,
-1388int replicaIndexToDeleteFrom, int 
numReplicasToRemove, Connection connection)
-1389  throws IOException {
-1390int absoluteIndex = 
replicaIndexToDeleteFrom + numReplicasToRemove;
-1391for (byte[] row : metaRows) {
-1392  long now = 
EnvironmentEdgeManager.currentTime();
-1393  Delete deleteReplicaLocations = 
new Delete(row);
-1394  for (int i = 
replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
-1395
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1396  getServerColumn(i), now);
-1397
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1398  getSeqNumColumn(i), now);
-1399
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1400  getStartCodeColumn(i), now);
-1401  }
-1402  deleteFromMetaTable(connection, 
deleteReplicaLocations);
-1403}
-1404  }
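
A hedged usage sketch for removeRegionReplicasFromMeta above: after lowering a table's region replication from three to one, replica ids 1 and 2 must lose their server/seqnum/startcode columns on every hbase:meta row of the table. The connection and regionInfo handles (and imports) are assumed from the surrounding class; the helper name is hypothetical:

static void dropExtraReplicas(Connection connection, RegionInfo regionInfo)
    throws IOException {
  // One hbase:meta row key per region of the table.
  Set<byte[]> metaRows = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  metaRows.add(regionInfo.getRegionName());
  // Start at replica id 1 and remove two ids (1 and 2); replica 0, the
  // primary, keeps its columns.
  MetaTableAccessor.removeRegionReplicasFromMeta(metaRows, 1, 2, connection);
}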
-1405
-1406  /**
-1407   * Execute the passed 
mutations against hbase:meta 
table.
-1408   * @param connection connection we're 
using
-1409   * @param mutations Puts and Deletes 
to execute on hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void 
mutateMetaTable(final Connection connection,
-1413 
final List&lt;Mutation&gt; mutations)
-1414throws IOException {
-1415Table t = 
getMetaHTable(connection);
-1416try {
-1417  debugLogMutations(mutations);
-1418  t.batch(mutations, null);
-1419} catch (InterruptedException e) {
-1420  InterruptedIOException ie = new 
InterruptedIOException(e.getMessage());
-1421  ie.initCause(e);
-1422  throw ie;
-1423} finally {
-1424  t.close();
-1425}
-1426  }
-1427
-1428  private static void 
addRegionStateToPut(Put put, RegionState.State state) throws IOException {
-1429
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-1430.setRow(put.getRow())
-1431
.setFamily(HConstants.CATALOG_FAMILY)
-1432
.setQualifier(getRegionStateColumn())
-1433
.setTimestamp(put.getTimestamp())
-1434.setType(Cell.Type.Put)
-1435
.setValue(Bytes.toBytes(state.name()))
-1436.build());
-1437  }
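
addRegionStateToPut above is one instance of a reusable pattern: every extra cell appended to an existing Put copies the Put's row and timestamp, so the whole row mutation carries one consistent version. The same pattern as a generic hedged sketch (helper name and parameters are hypothetical):

static Put addStringColumn(Put put, byte[] family, byte[] qualifier, String value)
    throws IOException {
  return put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(put.getRow())               // reuse the Put's row
      .setFamily(family)
      .setQualifier(qualifier)
      .setTimestamp(put.getTimestamp())   // reuse the Put's timestamp
      .setType(Cell.Type.Put)
      .setValue(Bytes.toBytes(value))
      .build());
}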
-1438
-1439  /**
-1440   * Adds daughter region infos to 
hbase:meta row for the specified region. Note that this does not
-1441   * add its daughter's as different 
rows, but adds information about the daughters in the same row
-1442   * as the parent. Use
-1443   * {@link #splitRegion(Connection, 
RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)}
-1444   * if you want to do that.
-1445   * @param connection connection we're 
using
-1446   * @param regionInfo

[17/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

2018-06-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.html
index 5dbbaf4..c771708 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.html
@@ -191,71 +191,74 @@
 183this.cellEncoder = 
codec.getEncoder(getOutputStreamForCellEncoder());
 184if (doCompress) {
 185  this.compressor = 
codec.getByteStringCompressor();
-186}
-187  }
-188
-189  protected void initAfterHeader(boolean 
doCompress) throws IOException {
-190initAfterHeader0(doCompress);
-191  }
-192
-193  // should be called in subclasses' 
initAfterHeader method to init SecureWALCellCodec.
-194  protected final void 
secureInitAfterHeader(boolean doCompress, Encryptor encryptor)
-195  throws IOException {
-196if 
(conf.getBoolean(HConstants.ENABLE_WAL_ENCRYPTION, false) && encryptor 
!= null) {
-197  WALCellCodec codec = 
SecureWALCellCodec.getCodec(this.conf, encryptor);
-198  this.cellEncoder = 
codec.getEncoder(getOutputStreamForCellEncoder());
-199  // We do not support compression
-200  this.compressionContext = null;
-201} else {
-202  initAfterHeader0(doCompress);
-203}
-204  }
-205
-206  void setWALTrailer(WALTrailer 
walTrailer) {
-207this.trailer = walTrailer;
-208  }
-209
-210  public long getLength() {
-211return length.get();
-212  }
-213
-214  private WALTrailer 
buildWALTrailer(WALTrailer.Builder builder) {
-215return builder.build();
-216  }
-217
-218  protected void writeWALTrailer() {
-219try {
-220  int trailerSize = 0;
-221  if (this.trailer == null) {
-222// use default trailer.
-223LOG.warn("WALTrailer is null. 
Continuing with default.");
-224this.trailer = 
buildWALTrailer(WALTrailer.newBuilder());
-225trailerSize = 
this.trailer.getSerializedSize();
-226  } else if ((trailerSize = 
this.trailer.getSerializedSize()) > this.trailerWarnSize) {
-227// continue writing after warning 
the user.
-228LOG.warn("Please investigate 
WALTrailer usage. Trailer size > maximum size : " + trailerSize
-229+ " > " + 
this.trailerWarnSize);
-230  }
-231  
length.set(writeWALTrailerAndMagic(trailer, 
ProtobufLogReader.PB_WAL_COMPLETE_MAGIC));
-232  this.trailerWritten = true;
-233} catch (IOException ioe) {
-234  LOG.warn("Failed to write trailer, 
non-fatal, continuing...", ioe);
-235}
-236  }
-237
-238  protected abstract void 
initOutput(FileSystem fs, Path path, boolean overwritable, int bufferSize,
-239  short replication, long blockSize) 
throws IOException, StreamLacksCapabilityException;
+186} else {
+187  this.compressor = 
WALCellCodec.getNoneCompressor();
+188}
+189  }
+190
+191  protected void initAfterHeader(boolean 
doCompress) throws IOException {
+192initAfterHeader0(doCompress);
+193  }
+194
+195  // should be called in subclasses' 
initAfterHeader method to init SecureWALCellCodec.
+196  protected final void 
secureInitAfterHeader(boolean doCompress, Encryptor encryptor)
+197  throws IOException {
+198if 
(conf.getBoolean(HConstants.ENABLE_WAL_ENCRYPTION, false) && encryptor 
!= null) {
+199  WALCellCodec codec = 
SecureWALCellCodec.getCodec(this.conf, encryptor);
+200  this.cellEncoder = 
codec.getEncoder(getOutputStreamForCellEncoder());
+201  // We do not support compression
+202  this.compressionContext = null;
+203  this.compressor = 
WALCellCodec.getNoneCompressor();
+204} else {
+205  initAfterHeader0(doCompress);
+206}
+207  }
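
The added else-branches are the substance of this change: the compressor field is now always non-null, with WALCellCodec.getNoneCompressor() standing in when compression is off, so write paths never branch on null. A minimal null-object sketch of the idea; the interface below is hypothetical and much simpler than the real WALCellCodec types:

interface Compressor {
  byte[] compress(byte[] data);
}

final class NoneCompressor implements Compressor {
  @Override
  public byte[] compress(byte[] data) {
    return data; // identity: "no compression" behaves like any other codec
  }
}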
+208
+209  void setWALTrailer(WALTrailer 
walTrailer) {
+210this.trailer = walTrailer;
+211  }
+212
+213  public long getLength() {
+214return length.get();
+215  }
+216
+217  private WALTrailer 
buildWALTrailer(WALTrailer.Builder builder) {
+218return builder.build();
+219  }
+220
+221  protected void writeWALTrailer() {
+222try {
+223  int trailerSize = 0;
+224  if (this.trailer == null) {
+225// use default trailer.
+226LOG.warn("WALTrailer is null. 
Continuing with default.");
+227this.trailer = 
buildWALTrailer(WALTrailer.newBuilder());
+228trailerSize = 
this.trailer.getSerializedSize();
+229  } else if ((trailerSize = 
this.trailer.getSerializedSize()) > this.trailerWarnSize) {
+230// continue writing after warning 
the user.
+231LOG.warn("Please investigate 
WALTrailer usage. Trailer size > maximum size : " + trailerSize
+232+ " > " + 
this.trailerWarnSize);
+233   

[17/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateNode.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements Comparable&lt;ServerStateNode&gt; {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
Set&lt;RegionStateNode&gt; regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
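
The rewritten javadoc above pins the enum to a one-way lifecycle: ONLINE, then SPLITTING once the server has expired and its WALs are being split, then OFFLINE when splitting is done. A hedged sketch of a guard built on that ordering; the helper is hypothetical and leans on the declaration order of the constants:

static boolean isForwardTransition(ServerState from, ServerState to) {
  // A dead server only moves forward: ONLINE -> SPLITTING -> OFFLINE.
  return to.ordinal() == from.ordinal() + 1;
}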
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339public ProcedureEvent&lt;?&gt; 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements Comparable&lt;ServerStateNode&gt; {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
Set&lt;RegionStateNode&gt; regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346for (int i = 0; i < 
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361public Set&lt;RegionStateNode&gt; 
getRegions() {
-362  return regions;
+361public ProcedureEvent&lt;?&gt; 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369public ArrayList&lt;RegionInfo&gt; 
getRegionInfoList() {
-370  ArrayList&lt;RegionInfo&gt; hris = 
new ArrayList&lt;RegionInfo&gt;(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372for (int i = 0; i < 
expected.length; ++i) {
+373  expectedState |= (state == 
expected[i]);
+374}
+375  }
+376  return 

[17/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

2018-05-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 3da432b..d30fa8f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -928,7690 +928,7698 @@
 920  Collection&lt;HStore&gt; stores = 
this.stores.values();
 921  try {
 922// update the stores that we are 
replaying
-923
stores.forEach(HStore::startReplayingFromWAL);
-924// Recover any edits if 
available.
-925maxSeqId = Math.max(maxSeqId,
-926  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-927// Make sure mvcc is up to max.
-928this.mvcc.advanceTo(maxSeqId);
-929  } finally {
-930// update the stores that we are 
done replaying
-931
stores.forEach(HStore::stopReplayingFromWAL);
-932  }
-933}
-934this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-935
-936
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-937this.writestate.flushRequested = 
false;
-938this.writestate.compacting.set(0);
-939
-940if (this.writestate.writesEnabled) 
{
-941  // Remove temporary data left over 
from old regions
-942  status.setStatus("Cleaning up 
temporary data from old regions");
-943  fs.cleanupTempDir();
-944}
-945
-946if (this.writestate.writesEnabled) 
{
-947  status.setStatus("Cleaning up 
detritus from prior splits");
-948  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-949  // these directories here on open.  
We may be opening a region that was
-950  // being split but we crashed in 
the middle of it all.
-951  fs.cleanupAnySplitDetritus();
-952  fs.cleanupMergesDir();
-953}
-954
-955// Initialize split policy
-956this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-957
-958// Initialize flush policy
-959this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-960
-961long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-962for (HStore store: stores.values()) 
{
-963  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-964}
-965
-966// Use maximum of log sequenceid or 
that which was found in stores
-967// (particularly if no recovered 
edits, seqid will be -1).
-968long maxSeqIdFromFile =
-969  
WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
-970long nextSeqId = Math.max(maxSeqId, 
maxSeqIdFromFile) + 1;
-971if (writestate.writesEnabled) {
-972  
WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), 
nextSeqId - 1);
-973}
-974
-975LOG.info("Opened {}; next 
sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
-976
-977// A region can be reopened if failed 
a split; reset flags
-978this.closing.set(false);
-979this.closed.set(false);
-980
-981if (coprocessorHost != null) {
-982  status.setStatus("Running 
coprocessor post-open hooks");
-983  coprocessorHost.postOpen();
-984}
+923LOG.debug("replaying wal for " + 
this.getRegionInfo().getEncodedName());
+924
stores.forEach(HStore::startReplayingFromWAL);
+925// Recover any edits if 
available.
+926maxSeqId = Math.max(maxSeqId,
+927  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+928// Make sure mvcc is up to max.
+929this.mvcc.advanceTo(maxSeqId);
+930  } finally {
+931LOG.debug("stopping wal replay 
for " + this.getRegionInfo().getEncodedName());
+932// update the stores that we are 
done replaying
+933
stores.forEach(HStore::stopReplayingFromWAL);
+934  }
+935}
+936this.lastReplayedOpenRegionSeqId = 
maxSeqId;
+937
+938
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+939this.writestate.flushRequested = 
false;
+940this.writestate.compacting.set(0);
+941
+942if (this.writestate.writesEnabled) 
{
+943  LOG.debug("Cleaning up temporary 
data for " + this.getRegionInfo().getEncodedName());
+944  // Remove temporary data left over 
from old regions
+945  status.setStatus("Cleaning up 
temporary data from old regions");
+946  fs.cleanupTempDir();
+947}
+948
+949if (this.writestate.writesEnabled) 
{
+950  status.setStatus("Cleaning up 
detritus from prior splits");
+951  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
+952  // these directories here on open.  
We may be opening a regio
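
The initialization path above brackets edit replay with startReplayingFromWAL/stopReplayingFromWAL in try/finally, so every store returns to normal ingest mode even when replay throws. The same guard in miniature, as a hedged sketch (the wrapper is hypothetical; the HStore method references are the ones visible in the diff):

static void withReplayMode(Collection<HStore> stores, Runnable replayWork) {
  stores.forEach(HStore::startReplayingFromWAL);
  try {
    replayWork.run();
  } finally {
    // Runs on success and failure alike.
    stores.forEach(HStore::stopReplayingFromWAL);
  }
}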

[17/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

2018-05-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.RSGroupAdminServiceImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.RSGroupAdminServiceImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.RSGroupAdminServiceImpl.html
index 85373ba..d253aa8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.RSGroupAdminServiceImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.RSGroupAdminServiceImpl.html
@@ -41,208 +41,208 @@
 033import 
org.apache.hadoop.hbase.CoprocessorEnvironment;
 034import 
org.apache.hadoop.hbase.HBaseIOException;
 035import 
org.apache.hadoop.hbase.HConstants;
-036import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-037import 
org.apache.hadoop.hbase.ServerName;
-038import 
org.apache.hadoop.hbase.TableName;
-039import 
org.apache.hadoop.hbase.client.RegionInfo;
-040import 
org.apache.hadoop.hbase.client.SnapshotDescription;
-041import 
org.apache.hadoop.hbase.client.TableDescriptor;
-042import 
org.apache.hadoop.hbase.constraint.ConstraintException;
-043import 
org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
-044import 
org.apache.hadoop.hbase.coprocessor.HasMasterServices;
-045import 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
-046import 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
-047import 
org.apache.hadoop.hbase.coprocessor.MasterObserver;
-048import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-049import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-050import 
org.apache.hadoop.hbase.ipc.RpcServer;
-051import 
org.apache.hadoop.hbase.master.MasterServices;
-052import 
org.apache.hadoop.hbase.net.Address;
-053import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-054import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-055import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
-056import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
-057import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
-058import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
-059import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
-060import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
-061import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
-062import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
-063import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
-064import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
-065import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
-066import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
-067import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
-068import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest;
-069import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse;
-070import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
-071import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
-072import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
-073import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse;
-074import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
-075import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
-076import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
-077import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
-078import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
-079import 
org.apache.hadoop.hbase.security.User;
-080import 
org.apache.hadoop.hbase.security.UserProvider;
-081import 
org.apache.hadoop.hbase.security.access.AccessChecker;
-082import 
org.apache.hadoop.hbase.security.access.Permission.Action;
-083import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-084import 
org.apache.yetus.audience.InterfaceAudience;
-085import org.slf4j.Logger;
-086import org.slf4j.LoggerFactory;
-087
-088import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+036import 
org.apache.hadoop.hbase.MasterNotRunningException;
+037import 
org.apache.hadoop.hbase.NamespaceDescripto

[17/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
index e159b3f..3168ee3 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/backup/package-tree.html
@@ -145,8 +145,8 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.backup.TestBackupDeleteWithFailures.Failure
 org.apache.hadoop.hbase.backup.TestIncrementalBackupMergeWithFailures.FailurePhase
+org.apache.hadoop.hbase.backup.TestBackupDeleteWithFailures.Failure
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.TestMaxRetriesCoprocessor.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.TestMaxRetriesCoprocessor.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.TestMaxRetriesCoprocessor.html
index 2041495..d7f6323 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.TestMaxRetriesCoprocessor.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncAdminBuilder.TestMaxRetriesCoprocessor.html
@@ -225,7 +225,7 @@ implements 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessor, org.apache.had
 
 
 Methods inherited from 
interface org.apache.hadoop.hbase.coprocessor.MasterObserver
-postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, 
postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, 
postCloneSnapshot, postCompletedCreateTableAction, 
postCompletedDeleteTableAction, postCompletedDisableTableAction, 
postCompletedEnableTableAction, postCompletedMergeRegionsAction, 
postCompletedModifyTableAction, postCompletedSplitRegionAction, 
postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, 
postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, 
postDeleteTable, postDisableReplicationPeer, postDisableTable, 
postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, 
postGetLocks, postGetNamespaceDescriptor, postGetProcedures, 
postGetReplicationPeerConfig, postGetTableDescriptors, postGetTableNames, 
postListDecommissionedRegionServers, postListNamespaceDescriptors, 
postListReplicationPeers, postListSnapshot, postLockHeartbeat, 
postMergeRegions, postMergeRegionsCommitAction, postModifyNamespace, 
postModifyTable, postMove, postMoveServers, postMoveServersAndTables, 
postMoveTables, postRecommissionRegionServer, postRegionOffline, 
postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, 
postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, 
postRollBackSplitRegionAction, postSetNamespaceQuota, 
postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, 
postSetUserQuota, postSetUserQuota, postSnapshot, postStartMaster, 
postTableFlush, postTruncateTable, postUnassign, 
postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer, 
preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, 
preClearDeadServers, preCloneSnapshot, preCreateNamespace, preCreateTable, 
preCreateTableAction, preDecommissionRegionServers, preDeleteNamespace, 
preDeleteSnapshot, preDeleteTable, preDeleteTableAction, 
preDisableReplicationPeer, preDisableTable, preDisableTableAction, 
preEnableReplicationPeer, preEnableTable, preEnableTableAction,
  preGetClusterMetrics, preGetLocks, preGetProcedures, 
preGetReplicationPeerConfig, preGetTableDescriptors, preGetTableNames, 
preListDecommissionedRegionServers, preListNamespaceDescriptors, 
preListReplicationPeers, preListSnapshot, preLockHeartbeat, 
preMasterInitialization, preMergeRegions, preMergeRegionsAction, 
preMergeRegionsCommitAction, preModifyNamespace, preModifyTable, 
preModifyTableAction, preMove, preMoveServers, preMoveServersAndTables, 
preMoveTables, preRecommissionRegionServer, preRegionOffline, 
preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, preRequestLock, 
preRestoreSnapshot, preSetNamespaceQuota, preSetSplitOrMergeEnabled, 
preSetTableQuota, preSetUserQuota, preSetUserQuota, preSetUserQuota, 
preShutdown, preSnapshot, preSplitRegion, preSplitRegionAction, 
preSplitRegionAfterMETAAction, preSplitRegionBeforeMETAAction, preSt

[17/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

2018-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
index 4a879bb..7d27402 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
@@ -300,7 +300,7 @@
 292  private Map&lt;String, com.google.protobuf.Service&gt; coprocessorServiceHandlers = 
Maps.newHashMap();
 293
 294  // Track data size in all memstores
-295  private final MemStoreSizing 
memStoreSize = new MemStoreSizing();
+295  private final MemStoreSizing 
memStoreSizing = new ThreadSafeMemStoreSizing();
 296  private final RegionServicesForStores 
regionServicesForStores = new RegionServicesForStores(this);
 297
 298  // Debug possible data loss due to WAL 
off
@@ -1218,7389 +1218,7399 @@
 1210   * Increase the size of mem store in 
this region and the size of global mem
 1211   * store
 1212   */
-1213  public void 
incMemStoreSize(MemStoreSize memStoreSize) {
-1214if (this.rsAccounting != null) {
-1215  
rsAccounting.incGlobalMemStoreSize(memStoreSize);
-1216}
-1217long dataSize;
-1218synchronized (this.memStoreSize) {
-1219  
this.memStoreSize.incMemStoreSize(memStoreSize);
-1220  dataSize = 
this.memStoreSize.getDataSize();
-1221}
-1222
checkNegativeMemStoreDataSize(dataSize, memStoreSize.getDataSize());
-1223  }
-1224
-1225  public void 
decrMemStoreSize(MemStoreSize memStoreSize) {
-1226if (this.rsAccounting != null) {
-1227  
rsAccounting.decGlobalMemStoreSize(memStoreSize);
-1228}
-1229long size;
-1230synchronized (this.memStoreSize) {
-1231  
this.memStoreSize.decMemStoreSize(memStoreSize);
-1232  size = 
this.memStoreSize.getDataSize();
+1213  void incMemStoreSize(MemStoreSize mss) 
{
+1214incMemStoreSize(mss.getDataSize(), 
mss.getHeapSize(), mss.getOffHeapSize());
+1215  }
+1216
+1217  void incMemStoreSize(long 
dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1218if (this.rsAccounting != null) {
+1219  
rsAccounting.incGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, 
offHeapSizeDelta);
+1220}
+1221long dataSize =
+1222
this.memStoreSizing.incMemStoreSize(dataSizeDelta, heapSizeDelta, 
offHeapSizeDelta);
+1223
checkNegativeMemStoreDataSize(dataSize, dataSizeDelta);
+1224  }
+1225
+1226  void decrMemStoreSize(MemStoreSize 
mss) {
+1227decrMemStoreSize(mss.getDataSize(), 
mss.getHeapSize(), mss.getOffHeapSize());
+1228  }
+1229
+1230  void decrMemStoreSize(long 
dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1231if (this.rsAccounting != null) {
+1232  
rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, 
offHeapSizeDelta);
 1233}
-1234checkNegativeMemStoreDataSize(size, 
-memStoreSize.getDataSize());
-1235  }
-1236
-1237  private void 
checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) {
-1238// This is extremely bad if we make 
memStoreSize negative. Log as much info on the offending
-1239// caller as possible. (memStoreSize 
might be a negative value already -- freeing memory)
-1240if (memStoreDataSize < 0) {
-1241  LOG.error("Asked to modify this 
region's (" + this.toString()
-1242  + ") memStoreSize to a 
negative value which is incorrect. Current memStoreSize="
-1243  + (memStoreDataSize - delta) + 
", delta=" + delta, new Exception());
-1244}
-1245  }
-1246
-1247  @Override
-1248  public RegionInfo getRegionInfo() {
-1249return this.fs.getRegionInfo();
-1250  }
-1251
-1252  /**
-1253   * @return Instance of {@link 
RegionServerServices} used by this HRegion.
-1254   * Can be null.
-1255   */
-1256  RegionServerServices 
getRegionServerServices() {
-1257return this.rsServices;
-1258  }
-1259
-1260  @Override
-1261  public long getReadRequestsCount() {
-1262return readRequestsCount.sum();
-1263  }
-1264
-1265  @Override
-1266  public long 
getFilteredReadRequestsCount() {
-1267return 
filteredReadRequestsCount.sum();
-1268  }
-1269
-1270  @Override
-1271  public long getWriteRequestsCount() 
{
-1272return writeRequestsCount.sum();
-1273  }
-1274
-1275  @Override
-1276  public long getMemStoreDataSize() {
-1277return memStoreSize.getDataSize();
-1278  }
-1279
-1280  @Override
-1281  public long getMemStoreHeapSize() {
-1282return memStoreSize.getHeapSize();
-1283  }
-1284
-1285  @Override
-1286  public long getMemStoreOffHeapSize() 
{
-1287return 
memStoreSize.getOffHeapSize();
-1288  }
-1289
-1290  /** @return store services for this 
region, to access services required by store level needs */
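
The change above swaps a synchronized block over a mutable MemStoreSizing for delta-based calls into a ThreadSafeMemStoreSizing, whose atomic adds return the new data size for the negative-size check. A hedged sketch of such an accumulator (simplified; the class and field names are illustrative, not the real implementation):

import java.util.concurrent.atomic.AtomicLong;

final class ThreadSafeSizingSketch {
  private final AtomicLong dataSize = new AtomicLong();
  private final AtomicLong heapSize = new AtomicLong();
  private final AtomicLong offHeapSize = new AtomicLong();

  long incMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta) {
    heapSize.addAndGet(heapDelta);
    offHeapSize.addAndGet(offHeapDelta);
    // The returned value feeds checkNegativeMemStoreDataSize, as above.
    return dataSize.addAndGet(dataDelta);
  }

  long getDataSize() {
    return dataSize.get();
  }
}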

[17/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

2018-05-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
index 2510283..418c60c 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
@@ -77,77 +77,77 @@
 069import 
org.apache.hadoop.hbase.client.RowMutations;
 070import 
org.apache.hadoop.hbase.client.Scan;
 071import 
org.apache.hadoop.hbase.client.Table;
-072import 
org.apache.hadoop.hbase.filter.BinaryComparator;
-073import 
org.apache.hadoop.hbase.filter.Filter;
-074import 
org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import 
org.apache.hadoop.hbase.filter.FilterList;
-076import 
org.apache.hadoop.hbase.filter.PageFilter;
-077import 
org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import 
org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import 
org.apache.hadoop.hbase.io.compress.Compression;
-080import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import 
org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import 
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import 
org.apache.hadoop.hbase.regionserver.BloomType;
-084import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import 
org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import 
org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import 
org.apache.hadoop.hbase.trace.TraceUtil;
-088import 
org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import 
org.apache.hadoop.hbase.util.Bytes;
-090import 
org.apache.hadoop.hbase.util.Hash;
-091import 
org.apache.hadoop.hbase.util.MurmurHash;
-092import 
org.apache.hadoop.hbase.util.Pair;
-093import 
org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import 
org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import 
org.apache.hadoop.mapreduce.Mapper;
-098import 
org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import 
org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import 
org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import 
org.apache.hadoop.util.ToolRunner;
-103import 
org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import 
org.apache.htrace.core.TraceScope;
-106import 
org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import 
org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used to evaluate HBase 
performance and scalability.  Runs an HBase
-114 * client that steps through one of a set 
of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random 
writes test, etc.). Pass on the
-116 * command-line which test to run and how 
many clients are participating in
-117 * this experiment. Run {@code 
PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * This class sets up and runs the evaluation programs described in
-120 * Section 7, Performance Evaluation, of the Bigtable
-122 * paper, pages 8-10.
-123 *
-124 * By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132  static final String RANDOM_READ = "randomRead";
-133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper MAPPER = new ObjectMapper();
-135  static {
-136    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = "TestTable";
-140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import org.apache.hadoop.hbase.filter.Filter;
+075import org.apache
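
Because the class is a Hadoop Tool, the usage described in the javadoc above can also be driven in-process. A hedged launcher sketch, assuming the hbase test artifact (which ships PerformanceEvaluation) is on the classpath; the test name and client count are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.util.ToolRunner;

public final class PeLauncher {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same effect as invoking the class from the hbase command line
    // with arguments: --nomapred randomRead 1
    int exit = ToolRunner.run(conf, new PerformanceEvaluation(conf),
        new String[] { "--nomapred", "randomRead", "1" });
    System.exit(exit);
  }
}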


[17/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

2018-05-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
index e1bc325..63e7421 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemOverlapMerge.html
@@ -66,5125 +66,5224 @@
 058import 
java.util.concurrent.TimeoutException;
 059import 
java.util.concurrent.atomic.AtomicBoolean;
 060import 
java.util.concurrent.atomic.AtomicInteger;
-061import org.apache.commons.io.IOUtils;
-062import 
org.apache.commons.lang3.RandomStringUtils;
-063import 
org.apache.commons.lang3.StringUtils;
-064import 
org.apache.hadoop.conf.Configuration;
-065import 
org.apache.hadoop.conf.Configured;
-066import 
org.apache.hadoop.fs.FSDataOutputStream;
-067import org.apache.hadoop.fs.FileStatus;
-068import org.apache.hadoop.fs.FileSystem;
-069import org.apache.hadoop.fs.Path;
-070import 
org.apache.hadoop.fs.permission.FsAction;
-071import 
org.apache.hadoop.fs.permission.FsPermission;
-072import 
org.apache.hadoop.hbase.Abortable;
-073import org.apache.hadoop.hbase.Cell;
-074import 
org.apache.hadoop.hbase.CellUtil;
-075import 
org.apache.hadoop.hbase.ClusterMetrics;
-076import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-077import 
org.apache.hadoop.hbase.HBaseConfiguration;
-078import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-079import 
org.apache.hadoop.hbase.HConstants;
-080import 
org.apache.hadoop.hbase.HRegionInfo;
-081import 
org.apache.hadoop.hbase.HRegionLocation;
-082import 
org.apache.hadoop.hbase.KeyValue;
-083import 
org.apache.hadoop.hbase.MasterNotRunningException;
-084import 
org.apache.hadoop.hbase.MetaTableAccessor;
-085import 
org.apache.hadoop.hbase.RegionLocations;
-086import 
org.apache.hadoop.hbase.ServerName;
-087import 
org.apache.hadoop.hbase.TableName;
-088import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-089import 
org.apache.hadoop.hbase.client.Admin;
-090import 
org.apache.hadoop.hbase.client.ClusterConnection;
-091import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-092import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-093import 
org.apache.hadoop.hbase.client.Connection;
-094import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-095import 
org.apache.hadoop.hbase.client.Delete;
-096import 
org.apache.hadoop.hbase.client.Get;
-097import 
org.apache.hadoop.hbase.client.Put;
-098import 
org.apache.hadoop.hbase.client.RegionInfo;
-099import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-100import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-101import 
org.apache.hadoop.hbase.client.Result;
-102import 
org.apache.hadoop.hbase.client.RowMutations;
-103import 
org.apache.hadoop.hbase.client.Table;
-104import 
org.apache.hadoop.hbase.client.TableDescriptor;
-105import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-106import 
org.apache.hadoop.hbase.client.TableState;
-107import 
org.apache.hadoop.hbase.io.FileLink;
-108import 
org.apache.hadoop.hbase.io.HFileLink;
-109import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-110import 
org.apache.hadoop.hbase.io.hfile.HFile;
-111import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-112import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-113import 
org.apache.hadoop.hbase.master.RegionState;
-114import 
org.apache.hadoop.hbase.regionserver.HRegion;
-115import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-116import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-117import 
org.apache.hadoop.hbase.replication.ReplicationException;
-118import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-119import 
org.apache.hadoop.hbase.security.UserProvider;
-120import 
org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-121import 
org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
-122import 
org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
-123import 
org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
-124import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
-125import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-126import org.apache.hadoop.hbase.wal.WAL;
-127import 
org.apache.hadoop.hbase.wal.WALFactory;
-128import 
org.apache.hadoop.hbase.wal.WALSplitter;
-129import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-130import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-131import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-132import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-133import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-134import 
org.apache.hadoop.ipc.RemoteException;
-135import 
org.apache.h

[17/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
index 8302e28..c370eb9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.ErrorReporter.html
@@ -2113,3031 +2113,3033 @@
 2105
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106tableName + " unable to 
delete dangling table state " + tableState);
 2107  }
-2108} else {
-2109  
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110  tableName + " has dangling 
table state " + tableState);
-2111}
-2112  }
-2113}
-2114// check that all tables have 
states
-2115for (TableName tableName : 
tablesInfo.keySet()) {
-2116  if (isTableIncluded(tableName) 
&& !tableStates.containsKey(tableName)) {
-2117if (fixMeta) {
-2118  
MetaTableAccessor.updateTableState(connection, tableName, 
TableState.State.ENABLED);
-2119  TableState newState = 
MetaTableAccessor.getTableState(connection, tableName);
-2120  if (newState == null) {
-2121
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122"Unable to change state 
for table " + tableName + " in meta ");
-2123  }
-2124} else {
-2125  
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126  tableName + " has no state 
in meta ");
-2127}
-2128  }
-2129}
-2130  }
-2131
-2132  private void preCheckPermission() 
throws IOException, AccessDeniedException {
-2133if 
(shouldIgnorePreCheckPermission()) {
-2134  return;
-2135}
-2136
-2137Path hbaseDir = 
FSUtils.getRootDir(getConf());
-2138FileSystem fs = 
hbaseDir.getFileSystem(getConf());
-2139UserProvider userProvider = 
UserProvider.instantiate(getConf());
-2140UserGroupInformation ugi = 
userProvider.getCurrent().getUGI();
-2141FileStatus[] files = 
fs.listStatus(hbaseDir);
-2142for (FileStatus file : files) {
-2143  try {
-2144FSUtils.checkAccess(ugi, file, 
FsAction.WRITE);
-2145  } catch (AccessDeniedException 
ace) {
-2146LOG.warn("Got 
AccessDeniedException when preCheckPermission ", ace);
-2147
errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + 
ugi.getUserName()
-2148  + " does not have write perms 
to " + file.getPath()
-2149  + ". Please rerun hbck as hdfs 
user " + file.getOwner());
-2150throw ace;
-2151  }
-2152}
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo 
hi) throws IOException {
-2159
deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] 
metaKey) throws IOException {
-2166Delete d = new Delete(metaKey);
-2167meta.delete(d);
-2168LOG.info("Deleted " + 
Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info 
in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo 
hi) throws IOException {
-2175RowMutations mutations = new 
RowMutations(hi.metaEntry.getRegionName());
-2176Delete d = new 
Delete(hi.metaEntry.getRegionName());
-2177
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179mutations.add(d);
-2180
-2181RegionInfo hri = 
RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182.setOffline(false)
-2183.setSplit(false)
-2184.build();
-2185Put p = 
MetaTableAccessor.makePutFromRegionInfo(hri, 
EnvironmentEdgeManager.currentTime());
-2186mutations.add(p);
-2187
-2188meta.mutateRow(mutations);
-2189LOG.info("Reset split parent " + 
hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
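
resetSplitParent above leans on RowMutations: a Delete (dropping the SPLITA/SPLITB qualifiers) and a Put (rewriting the RegionInfo) are grouped on one row so that meta.mutateRow applies them atomically. The same pattern as a generic hedged sketch; every byte[] parameter is illustrative:

static void atomicRepair(Table table, byte[] row, byte[] family,
    byte[] staleQualifier, byte[] qualifier, byte[] value) throws IOException {
  RowMutations rm = new RowMutations(row);
  Delete d = new Delete(row);
  d.addColumn(family, staleQualifier);
  rm.add(d);
  Put p = new Put(row);
  p.addColumn(family, qualifier, value);
  rm.add(p);
  table.mutateRow(rm); // the delete and the put commit together or not at all
}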
-2191
-2192  /**
-2193   * This backwards-compatibility 
wrapper for permanently offlining a region
-2194   * that should not be alive.  If the 
region server does not support the
-2195   * "offline" method, it will use the 
closest unassign method instead.  This
-2196   * will basically work until one 
attempts to disable or delete the affected
-2197   * table.  The problem has to do with 
in-memory only master state, so
-2198   * restarting the HMaster or failing 
over to another should fix this.
-2199   */
-2200  private void offline(byte[] 
regionName) throws IOException {
-2201String regionString = 
Bytes.toStringBinary(regionName);
-2202if (!rsSupportsOffline) {
-2203  LOG.warn("Using unass

[17/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

2018-03-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
index d7aa8b1..98a45a0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
@@ -680,1330 +680,1333 @@
 672}
 673List&lt;HRegionLocation&gt; locations 
= new ArrayList&lt;&gt;();
 674for (RegionInfo regionInfo : regions) 
{
-675  RegionLocations list = 
locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
-676  if (list != null) {
-677for (HRegionLocation loc : 
list.getRegionLocations()) {
-678  if (loc != null) {
-679locations.add(loc);
-680  }
-681}
-682  }
-683}
-684return locations;
-685  }
-686
-687  @Override
-688  public HRegionLocation 
locateRegion(final TableName tableName, final byte[] row)
-689  throws IOException {
-690RegionLocations locations = 
locateRegion(tableName, row, true, true);
-691return locations == null ? null : 
locations.getRegionLocation();
-692  }
-693
-694  @Override
-695  public HRegionLocation 
relocateRegion(final TableName tableName, final byte[] row)
-696  throws IOException {
-697RegionLocations locations =
-698  relocateRegion(tableName, row, 
RegionReplicaUtil.DEFAULT_REPLICA_ID);
-699return locations == null ? null
-700  : 
locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
-701  }
-702
-703  @Override
-704  public RegionLocations 
relocateRegion(final TableName tableName,
-705  final byte [] row, int replicaId) 
throws IOException{
-706// Since this is an explicit request 
not to use any caching, finding
-707// disabled tables should not be 
desirable.  This will ensure that an exception is thrown when
-708// the first time a disabled table is 
interacted with.
-709if 
(!tableName.equals(TableName.META_TABLE_NAME) && 
isTableDisabled(tableName)) {
-710  throw new 
TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
-711}
-712
-713return locateRegion(tableName, row, 
false, true, replicaId);
-714  }
+675  if 
(!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
+676continue;
+677  }
+678  RegionLocations list = 
locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
+679  if (list != null) {
+680for (HRegionLocation loc : 
list.getRegionLocations()) {
+681  if (loc != null) {
+682locations.add(loc);
+683  }
+684}
+685  }
+686}
+687return locations;
+688  }
+689
+690  @Override
+691  public HRegionLocation 
locateRegion(final TableName tableName, final byte[] row)
+692  throws IOException {
+693RegionLocations locations = 
locateRegion(tableName, row, true, true);
+694return locations == null ? null : 
locations.getRegionLocation();
+695  }
+696
+697  @Override
+698  public HRegionLocation 
relocateRegion(final TableName tableName, final byte[] row)
+699  throws IOException {
+700RegionLocations locations =
+701  relocateRegion(tableName, row, 
RegionReplicaUtil.DEFAULT_REPLICA_ID);
+702return locations == null ? null
+703  : 
locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
+704  }
+705
+706  @Override
+707  public RegionLocations 
relocateRegion(final TableName tableName,
+708  final byte [] row, int replicaId) 
throws IOException{
+709// Since this is an explicit request 
not to use any caching, finding
+710// disabled tables should not be 
desirable.  This will ensure that an exception is thrown when
+711// the first time a disabled table is 
interacted with.
+712if 
(!tableName.equals(TableName.META_TABLE_NAME) && 
isTableDisabled(tableName)) {
+713  throw new 
TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
+714}
 715
-716  @Override
-717  public RegionLocations 
locateRegion(final TableName tableName, final byte[] row, boolean useCache,
-718  boolean retry) throws IOException 
{
-719return locateRegion(tableName, row, 
useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-720  }
-721
-722  @Override
-723  public RegionLocations 
locateRegion(final TableName tableName, final byte[] row, boolean useCache,
-724  boolean retry, int replicaId) 
throws IOException {
-725checkClosed();
-726if (tableName == null || 
tableName.getName().length == 0) {
-727  throw new 
IllegalArgumentException("table name
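
The comment above fixes the contract: locateRegion may answer from the client's region cache, while relocateRegion forces a fresh hbase:meta lookup and, deliberately, rejects disabled non-meta tables. A hedged usage sketch assuming a ClusterConnection handle and the usual client imports:

static void showLocateContract(ClusterConnection connection, TableName tableName,
    byte[] row) throws IOException {
  // May be served from cache; a stale entry gets corrected on retry.
  HRegionLocation cached = connection.locateRegion(tableName, row);
  // Bypasses the cache; throws TableNotEnabledException if the table is disabled.
  HRegionLocation fresh = connection.relocateRegion(tableName, row);
  System.out.println(cached + " / " + fresh);
}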

[17/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

2018-03-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/class-use/ClusterMetrics.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/ClusterMetrics.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/ClusterMetrics.html
index 1621237..4c9e7b7 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/ClusterMetrics.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/ClusterMetrics.html
@@ -242,27 +242,27 @@
 
 
 CompletableFuture&lt;ClusterMetrics&gt;
-AsyncHBaseAdmin.getClusterMetrics() 
+RawAsyncHBaseAdmin.getClusterMetrics() 
 
 
 CompletableFuture&lt;ClusterMetrics&gt;
-AsyncAdmin.getClusterMetrics() 
+AsyncHBaseAdmin.getClusterMetrics() 
 
 
 CompletableFuture&lt;ClusterMetrics&gt;
-RawAsyncHBaseAdmin.getClusterMetrics() 
+AsyncAdmin.getClusterMetrics() 
 
 
 CompletableFuture&lt;ClusterMetrics&gt;
-AsyncHBaseAdmin.getClusterMetrics(EnumSet&lt;Option&gt; options) 
+RawAsyncHBaseAdmin.getClusterMetrics(EnumSet&lt;Option&gt; options) 
 
 
 CompletableFuture&lt;ClusterMetrics&gt;
-AsyncAdmin.getClusterMetrics(EnumSet&lt;Option&gt; options) 
+AsyncHBaseAdmin.getClusterMetrics(EnumSet&lt;Option&gt; options) 
 
 
 CompletableFuture&lt;ClusterMetrics&gt;
-RawAsyncHBaseAdmin.getClusterMetrics(EnumSet&lt;Option&gt; options) 
+AsyncAdmin.getClusterMetrics(EnumSet&lt;Option&gt; options) 
 
 
 
@@ -408,11 +408,11 @@
 
 
 void
-BaseLoadBalancer.setClusterMetrics(ClusterMetrics st) 
+RegionLocationFinder.setClusterMetrics(ClusterMetrics status) 
 
 
 void
-RegionLocationFinder.setClusterMetrics(ClusterMetrics status) 
+BaseLoadBalancer.setClusterMetrics(ClusterMetrics st) 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/class-use/CompareOperator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/CompareOperator.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/CompareOperator.html
index 804b5df..21d4377 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/CompareOperator.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/CompareOperator.html
@@ -169,11 +169,11 @@ the order they are declared.
 
 
 private CompareOperator
-RawAsyncTableImpl.CheckAndMutateBuilderImpl.op 
+HTable.CheckAndMutateBuilderImpl.op 
 
 
 private CompareOperator
-HTable.CheckAndMutateBuilderImpl.op 
+RawAsyncTableImpl.CheckAndMutateBuilderImpl.op 
 
 
 
@@ -257,23 +257,23 @@ the order they are declared.
 
 
 
-AsyncTable.CheckAndMutateBuilder
-AsyncTable.CheckAndMutateBuilder.ifMatches(CompareOperator compareOp,
+Table.CheckAndMutateBuilder
+Table.CheckAndMutateBuilder.ifMatches(CompareOperator compareOp,
  byte[] value) 
 
 
 Table.CheckAndMutateBuilder
-Table.CheckAndMutateBuilder.ifMatches(CompareOperator compareOp,
+HTable.CheckAndMutateBuilderImpl.ifMatches(CompareOperator compareOp,
  byte[] value) 
 
 
 AsyncTable.CheckAndMutateBuilder
-RawAsyncTableImpl.CheckAndMutateBuilderImpl.ifMatches(CompareOperator compareOp,
+AsyncTable.CheckAndMutateBuilder.ifMatches(CompareOperator compareOp,
  byte[] value) 
 
 
-Table.CheckAndMutateBuilder
-HTable.CheckAndMutateBu
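
The excerpt is cut off above, but the CheckAndMutateBuilder chain it indexes reads naturally in client code. A hedged sketch against the HBase 2.x Table API; table, family, and qualifier names are invented:

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {
  public static void main(String[] args) throws Exception {
    byte[] row = Bytes.toBytes("row1");
    byte[] cf = Bytes.toBytes("cf");
    byte[] q = Bytes.toBytes("q");
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("demo"))) {
      // Apply the Put only if the stored value compares GREATER than "10"
      // (byte-wise comparison); returns whether the mutation was applied.
      boolean applied = table.checkAndMutate(row, cf)
          .qualifier(q)
          .ifMatches(CompareOperator.GREATER, Bytes.toBytes("10"))
          .thenPut(new Put(row).addColumn(cf, q, Bytes.toBytes("11")));
      System.out.println("applied = " + applied);
    }
  }
}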

[17/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

2018-03-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
index 3616545..6209920 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
@@ -208,9 +208,9 @@ service.
 
 
 
-ResultScanner
-HTable.getScanner(byte[] family)
-The underlying HTable must 
not be closed.
+default ResultScanner
+AsyncTable.getScanner(byte[] family)
+Gets a scanner on the current table for the given 
family.
 
 
 
@@ -220,16 +220,16 @@ service.
 
 
 
-default ResultScanner
-AsyncTable.getScanner(byte[] family)
-Gets a scanner on the current table for the given 
family.
+ResultScanner
+HTable.getScanner(byte[] family)
+The underlying HTable must 
not be closed.
 
 
 
-ResultScanner
-HTable.getScanner(byte[] family,
+default ResultScanner
+AsyncTable.getScanner(byte[] family,
   byte[] qualifier)
-The underlying HTable must 
not be closed.
+Gets a scanner on the current table for the given family 
and qualifier.
 
 
 
@@ -240,37 +240,37 @@ service.
 
 
 
-default ResultScanner
-AsyncTable.getScanner(byte[] family,
+ResultScanner
+HTable.getScanner(byte[] family,
   byte[] qualifier)
-Gets a scanner on the current table for the given family 
and qualifier.
+The underlying HTable must 
not be closed.
 
 
 
 ResultScanner
-RawAsyncTableImpl.getScanner(Scan scan) 
-
-
-ResultScanner
-HTable.getScanner(Scan scan)
-The underlying HTable must 
not be closed.
+AsyncTable.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the 
Scan 
object.
 
 
-
+
 ResultScanner
 Table.getScanner(Scan scan)
 Returns a scanner on the current table as specified by the 
Scan
  object.
 
 
-
+
 ResultScanner
 AsyncTableImpl.getScanner(Scan scan) 
 
+
+ResultScanner
+RawAsyncTableImpl.getScanner(Scan scan) 
+
 
 ResultScanner
-AsyncTable.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the 
Scan 
object.
+HTable.getScanner(Scan scan)
+The underlying HTable must 
not be closed.
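
Table and AsyncTable expose the same three getScanner overloads that this hunk reorders: by family, by family plus qualifier, and by a full Scan. A small sketch using the blocking flavor, with placeholder identifiers:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScannerSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("demo"));
         // getScanner(family) / getScanner(family, qualifier) are shorthands
         // for building the equivalent Scan.
         ResultScanner scanner =
             table.getScanner(new Scan().addFamily(Bytes.toBytes("cf")))) {
      for (Result r : scanner) { // ResultScanner is Iterable<Result>
        System.out.println(Bytes.toString(r.getRow()));
      }
    } // closing the scanner releases server-side resources
  }
}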
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
index 62cfd60..8fa3f76 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
@@ -106,11 +106,11 @@
 
 
 RetriesExhaustedWithDetailsException
-AsyncRequestFutureImpl.getErrors() 
+AsyncRequestFuture.getErrors() 
 
 
 RetriesExhaustedWithDetailsException
-AsyncRequestFuture.getErrors() 
+AsyncRequestFutureImpl.getErrors() 
 
 
 (package private) RetriesExhaustedWithDetailsException

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
index eec52bf..5b32e1b 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
@@ -234,36 +234,28 @@
 
 
 
-T
-RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable callable,
-  int callTimeout) 
-
-
 T
 RpcRetryingCaller.callWithoutRetries(RetryingCallable callable,
   int callTimeout)
 Call the server once only.
 
 
-
+
 T
-RpcRetryingCallerImpl.callWithRetries(RetryingCallable callable,
-   int callTimeout) 
+RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable callable,
+  int callTimeout) 
 
-
+
 T
 RpcRetryingCaller.callWithRetries(RetryingCallable callable,
int callTimeout)
 Retries if invocation fails.
 
 
-
-RetryingCallerInterceptorContext
-NoOpRetryingInterceptorContext.prepare(RetryingCallable callable) 
-
 
-FastFailInterceptorContext
-FastFailInterceptorContext.prepare(RetryingCallable callable) 
+T
+RpcRetryingCallerImpl.callWithRetries(RetryingCallable callable,
+   int callTimeout) 
 
 
 abstract RetryingCallerInterceptorContext
@@ -275,13 +267,11 @@
 
 
 RetryingCallerInterceptorContext
-NoOpRetryingInterceptorContext.prepare(RetryingCallable
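
The pairing above, callWithoutRetries next to callWithRetries, is the usual single-attempt versus bounded-retry contract. A conceptual rendering in plain Java, not HBase's actual implementation (real callers also back off between attempts and consult the interceptor contexts indexed here):

import java.io.IOException;
import java.util.concurrent.Callable;

public class RetryContractSketch {
  // One attempt only; any failure propagates to the caller.
  static <T> T callWithoutRetries(Callable<T> callable) throws Exception {
    return callable.call();
  }

  // Up to maxAttempts tries; the last failure is rethrown if all attempts fail.
  static <T> T callWithRetries(Callable<T> callable, int maxAttempts) throws Exception {
    IOException last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return callable.call();
      } catch (IOException e) {
        last = e; // treat IOException as retriable; real code sleeps here
      }
    }
    if (last == null) {
      throw new IllegalArgumentException("maxAttempts must be >= 1");
    }
    throw last;
  }
}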

[17/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

2018-03-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
index a85184f..12b5bec 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
@@ -292,7 +292,7 @@ service.
 
 
 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r,
+MetaTableAccessor.getRegionLocation(Result r,
  RegionInfo regionInfo,
  int replicaId)
 Returns the HRegionLocation parsed from the given meta row 
Result
@@ -301,7 +301,7 @@ service.
 
 
 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r,
+AsyncMetaTableAccessor.getRegionLocation(Result r,
  RegionInfo regionInfo,
  int replicaId)
 Returns the HRegionLocation parsed from the given meta row 
Result
@@ -309,14 +309,14 @@ service.
 
 
 
-private static https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
-AsyncMetaTableAccessor.getRegionLocations(Result r)
+static RegionLocations
+MetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocationList extracted from the 
result.
 
 
 
-static RegionLocations
-MetaTableAccessor.getRegionLocations(Result r)
+private static https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
+AsyncMetaTableAccessor.getRegionLocations(Result r)
 Returns an HRegionLocationList extracted from the 
result.
 
 
@@ -334,42 +334,42 @@ service.
 
 
 private static long
-AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r,
+MetaTableAccessor.getSeqNumDuringOpen(Result r,
int replicaId)
 The latest seqnum that the server writing to meta observed 
when opening the region.
 
 
 
 private static long
-MetaTableAccessor.getSeqNumDuringOpen(Result r,
+AsyncMetaTableAccessor.getSeqNumDuringOpen(Result r,
int replicaId)
 The latest seqnum that the server writing to meta observed 
when opening the region.
 
 
 
-private static https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
-AsyncMetaTableAccessor.getServerName(Result r,
+static ServerName
+MetaTableAccessor.getServerName(Result r,
  int replicaId)
 Returns a ServerName from catalog table Result.
 
 
 
-static ServerName
-MetaTableAccessor.getServerName(Result r,
+private static https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
+AsyncMetaTableAccessor.getServerName(Result r,
  int replicaId)
 Returns a ServerName from catalog table Result.
 
 
 
-private static https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
-AsyncMetaTableAccessor.getTableState(Result r) 
-
-
 static TableState
 MetaTableAccessor.getTableState(Result r)
 Decode table state from META Result.
 
 
+
+private static https://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
+AsyncMetaTableAccessor.getTableState(Result r) 
+
 
 void
 AsyncMetaTableAccessor.MetaTableScanResultConsumer.onNext(Result[] results,
@@ -465,13 +465,13 @@ service.
 ClientScanner.cache 
 
 
-private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-CompleteScanResultCache.partialResults 
-
-
 private https://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true";
 title="class or interface in java.util">Deque
 BatchScanResultCache.partialResults 
 
+
+private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
+CompleteScanResultCache.partialResults 
+
 
 private https://docs.oracle.com/javase/8/docs/api/java/util/Queue.html?is-external=true";
 title="class or interface in java.util">Queue
 AsyncTableResultScanner.queue 
@@ -494,7 +494,7 @@ service.
 
 
 Result[]
-AllowPartialScanResultCache.addAndGet(Result[] results,
+BatchScanResultCache.addAndGet(Result[] results,
  boolean isHeartbeatMessage) 
 
 
@@ -504,24 +504,20 @@ service.
 
 
 Result[]
-BatchScanResultCache.addAndGet(Result[] results,
+AllowPartialScanResultCache.addAndGet(Result[] results,
  boolean isHeartbeatMessage) 
 
 
 Result
-Table.append(Append append)
-Appends values to one 
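
The row is truncated mid-description, but Table.append needs little more: it atomically appends bytes to a cell on the server and returns the new value as a Result. A sketch with invented identifiers:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("demo"))) {
      Append append = new Append(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
      Result result = table.append(append); // server-side, no read-modify-write race
      System.out.println(Bytes.toString(
          result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
    }
  }
}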

[17/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

2018-03-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
index 9ee12ef..4c42811 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushHandler.html
@@ -51,889 +51,893 @@
 043import 
org.apache.hadoop.hbase.HConstants;
 044import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
 045import 
org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
-046import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-047import 
org.apache.hadoop.hbase.trace.TraceUtil;
-048import 
org.apache.hadoop.hbase.util.Bytes;
-049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import 
org.apache.hadoop.hbase.util.HasThread;
-051import 
org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-052import 
org.apache.hadoop.hbase.util.Threads;
-053import 
org.apache.hadoop.ipc.RemoteException;
-054import 
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-055import 
org.apache.htrace.core.TraceScope;
-056import 
org.apache.yetus.audience.InterfaceAudience;
-057import org.slf4j.Logger;
-058import org.slf4j.LoggerFactory;
-059
-060/**
-061 * Thread that flushes cache on request
-062 *
-063 * NOTE: This class extends Thread rather 
than Chore because the sleep time
-064 * can be interrupted when there is 
something to do, rather than the Chore
-065 * sleep time which is invariant.
-066 *
-067 * @see FlushRequester
-068 */
-069@InterfaceAudience.Private
-070class MemStoreFlusher implements 
FlushRequester {
-071  private static final Logger LOG = 
LoggerFactory.getLogger(MemStoreFlusher.class);
-072
-073  private Configuration conf;
-074  // These two data members go together.  
Any entry in the one must have
-075  // a corresponding entry in the 
other.
-076  private final 
BlockingQueue flushQueue = new DelayQueue<>();
-077  private final Map regionsInQueue = new HashMap<>();
-078  private AtomicBoolean wakeupPending = 
new AtomicBoolean();
-079
-080  private final long 
threadWakeFrequency;
-081  private final HRegionServer server;
-082  private final ReentrantReadWriteLock 
lock = new ReentrantReadWriteLock();
-083  private final Object blockSignal = new 
Object();
-084
-085  private long blockingWaitTime;
-086  private final LongAdder 
updatesBlockedMsHighWater = new LongAdder();
-087
-088  private final FlushHandler[] 
flushHandlers;
-089  private 
List flushRequestListeners = new 
ArrayList<>(1);
-090
-091  private FlushType flushType;
-092
-093  /**
-094   * Singleton instance inserted into 
flush queue used for signaling.
-095   */
-096  private static final FlushQueueEntry 
WAKEUPFLUSH_INSTANCE = new FlushQueueEntry() {
-097@Override
-098public long getDelay(TimeUnit unit) 
{
-099  return 0;
-100}
-101
-102@Override
-103public int compareTo(Delayed o) {
-104  return -1;
-105}
-106
-107@Override
-108public boolean equals(Object obj) {
-109  return obj == this;
-110}
-111
-112@Override
-113public int hashCode() {
-114  return 42;
-115}
-116  };
+046import 
org.apache.hadoop.hbase.trace.TraceUtil;
+047import 
org.apache.hadoop.hbase.util.Bytes;
+048import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import 
org.apache.hadoop.hbase.util.HasThread;
+050import 
org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+051import 
org.apache.hadoop.hbase.util.Threads;
+052import 
org.apache.hadoop.ipc.RemoteException;
+053import 
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
+054import 
org.apache.htrace.core.TraceScope;
+055import 
org.apache.yetus.audience.InterfaceAudience;
+056import org.slf4j.Logger;
+057import org.slf4j.LoggerFactory;
+058
+059/**
+060 * Thread that flushes cache on request
+061 *
+062 * NOTE: This class extends Thread rather 
than Chore because the sleep time
+063 * can be interrupted when there is 
something to do, rather than the Chore
+064 * sleep time which is invariant.
+065 *
+066 * @see FlushRequester
+067 */
+068@InterfaceAudience.Private
+069class MemStoreFlusher implements 
FlushRequester {
+070  private static final Logger LOG = 
LoggerFactory.getLogger(MemStoreFlusher.class);
+071
+072  private Configuration conf;
+073  // These two data members go together.  
Any entry in the one must have
+074  // a corresponding entry in the 
other.
+075  private final 
BlockingQueue flushQueue = new DelayQueue<>();
+076  private final Map regionsInQueue = new HashMap<>();
+077  p
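
The listing stops short, but the pattern it establishes, WAKEUPFLUSH_INSTANCE as a zero-delay sentinel dropped into the DelayQueue so a sleeping flush handler wakes before its poll timeout, distills to a few lines. A standalone illustration; class and variable names are invented and this is not HBase code:

import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

public class WakeupQueueDemo {
  interface Entry extends Delayed {}

  // Zero-delay sentinel: always "ready", sorts ahead of real work.
  static final Entry WAKEUP = new Entry() {
    @Override public long getDelay(TimeUnit unit) { return 0; }
    @Override public int compareTo(Delayed o) { return -1; }
  };

  public static void main(String[] args) throws InterruptedException {
    DelayQueue<Entry> queue = new DelayQueue<>();
    Thread worker = new Thread(() -> {
      try {
        Entry e = queue.poll(10, TimeUnit.SECONDS); // would normally block
        System.out.println("woken by sentinel: " + (e == WAKEUP));
      } catch (InterruptedException ignored) { }
    });
    worker.start();
    queue.add(WAKEUP); // wake the worker immediately instead of waiting out the delay
    worker.join();
  }
}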

[17/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
index 74fbf67..33418d0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.html
@@ -27,287 +27,296 @@
 019package 
org.apache.hadoop.hbase.io.hfile.bucket;
 020
 021import java.io.File;
-022import java.io.FileNotFoundException;
-023import java.io.IOException;
-024import java.io.RandomAccessFile;
-025import java.nio.ByteBuffer;
-026import 
java.nio.channels.ClosedChannelException;
-027import java.nio.channels.FileChannel;
-028import java.util.Arrays;
-029import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-030import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-031import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-032import 
org.apache.hadoop.hbase.nio.ByteBuff;
-033import 
org.apache.hadoop.hbase.nio.SingleByteBuff;
-034import 
org.apache.hadoop.util.StringUtils;
-035import 
org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038
-039import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-040import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-041
-042/**
-043 * IO engine that stores data to a file 
on the local file system.
-044 */
-045@InterfaceAudience.Private
-046public class FileIOEngine implements 
IOEngine {
-047  private static final Logger LOG = 
LoggerFactory.getLogger(FileIOEngine.class);
-048  public static final String 
FILE_DELIMITER = ",";
-049  private final String[] filePaths;
-050  private final FileChannel[] 
fileChannels;
-051  private final RandomAccessFile[] 
rafs;
-052
-053  private final long sizePerFile;
-054  private final long capacity;
-055
-056  private FileReadAccessor readAccessor = 
new FileReadAccessor();
-057  private FileWriteAccessor writeAccessor 
= new FileWriteAccessor();
-058
-059  public FileIOEngine(long capacity, 
boolean maintainPersistence, String... filePaths)
-060  throws IOException {
-061this.sizePerFile = capacity / 
filePaths.length;
-062this.capacity = this.sizePerFile * 
filePaths.length;
-063this.filePaths = filePaths;
-064this.fileChannels = new 
FileChannel[filePaths.length];
-065if (!maintainPersistence) {
-066  for (String filePath : filePaths) 
{
-067File file = new File(filePath);
-068if (file.exists()) {
-069  if (LOG.isDebugEnabled()) {
-070LOG.debug("File " + filePath 
+ " already exists. Deleting!!");
-071  }
-072  file.delete();
-073  // If deletion fails, we can 
still manage with the writes
-074}
-075  }
-076}
-077this.rafs = new 
RandomAccessFile[filePaths.length];
-078for (int i = 0; i < 
filePaths.length; i++) {
-079  String filePath = filePaths[i];
-080  try {
-081rafs[i] = new 
RandomAccessFile(filePath, "rw");
-082long totalSpace = new 
File(filePath).getTotalSpace();
-083if (totalSpace < sizePerFile) 
{
-084  // The next setting length will 
throw an exception; logging this message
-085  // just records the detailed 
reason for the exception,
-086  String msg = "Only " + 
StringUtils.byteDesc(totalSpace)
-087  + " total space under " + 
filePath + ", not enough for requested "
-088  + 
StringUtils.byteDesc(sizePerFile);
-089  LOG.warn(msg);
-090}
-091rafs[i].setLength(sizePerFile);
-092fileChannels[i] = 
rafs[i].getChannel();
-093LOG.info("Allocating cache " + 
StringUtils.byteDesc(sizePerFile)
-094+ ", on the path:" + 
filePath);
-095  } catch (IOException fex) {
-096LOG.error("Failed allocating 
cache on " + filePath, fex);
-097shutdown();
-098throw fex;
-099  }
-100}
-101  }
-102
-103  @Override
-104  public String toString() {
-105return "ioengine=" + 
this.getClass().getSimpleName() + ", paths="
-106+ Arrays.asList(filePaths) + ", 
capacity=" + String.format("%,d", this.capacity);
-107  }
-108
-109  /**
-110   * File IO engine is always able to 
support persistent storage for the cache
-111   * @return true
-112   */
-113  @Override
-114  public boolean isPersistent() {
-115return true;
-116  }
-117
-118  /**
-119   * Transfers data from file to the 
given byte buffer
-120   * @param offset The offset in the file 
where the first byte to be read
-121   * @param length The length of buffer 
that should be allocated for reading
-122   *   from the file 
channel
-123   * @return number of bytes read
-124  
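
The constructor above spreads the requested cache evenly across the configured files (sizePerFile = capacity / filePaths.length) and rounds the effective capacity down to a multiple of that. The arithmetic, with made-up values:

public class CapacitySplitSketch {
  public static void main(String[] args) {
    long capacity = 10L * 1024 * 1024 * 1024;   // requested bucket cache: 10 GiB
    String[] filePaths = {"/data1/bc.cache", "/data2/bc.cache", "/data3/bc.cache"};
    long sizePerFile = capacity / filePaths.length;     // integer division
    long effective = sizePerFile * filePaths.length;    // <= requested capacity
    System.out.printf("%d bytes per file, %d bytes effective%n", sizePerFile, effective);
  }
}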

[17/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

2018-03-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/client/example/ExportEndpointExample.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/example/ExportEndpointExample.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/example/ExportEndpointExample.html
index 6553e14..f2c7811 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/example/ExportEndpointExample.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/example/ExportEndpointExample.html
@@ -64,8 +64,8 @@
 056 Admin admin = con.getAdmin()) 
{
 057  TableDescriptor desc = 
TableDescriptorBuilder.newBuilder(tableName)
 058  // MUST mount the export 
endpoint
-059  
.addCoprocessor(Export.class.getName())
-060  
.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family))
+059  
.setCoprocessor(Export.class.getName())
+060  
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family))
 061  .build();
 062  admin.createTable(desc);
 063
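
This commit tracks the TableDescriptorBuilder rename from addCoprocessor/addColumnFamily to setCoprocessor/setColumnFamily. The updated chain end to end, as a sketch; table and family names are illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.Export;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("export_demo"))
          .setCoprocessor(Export.class.getName())                      // was addCoprocessor
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("family")) // was addColumnFamily
          .build();
      admin.createTable(desc);
    }
  }
}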

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index c0d1f4b..6e98f08 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -2165,7 +2165,7 @@
 2157}
 2158
 2159TableDescriptor newDesc = 
TableDescriptorBuilder
-2160
.newBuilder(old).addColumnFamily(column).build();
+2160
.newBuilder(old).setColumnFamily(column).build();
 2161return modifyTable(tableName, 
newDesc, nonceGroup, nonce);
 2162  }
 2163

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index c0d1f4b..6e98f08 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -2165,7 +2165,7 @@
 2157}
 2158
 2159TableDescriptor newDesc = 
TableDescriptorBuilder
-2160
.newBuilder(old).addColumnFamily(column).build();
+2160
.newBuilder(old).setColumnFamily(column).build();
 2161return modifyTable(tableName, 
newDesc, nonceGroup, nonce);
 2162  }
 2163

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index c0d1f4b..6e98f08 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -2165,7 +2165,7 @@
 2157}
 2158
 2159TableDescriptor newDesc = 
TableDescriptorBuilder
-2160
.newBuilder(old).addColumnFamily(column).build();
+2160
.newBuilder(old).setColumnFamily(column).build();
 2161return modifyTable(tableName, 
newDesc, nonceGroup, nonce);
 2162  }
 2163

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index c0d1f4b..6e98f08 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -2165,7 +2165,7 @@
 2157}
 2158
 2159TableDescriptor newDesc = 
TableDescriptorBuilder
-2160
.newBuilder(old).addColumnFamily(column).build();
+2160
.newBuilder(old).setColumnFamily(column).build();
 2161return modifyTable(tableName, 
newDesc, nonceGroup, nonce);
 2162  }
 2163

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/master/TableStateManager.TableStateNotFoundException.html
--

[17/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

2018-03-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 3da1e22..74d6ab8 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":6,"i4":10,"i5":10,"i6":6,"i7":6,"i8":10,"i9":10,"i10":10,"i11":10,"i12":6,"i13":6,"i14":6,"i15":6,"i16":6,"i17":10,"i18":10,"i19":6,"i20":10,"i21":10,"i22":6,"i23":10,"i24":6,"i25":10,"i26":10,"i27":6};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":6,"i4":10,"i5":10,"i6":6,"i7":6,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":6,"i14":6,"i15":6,"i16":6,"i17":6,"i18":10,"i19":10,"i20":6,"i21":10,"i22":10,"i23":6,"i24":10,"i25":6,"i26":10,"i27":10,"i28":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private abstract static class HRegion.BatchOperation
+private abstract static class HRegion.BatchOperation
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
Class that tracks the progress of a batch of operations, 
accumulating status codes and tracking
  the index at which processing is proceeding. These batch operations may get 
split into
@@ -292,61 +292,65 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
int readyToWriteCount) 
 
 
+private void
+doFinishHotnessProtector(MiniBatchOperationInProgress miniBatchOp) 
+
+
 void
 doPostOpCleanupForMiniBatch(MiniBatchOperationInProgress miniBatchOp,
WALEdit walEdit,
boolean success) 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListUUID>
 getClusterIds() 
 
-
+
 abstract Mutation
 getMutation(int index) 
 
-
+
 abstract Mutation[]
 getMutationsForCoprocs()
 This method is potentially expensive and useful mostly for 
non-replay CP path.
 
 
-
+
 abstract long
 getNonce(int index) 
 
-
+
 abstract long
 getNonceGroup(int index) 
 
-
+
 abstract long
 getOrigLogSeqNum() 
 
-
+
 (package private) boolean
 isAtomic() 
 
-
+
 boolean
 isDone() 
 
-
+
 abstract boolean
 isInReplay() 
 
-
+
 boolean
 isOperationPending(int index) 
 
-
+
 MiniBatchOperationInProgress
 lockRowsAndBuildMiniBatch(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List acquiredRowLocks)
 Creates Mini-batch of all operations [nextIndexToProcess, 
lastIndexExclusive) for which
  a row lock can be acquired.
 
 
-
+
 abstract void
 prepareMiniBatchOperations(MiniBatchOperationInProgress miniBatchOp,
   long timestamp,
@@ -355,15 +359,15 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
   count, tags and timestamp for all cells of all operations in a 
mini-batch.
 
 
-
+
 int
 size() 
 
-
+
 abstract void
 startRegionOperation() 
 
-
+
 void
 visitBatchOperations(boolean pendingOnly,
 int lastIndexExclusive,
@@ -371,12 +375,12 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 Helper method for visiting pending/ all batch 
operations
 
 
-
+
 protected void
 writeMiniBatchOperationsToMemStore(MiniBatchOperationInProgress miniBatchOp,
   long writeNumber) 
 
-
+
 abstract MultiVersionConcurrencyControl.WriteEntry
 writeMiniBatchOperationsToMemStore(MiniBatchOperationInProgress miniBatchOp,
   MultiVersionConcurrencyControl.WriteEntry writeEntry)
@@ -411,7 +415,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 operations
-protected final T[] operations
+protected final T[] operations
 
 
 
@@ -420,7 +424,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 retCodeDetails
-protected final OperationStatus[] retCodeDetails
+protected final OperationStatus[] retCodeDetails
 
 
 
@@ -429,7 +433,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 walEditsFromCoprocessors
-protected final WALEdit[] walEditsFromCoprocessors
+protected final WALEdit[] walEditsFromCopro

[17/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

2018-03-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
index af78556..f5f674e 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
@@ -443,14 +443,14 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 TableDescriptor
-HTable.getDescriptor() 
-
-
-TableDescriptor
 Table.getDescriptor()
 Gets the table 
descriptor for this table.
 
 
+
+TableDescriptor
+HTable.getDescriptor() 
+
 
 TableDescriptor
 Admin.getDescriptor(TableName tableName)
@@ -503,51 +503,51 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
-AsyncAdmin.getDescriptor(TableName tableName)
-Method for getting the tableDescriptor
-
+AsyncHBaseAdmin.getDescriptor(TableName tableName) 
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
-RawAsyncHBaseAdmin.getDescriptor(TableName tableName) 
+AsyncAdmin.getDescriptor(TableName tableName)
+Method for getting the tableDescriptor
+
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
-AsyncHBaseAdmin.getDescriptor(TableName tableName) 
+RawAsyncHBaseAdmin.getDescriptor(TableName tableName) 
 
 
 private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
 RawAsyncHBaseAdmin.getTableDescriptors(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest request) 
 
 
-default https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
-AsyncAdmin.listTableDescriptors()
-List all the userspace tables.
-
-
-
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 Admin.listTableDescriptors()
 List all the userspace tables.
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 HBaseAdmin.listTableDescriptors() 
 
+
+default https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
+AsyncAdmin.listTableDescriptors()
+List all the userspace tables.
+
+
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
-AsyncAdmin.listTableDescriptors(boolean includeSysTables)
-List all the tables.
-
+AsyncHBaseAdmin.listTableDescriptors(boolean includeSysTables) 
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
-RawAsyncHBaseAdmin.listTableDescriptors(boolean includeSysTables) 
+AsyncAdmin.listTableDescriptors(boolean includeSysTables)
+List all the tables.
+
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
-AsyncHBaseAdmin.listTableDescriptors(boolean includeSysTables)
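
The same getDescriptor/listTableDescriptors pair appears on the blocking Admin and on AsyncAdmin, which wraps each result in a CompletableFuture. A blocking-side sketch; the table name is invented:

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescriptorSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableDescriptor one = admin.getDescriptor(TableName.valueOf("demo"));
      System.out.println(one.getColumnFamilyCount() + " families");
      List<TableDescriptor> user = admin.listTableDescriptors(); // userspace tables only
      user.forEach(d -> System.out.println(d.getTableName()));
    }
  }
}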

[17/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

2018-03-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
index 6209920..3616545 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
@@ -208,9 +208,9 @@ service.
 
 
 
-default ResultScanner
-AsyncTable.getScanner(byte[] family)
-Gets a scanner on the current table for the given 
family.
+ResultScanner
+HTable.getScanner(byte[] family)
+The underlying HTable must 
not be closed.
 
 
 
@@ -220,16 +220,16 @@ service.
 
 
 
-ResultScanner
-HTable.getScanner(byte[] family)
-The underlying HTable must 
not be closed.
+default ResultScanner
+AsyncTable.getScanner(byte[] family)
+Gets a scanner on the current table for the given 
family.
 
 
 
-default ResultScanner
-AsyncTable.getScanner(byte[] family,
+ResultScanner
+HTable.getScanner(byte[] family,
   byte[] qualifier)
-Gets a scanner on the current table for the given family 
and qualifier.
+The underlying HTable must 
not be closed.
 
 
 
@@ -240,37 +240,37 @@ service.
 
 
 
-ResultScanner
-HTable.getScanner(byte[] family,
+default ResultScanner
+AsyncTable.getScanner(byte[] family,
   byte[] qualifier)
-The underlying HTable must 
not be closed.
+Gets a scanner on the current table for the given family 
and qualifier.
 
 
 
 ResultScanner
-AsyncTable.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the 
Scan 
object.
-
+RawAsyncTableImpl.getScanner(Scan scan) 
 
 
 ResultScanner
-Table.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the 
Scan
- object.
+HTable.getScanner(Scan scan)
+The underlying HTable must 
not be closed.
 
 
 
 ResultScanner
-AsyncTableImpl.getScanner(Scan scan) 
+Table.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the 
Scan
+ object.
+
 
 
 ResultScanner
-RawAsyncTableImpl.getScanner(Scan scan) 
+AsyncTableImpl.getScanner(Scan scan) 
 
 
 ResultScanner
-HTable.getScanner(Scan scan)
-The underlying HTable must 
not be closed.
+AsyncTable.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the 
Scan 
object.
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
index 8fa3f76..62cfd60 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
@@ -106,11 +106,11 @@
 
 
 RetriesExhaustedWithDetailsException
-AsyncRequestFuture.getErrors() 
+AsyncRequestFutureImpl.getErrors() 
 
 
 RetriesExhaustedWithDetailsException
-AsyncRequestFutureImpl.getErrors() 
+AsyncRequestFuture.getErrors() 
 
 
 (package private) RetriesExhaustedWithDetailsException

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
index 5b32e1b..eec52bf 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
@@ -234,28 +234,36 @@
 
 
 
+T
+RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable callable,
+  int callTimeout) 
+
+
 T
 RpcRetryingCaller.callWithoutRetries(RetryingCallable callable,
   int callTimeout)
 Call the server once only.
 
 
-
+
 T
-RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable callable,
-  int callTimeout) 
+RpcRetryingCallerImpl.callWithRetries(RetryingCallable callable,
+   int callTimeout) 
 
-
+
 T
 RpcRetryingCaller.callWithRetries(RetryingCallable callable,
int callTimeout)
 Retries if invocation fails.
 
 
+
+RetryingCallerInterceptorContext
+NoOpRetryingInterceptorContext.prepare(RetryingCallable callable) 
+
 
-T
-RpcRetryingCallerImpl.callWithRetries(RetryingCallable callable,
-   int callTimeout) 
+FastFailInterceptorContext
+FastFailInterceptorContext.prepare(RetryingCallable callable) 
 
 
 abstract RetryingCallerInterceptorC

[17/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

2018-03-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
index df5fa53..8fffb89 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectAllVisitor.html
@@ -42,1927 +42,2060 @@
 034import java.util.TreeMap;
 035import java.util.regex.Matcher;
 036import java.util.regex.Pattern;
-037import 
org.apache.hadoop.conf.Configuration;
-038import 
org.apache.hadoop.hbase.Cell.Type;
-039import 
org.apache.hadoop.hbase.client.Connection;
-040import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-041import 
org.apache.hadoop.hbase.client.Consistency;
-042import 
org.apache.hadoop.hbase.client.Delete;
-043import 
org.apache.hadoop.hbase.client.Get;
-044import 
org.apache.hadoop.hbase.client.Mutation;
-045import 
org.apache.hadoop.hbase.client.Put;
-046import 
org.apache.hadoop.hbase.client.RegionInfo;
-047import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-048import 
org.apache.hadoop.hbase.client.RegionLocator;
-049import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-050import 
org.apache.hadoop.hbase.client.RegionServerCallable;
-051import 
org.apache.hadoop.hbase.client.Result;
-052import 
org.apache.hadoop.hbase.client.ResultScanner;
-053import 
org.apache.hadoop.hbase.client.Scan;
-054import 
org.apache.hadoop.hbase.client.Table;
-055import 
org.apache.hadoop.hbase.client.TableState;
-056import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-057import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-058import 
org.apache.hadoop.hbase.master.RegionState;
-059import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-060import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-061import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-062import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-063import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
-064import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-065import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
-066import 
org.apache.hadoop.hbase.util.Bytes;
-067import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-068import 
org.apache.hadoop.hbase.util.ExceptionUtil;
-069import 
org.apache.hadoop.hbase.util.Pair;
-070import 
org.apache.hadoop.hbase.util.PairOfSameType;
-071import 
org.apache.yetus.audience.InterfaceAudience;
-072import org.slf4j.Logger;
-073import org.slf4j.LoggerFactory;
-074
-075import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-076
-077/**
-078 * 

-079 * Read/write operations on region and assignment information store in hbase:meta.
-080 *
+037import java.util.stream.Collectors;
+038import java.util.stream.Stream;
+039import org.apache.hadoop.conf.Configuration;
+040import org.apache.hadoop.hbase.Cell.Type;
+041import org.apache.hadoop.hbase.client.Connection;
+042import org.apache.hadoop.hbase.client.ConnectionFactory;
+043import org.apache.hadoop.hbase.client.Consistency;
+044import org.apache.hadoop.hbase.client.Delete;
+045import org.apache.hadoop.hbase.client.Get;
+046import org.apache.hadoop.hbase.client.Mutation;
+047import org.apache.hadoop.hbase.client.Put;
+048import org.apache.hadoop.hbase.client.RegionInfo;
+049import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+050import org.apache.hadoop.hbase.client.RegionLocator;
+051import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+052import org.apache.hadoop.hbase.client.RegionServerCallable;
+053import org.apache.hadoop.hbase.client.Result;
+054import org.apache.hadoop.hbase.client.ResultScanner;
+055import org.apache.hadoop.hbase.client.Scan;
+056import org.apache.hadoop.hbase.client.Table;
+057import org.apache.hadoop.hbase.client.TableState;
+058import org.apache.hadoop.hbase.exceptions.DeserializationException;
+059import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+060import org.apache.hadoop.hbase.master.RegionState;
+061import org.apache.hadoop.hbase.master.RegionState.State;
+062import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+063import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+064import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
+065import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+066import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+067import org.apache.hadoop.hbase.protobuf.gen

[17/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

2018-03-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/ConnectionFactory.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/ConnectionFactory.html 
b/apidocs/org/apache/hadoop/hbase/client/ConnectionFactory.html
index 7e0f75a..e3dba54 100644
--- a/apidocs/org/apache/hadoop/hbase/client/ConnectionFactory.html
+++ b/apidocs/org/apache/hadoop/hbase/client/ConnectionFactory.html
@@ -97,7 +97,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
 
 
 org.apache.hadoop.hbase.client.ConnectionFactory
@@ -111,7 +111,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Public
 public class ConnectionFactory
-extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 A non-instantiable class that manages creation of Connections. Managing the 
lifecycle of
  the Connections to the cluster is 
the responsibility of the caller. From a
  Connection, Table 
implementations are retrieved with
@@ -155,7 +155,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Field and Description
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
+static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 HBASE_CLIENT_ASYNC_CONNECTION_IMPL 
 
 
@@ -195,20 +195,20 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Method and Description
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
+static https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
 createAsyncConnection()
 Call createAsyncConnection(Configuration)
 using default HBaseConfiguration.
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
+static https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
 createAsyncConnection(org.apache.hadoop.conf.Configuration conf)
 Call createAsyncConnection(Configuration,
 User) using the given conf and a
  User object created by UserProvider.
 
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
+static https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
 createAsyncConnection(org.apache.hadoop.conf.Configuration conf,
  User user)
 Create a new AsyncConnection instance using the passed 
conf and user.
@@ -229,14 +229,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 static Connection
 createConnection(org.apache.hadoop.conf.Configuration conf,
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ExecutorService pool)
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ExecutorService pool)
 Create a new Connection instance using the passed 
conf instance.
 
 
 
 static Connection
 createConnection(org.apache.hadoop.conf.Configuration conf,
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ExecutorService pool,
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ExecutorService pool,
 User user)
 Create a new Connection instance using the passed 
conf instance.
 
@@ -253,8 +253,8 @@ extends http://docs.orac
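
The factory methods above come in plain and executor-pool flavors; with the pool variant the caller owns the threads used for request processing and must shut them down itself. A sketch assuming default configuration:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectionPoolSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(8); // caller-owned batch pool
    try (Connection conn =
        ConnectionFactory.createConnection(HBaseConfiguration.create(), pool)) {
      System.out.println("cluster reachable: " + !conn.isClosed());
    } finally {
      pool.shutdown(); // the connection does not shut down a pool it was handed
    }
  }
}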

[17/51] [partial] hbase-site git commit: Published site at .

2018-02-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index 93f650f..d7aa8b1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
@@ -546,1472 +546,1464 @@
 538return this.conf;
 539  }
 540
-541  /**
-542   * @return true if the master is 
running, throws an exception otherwise
-543   * @throws 
org.apache.hadoop.hbase.MasterNotRunningException - if the master is not 
running
-544   * @deprecated this has been deprecated 
without a replacement
-545   */
-546  @Deprecated
-547  @Override
-548  public boolean isMasterRunning()
-549  throws MasterNotRunningException, 
ZooKeeperConnectionException {
-550// When getting the master 
connection, we check it's running,
-551// so if there is no exception, it 
means we've been able to get a
-552// connection on a running master
-553MasterKeepAliveConnection m = 
getKeepAliveMasterService();
-554m.close();
-555return true;
-556  }
-557
-558  @Override
-559  public HRegionLocation 
getRegionLocation(final TableName tableName,
-560  final byte [] row, boolean 
reload)
-561  throws IOException {
-562return reload? 
relocateRegion(tableName, row): locateRegion(tableName, row);
-563  }
-564
-565
-566  @Override
-567  public boolean isTableEnabled(TableName 
tableName) throws IOException {
-568return 
getTableState(tableName).inStates(TableState.State.ENABLED);
-569  }
-570
-571  @Override
-572  public boolean 
isTableDisabled(TableName tableName) throws IOException {
-573return 
getTableState(tableName).inStates(TableState.State.DISABLED);
-574  }
-575
-576  @Override
-577  public boolean isTableAvailable(final 
TableName tableName, @Nullable final byte[][] splitKeys)
-578  throws IOException {
-579if (this.closed) {
-580  throw new IOException(toString() + 
" closed");
-581}
-582try {
-583  if (!isTableEnabled(tableName)) {
-584LOG.debug("Table " + tableName + 
" not enabled");
-585return false;
-586  }
-587  List> locations =
-588
MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true);
-589
-590  int notDeployed = 0;
-591  int regionCount = 0;
-592  for (Pair pair : locations) {
-593RegionInfo info = 
pair.getFirst();
-594if (pair.getSecond() == null) {
-595  if (LOG.isDebugEnabled()) {
-596LOG.debug("Table " + 
tableName + " has not deployed region " + pair.getFirst()
-597.getEncodedName());
-598  }
-599  notDeployed++;
-600} else if (splitKeys != null
-601&& 
!Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
-602  for (byte[] splitKey : 
splitKeys) {
-603// Just check if the splitkey 
is available
-604if 
(Bytes.equals(info.getStartKey(), splitKey)) {
-605  regionCount++;
-606  break;
-607}
-608  }
-609} else {
-610  // An empty start row should 
always be counted
-611  regionCount++;
-612}
-613  }
-614  if (notDeployed > 0) {
-615if (LOG.isDebugEnabled()) {
-616  LOG.debug("Table " + tableName 
+ " has " + notDeployed + " regions");
-617}
-618return false;
-619  } else if (splitKeys != null 
&& regionCount != splitKeys.length + 1) {
-620if (LOG.isDebugEnabled()) {
-621  LOG.debug("Table " + tableName 
+ " expected to have " + (splitKeys.length + 1)
-622  + " regions, but only " + 
regionCount + " available");
-623}
-624return false;
-625  } else {
-626if (LOG.isDebugEnabled()) {
-627  LOG.debug("Table " + tableName 
+ " should be available");
-628}
-629return true;
-630  }
-631} catch (TableNotFoundException tnfe) 
{
-632  LOG.warn("Table " + tableName + " 
not enabled, it does not exist");
-633  return false;
-634}
-635  }
-636
-637  @Override
-638  public HRegionLocation 
locateRegion(final byte[] regionName) throws IOException {
-639RegionLocations locations = 
locateRegion(RegionInfo.getTable(regionName),
-640  RegionInfo.getStartKey(regionName), 
false, true);
-641return locations == null ? null : 
locations.getRegionLocation();
+541  private void checkClosed() throws 
DoNotRetryIOException {
+542if (this.closed) {
+543  throw new 
DoNotRetryIOException(toString(
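
The listing walks the table's meta entries, counting undeployed regions and, when split keys are supplied, expecting exactly splitKeys.length + 1 regions. From the caller's side the same check is a single Admin call:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AvailabilitySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // true only when the table is enabled and every region is deployed
      boolean ok = admin.isTableAvailable(TableName.valueOf("demo"));
      System.out.println("available: " + ok);
    }
  }
}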

[17/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
index 8c02b6e..1a90c1d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.html
@@ -51,55 +51,65 @@
 043
 044  protected long flushSizeLowerBound = 
-1;
 045
-046  protected long 
getFlushSizeLowerBound(HRegion region) { int familyNumber = 
region.getTableDescriptor().getColumnFamilyCount();
-047// For multiple families, lower bound 
is the "average flush size" by default
-048// unless setting in configuration is 
larger.
-049long flushSizeLowerBound = 
region.getMemStoreFlushSize() / familyNumber;
-050long minimumLowerBound =
-051
getConf().getLong(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
-052  
DEFAULT_HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN);
-053if (minimumLowerBound > 
flushSizeLowerBound) {
-054  flushSizeLowerBound = 
minimumLowerBound;
-055}
-056// use the setting in table 
description if any
-057String flushedSizeLowerBoundString 
=
-058
region.getTableDescriptor().getValue(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND);
-059if (flushedSizeLowerBoundString == 
null) {
-060  LOG.debug("No {} set in table {} 
descriptor;" +
-061  "using 
region.getMemStoreFlushSize/# of families ({}) instead.",
-062  
HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND,
-063  
region.getTableDescriptor().getTableName(),
-064  
StringUtils.humanSize(flushSizeLowerBound) + ")");
-065} else {
-066  try {
-067flushSizeLowerBound = 
Long.parseLong(flushedSizeLowerBoundString);
-068  } catch (NumberFormatException nfe) 
{
-069// fall back for a faulty setting
-070LOG.warn("Number format exception 
parsing {} for table {}: {}, {}; " +
-071"using 
region.getMemStoreFlushSize/# of families ({}) instead.",
-072
HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND,
-073
region.getTableDescriptor().getTableName(),
-074
flushedSizeLowerBoundString,
-075nfe,
-076flushSizeLowerBound);
-077
-078  }
-079}
-080return flushSizeLowerBound;
-081  }
-082
-083  protected boolean shouldFlush(HStore 
store) {
-084if 
(store.getMemStoreSize().getDataSize() > this.flushSizeLowerBound) {
-085  LOG.debug("Flush {} of {}; 
memstoreSize={} > lowerBound={}",
-086  store.getColumnFamilyName(),
-087  
region.getRegionInfo().getEncodedName(),
-088  
store.getMemStoreSize().getDataSize(),
-089  this.flushSizeLowerBound);
-090  return true;
-091}
-092return false;
-093  }
-094}
+046  protected void 
setFlushSizeLowerBounds(HRegion region) {
+047int familyNumber = 
region.getTableDescriptor().getColumnFamilyCount();
+048// For multiple families, lower bound 
is the "average flush size" by default
+049// unless setting in configuration is 
larger.
+050flushSizeLowerBound = 
region.getMemStoreFlushSize() / familyNumber;
+051long minimumLowerBound =
+052
getConf().getLong(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
+053  
DEFAULT_HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN);
+054if (minimumLowerBound > 
flushSizeLowerBound) {
+055  flushSizeLowerBound = 
minimumLowerBound;
+056}
+057// use the setting in table 
description if any
+058String flushedSizeLowerBoundString 
=
+059
region.getTableDescriptor().getValue(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND);
+060if (flushedSizeLowerBoundString == 
null) {
+061  LOG.debug("No {} set in table {} 
descriptor;"
+062  + "using 
region.getMemStoreFlushHeapSize/# of families ({}) "
+063  + "instead."
+064  , 
HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND
+065  , 
region.getTableDescriptor().getTableName()
+066  , 
StringUtils.humanSize(flushSizeLowerBound)
+067  + ")");
+068} else {
+069  try {
+070flushSizeLowerBound = 
Long.parseLong(flushedSizeLowerBoundString);
+071  } catch (NumberFormatException nfe) 
{
+072// fall back for a faulty setting
+073LOG.warn("Number format exception 
parsing {} for table {}: {}, {}; "
+074+ "using 
region.getMemStoreFlushHeapSize/# of families ({}) "
+075+ "and 
region.getMemStoreFlushOffHeapSize/# of families ({}) "
+076+ "instead."
+077, 
HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND
+078, 
region.getTableDescriptor().getTableName()
+079, 
flushedS
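
Stripped of logging, the selection above is: average the region's flush size over its family count, raise that to the configured minimum, and let a parseable per-table descriptor value override the result. The arithmetic with example numbers:

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    long memstoreFlushSize = 128L << 20; // region-level flush threshold: 128 MiB
    int familyCount = 4;
    long lowerBound = memstoreFlushSize / familyCount;   // "average flush size": 32 MiB
    long configuredMin = 16L << 20;                      // ...FLUSH_SIZE_LOWER_BOUND_MIN
    if (configuredMin > lowerBound) {
      lowerBound = configuredMin;                        // never below the configured floor
    }
    // a parseable per-table descriptor value would replace lowerBound here
    System.out.println("per-family flush lower bound: " + lowerBound);
  }
}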

[17/51] [partial] hbase-site git commit: Published site at .

2018-02-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
index bd13b53..802b925 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.html
@@ -900,7600 +900,7598 @@
 892if (this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
 893  status.setStatus("Writing region info on filesystem");
 894  fs.checkRegionInfoOnFilesystem();
-895} else {
-896  if (LOG.isDebugEnabled()) {
-897LOG.debug("Skipping creation of .regioninfo file for " + this.getRegionInfo());
-898  }
-899}
-900
-901// Initialize all the HStores
-902status.setStatus("Initializing all the Stores");
-903long maxSeqId = initializeStores(reporter, status);
-904this.mvcc.advanceTo(maxSeqId);
-905if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-906  Collection stores = this.stores.values();
-907  try {
-908// update the stores that we are replaying
-909stores.forEach(HStore::startReplayingFromWAL);
-910// Recover any edits if available.
-911maxSeqId = Math.max(maxSeqId,
-912  replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-913// Make sure mvcc is up to max.
-914this.mvcc.advanceTo(maxSeqId);
-915  } finally {
-916// update the stores that we are done replaying
-917stores.forEach(HStore::stopReplayingFromWAL);
-918  }
-919}
-920this.lastReplayedOpenRegionSeqId = maxSeqId;
+895}
+896
+897// Initialize all the HStores
+898status.setStatus("Initializing all the Stores");
+899long maxSeqId = initializeStores(reporter, status);
+900this.mvcc.advanceTo(maxSeqId);
+901if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+902  Collection stores = this.stores.values();
+903  try {
+904// update the stores that we are replaying
+905stores.forEach(HStore::startReplayingFromWAL);
+906// Recover any edits if available.
+907maxSeqId = Math.max(maxSeqId,
+908  replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+909// Make sure mvcc is up to max.
+910this.mvcc.advanceTo(maxSeqId);
+911  } finally {
+912// update the stores that we are done replaying
+913stores.forEach(HStore::stopReplayingFromWAL);
+914  }
+915}
+916this.lastReplayedOpenRegionSeqId = maxSeqId;
+917
+918this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+919this.writestate.flushRequested = false;
+920this.writestate.compacting.set(0);
 921
-922this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-923this.writestate.flushRequested = false;
-924this.writestate.compacting.set(0);
-925
-926if (this.writestate.writesEnabled) {
-927  // Remove temporary data left over from old regions
-928  status.setStatus("Cleaning up temporary data from old regions");
-929  fs.cleanupTempDir();
-930}
-931
-932if (this.writestate.writesEnabled) {
-933  status.setStatus("Cleaning up detritus from prior splits");
-934  // Get rid of any splits or merges that were lost in-progress.  Clean out
-935  // these directories here on open.  We may be opening a region that was
-936  // being split but we crashed in the middle of it all.
-937  fs.cleanupAnySplitDetritus();
-938  fs.cleanupMergesDir();
-939}
-940
-941// Initialize split policy
-942this.splitPolicy = RegionSplitPolicy.create(this, conf);
-943
-944// Initialize flush policy
-945this.flushPolicy = FlushPolicyFactory.create(this, conf);
-946
-947long lastFlushTime = EnvironmentEdgeManager.currentTime();
-948for (HStore store: stores.values()) {
-949  this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-950}
-951
-952// Use maximum of log sequenceid or that which was found in stores
-953// (particularly if no recovered edits, seqid will be -1).
-954long nextSeqid = maxSeqId;
-955if (this.writestate.writesEnabled) {
-956  nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
-957  this.fs.getRegionDir(), nextSeqid, 1);
-958} else {
-959  nextSeqid++;
-960}
-961
-962LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
-963  "; next sequenceid=" + nextSeqid);
+922if (this.writestate.writesEnabled) {
+923  // Remove temporary data l

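The replay block above follows a bracket pattern: every store is switched into WAL-replay mode, edits are recovered, and the mode is always switched back in a finally block even if recovery throws. A minimal sketch of that pattern, with a hypothetical Store interface standing in for HStore:

import java.util.Collection;
import java.util.function.LongSupplier;

// Hypothetical stand-in for HStore; only the two mode switches matter here.
interface Store {
  void startReplayingFromWAL();
  void stopReplayingFromWAL();
}

final class ReplayBracket {
  // Runs the recovery action with every store in replay mode and
  // guarantees the mode is reset even when recovery fails.
  static long replayAll(Collection<Store> stores, LongSupplier recover) {
    stores.forEach(Store::startReplayingFromWAL);
    try {
      return recover.getAsLong(); // e.g. the max sequence id seen in recovered edits
    } finally {
      stores.forEach(Store::stopReplayingFromWAL);
    }
  }
}
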
[17/51] [partial] hbase-site git commit: Published site at .

2018-02-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html 
b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
index 6e37f0b..49f85aa 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
@@ -162,11 +162,11 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 ImmutableBytesWritable
-TableSnapshotInputFormat.TableSnapshotRecordReader.createKey() 
+TableRecordReader.createKey() 
 
 
 ImmutableBytesWritable
-TableRecordReader.createKey() 
+TableSnapshotInputFormat.TableSnapshotRecordReader.createKey() 
 
 
 ImmutableBytesWritable
@@ -183,9 +183,11 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 org.apache.hadoop.mapred.RecordReader
-TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
org.apache.hadoop.mapred.JobConf job,
-   org.apache.hadoop.mapred.Reporter reporter) 
+   org.apache.hadoop.mapred.Reporter reporter)
+Builds a TableRecordReader.
+
 
 
 org.apache.hadoop.mapred.RecordReader
@@ -195,11 +197,9 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 org.apache.hadoop.mapred.RecordReader
-TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
org.apache.hadoop.mapred.JobConf job,
-   org.apache.hadoop.mapred.Reporter reporter)
-Builds a TableRecordReader.
-
+   org.apache.hadoop.mapred.Reporter reporter) 
 
 
 
@@ -218,10 +218,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-   Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+   Result value,
org.apache.hadoop.mapred.OutputCollector output,
-   org.apache.hadoop.mapred.Reporter reporter) 
+   org.apache.hadoop.mapred.Reporter reporter)
+Pass the key, value to reduce
+
 
 
 void
@@ -234,21 +236,19 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-IdentityTableMap.map(ImmutableBytesWritable key,
-   Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+   Result values,
org.apache.hadoop.mapred.OutputCollector output,
-   org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+   org.apache.hadoop.mapred.Reporter reporter) 
 
 
 boolean
-TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
+TableRecordReader.next(ImmutableBytesWritable key,
 Result value) 
 
 
 boolean
-TableRecordReader.next(ImmutableBytesWritable key,
+TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
 Result value) 
 
 
@@ -281,10 +281,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-   Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+   Result value,
org.apache.hadoop.mapred.OutputCollector output,
-   org.apache.hadoop.mapred.Reporter reporter) 
+   org.apache.hadoop.mapred.Reporter reporter)
+Pass the key, value to reduce
+
 
 
 void
@@ -297,12 +299,10 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-IdentityTableMap.map(ImmutableBytesWritable key,
-   Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+   Result values,
org.apache.hadoop.mapred.OutputCollector output,
-   org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+   org.apache.hadoop.mapred.Reporter reporter) 
 
 
 void
@@ -349,7 +349,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 private ImmutableBytesWritable
-TableRecordReaderImpl.key 
+MultithreadedTableMapper.SubMapRecordReader.key 
 
 
 private ImmutableBytesWritable
@@ -357,7 +357,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 private ImmutableBytesWritable
-MultithreadedTableMapper.SubMapRecordReader.key 
+TableRecordReaderImpl.key 
 
 
 (package private) ImmutableBytesWritable
@@ -427,33 +427,33 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 ImmutableBytesWritable
-TableS

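The IdentityTableMap.map entries in the table above implement the old mapred contract "pass the key, value to reduce". A sketch of that contract under the mapred API, assuming Hadoop on the classpath; K and V are generic placeholders for ImmutableBytesWritable and Result:

import java.io.IOException;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

// Sketch only: K and V stand in for ImmutableBytesWritable and Result.
final class IdentityMapSketch<K, V> {
  void map(K key, V value, OutputCollector<K, V> output, Reporter reporter)
      throws IOException {
    output.collect(key, value); // forward the row unchanged to the reducer
  }
}
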
[17/51] [partial] hbase-site git commit: Published site at .

2018-02-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
index 98104cb..56a2ea1 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
@@ -449,13 +449,13 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 TableDescriptor
-Table.getDescriptor()
-Gets the table descriptor for this table.
-
+HTable.getDescriptor() 
 
 
 TableDescriptor
-HTable.getDescriptor() 
+Table.getDescriptor()
+Gets the table descriptor for this table.
+
 
 
 TableDescriptor
@@ -509,52 +509,52 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
-AsyncHBaseAdmin.getDescriptor(TableName tableName) 
-
-
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
 AsyncAdmin.getDescriptor(TableName tableName)
 Method for getting the tableDescriptor
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
 RawAsyncHBaseAdmin.getDescriptor(TableName tableName) 
 
+
+http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
+AsyncHBaseAdmin.getDescriptor(TableName tableName) 
+
 
 private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
 RawAsyncHBaseAdmin.getTableDescriptors(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest request) 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-Admin.listTableDescriptors()
+default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
+AsyncAdmin.listTableDescriptors()
 List all the userspace tables.
 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-HBaseAdmin.listTableDescriptors() 
-
-
-default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
-AsyncAdmin.listTableDescriptors()
+Admin.listTableDescriptors()
 List all the userspace tables.
 
 
-
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
-AsyncHBaseAdmin.listTableDescriptors(boolean includeSysTables) 
-
 
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
+HBaseAdmin.listTableDescriptors() 
+
+
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
 AsyncAdmin.listTableDescriptors(boolean includeSysTables)
 List all the tables.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList

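The listTableDescriptors entries above come in a blocking and a CompletableFuture flavor. A sketch of both calls, assuming an already-built Connection and the HBase 2.x client API:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.TableDescriptor;

final class ListTablesSketch {
  static void sync(Admin admin) throws IOException {
    List<TableDescriptor> tables = admin.listTableDescriptors(); // userspace tables only
    tables.forEach(td -> System.out.println(td.getTableName()));
  }

  static void async(AsyncAdmin admin) {
    admin.listTableDescriptors() // CompletableFuture<List<TableDescriptor>>
        .thenAccept(tds -> tds.forEach(td -> System.out.println(td.getTableName())));
  }
}
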
[17/51] [partial] hbase-site git commit: Published site at .

2018-02-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
index bb2794a..0c342b2 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
@@ -151,115 +151,115 @@
 
 
 Filter.ReturnCode
-ColumnPrefixFilter.filterCell(Cell cell) 
+FilterListWithAND.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-ColumnCountGetFilter.filterCell(Cell c) 
+ValueFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-RowFilter.filterCell(Cell v) 
+SkipFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-FuzzyRowFilter.filterCell(Cell c) 
+FamilyFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-Filter.filterCell(Cell c)
-A way to filter based on the column family, column qualifier and/or the column value.
-
+ColumnPrefixFilter.filterCell(Cell cell) 
 
 
 Filter.ReturnCode
-RandomRowFilter.filterCell(Cell c) 
+PageFilter.filterCell(Cell ignored) 
 
 
 Filter.ReturnCode
-FirstKeyOnlyFilter.filterCell(Cell c) 
+RowFilter.filterCell(Cell v) 
 
 
 Filter.ReturnCode
-SkipFilter.filterCell(Cell c) 
+ColumnRangeFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-TimestampsFilter.filterCell(Cell c) 
+ColumnCountGetFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-ValueFilter.filterCell(Cell c) 
+MultipleColumnPrefixFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-KeyOnlyFilter.filterCell(Cell ignored) 
+ColumnPaginationFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-FamilyFilter.filterCell(Cell c) 
+DependentColumnFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-QualifierFilter.filterCell(Cell c) 
+FilterListWithOR.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-FilterList.filterCell(Cell c) 
+InclusiveStopFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-ColumnRangeFilter.filterCell(Cell c) 
+KeyOnlyFilter.filterCell(Cell ignored) 
 
 
 Filter.ReturnCode
-ColumnPaginationFilter.filterCell(Cell c) 
+MultiRowRangeFilter.filterCell(Cell ignored) 
 
 
 Filter.ReturnCode
-FilterListWithAND.filterCell(Cell c) 
+Filter.filterCell(Cell c)
+A way to filter based on the column family, column qualifier and/or the column value.
+
 
 
 Filter.ReturnCode
-WhileMatchFilter.filterCell(Cell c) 
+FirstKeyOnlyFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-MultiRowRangeFilter.filterCell(Cell ignored) 
+WhileMatchFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-PrefixFilter.filterCell(Cell c) 
+FirstKeyValueMatchingQualifiersFilter.filterCell(Cell c)
+Deprecated. 
+ 
 
 
 Filter.ReturnCode
-DependentColumnFilter.filterCell(Cell c) 
+TimestampsFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-FirstKeyValueMatchingQualifiersFilter.filterCell(Cell c)
-Deprecated. 
- 
+FuzzyRowFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-PageFilter.filterCell(Cell ignored) 
+FilterList.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-FilterListWithOR.filterCell(Cell c) 
+RandomRowFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-InclusiveStopFilter.filterCell(Cell c) 
+PrefixFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-MultipleColumnPrefixFilter.filterCell(Cell c) 
+SingleColumnValueFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
-SingleColumnValueFilter.filterCell(Cell c) 
+QualifierFilter.filterCell(Cell c) 
 
 
 Filter.ReturnCode
@@ -275,158 +275,158 @@
 
 
 Filter.ReturnCode
-ColumnPrefixFilter.filterKeyValue(Cell c)
+ValueFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
 
 Filter.ReturnCode
-ColumnCountGetFilter.filterKeyValue(Cell c)
+SkipFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
 
 Filter.ReturnCode
-RowFilter.filterKeyValue(Cell c)
-Deprecated. 
-
+FilterListBase.filterKeyValue(Cell c) 
 
 
 Filter.ReturnCode
-FuzzyRowFilter.filterKeyValue(Cell c)
+FamilyFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
 
 Filter.ReturnCode
-Filter.filterKeyValue(Cell c)
-Deprecated. 
-As of release 2.0.0, this will be removed in HBase 3.0.0.
- Instead use filterCell(Cell)
-
+ColumnPrefixFilter.filterKeyValue(Cell c)
+Deprecated. 
 
 
 
 Filter.ReturnCode
-RandomRowFilter.filterKeyValue(Cell c)
+PageFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
 
 Filter.ReturnCode
-FirstKeyOnlyFilter.filterKeyValue(Cell c)
+RowFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
 
 Filter.ReturnCode
-SkipFilter.filterKeyValue(Cell c)
+ColumnRangeFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
 
 Filter.ReturnCode
-TimestampsFilter.filterKeyValue(Cell c)
+ColumnCountGetFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
 
 Filter.ReturnCode
-ValueFilter.filterKeyValue(Cell c)
+MultipleColumnPrefixFilter.filterKeyValue(Cell c)
 Deprecated. 
 
 
 
 Filter.ReturnCode
-KeyOnlyFilter.filterKeyValue(Cell ignored)
+ColumnPaginationFilter.filterKeyValue(Cell c)
 Depre

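The table above records the 2.0 rename from the deprecated filterKeyValue(Cell) to filterCell(Cell). A sketch of a custom filter written against the new method, assuming the HBase 2.x client; the filter class itself is hypothetical:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.util.Bytes;

public class QualifierPrefixSkipFilter extends FilterBase {
  private final byte[] prefix;

  public QualifierPrefixSkipFilter(byte[] prefix) {
    this.prefix = prefix;
  }

  @Override
  public ReturnCode filterCell(Cell c) {
    // INCLUDE keeps the cell; SKIP drops it and advances to the next one.
    return Bytes.startsWith(CellUtil.cloneQualifier(c), prefix)
        ? ReturnCode.INCLUDE : ReturnCode.SKIP;
  }
}
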
[17/51] [partial] hbase-site git commit: Published site at .

2018-02-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
index d481372..5e1590b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
@@ -126,15 +126,15 @@
 
 
 private RpcRetryingCallerFactory
-RegionCoprocessorRpcChannel.rpcCallerFactory 
+ConnectionImplementation.rpcCallerFactory 
 
 
 private RpcRetryingCallerFactory
-ConnectionImplementation.rpcCallerFactory 
+HTable.rpcCallerFactory 
 
 
 private RpcRetryingCallerFactory
-HTable.rpcCallerFactory 
+RegionCoprocessorRpcChannel.rpcCallerFactory 
 
 
 private RpcRetryingCallerFactory
@@ -155,21 +155,21 @@
 
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf) 
-
-
-RpcRetryingCallerFactory
 ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
 Returns a new RpcRetryingCallerFactory from the given 
Configuration.
 
 
+
+RpcRetryingCallerFactory
+ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf) 
+
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getRpcRetryingCallerFactory() 
+ClusterConnection.getRpcRetryingCallerFactory() 
 
 
 RpcRetryingCallerFactory
-ClusterConnection.getRpcRetryingCallerFactory() 
+ConnectionImplementation.getRpcRetryingCallerFactory() 
 
 
 static RpcRetryingCallerFactory

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
index 6384833..018438c 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
@@ -283,27 +283,27 @@ service.
 
 
 private Scan
-ScannerCallableWithReplicas.scan 
+AsyncScanSingleRegionRpcRetryingCaller.scan 
 
 
 protected Scan
-ClientScanner.scan 
+ScannerCallable.scan 
 
 
 private Scan
-AsyncClientScanner.scan 
+ScannerCallableWithReplicas.scan 
 
 
-private Scan
-AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.scan 
+protected Scan
+ClientScanner.scan 
 
 
 private Scan
-AsyncScanSingleRegionRpcRetryingCaller.scan 
+AsyncClientScanner.scan 
 
 
-protected Scan
-ScannerCallable.scan 
+private Scan
+AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.scan 
 
 
 private Scan
@@ -339,11 +339,11 @@ service.
 
 
 protected Scan
-ClientScanner.getScan() 
+ScannerCallable.getScan() 
 
 
 protected Scan
-ScannerCallable.getScan() 
+ClientScanner.getScan() 
 
 
 Scan
@@ -638,29 +638,29 @@ service.
 
 
 ResultScanner
-AsyncTable.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
-
+RawAsyncTableImpl.getScanner(Scan scan) 
 
 
 ResultScanner
-Table.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
+HTable.getScanner(Scan scan)
+The underlying HTable must not be closed.
 
 
 
 ResultScanner
-AsyncTableImpl.getScanner(Scan scan) 
+Table.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the Scan object.
+
 
 
 ResultScanner
-RawAsyncTableImpl.getScanner(Scan scan) 
+AsyncTableImpl.getScanner(Scan scan) 
 
 
 ResultScanner
-HTable.getScanner(Scan scan)
-The underlying HTable must 
not be closed.
+AsyncTable.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the 
Scan 
object.
 
 
 
@@ -703,9 +703,7 @@ service.
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
-AsyncTable.scanAll(Scan scan)
-Return all the results that match the given scan object.
-
+RawAsyncTableImpl.scanAll(Scan scan) 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
@@ -713,7 +711,9 @@ service.
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture

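The getScanner(Scan) signatures above all return a ResultScanner; the synchronous Table flavor is the simplest to show. A sketch assuming an open Connection and an existing table named "demo" (both placeholders):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

final class ScanSketch {
  static void scanAll(Connection conn) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("demo"));
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result r : scanner) {
        System.out.println(r); // one Result per row
      }
    }
  }
}
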
[17/51] [partial] hbase-site git commit: Published site at .

2018-02-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
index bb2794a..0c342b2 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html

[17/51] [partial] hbase-site git commit: Published site at .

2018-02-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
index 463f4fa..65795ae 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
@@ -488,15 +488,15 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 static Filter
-SingleColumnValueExcludeFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnPrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ValueFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnCountGetFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-FamilyFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+RowFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
@@ -506,63 +506,63 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 static Filter
-ColumnPrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+FirstKeyOnlyFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-PageFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+TimestampsFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-RowFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ValueFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ColumnRangeFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+KeyOnlyFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ColumnCountGetFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+FamilyFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-MultipleColumnPrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+QualifierFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ColumnPaginationFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnRangeFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or 

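createFilterFromArguments, listed repeatedly above, is the static factory the HBase shell uses to build a filter from parsed byte[] arguments. A sketch of the shape such a factory takes, delegating to the real ColumnPrefixFilter; the wrapper class is hypothetical:

import java.util.ArrayList;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.Filter;

final class FilterFactorySketch {
  public static Filter createFilterFromArguments(ArrayList<byte[]> filterArguments) {
    if (filterArguments.size() != 1) {
      throw new IllegalArgumentException("Expected exactly one argument");
    }
    return new ColumnPrefixFilter(filterArguments.get(0)); // prefix parsed by the shell
  }
}
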
[17/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
index 77fb9b5..c4e8c8b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
@@ -165,520 +165,519 @@
 157E env = checkAndLoadInstance(implClass, priority, conf);
 158if (env != null) {
 159  this.coprocEnvironments.add(env);
-160  LOG.info(
-161  "System coprocessor " + className + " was loaded " + "successfully with priority (" + priority + ").");
-162  ++priority;
-163}
-164  } catch (Throwable t) {
-165// We always abort if system coprocessors cannot be loaded
-166abortServer(className, t);
-167  }
-168}
-169  }
-170
-171  /**
-172   * Load a coprocessor implementation into the host
-173   * @param path path to implementation jar
-174   * @param className the main class name
-175   * @param priority chaining priority
-176   * @param conf configuration for coprocessor
-177   * @throws java.io.IOException Exception
-178   */
-179  public E load(Path path, String className, int priority,
-180  Configuration conf) throws IOException {
-181String[] includedClassPrefixes = null;
-182if (conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null){
-183  String prefixes = conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY);
-184  includedClassPrefixes = prefixes.split(";");
-185}
-186return load(path, className, priority, conf, includedClassPrefixes);
-187  }
-188
-189  /**
-190   * Load a coprocessor implementation into the host
-191   * @param path path to implementation jar
-192   * @param className the main class name
-193   * @param priority chaining priority
-194   * @param conf configuration for coprocessor
-195   * @param includedClassPrefixes class name prefixes to include
-196   * @throws java.io.IOException Exception
-197   */
-198  public E load(Path path, String className, int priority,
-199  Configuration conf, String[] includedClassPrefixes) throws IOException {
-200Class implClass;
-201LOG.debug("Loading coprocessor class " + className + " with path " +
-202path + " and priority " + priority);
-203
-204ClassLoader cl = null;
-205if (path == null) {
-206  try {
-207implClass = getClass().getClassLoader().loadClass(className);
-208  } catch (ClassNotFoundException e) {
-209throw new IOException("No jar path specified for " + className);
-210  }
-211} else {
-212  cl = CoprocessorClassLoader.getClassLoader(
-213path, getClass().getClassLoader(), pathPrefix, conf);
-214  try {
-215implClass = ((CoprocessorClassLoader)cl).loadClass(className, includedClassPrefixes);
-216  } catch (ClassNotFoundException e) {
-217throw new IOException("Cannot load external coprocessor class " + className, e);
-218  }
-219}
-220
-221//load custom code for coprocessor
-222Thread currentThread = Thread.currentThread();
-223ClassLoader hostClassLoader = currentThread.getContextClassLoader();
-224try{
-225  // switch temporarily to the thread classloader for custom CP
-226  currentThread.setContextClassLoader(cl);
-227  E cpInstance = checkAndLoadInstance(implClass, priority, conf);
-228  return cpInstance;
-229} finally {
-230  // restore the fresh (host) classloader
-231  currentThread.setContextClassLoader(hostClassLoader);
-232}
-233  }
-234
-235  @VisibleForTesting
-236  public void load(Class implClass, int priority, Configuration conf)
-237  throws IOException {
-238E env = checkAndLoadInstance(implClass, priority, conf);
-239coprocEnvironments.add(env);
-240  }
-241
-242  /**
-243   * @param implClass Implementation class
-244   * @param priority priority
-245   * @param conf configuration
-246   * @throws java.io.IOException Exception
-247   */
-248  public E checkAndLoadInstance(Class implClass, int priority, Configuration conf)
-249  throws IOException {
-250// create the instance
-251C impl;
-252try {
-253  impl = checkAndGetInstance(implClass);
-254  if (impl == null) {
-255LOG.error("Cannot load coprocessor " + implClass.getSimpleName());
-256return null;
-257  }
-258} catch (InstantiationException|IllegalAccessException e) {
-259  throw new IOException(e);
-260}
-261// cre

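The load path above swaps the thread's context classloader to the coprocessor's own loader and restores the host loader in finally. That swap-and-restore is a reusable pattern; a sketch with a hypothetical helper:

import java.util.function.Supplier;

final class ClassLoaderSwap {
  // Runs action with the plugin's classloader as the thread context loader,
  // restoring the host loader afterwards even if action throws.
  static <T> T withContextClassLoader(ClassLoader pluginLoader, Supplier<T> action) {
    Thread current = Thread.currentThread();
    ClassLoader host = current.getContextClassLoader();
    try {
      current.setContextClassLoader(pluginLoader);
      return action.get();
    } finally {
      current.setContextClassLoader(host);
    }
  }
}
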
[17/51] [partial] hbase-site git commit: Published site at .

2018-02-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
index b7c24d7..eecd2f9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
@@ -44,792 +44,792 @@
 036import java.util.List;
 037import java.util.Map;
 038import java.util.NavigableSet;
-039import java.util.Objects;
-040import java.util.PriorityQueue;
-041import java.util.Set;
-042import java.util.concurrent.ArrayBlockingQueue;
-043import java.util.concurrent.BlockingQueue;
-044import java.util.concurrent.ConcurrentHashMap;
-045import java.util.concurrent.ConcurrentMap;
-046import java.util.concurrent.ConcurrentSkipListSet;
-047import java.util.concurrent.Executors;
-048import java.util.concurrent.ScheduledExecutorService;
-049import java.util.concurrent.TimeUnit;
-050import java.util.concurrent.atomic.AtomicInteger;
-051import java.util.concurrent.atomic.AtomicLong;
-052import java.util.concurrent.atomic.LongAdder;
-053import java.util.concurrent.locks.Lock;
-054import java.util.concurrent.locks.ReentrantLock;
-055import java.util.concurrent.locks.ReentrantReadWriteLock;
-056import org.apache.hadoop.conf.Configuration;
-057import org.apache.hadoop.hbase.HBaseConfiguration;
-058import org.apache.hadoop.hbase.io.HeapSize;
-059import org.apache.hadoop.hbase.io.hfile.BlockCache;
-060import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-061import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-062import org.apache.hadoop.hbase.io.hfile.BlockPriority;
-063import org.apache.hadoop.hbase.io.hfile.BlockType;
-064import org.apache.hadoop.hbase.io.hfile.CacheStats;
-065import org.apache.hadoop.hbase.io.hfile.Cacheable;
-066import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-067import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-068import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-069import org.apache.hadoop.hbase.io.hfile.CachedBlock;
-070import org.apache.hadoop.hbase.io.hfile.HFileBlock;
-071import org.apache.hadoop.hbase.nio.ByteBuff;
-072import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import org.apache.hadoop.hbase.util.HasThread;
-074import org.apache.hadoop.hbase.util.IdReadWriteLock;
-075import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-076import org.apache.hadoop.util.StringUtils;
-077import org.apache.yetus.audience.InterfaceAudience;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080
-081import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-082import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-083import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-084
-085/**
-086 * BucketCache uses {@link BucketAllocator} to allocate/free blocks, and uses
-087 * BucketCache#ramCache and BucketCache#backingMap in order to
-088 * determine if a given element is in the cache. The bucket cache can use on-heap or
-089 * off-heap memory {@link ByteBufferIOEngine} or in a file {@link FileIOEngine} to
-090 * store/read the block data.
-091 *
-092 * <p>Eviction is via a similar algorithm as used in
-093 * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
-094 *
-095 * <p>BucketCache can be used as mainly a block cache (see
-096 * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
-097 * LruBlockCache to decrease CMS GC and heap fragmentation.
-098 *
-099 * <p>It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
-100 * blocks) to enlarge cache space via
-101 * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache#setVictimCache}
-102 */
-103@InterfaceAudience.Private
-104public class BucketCache implements BlockCache, HeapSize {
-105  private static final Logger LOG = LoggerFactory.getLogger(BucketCache.class);
-106
-107  /** Priority buckets config */
-108  static final String SINGLE_FACTOR_CONFIG_NAME = "hbase.bucketcache.single.factor";
-109  static final String MULTI_FACTOR_CONFIG_NAME = "hbase.bucketcache.multi.factor";
-110  static final String MEMORY_FACTOR_CONFIG_NAME = "hbase.bucketcache.memory.factor";
-111  static final String EXTRA_FREE_FACTOR_CONFIG_NAME = "hbase.bucketcache.extrafreefactor";
-112  static final String ACCEPT_FACTOR_CONFIG_NAME = "hbase.bucketcache.acceptfactor";
-113  static final String MIN_FACTOR_CONFIG_NAME = "hbase.bucketcache.minfactor";
-114
-115  /** Priority buckets */
-116  @VisibleForTesting
-117  static


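The priority-bucket factors declared above are ordinary Configuration keys. A sketch of setting them programmatically; the values are illustrative, not recommendations:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class BucketCacheFactors {
  static Configuration configured() {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.bucketcache.single.factor", 0.25f); // blocks read once
    conf.setFloat("hbase.bucketcache.multi.factor", 0.50f);  // blocks read repeatedly
    conf.setFloat("hbase.bucketcache.memory.factor", 0.25f); // in-memory families
    return conf;
  }
}
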
[17/51] [partial] hbase-site git commit: Published site at .

2018-01-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
index 00dcd66..d4ba227 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class TestAsyncProcess.MyConnectionImpl2
+static class TestAsyncProcess.MyConnectionImpl2
 extends TestAsyncProcess.MyConnectionImpl
 Returns our async process.
 
@@ -566,7 +566,7 @@ extends 
 
 hrl
-http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List hrl
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List hrl
 
 
 
@@ -575,7 +575,7 @@ extends 
 
 usedRegions
-final boolean[] usedRegions
+final boolean[] usedRegions
 
 
 
@@ -650,7 +650,7 @@ extends 
 
 MyConnectionImpl2
-protected MyConnectionImpl2(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List hrl)
+protected MyConnectionImpl2(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List hrl)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -672,7 +672,7 @@ extends 
 
 locateRegion
-public org.apache.hadoop.hbase.RegionLocations locateRegion(org.apache.hadoop.hbase.TableName tableName,
+public org.apache.hadoop.hbase.RegionLocations locateRegion(org.apache.hadoop.hbase.TableName tableName,
 byte[] row,
 
boolean useCache,
 boolean retry,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
index 6ee709e..ba35d6b 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class TestAsyncProcess.MyThreadPoolExecutor
+static class TestAsyncProcess.MyThreadPoolExecutor
 extends http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ThreadPoolExecutor.html?is-external=true";
 title="class or interface in java.util.concurrent">ThreadPoolExecutor
 
 
@@ -229,7 +229,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/T
 
 
 MyThreadPoolExecutor
-public MyThreadPoolExecutor(int coreThreads,
+public MyThreadPoolExecutor(int coreThreads,
 int maxThreads,
 long keepAliveTime,
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true";
 title="class or interface in java.util.concurrent">TimeUnit timeunit,
@@ -250,7 +250,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/T
 
 
 submit
-public http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">Future submit(http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true";
 title="class or interface in java.lang">Runnable runnable)
+public http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Future.html?is-external=true";
 title="class or interface in java.util.concurrent">Future submit(http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true";
 title="class or interface in java.lang">Runnable runnable)
 
 Specified by:
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true#submit-java.lang.Runnable-";
 title="class or interface in java.util.concurrent">submit in 
interface http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.

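MyThreadPoolExecutor above overrides ThreadPoolExecutor.submit(Runnable) for the test's bookkeeping. A sketch of that override pattern, counting submissions instead:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

final class CountingExecutor extends ThreadPoolExecutor {
  final AtomicInteger submitted = new AtomicInteger();

  CountingExecutor(int core, int max, long keepAlive, TimeUnit unit,
      BlockingQueue<Runnable> queue) {
    super(core, max, keepAlive, unit, queue);
  }

  @Override
  public Future<?> submit(Runnable task) {
    submitted.incrementAndGet(); // observe every submission
    return super.submit(task);
  }
}
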
[17/51] [partial] hbase-site git commit: Published site at .

2018-01-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.BackupImage.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.BackupImage.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.BackupImage.html
index 7509dcf..ec2aa41 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.BackupImage.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.BackupImage.html
@@ -64,152 +64,152 @@
 056 */
 057@InterfaceAudience.Private
 058public class BackupManifest {
-059
-060  private static final Logger LOG = LoggerFactory.getLogger(BackupManifest.class);
-061
-062  // manifest file name
-063  public static final String MANIFEST_FILE_NAME = ".backup.manifest";
-064
-065  /**
-066   * Backup image, the dependency graph is made up by series of backup images BackupImage contains
-067   * all the relevant information to restore the backup and is used during restore operation
-068   */
-069
-070  public static class BackupImage implements Comparable {
+059  private static final Logger LOG = LoggerFactory.getLogger(BackupManifest.class);
+060
+061  // manifest file name
+062  public static final String MANIFEST_FILE_NAME = ".backup.manifest";
+063
+064  /**
+065   * Backup image, the dependency graph is made up by series of backup images BackupImage contains
+066   * all the relevant information to restore the backup and is used during restore operation
+067   */
+068  public static class BackupImage implements Comparable {
+069static class Builder {
+070  BackupImage image;
 071
-072static class Builder {
-073  BackupImage image;
-074
-075  Builder() {
-076image = new BackupImage();
-077  }
-078
-079  Builder withBackupId(String backupId) {
-080image.setBackupId(backupId);
-081return this;
-082  }
-083
-084  Builder withType(BackupType type) {
-085image.setType(type);
-086return this;
-087  }
-088
-089  Builder withRootDir(String rootDir) {
-090image.setRootDir(rootDir);
-091return this;
-092  }
-093
-094  Builder withTableList(List tableList) {
-095image.setTableList(tableList);
-096return this;
-097  }
-098
-099  Builder withStartTime(long startTime) {
-100image.setStartTs(startTime);
-101return this;
-102  }
-103
-104  Builder withCompleteTime(long completeTime) {
-105image.setCompleteTs(completeTime);
-106return this;
-107  }
-108
-109  BackupImage build() {
-110return image;
-111  }
-112
-113}
-114
-115private String backupId;
-116private BackupType type;
-117private String rootDir;
-118private List tableList;
-119private long startTs;
-120private long completeTs;
-121private ArrayList ancestors;
-122private HashMap> incrTimeRanges;
-123
-124static Builder newBuilder() {
-125  return new Builder();
-126}
-127
-128public BackupImage() {
-129  super();
-130}
-131
-132private BackupImage(String backupId, BackupType type, String rootDir,
-133List tableList, long startTs, long completeTs) {
-134  this.backupId = backupId;
-135  this.type = type;
-136  this.rootDir = rootDir;
-137  this.tableList = tableList;
-138  this.startTs = startTs;
-139  this.completeTs = completeTs;
-140}
-141
-142static BackupImage fromProto(BackupProtos.BackupImage im) {
-143  String backupId = im.getBackupId();
-144  String rootDir = im.getBackupRootDir();
-145  long startTs = im.getStartTs();
-146  long completeTs = im.getCompleteTs();
-147  List tableListList = im.getTableListList();
-148  List tableList = new ArrayList();
-149  for (HBaseProtos.TableName tn : tableListList) {
-150tableList.add(ProtobufUtil.toTableName(tn));
-151  }
-152
-153  List ancestorList = im.getAncestorsList();
-154
-155  BackupType type =
-156  im.getBackupType() == BackupProtos.BackupType.FULL ? BackupType.FULL
-157  : BackupType.INCREMENTAL;
-158
-159  BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
-160  for (BackupProtos.BackupImage img : ancestorList) {
-161image.addAncestor(fromProto(img));
-162  }
-163  image.setIncrTimeRanges(loadIncrementalTimestampMap(im));
-164  return image;
-165}
-166
-167BackupProtos.BackupImage toProto() {
-168  BackupProtos.BackupImage.Builder builder = BackupProtos.BackupImage.newBuilder();
-169  builder.setBack

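BackupImage.Builder above is a plain fluent builder: each withX setter mutates a held instance and returns this, and build() releases it. A sketch of the same shape with illustrative fields:

final class Image {
  private String id;
  private long startTs;

  static final class Builder {
    private final Image image = new Image();

    Builder withId(String id) { image.id = id; return this; }       // each setter returns this
    Builder withStartTime(long ts) { image.startTs = ts; return this; }
    Image build() { return image; }
  }

  public static void main(String[] args) {
    Image img = new Builder().withId("backup_001").withStartTime(1516000000L).build();
    System.out.println(img.id + " @ " + img.startTs);
  }
}
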
[17/51] [partial] hbase-site git commit: Published site at .

2018-01-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
index 5715df3..6152a81 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype – Reactor 
Dependency Convergence
 
@@ -865,7 +865,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-25
+  Last Published: 
2018-01-26
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
index 20a7c7f..3864fd9 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype – 
Dependency Information
 
@@ -147,7 +147,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-25
+  Last Published: 
2018-01-26
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
index 0be17a8..0812f5d 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype – Project 
Dependency Management
 
@@ -810,7 +810,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-25
+  Last Published: 
2018-01-26
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
index 6818c18..09df4b1 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype – 
About
 
@@ -119,7 +119,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-25
+  Last Published: 
2018-01-26
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html
index c66f1fc..bc76784 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype – CI 
Management
 
@@ -126,7 +126,7 @@
  

[17/51] [partial] hbase-site git commit: Published site at .

2018-01-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
index 1e5e269..eecc720 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/assignment/RegionStates.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RegionStates
+public class RegionStates
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 RegionStates contains a set of Maps that describes the in-memory state of the AM, with
 the regions available in the system, the region in transition, the offline regions and
@@ -513,7 +513,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -522,7 +522,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 STATES_EXPECTED_ON_OPEN
-protected static final RegionState.State[] STATES_EXPECTED_ON_OPEN
+protected static final RegionState.State[] STATES_EXPECTED_ON_OPEN
 
 
 
@@ -531,7 +531,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 STATES_EXPECTED_ON_CLOSE
-protected static final RegionState.State[] STATES_EXPECTED_ON_CLOSE
+protected static final RegionState.State[] STATES_EXPECTED_ON_CLOSE
 
 
 
@@ -540,7 +540,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 REGION_STATE_STAMP_COMPARATOR
-public static final RegionStates.RegionStateStampComparator
 REGION_STATE_STAMP_COMPARATOR
+public static final RegionStates.RegionStateStampComparator
 REGION_STATE_STAMP_COMPARATOR
 
 
 
@@ -549,7 +549,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionsMap
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMap 
regionsMap
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMap 
regionsMap
 RegionName -- i.e. RegionInfo.getRegionName() -- as bytes 
to RegionStates.RegionStateNode
 
 
@@ -559,7 +559,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionInTransition
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMap 
regionInTransition
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMap 
regionInTransition
 
 
 
@@ -568,7 +568,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionOffline
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMap 
regionOffline
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMap 
regionOffline
 Regions marked as offline on a read of hbase:meta. Unused 
or at least, once
  offlined, regions have no means of coming on line again. TODO.
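Read together, the fields in this page (regionsMap, regionInTransition, regionOffline) form the whole in-memory index: each is a ConcurrentSkipListMap keyed by the region name bytes. A minimal sketch of that shape, assuming Bytes.BYTES_COMPARATOR for the byte[] keys and using Object where the package-private RegionStateNode would sit:

import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged miniature of the RegionStates fields listed above. The value type
// is Object here only because the real RegionStateNode is not public API.
class RegionStatesSketch {
  // every region known to the assignment manager
  final ConcurrentSkipListMap<byte[], Object> regionsMap =
      new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
  // only the regions currently in transition (RIT)
  final ConcurrentSkipListMap<byte[], Object> regionInTransition =
      new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
  // regions seen as offline on a read of hbase:meta
  final ConcurrentSkipListMap<byte[], Object> regionOffline =
      new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
}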
 
@@ -579,7 +579,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionFailedOpen
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMap
 regionFailedOpen
+private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentSkipListMap.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ConcurrentSkipListMap
 regionFailedOpen
 
 
 
@@ -588,7 +588,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 serverMap
-private final http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentHashMap.html?is-external

[17/51] [partial] hbase-site git commit: Published site at .

2018-01-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.GoodRsExecutor.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.GoodRsExecutor.html
 
b/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.GoodRsExecutor.html
index d57d7e9..8cf5dfe 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.GoodRsExecutor.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.GoodRsExecutor.html
@@ -126,7 +126,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class TestAssignmentManager.GoodRsExecutor
+private class TestAssignmentManager.GoodRsExecutor
 extends TestAssignmentManager.NoopRsExecutor
 
 
@@ -210,7 +210,7 @@ extends 
 
 GoodRsExecutor
-private GoodRsExecutor()
+private GoodRsExecutor()
 
 
 
@@ -227,7 +227,7 @@ extends 
 
 execOpenRegion
-protected org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState execOpenRegion(org.apache.hadoop.hbase.ServerName server,
+protected org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState execOpenRegion(org.apache.hadoop.hbase.ServerName server,

  
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo openReq)

   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
@@ -244,7 +244,7 @@ extends 
 
 execCloseRegion
-protected org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse execCloseRegion(org.apache.hadoop.hbase.ServerName server,
+protected org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse execCloseRegion(org.apache.hadoop.hbase.ServerName server,

 byte[] regionName)

  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangOnCloseThenRSCrashExecutor.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangOnCloseThenRSCrashExecutor.html
 
b/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangOnCloseThenRSCrashExecutor.html
index 00277d6..043599d 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangOnCloseThenRSCrashExecutor.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangOnCloseThenRSCrashExecutor.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class TestAssignmentManager.HangOnCloseThenRSCrashExecutor
+private class TestAssignmentManager.HangOnCloseThenRSCrashExecutor
 extends TestAssignmentManager.GoodRsExecutor
 
 
@@ -236,7 +236,7 @@ extends 
 
 TYPES_OF_FAILURE
-public static final int TYPES_OF_FAILURE
+public static final int TYPES_OF_FAILURE
 
 See Also:
 Constant
 Field Values
@@ -249,7 +249,7 @@ extends 
 
 invocations
-private int invocations
+private int invocations
 
 
 
@@ -266,7 +266,7 @@ extends 
 
 HangOnCloseThenRSCrashExecutor
-private HangOnCloseThenRSCrashExecutor()
+private HangOnCloseThenRSCrashExecutor()
 
 
 
@@ -283,7 +283,7 @@ extends 
 
 execCloseRegion
-protected org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse execCloseRegion(org.apache.hadoop.hbase.ServerName server,
+protected org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse execCloseRegion(org.apache.hadoop.hbase.ServerName server,

 byte[] regionName)

  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.HangThenRSCrashExecutor.html
---
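The executor names in this message hint at the test's pattern: a well-behaved base executor answers open/close RPCs, and failure-injecting subclasses override one call and count invocations. A rough, hypothetical rendering of that pattern (the real classes live in TestAssignmentManager and build protobuf responses):

import java.io.IOException;

// Hypothetical stand-ins for the mock region-server executors above: the
// base always succeeds, the subclass fails its first few close calls.
class GoodRsExecutorSketch {
  String execCloseRegion(String server, byte[] regionName) throws IOException {
    return "CLOSED"; // the real executor returns a CloseRegionResponse proto
  }
}

class FailFirstClosesExecutorSketch extends GoodRsExecutorSketch {
  static final int TYPES_OF_FAILURE = 6; // illustrative value
  private int invocations;

  @Override
  String execCloseRegion(String server, byte[] regionName) throws IOException {
    if (invocations++ < TYPES_OF_FAILURE) {
      throw new IOException("injected close failure #" + invocations);
    }
    return super.execCloseRegion(server, regionName);
  }
}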

[17/51] [partial] hbase-site git commit: Published site at .

2018-01-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.SleepAndFailFirstTime.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.SleepAndFailFirstTime.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.SleepAndFailFirstTime.html
deleted file mode 100644
index da314cd..000
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.SleepAndFailFirstTime.html
+++ /dev/null
@@ -1,534 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-
-
-
-TestHCM.SleepAndFailFirstTime (Apache HBase 3.0.0-SNAPSHOT Test 
API)
-
-
-
-
-
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev Class
-Next Class
-
-
-Frames
-No Frames
-
-
-All Classes
-
-
-
-
-
-
-
-Summary: 
-Nested | 
-Field | 
-Constr | 
-Method
-
-
-Detail: 
-Field | 
-Constr | 
-Method
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.client
-Class 
TestHCM.SleepAndFailFirstTime
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.client.TestHCM.SleepAndFailFirstTime
-
-
-
-
-
-
-
-All Implemented Interfaces:
-org.apache.hadoop.hbase.Coprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionObserver
-
-
-Enclosing class:
-TestHCM
-
-
-
-public static class TestHCM.SleepAndFailFirstTime
-extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
-implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionObserver
-This coprocessor sleeps 20 seconds. The first call fails; the 
second time, it works.
-
-
-
-
-
-
-
-
-
-
-
-Nested Class Summary
-
-
-
-
-Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.Coprocessor
-org.apache.hadoop.hbase.Coprocessor.State
-
-
-
-
-
-Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.coprocessor.RegionObserver
-org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields 
-
-Modifier and Type
-Field and Description
-
-
-(package private) static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">AtomicLong
-ct 
-
-
-(package private) static long
-DEFAULT_SLEEP_TIME 
-
-
-(package private) static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
-SLEEP_TIME_CONF_KEY 
-
-
-(package private) static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">AtomicLong
-sleepTime 
-
-
-
-
-
-
-Fields inherited from 
interface org.apache.hadoop.hbase.Coprocessor
-PRIORITY_HIGHEST, PRIORITY_LOWEST, PRIORITY_SYSTEM, PRIORITY_USER, 
VERSION
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors 
-
-Constructor and Description
-
-
-SleepAndFailFirstTime() 
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All Methods Instance Methods Concrete Methods 
-
-Modifier and Type
-Method and Description
-
-
-http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in 
java.util">Optional
-getRegionObserver() 
-
-
-void
-postOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext c) 
-
-
-void
-preDelete(org.apache.hadoop.hbase.coprocessor.ObserverContext e,
- org.apache.hadoop.hbase.client.Delete delete,
- org.apache.hadoop.hbase.wal.WALEdit edit,
- 
org.apache.hadoop.hbase.client.Durability durability) 
-
-
-void
-preGetOp(org.apache.hadoop.hbase.coprocessor.ObserverContext
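The class deleted above is still a useful shape to know: a fault-injection coprocessor that sleeps on every read and fails only the very first call. A minimal sketch against the HBase 2.x coprocessor API, assuming a fixed 20-second sleep and an IOException as the injected failure:

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

// Sketch of a sleep-then-fail-once coprocessor like the one summarized above.
public class SleepAndFailFirstTimeSketch implements RegionCoprocessor, RegionObserver {
  static final AtomicLong ct = new AtomicLong(0);
  static final long DEFAULT_SLEEP_TIME = 20_000; // 20 seconds, per the Javadoc

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e,
      Get get, List<Cell> results) throws IOException {
    try {
      Thread.sleep(DEFAULT_SLEEP_TIME); // every call pays the sleep
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
    }
    if (ct.incrementAndGet() == 1) { // only the first call fails
      throw new IOException("first call always fails");
    }
  }
}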

[17/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git 
a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
index b39e25e..a34364c 100644
--- a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -2318,1804 +2318,1803 @@
 2310Configuration confForWAL = new 
Configuration(conf);
 2311confForWAL.set(HConstants.HBASE_DIR, 
rootDir.toString());
 2312return (new WALFactory(confForWAL,
-2313
Collections.singletonList(new MetricsWAL()),
-2314"hregion-" + 
RandomStringUtils.randomNumeric(8))).
-2315
getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
-2316  }
-2317
-2318  /**
-2319   * Create a region with its own WAL. 
Be sure to call
-2320   * {@link 
HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
-2321   */
-2322  public static HRegion 
createRegionAndWAL(final RegionInfo info, final Path rootDir,
-2323  final Configuration conf, final 
TableDescriptor htd) throws IOException {
-2324return createRegionAndWAL(info, 
rootDir, conf, htd, true);
-2325  }
-2326
-2327  /**
-2328   * Create a region with its own WAL. 
Be sure to call
-2329   * {@link 
HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
-2330   */
-2331  public static HRegion 
createRegionAndWAL(final RegionInfo info, final Path rootDir,
-2332  final Configuration conf, final 
TableDescriptor htd, boolean initialize)
-2333  throws IOException {
-2334
ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, 
null);
-2335WAL wal = createWal(conf, rootDir, 
info);
-2336return HRegion.createHRegion(info, 
rootDir, conf, htd, wal, initialize);
-2337  }
-2338
-2339  /**
-2340   * Returns all rows from the 
hbase:meta table.
-2341   *
-2342   * @throws IOException When reading 
the rows fails.
-2343   */
-2344  public List 
getMetaTableRows() throws IOException {
-2345// TODO: Redo using 
MetaTableAccessor class
-2346Table t = 
getConnection().getTable(TableName.META_TABLE_NAME);
-2347List rows = new 
ArrayList<>();
-2348ResultScanner s = t.getScanner(new 
Scan());
-2349for (Result result : s) {
-2350  LOG.info("getMetaTableRows: row 
-> " +
-2351
Bytes.toStringBinary(result.getRow()));
-2352  rows.add(result.getRow());
-2353}
-2354s.close();
-2355t.close();
-2356return rows;
-2357  }
-2358
-2359  /**
-2360   * Returns all rows from the 
hbase:meta table for a given user table
-2361   *
-2362   * @throws IOException When reading 
the rows fails.
-2363   */
-2364  public List 
getMetaTableRows(TableName tableName) throws IOException {
-2365// TODO: Redo using 
MetaTableAccessor.
-2366Table t = 
getConnection().getTable(TableName.META_TABLE_NAME);
-2367List rows = new 
ArrayList<>();
-2368ResultScanner s = t.getScanner(new 
Scan());
-2369for (Result result : s) {
-2370  RegionInfo info = 
MetaTableAccessor.getRegionInfo(result);
-2371  if (info == null) {
-2372LOG.error("No region info for 
row " + Bytes.toString(result.getRow()));
-2373// TODO figure out what to do 
for this new hosed case.
-2374continue;
-2375  }
-2376
-2377  if 
(info.getTable().equals(tableName)) {
-2378LOG.info("getMetaTableRows: row 
-> " +
-2379
Bytes.toStringBinary(result.getRow()) + info);
-2380rows.add(result.getRow());
-2381  }
-2382}
-2383s.close();
-2384t.close();
-2385return rows;
-2386  }
-2387
-2388  /*
-2389   * Find any other region server which 
is different from the one identified by parameter
-2390   * @param rs
-2391   * @return another region server
-2392   */
-2393  public HRegionServer 
getOtherRegionServer(HRegionServer rs) {
-2394for 
(JVMClusterUtil.RegionServerThread rst :
-2395  
getMiniHBaseCluster().getRegionServerThreads()) {
-2396  if (!(rst.getRegionServer() == 
rs)) {
-2397return rst.getRegionServer();
-2398  }
-2399}
-2400return null;
-2401  }
-2402
-2403  /**
-2404   * Tool to get the reference to the 
region server object that holds the
-2405   * region of the specified user 
table.
-2406   * It first searches for the meta rows 
that contain the region of the
-2407   * specified table, then gets the 
index of that RS, and finally retrieves
-2408   * the RS's reference.
-2409   * @param tableName user table to 
lookup in hbase:meta
-2410   * @return region server that holds 
it, null if the row doesn't exist
-2411   * @throws IOException
-2412   * @throws InterruptedException
-2413   */
-2414  public HRegionServer 
getRSForF
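The getMetaTableRows helpers shown in this hunk are plain scans of hbase:meta that collect row keys. A hedged usage sketch, assuming a mini cluster and a table named "myTable":

import java.util.List;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative driver for the helpers above: dump every hbase:meta row key,
// then count only the rows that belong to one user table.
public class MetaRowsExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      for (byte[] row : util.getMetaTableRows()) {
        System.out.println(Bytes.toStringBinary(row)); // all catalog rows
      }
      List<byte[]> mine = util.getMetaTableRows(TableName.valueOf("myTable"));
      System.out.println("rows for myTable: " + mine.size());
    } finally {
      util.shutdownMiniCluster();
    }
  }
}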

[17/51] [partial] hbase-site git commit: Published site at .

2018-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
index f06f7be..14bddf4 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionCoprocessorEnvironment.html
@@ -1097,11 +1097,11 @@
 
 
 
-(package private) void
+private void
 AccessController.initialize(RegionCoprocessorEnvironment e) 
 
 
-(package private) AuthResult
+private AuthResult
 AccessController.permissionGranted(AccessController.OpType opType,
  User user,
  RegionCoprocessorEnvironment e,
@@ -1112,7 +1112,7 @@
 
 
 
-(package private) AuthResult
+private AuthResult
 AccessController.permissionGranted(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String request,
  User user,
  Permission.Action permRequest,
@@ -1123,7 +1123,7 @@
 
 
 
-(package private) void
+private void
 AccessController.updateACL(RegionCoprocessorEnvironment e,
  http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapList> familyMap)
 Writes all table ACLs for the tables in the given Map up 
into ZooKeeper

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionServerCoprocessorEnvironment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionServerCoprocessorEnvironment.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionServerCoprocessorEnvironment.html
index 916e90e..d634c7e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionServerCoprocessorEnvironment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionServerCoprocessorEnvironment.html
@@ -131,6 +131,12 @@
 
 
 default void
+RegionServerObserver.postExecuteProcedures(ObserverContext ctx)
+This will be called after executing procedures
+
+
+
+default void
 RegionServerObserver.postReplicateLogEntries(ObserverContext ctx)
 Deprecated. 
 As of release 2.0.0 with 
out any replacement. This is maintained for internal
@@ -138,18 +144,24 @@
 
 
 
-
+
 default void
 RegionServerObserver.postRollWALWriterRequest(ObserverContext ctx)
 This will be called after executing user request to roll a 
region server WAL.
 
 
-
+
 default void
 RegionServerObserver.preClearCompactionQueues(ObserverContext ctx)
 This will be called before clearing compaction queues
 
 
+
+default void
+RegionServerObserver.preExecuteProcedures(ObserverContext ctx)
+This will be called before executing procedures
+
+
 
 default void
 RegionServerObserver.preReplicateLogEntries(ObserverContext ctx)
@@ -228,14 +240,18 @@
 
 
 void
-AccessController.preReplicateLogEntries(ObserverContext ctx) 
+AccessController.preExecuteProcedures(ObserverContext ctx) 
 
 
 void
-AccessController.preRollWALWriterRequest(ObserverContext ctx) 
+AccessController.preReplicateLogEntries(ObserverContext ctx) 
 
 
 void
+AccessController.preRollWALWriterRequest(ObserverContext ctx) 
+
+
+void
 AccessController.preStopRegionServer(ObserverContext ctx) 
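The new pre/postExecuteProcedures hooks listed above bracket the region server's procedure execution. A minimal observer wiring them up might look like the following sketch (the comments describe intent only; body logic is up to the implementer):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;

// Sketch of an observer implementing the two new hooks from the table above.
public class ProcedureAuditObserverSketch
    implements RegionServerCoprocessor, RegionServerObserver {
  @Override
  public Optional<RegionServerObserver> getRegionServerObserver() {
    return Optional.of(this);
  }

  @Override
  public void preExecuteProcedures(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
      throws IOException {
    // runs before the region server executes a batch of procedures
  }

  @Override
  public void postExecuteProcedures(ObserverContext<RegionServerCoprocessorEnvironment> ctx)
      throws IOException {
    // runs after the batch completes
  }
}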
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
index f7219bb..a07a830 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
@@ -112,24 +112,18 @@
  
 
 
-org.apache.hadoop.hbase.replication
-
-Multi Cluster Replication
-
-
-
 org.apache.hadoop.hbase

[17/51] [partial] hbase-site git commit: Published site at .

2018-01-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
index 0e397dc..e3f0e57 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MasterCoprocessorHost
+public class MasterCoprocessorHost
 extends CoprocessorHost
 Provides the coprocessor framework and environment for 
master oriented
  operations.  HMaster interacts with the 
loaded coprocessors
@@ -377,7 +377,7 @@ extends 
 void
-postGetClusterStatus(ClusterStatus status) 
+postGetClusterMetrics(ClusterMetrics status) 
 
 
 void
@@ -680,7 +680,7 @@ extends 
 void
-preGetClusterStatus() 
+preGetClusterMetrics() 
 
 
 void
@@ -956,7 +956,7 @@ extends 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -965,7 +965,7 @@ extends 
 
 masterServices
-private MasterServices masterServices
+private MasterServices masterServices
 
 
 
@@ -974,7 +974,7 @@ extends 
 
 masterObserverGetter
-private CoprocessorHost.ObserverGetter 
masterObserverGetter
+private CoprocessorHost.ObserverGetter 
masterObserverGetter
 
 
 
@@ -991,7 +991,7 @@ extends 
 
 MasterCoprocessorHost
-public MasterCoprocessorHost(MasterServices services,
+public MasterCoprocessorHost(MasterServices services,
  
org.apache.hadoop.conf.Configuration conf)
 
 
@@ -1009,7 +1009,7 @@ extends 
 
 createEnvironment
-public MasterCoprocessorHost.MasterEnvironment createEnvironment(MasterCoprocessor instance,
+public MasterCoprocessorHost.MasterEnvironment createEnvironment(MasterCoprocessor instance,
  
int priority,
  int seq,
  
org.apache.hadoop.conf.Configuration conf)
@@ -1027,7 +1027,7 @@ extends 
 
 checkAndGetInstance
-public MasterCoprocessor checkAndGetInstance(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class implClass)
+public MasterCoprocessor checkAndGetInstance(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class implClass)
   throws http://docs.oracle.com/javase/8/docs/api/java/lang/InstantiationException.html?is-external=true";
 title="class or interface in java.lang">InstantiationException,
  http://docs.oracle.com/javase/8/docs/api/java/lang/IllegalAccessException.html?is-external=true";
 title="class or interface in java.lang">IllegalAccessException
 Description copied from 
class: CoprocessorHost
@@ -1051,7 +1051,7 @@ extends 
 
 preCreateNamespace
-public void preCreateNamespace(NamespaceDescriptor ns)
+public void preCreateNamespace(NamespaceDescriptor ns)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1065,7 +1065,7 @@ extends 
 
 postCreateNamespace
-public void postCreateNamespace(NamespaceDescriptor ns)
+public void postCreateNamespace(NamespaceDescriptor ns)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1079,7 +1079,7 @@ extends 
 
 preDeleteNamespace
-public void preDeleteNamespace(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String namespaceName)
+public void preDeleteNamespace(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String namespaceName)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Throws:
@@ -1093,7 +1093,7 @@ extends 
 
 postDeleteNamespace
-public void postDeleteNamespace(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String namespaceName)
+public void postDeleteNamespace(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String namespaceName)
   

[17/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134import 
org.apache.hadoop.hbase.sh

[17/51] [partial] hbase-site git commit: Published site at .

2018-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/class-use/PageFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/PageFilter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/PageFilter.html
index 0784895..e6e28cf 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/PageFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/PageFilter.html
@@ -163,6 +163,6 @@
 
 
 
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/class-use/ParseConstants.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/ParseConstants.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/ParseConstants.html
index 3b4ff42..af64b7e 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/ParseConstants.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/ParseConstants.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/class-use/ParseFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/ParseFilter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/ParseFilter.html
index 198cd94..e3de418 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/ParseFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/ParseFilter.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/class-use/PrefixFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/PrefixFilter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/PrefixFilter.html
index 30f0ad0..612ced7 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/PrefixFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/PrefixFilter.html
@@ -163,6 +163,6 @@
 
 
 
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/class-use/QualifierFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/QualifierFilter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/QualifierFilter.html
index 44b450b..4b1d07b 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/QualifierFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/QualifierFilter.html
@@ -163,6 +163,6 @@
 
 
 
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/class-use/RandomRowFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/RandomRowFilter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/RandomRowFilter.html
index ee5b73c..910c0dd 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/RandomRowFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/RandomRowFilter.html
@@ -163,6 +163,6 @@
 
 
 
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/filter/class-use/RegexStringComparator.Engine.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/R

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutator.MyBufferedMutator.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutator.MyBufferedMutator.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutator.MyBufferedMutator.html
index 0137768..413f827 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutator.MyBufferedMutator.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutator.MyBufferedMutator.html
@@ -152,7 +152,7 @@ extends 
org.apache.hadoop.hbase.client.BufferedMutatorImpl
 
 
 Fields inherited from 
interface org.apache.hadoop.hbase.client.BufferedMutator
-CLASSNAME_KEY
+CLASSNAME_KEY, MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS
 
 
 
@@ -187,7 +187,7 @@ extends 
org.apache.hadoop.hbase.client.BufferedMutatorImpl
 
 
 Methods inherited from 
class org.apache.hadoop.hbase.client.BufferedMutatorImpl
-close, flush, getAsyncProcess, getConfiguration, 
getCurrentWriteBufferSize, getName, getPool, getWriteBufferSize, mutate, 
mutate, setOperationTimeout, setRpcTimeout, size, validatePut
+close, flush, getAsyncProcess, getConfiguration, 
getCurrentWriteBufferSize, getExecutedWriteBufferPeriodicFlushes, getName, 
getPool, getWriteBufferPeriodicFlushTimeoutMs, 
getWriteBufferPeriodicFlushTimerTickMs, getWriteBufferSize, mutate, mutate, 
setOperationTimeout, setRpcTimeout, setWriteBufferPeriodicFlush, size, 
validatePut
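The expanded method list above shows the new periodic-flush knobs on BufferedMutator: buffered mutations are pushed out after a timeout even if the size threshold is never reached. A hedged usage sketch (table name and timeout values are arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative use of the periodic-flush API named in the listing above.
public class PeriodicFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t"))) {
      // flush anything older than 10s, checking every 1s (values illustrative)
      mutator.setWriteBufferPeriodicFlush(10_000, 1_000);
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      mutator.mutate(put); // reaches the server within ~10s even if alone
      mutator.disableWriteBufferPeriodicFlush(); // back to size-based flushing
    }
  }
}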
 
 
 
@@ -196,6 +196,13 @@ extends 
org.apache.hadoop.hbase.client.BufferedMutatorImpl
 Methods inherited from class java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--";
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--";
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#toString--";
 title="class or interface in java.lang">toString, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--";
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-";
 title="class or interface in java.lang">wait, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-";
 title="class or interface in java.lang">wait
 
+
+
+
+
+Methods inherited from 
interface org.apache.hadoop.hbase.client.BufferedMutator
+disableWriteBufferPeriodicFlush, setWriteBufferPeriodicFlush
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.MockExceptionListener.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.MockExceptionListener.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.MockExceptionListener.html
index 27a8d11..68a3d32 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.MockExceptionListener.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.MockExceptionListener.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class TestBufferedMutatorParams.MockExceptionListener
+private static class TestBufferedMutatorParams.MockExceptionListener
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements 
org.apache.hadoop.hbase.client.BufferedMutator.ExceptionListener
 Just to create an instance, this doesn't actually 
function.
@@ -191,7 +191,7 @@ implements 
org.apache.hadoop.hbase.client.BufferedMutator.ExceptionListener
 
 MockExceptionListener
-private MockExceptionListener()
+private MockExceptionListener()
 
 
 
@@ -208,7 +208,7 @@ implements 
org.apache.hadoop.hbas

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.html
index b2afa5e..567961d 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/NettyRpcServerRequestDecoder.html
@@ -100,10 +100,10 @@ var activeTableTab = "activeTableTab";
 http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
 
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerAdapter
+org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerAdapter
 
 
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAdapter
+org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter
 
 
 org.apache.hadoop.hbase.ipc.NettyRpcServerRequestDecoder
@@ -119,13 +119,13 @@ var activeTableTab = "activeTableTab";
 
 
 All Implemented Interfaces:
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler, 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandler
+org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler, 
org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandler
 
 
 
 @InterfaceAudience.Private
 class NettyRpcServerRequestDecoder
-extends 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAdapter
+extends 
org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter
 Decoder for rpc request.
 
 Since:
@@ -144,11 +144,11 @@ extends 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAda
 
 Nested Class Summary
 
-
+
 
 
-Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler
-org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler.Sharable
+Nested classes/interfaces inherited from 
interface org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler
+org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler.Sharable
 
 
 
@@ -165,7 +165,7 @@ extends 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAda
 Field and Description
 
 
-private 
org.apache.hadoop.hbase.shaded.io.netty.channel.group.ChannelGroup
+private 
org.apache.hbase.thirdparty.io.netty.channel.group.ChannelGroup
 allChannels 
 
 
@@ -191,7 +191,7 @@ extends 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAda
 Constructor and Description
 
 
-NettyRpcServerRequestDecoder(org.apache.hadoop.hbase.shaded.io.netty.channel.group.ChannelGroup allChannels,
+NettyRpcServerRequestDecoder(org.apache.hbase.thirdparty.io.netty.channel.group.ChannelGroup allChannels,
 MetricsHBaseServer metrics) 
 
 
@@ -211,20 +211,20 @@ extends 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAda
 
 
 void
-channelActive(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext ctx) 
+channelActive(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext ctx) 
 
 
 void
-channelInactive(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext ctx) 
+channelInactive(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext ctx) 
 
 
 void
-channelRead(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext ctx,
+channelRead(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext ctx,
http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object msg) 
 
 
 void
-exceptionCaught(org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext ctx,
+exceptionCaught(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext ctx,
http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable e) 
 
 
@@ -233,17 +233,17 @@ extends 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAda
 
 
 
-
+
 
 
-Methods inherited from 
class org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAdapter
+Methods inherited from 
class org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter
 channelReadComplete, channelRegistered, channelUnregistered, 
channelWritabilityChanged, userEventTriggered
 
 
-
+
 
 
-Methods inherited from 
class org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerAdapter
+Methods inherited from 
class org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerAdapter
 ensureNotSharable, handlerAdded, handlerRemoved, isSharable
 
 
@@ -254,10 +254,10 @@ extends 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInboundHandlerAda
 http://docs.oracle.com
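The whole hunk above is the mechanical rename of netty from the hbase.shaded relocation to hbase.thirdparty; the decoder itself stays a plain inbound handler. A generic sketch of that shape against stock netty (not HBase's relocated copy, and with the decode step left as a comment):

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

// Generic shape of an inbound decoder like NettyRpcServerRequestDecoder,
// written against vanilla netty rather than HBase's relocated classes.
public class RequestDecoderSketch extends ChannelInboundHandlerAdapter {
  @Override
  public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    // decode msg (a ByteBuf) into a request object, then pass it downstream
    ctx.fireChannelRead(msg);
  }

  @Override
  public void exceptionCaught(ChannelHandlerContext ctx, Throwable e) {
    ctx.close(); // drop the connection on decode failure
  }
}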

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/client/Delete.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Delete.html 
b/devapidocs/org/apache/hadoop/hbase/client/Delete.html
index 23003e1..3e28886 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Delete.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Delete.html
@@ -554,7 +554,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
   http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> familyMap)
 Construct the Delete with user defined data. NOTED:
  1) all cells in the familyMap must have the delete type.
- see Cell.DataType
+ see Cell.Type
 2) the row of each cell must be the same as the passed row.
 
 Parameters:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/client/Increment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/Increment.html 
b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
index 158e274..4642921 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/Increment.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/Increment.html
@@ -505,7 +505,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
  long ts,
  http://docs.oracle.com/javase/8/docs/api/java/util/NavigableMap.html?is-external=true";
 title="class or interface in java.util">NavigableMapList> familyMap)
 Construct the Increment with user defined data. NOTED:
- 1) all cells in the familyMap must have the DataType.Put
+ 1) all cells in the familyMap must have the Type.Put
 2) the row of each cell must be the same as the passed row.
 
 Parameters:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/client/MultiAction.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/MultiAction.html 
b/devapidocs/org/apache/hadoop/hbase/client/MultiAction.html
index b190086..2669a81 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/MultiAction.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/MultiAction.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public final class MultiAction
+public final class MultiAction
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 Container for Actions (i.e. Get, Delete, or Put), which are 
grouped by
  regionName. Intended to be used with AsyncProcess.
@@ -240,7 +240,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 actions
-protected http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapList> actions
+protected http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapList> actions
 
 
 
@@ -249,7 +249,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 nonceGroup
-private long nonceGroup
+private long nonceGroup
 
 
 
@@ -266,7 +266,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MultiAction
-public MultiAction()
+public MultiAction()
 
 
 
@@ -283,7 +283,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 size
-public int size()
+public int size()
 Get the total number of Actions
 
 Returns:
@@ -297,7 +297,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 add
-public void add(byte[] regionName,
+public void add(byte[] regionName,
 Action a)
 Add an Action to this container based on its regionName. 
If the regionName
  is wrong, the initial execution will fail, but will be automatically
@@ -315,7 +315,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 add
-public void add(byte[] regionName,
+public void add(byte[] regionName,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
index f8eace7..66b6656 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
@@ -27,2569 +27,2540 @@
 019 */
 020package org.apache.hadoop.hbase;
 021
-022import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
-023import static 
org.apache.hadoop.hbase.util.Bytes.len;
-024
-025import java.io.DataInput;
-026import java.io.DataOutput;
-027import java.io.IOException;
-028import java.io.OutputStream;
-029import java.nio.ByteBuffer;
-030import java.util.ArrayList;
-031import java.util.Arrays;
-032import java.util.HashMap;
-033import java.util.Iterator;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Optional;
-037
-038import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
-039import 
org.apache.hadoop.hbase.util.Bytes;
-040import 
org.apache.hadoop.hbase.util.ClassSize;
-041import 
org.apache.hadoop.io.RawComparator;
-042import 
org.apache.yetus.audience.InterfaceAudience;
-043import org.slf4j.Logger;
-044import org.slf4j.LoggerFactory;
-045
-046import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-047
-048/**
-049 * An HBase Key/Value. This is the 
fundamental HBase Type.
+022import static 
org.apache.hadoop.hbase.util.Bytes.len;
+023
+024import java.io.DataInput;
+025import java.io.DataOutput;
+026import java.io.IOException;
+027import java.io.OutputStream;
+028import java.nio.ByteBuffer;
+029import java.util.ArrayList;
+030import java.util.Arrays;
+031import java.util.HashMap;
+032import java.util.Iterator;
+033import java.util.List;
+034import java.util.Map;
+035import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
+036import 
org.apache.hadoop.hbase.util.Bytes;
+037import 
org.apache.hadoop.hbase.util.ClassSize;
+038import 
org.apache.hadoop.io.RawComparator;
+039import 
org.apache.yetus.audience.InterfaceAudience;
+040import org.slf4j.Logger;
+041import org.slf4j.LoggerFactory;
+042
+043import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+044
+045/**
+046 * An HBase Key/Value. This is the 
fundamental HBase Type.
+047 * <p>
+048 * HBase applications and users should use the Cell interface and avoid directly using KeyValue and
+049 * member functions not defined in Cell.
+050 * <p>
-051 * HBase applications and users should use the Cell interface and avoid directly using KeyValue and
-052 * member functions not defined in Cell.
-053 * <p>
-054 * If being used client-side, the primary methods to access individual fields are
-055 * {@link #getRowArray()}, {@link #getFamilyArray()}, {@link #getQualifierArray()},
-056 * {@link #getTimestamp()}, and {@link #getValueArray()}. These methods allocate new byte arrays
-057 * and return copies. Avoid their use server-side.
-058 * <p>
-059 * Instances of this class are immutable. They do not implement Comparable but Comparators are
-060 * provided. Comparators change with context, whether user table or a catalog table comparison. It's
-061 * critical you use the appropriate comparator. There are Comparators for normal HFiles, Meta's
-062 * HFiles, and bloom filter keys.
-063 * <p>
-064 * KeyValue wraps a byte array and takes offsets and lengths into passed array at where to start
-065 * interpreting the content as KeyValue. The KeyValue format inside a byte array is:
-066 * <keylength> <valuelength> <key> <value> Key is further
-067 * decomposed as: <rowlength> <row> <columnfamilylength>
-068 * <columnfamily> <columnqualifier>
-069 * <timestamp> <keytype> The rowlength maximum is
-070 * Short.MAX_SIZE, column family length maximum is Byte.MAX_SIZE, and
-071 * column qualifier + key length must be < Integer.MAX_SIZE. The column does not
-072 * contain the family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER}<br>
-073 * KeyValue can optionally contain Tags. When it contains tags, it is added in the byte array after
-074 * the value part. The format for this part is: <tagslength><tagsbytes>.
-075 * tagslength maximum is Short.MAX_SIZE. The tagsbytes
-076 * contain one or more tags whereas each tag is of the form
-077 * <taglength><tagtype><tagbytes>. tagtype is one byte
-078 * and taglength maximum is Short.MAX_SIZE and it includes 1 byte type
-079 * length and actual tag bytes
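The layout description above is concrete enough to decode by hand. A hedged sketch that writes and re-reads the leading fields of a serialized KeyValue, following the <keylength><valuelength><key><value> description (the real class has accessors for all of this, and the type code for Put is an assumption taken from KeyValue.Type):

import java.nio.ByteBuffer;

// Hand-rolled walk of the byte layout quoted above:
// <keylength:int><valuelength:int><key><value>, where key is
// <rowlength:short><row><familylength:byte><family><qualifier><timestamp:long><keytype:byte>.
public class KeyValueLayoutSketch {
  public static void main(String[] args) {
    byte[] row = {'k'};
    byte[] value = {'v'};
    int keyLength = 2 + row.length + 1 /* empty family */ + 8 + 1;
    ByteBuffer bb = ByteBuffer.allocate(4 + 4 + keyLength + value.length);
    bb.putInt(keyLength);                   // keylength
    bb.putInt(value.length);                // valuelength
    bb.putShort((short) row.length);        // rowlength (start of the key)
    bb.put(row);                            // row bytes
    bb.put((byte) 0);                       // columnfamilylength (no family/qualifier)
    bb.putLong(System.currentTimeMillis()); // timestamp
    bb.put((byte) 4);                       // keytype (4 = Put, per KeyValue.Type)
    bb.put(value);                          // value bytes
    bb.flip();
    System.out.printf("keylength=%d valuelength=%d rowlength=%d%n",
        bb.getInt(), bb.getInt(), bb.getShort());
  }
}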


[17/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.html 
b/devapidocs/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.html
index dfc3068..10e70a5 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames
@@ -321,7 +321,7 @@ implements 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/util/IterableUtils.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/IterableUtils.html 
b/devapidocs/org/apache/hadoop/hbase/util/IterableUtils.html
deleted file mode 100644
index a757d87..000
--- a/devapidocs/org/apache/hadoop/hbase/util/IterableUtils.html
+++ /dev/null
@@ -1,310 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-
-
-
-IterableUtils (Apache HBase 3.0.0-SNAPSHOT API)
-
-
-
-
-
-var methods = {"i0":9};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"]};
-var altColor = "altColor";
-var rowColor = "rowColor";
-var tableTab = "tableTab";
-var activeTableTab = "activeTableTab";
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev Class
-Next Class
-
-
-Frames
-No Frames
-
-
-All Classes
-
-
-
-
-
-
-
-Summary: 
-Nested | 
-Field | 
-Constr | 
-Method
-
-
-Detail: 
-Field | 
-Constr | 
-Method
-
-
-
-
-
-
-
-
-org.apache.hadoop.hbase.util
-Class IterableUtils
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.util.IterableUtils
-
-
-
-
-
-
-
-
-@InterfaceAudience.Private
-public class IterableUtils
-extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
-Utility methods for Iterable including null-safe 
handlers.
-
-
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields 
-
-Modifier and Type
-Field and Description
-
-
-private static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListObject>
-EMPTY_LIST 
-
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors 
-
-Constructor and Description
-
-
-IterableUtils() 
-
-
-
-
-
-
-
-
-
-Method Summary
-
-All Methods Static Methods Concrete Methods 
-
-Modifier and Type
-Method and Description
-
-
-static  http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in java.lang">Iterable
-nullSafe(http://docs.oracle.com/javase/8/docs/api/java/lang/Iterable.html?is-external=true";
 title="class or interface in 
java.lang">Iterable in) 
-
-
-
-
-
-
-Methods inherited from class java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--";
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--";
 title="class or interface in java.lang">notify, http://docs.oracle.com/javase/8/docs/api/java/lang
 /Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, http://d
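Since the page above documents a class that was deleted, here is a hypothetical re-creation of the null-safe helper it described, not the original source:

    import java.util.Collections;

    public final class IterableUtils {
      private IterableUtils() {} // static utility, no instances

      // Returns the input unchanged, or an empty Iterable when in is null.
      public static <T> Iterable<T> nullSafe(Iterable<T> in) {
        return in == null ? Collections.<T>emptyList() : in;
      }
    }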

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
index 6fecbc9..2accda0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyColumnFamilyFuture.html
@@ -34,4140 +34,4141 @@
 026import 
java.nio.charset.StandardCharsets;
 027import java.util.ArrayList;
 028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.EnumSet;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.LinkedList;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Set;
-037import java.util.concurrent.Callable;
-038import 
java.util.concurrent.ExecutionException;
-039import java.util.concurrent.Future;
-040import java.util.concurrent.TimeUnit;
-041import 
java.util.concurrent.TimeoutException;
-042import 
java.util.concurrent.atomic.AtomicInteger;
-043import 
java.util.concurrent.atomic.AtomicReference;
-044import java.util.regex.Pattern;
-045import java.util.stream.Collectors;
-046import java.util.stream.Stream;
-047import 
org.apache.hadoop.conf.Configuration;
-048import 
org.apache.hadoop.hbase.Abortable;
-049import 
org.apache.hadoop.hbase.CacheEvictionStats;
-050import 
org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import 
org.apache.hadoop.hbase.ClusterStatus;
-053import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-054import 
org.apache.hadoop.hbase.HBaseConfiguration;
-055import 
org.apache.hadoop.hbase.HConstants;
-056import 
org.apache.hadoop.hbase.HRegionInfo;
-057import 
org.apache.hadoop.hbase.HRegionLocation;
-058import 
org.apache.hadoop.hbase.HTableDescriptor;
-059import 
org.apache.hadoop.hbase.MasterNotRunningException;
-060import 
org.apache.hadoop.hbase.MetaTableAccessor;
-061import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-062import 
org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import 
org.apache.hadoop.hbase.NotServingRegionException;
-064import 
org.apache.hadoop.hbase.RegionLoad;
-065import 
org.apache.hadoop.hbase.RegionLocations;
-066import 
org.apache.hadoop.hbase.ServerName;
-067import 
org.apache.hadoop.hbase.TableExistsException;
-068import 
org.apache.hadoop.hbase.TableName;
-069import 
org.apache.hadoop.hbase.TableNotDisabledException;
-070import 
org.apache.hadoop.hbase.TableNotFoundException;
-071import 
org.apache.hadoop.hbase.UnknownRegionException;
-072import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-073import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-074import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-075import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-076import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-077import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-078import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-079import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-080import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-081import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-082import 
org.apache.hadoop.hbase.quotas.QuotaRetriever;
-083import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-084import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-085import 
org.apache.hadoop.hbase.replication.ReplicationException;
-086import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-087import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-088import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-090import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-091import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-092import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-093import 
org.apache.hadoop.hbase.util.Addressing;
-094import 
org.apache.hadoop.hbase.util.Bytes;
-095import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-096import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-097import 
org.apache.hadoop.hbase.util.Pair;
-098import 
org.apache.hadoop.ipc.RemoteException;
-099import 
org.apache.hadoop.util.StringUtils;
-100import 
org.apache.yetus.audience.InterfaceAudience;
-101import 
org.apache.yetus.audience.InterfaceStability;
-102import org.slf4j.Logger;
-103import org.slf4j.LoggerFactory;
-104
-105import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-106import 
org.apache.hadoop.hbase.shaded.com.google.protob

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/book.html
--
diff --git a/book.html b/book.html
index e96c1ac..5df6ac7 100644
--- a/book.html
+++ b/book.html
@@ -258,46 +258,47 @@
 155. Capacity Planning and Region Sizing
 156. Table Rename
 157. RegionServer Grouping
+158. Region Normalizer
 
 
 Building and Developing Apache HBase
 
-158. Getting Involved
-159. Apache HBase Repositories
-160. IDEs
-161. Building Apache HBase
-162. Releasing Apache HBase
-163. Voting on Release Candidates
-164. Generating the HBase Reference Guide
-165. Updating https://hbase.apache.org";>hbase.apache.org
-166. Tests
-167. Developer Guidelines
+159. Getting Involved
+160. Apache HBase Repositories
+161. IDEs
+162. Building Apache HBase
+163. Releasing Apache HBase
+164. Voting on Release Candidates
+165. Generating the HBase Reference Guide
+166. Updating https://hbase.apache.org";>hbase.apache.org
+167. Tests
+168. Developer Guidelines
 
 
 Unit Testing HBase Applications
 
-168. JUnit
-169. Mockito
-170. MRUnit
-171. 
Integration Testing with an HBase Mini-Cluster
+169. JUnit
+170. Mockito
+171. MRUnit
+172. 
Integration Testing with an HBase Mini-Cluster
 
 
 Protobuf in HBase
 
-172. Protobuf
+173. Protobuf
 
 
 ZooKeeper
 
-173. Using existing 
ZooKeeper ensemble
-174. SASL Authentication with ZooKeeper
+174. Using existing 
ZooKeeper ensemble
+175. SASL Authentication with ZooKeeper
 
 
 Community
 
-175. Decisions
-176. Community Roles
-177. Commit Message format
+176. Decisions
+177. Community Roles
+178. Commit Message format
 
 
 Appendix
@@ -307,7 +308,7 @@
 Appendix C: hbck In Depth
 Appendix D: Access Control Matrix
 Appendix E: Compression and Data Block Encoding In 
HBase
-178. Enable Data Block 
Encoding
+179. Enable Data Block 
Encoding
 Appendix F: SQL over HBase
 Appendix G: YCSB
 Appendix H: HFile format
@@ -316,8 +317,8 @@
 Appendix K: HBase and the Apache Software 
Foundation
 Appendix L: Apache HBase Orca
 Appendix M: Enabling Dapper-like Tracing in 
HBase
-179. Client Modifications
-180. Tracing from HBase Shell
+180. Client Modifications
+181. Tracing from HBase Shell
 Appendix N: 0.95 RPC Specification
 
 
@@ -2154,21 +2155,6 @@ Some configurations would only appear in source code; 
the only way to identify t
 
 
 
-
-
-hbase.fs.tmp.dir
-
-
-Description
-A staging directory in default file system (HDFS) for keeping temporary 
data.
-
-
-Default
-/user/${user.name}/hbase-staging
-
-
-
-
 
 
 hbase.cluster.distributed
@@ -2310,7 +2296,7 @@ Some configurations would only appear in source code; the 
only way to identify t
 
 
 Description
-How long a Procedure WAL stays will remain in the 
{hbase.rootdir}/oldWALs/masterProcedureWALs directory, after which it will be 
cleaned by a Master thread. The value is in milliseconds.
+How long a Procedure WAL will remain in the 
{hbase.rootdir}/MasterProcedureWALs directory, after which it will be cleaned 
by a Master thread. The value is in milliseconds.
 
 
 Default
@@ -2340,7 +2326,7 @@ Some configurations would only appear in source code; the 
only way to identify t
 
 
 Description
-Whether or not the Master listens to the Master web UI port 
(hbase.master.info.port) and redirects requests to the web UI server shared by 
the Master and RegionServer.
+Whether or not the Master listens to the Master web UI port 
(hbase.master.info.port) and redirects requests to the web UI server shared by 
the Master and RegionServer. This configuration only makes sense when the Master 
is serving Regions (not the default).
 
 
 Default
@@ -2349,6 +2335,21 @@ Some configurations would only appear in source code; 
the only way to identify t
 
 
 
+
+
+hbase.master.fileSplitTimeout
+
+
+Description
+Splitting a region, how long to wait on the file-splitting step before 
aborting the attempt. Default: 60. This setting used to be known as 
hbase.regionserver.fileSplitTimeout in hbase-1.x. Split is now run master-side, 
hence the rename. (If a 'hbase.regionserver.fileSplitTimeout' setting is found, 
it is used to prime the current 'hbase.master.fileSplitTimeout' Configuration.)
+
+
+Default
+60
+
+
+
+
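Where the file-splitting step times out on large stores, the value can be raised in hbase-site.xml or in code. A sketch, assuming the millisecond semantics described above (the value shown is illustrative, not the shipped default):

    Configuration conf = HBaseConfiguration.create();
    // Allow up to 10 minutes for the file-splitting step (illustrative value).
    conf.setLong("hbase.master.fileSplitTimeout", 600000L);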
 
 
 hbase.regionserver.port
@@ -2415,7 +2416,7 @@ Some configurations would only appear in source code; the 
only way to identify t
 
 
 Description
-Count of RPC Listener instances spun up on RegionServers. Same property is 
used by the Master for count of master handlers.
+Count of RPC Listener instances spun up on RegionServers. Same property is 
used by the Master for count of master handlers. Too many handlers can be 
counter-productive. Make it a multiple of CPU count. If mostly read-only, a 
handler count close to the CPU count does well. Start with twice the CPU count 
and tune from there.
 
 
 Default
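Translated into configuration terms, the sizing advice above might look like the following sketch; the multiplier is the suggested starting point, not a tuned value:

    int cpus = Runtime.getRuntime().availableProcessors();
    Configuration conf = HBaseConfiguration.create();
    // Start with twice the CPU count, per the guidance above, then tune.
    conf.setInt("hbase.regionserver.handler.count", 2 * cpus);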
@@ -4010,6 +4011,36 @@ Some configurations would only appear in source code; 
the only way to identify t
 
 
 
+
+
+hbase.coprocessor.master.classes
+
+
+Description
+A comma-separated list of 
org.apache.hadoop.hbase
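The (truncated) description covers master-side coprocessors loaded at startup. A hedged example of wiring one up in code, using AccessController as an illustrative MasterObserver; the class choice is an assumption, not part of the original text:

    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController"); // illustrative class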

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index ba40f22..546aa95 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Reactor Dependency Convergence
 
@@ -293,7 +293,7 @@
 319
 
 Number of unique artifacts (NOA):
-348
+349
 
 Number of version-conflicting artifacts (NOC):
 20
@@ -557,6 +557,176 @@
 org.apache.hbase:hbase-thrift:jar:3.0.0-SNAPSHOT+- org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|  \- org.apache.hadoop:hadoop-hdfs:jar:2.7.4:compile| \- (commons-lang:commons-lang:jar:2.6:compile
 - omitted for duplicate)+- org.apache.hbase:hbase-testing-util:jar:3.0.0-SNAPSHOT:test|  \- org.apache.hadoop:hadoop-hdfs:test-jar:tests:2.7.4:test| \- (commons-lang:commons-lang:jar:2.6:test
 - omitted for duplicate)+- org.apache.hadoop:hadoop-client:jar:2.7.4:compile|  +- org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.4:compile|  |  \- org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile|  | \- org.apache.hadoop:hadoop-yarn-client:jar:2.7.4:compile|  | 
\- (commons-lang:commons-lang:jar:2.6:compile - omitted for 
duplicate)|  \- org.apache.hadoop:hadoop-yarn-api:jar:2.7.4:compile| \- (commons-lang:commons-lang:jar:2.6:compile
 - omitted for duplicate)+- org.apache.hadoop:hadoop-mapreduce-client-core:jar:2.7.4:compile|  \- org.apache.hadoop:hadoop-yarn-common:jar:2.7.4:compile| \- (commons-lang:commons-lang:jar:2.6:compile
 - omitted for duplicate)+- org.apache.hadoop:hadoop-common:jar:2.7.4:compile|  \- commons-lang:commons-lang:jar:2.6:compile\- org.apache.hadoop:hadoop-minicluster:jar:2.7.4:test   +- org.apache.hadoop:hadoop-common:test-jar:tests:2.7.4:test   |  \- (commons-lang:commons-lang:jar:2.6:test
 - omitted for duplicate)   \- org.apache.hadoop:had
 oop-yarn-server-tests:test-jar:tests:2.7.4:test  +- org.apache.hadoop:hadoop-yarn-server-nodemanager:jar:2.7.4:test  |  \- (commons-lang:commons-lang:jar:2.6:test
 - omitted for duplicate)  \- org.apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.4:test \- (commons-lang:commons-lang:jar:2.6:test
 - omitted for duplicate)
 org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT\- org.apache.hadoop:hadoop-common:jar:2.7.4:compile   \- commons-lang:commons-lang:jar:2.6:compile
 
+commons-logging:commons-logging
+
+
+
+
+
+
+1.0.4
+
+
+org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT\- org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile   \- org.apache.hadoop:hadoop-common:jar:2.7.4:compile  \- commons-httpclient:commons-httpclient:jar:3.1:compile \- (commons-logging:commons-logging:jar:1.0.4:compile
 - omitted for conflict with 1.1.3)
+org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT\- org.apache.hadoop:hadoop-common:jar:2.7.4:compile   \- commons-httpclient:commons-httpclient:jar:3.1:compile  \- (commons-logging:commons-logging:jar:1.0.4:compile
 - omitted for conflict with 1.1.3)
+org.apache.hbase:hbase-client-project:jar:3.0.0-SNAPSHOT\- org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile   \- org.apache.hadoop:hadoop-common:jar:2.7.4:compile  \- commons-httpclient:commons-httpclient:jar:3.1:compile \- (commons-logging:commons-logging:jar:1.0.4:compile
 - omitted for conflict with 1.1.3)
+org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT\- org.apache.hadoop:hadoop-common:jar:2.7.4:compile   \- commons-httpclient:commons-httpclient:jar:3.1:compile  \- (commons-logging:commons-logging:jar:1.0.4:compile
 - omitted for conflict with 1.1.3)
+org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT\- org.apache.hadoop:hadoop-common:jar:2.7.4:compile   \- commons-httpclient:commons-httpclient:jar:3.1:compile  \- (commons-logging:commons-logging:jar:1.0.4:compile
 - omitted for conflict with 1.1.3)
+org.apache.hbase:hbase-endpoint:jar:3.0.0-SNAPSHOT\- org.apache.hadoop:hadoop-common:jar:2.7.4:compile   \- commons-httpclient:commons-httpclient:jar:3.1:compile  \- (commons-logging:commons-logging:jar:1.0.4:compile
 - omitted for conflict with 1.1.3)
+org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT\- org.apache.hadoop:hadoop-common:jar:2.7.4:compile   \- commons-httpclient:commons-httpclient:jar:3.1:compile  \- (commons-logging:commons-logging:jar:1.0.4:compile
 - omitted for conflict with 1.1.3)
+org.apache.hbase:hbase-external-blockcache:jar:3.0.0-SNAPSHOT\- org.apache.hadoop:hadoop-common:jar:2.7.4:compile   \- commons-httpclient:commons-httpclient:jar:3.1:compile  \- (commons-logging:commons-logging:jar:1.0.4:compile
 - omitted for conflict with 1.1.3)
+org.apache.hbase:hbase-hadoop-compat:jar:3.0.0-SNAPSHOT\- org.apache.hbase:hbase-metrics-api:jar:3.0.0-SNAPSHOT:compile   \- org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile  \- org.apache

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d0f1a9f6/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ReplicationManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ReplicationManager.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ReplicationManager.html
index 85cb39c..849adb3 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ReplicationManager.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/replication/ReplicationManager.html
@@ -145,69 +145,70 @@
 137  
checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(),
 138
peerConfig.getExcludeTableCFsMap());
 139} else {
-140  if 
((peerConfig.getExcludeNamespaces() != null && 
!peerConfig.getNamespaces().isEmpty())
-141  || 
(peerConfig.getExcludeTableCFsMap() != null
-142  && 
!peerConfig.getTableCFsMap().isEmpty())) {
-143throw new ReplicationException(
-144"Need clean 
exclude-namespaces or exclude-table-cfs config firstly"
-145+ " when replicate_all 
flag is false");
-146  }
-147  
checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
-148peerConfig.getTableCFsMap());
-149}
-150
checkConfiguredWALEntryFilters(peerConfig);
-151  }
-152
-153  /**
-154   * Set a namespace in the peer config 
means that all tables in this namespace will be replicated
-155   * to the peer cluster.
-156   * 1. If peer config already has a 
namespace, then not allow set any table of this namespace
-157   *to the peer config.
-158   * 2. If peer config already has a 
table, then not allow set this table's namespace to the peer
-159   *config.
-160   *
-161   * Set a exclude namespace in the peer 
config means that all tables in this namespace can't be
-162   * replicated to the peer cluster.
-163   * 1. If peer config already has a 
exclude namespace, then not allow set any exclude table of
-164   *this namespace to the peer 
config.
-165   * 2. If peer config already has a 
exclude table, then not allow set this table's namespace
-166   *as a exclude namespace.
-167   */
-168  private void 
checkNamespacesAndTableCfsConfigConflict(Set namespaces,
-169  Map> tableCfs) throws ReplicationException {
-170if (namespaces == null || 
namespaces.isEmpty()) {
-171  return;
-172}
-173if (tableCfs == null || 
tableCfs.isEmpty()) {
-174  return;
-175}
-176for (Map.Entry> entry : tableCfs.entrySet()) {
-177  TableName table = entry.getKey();
-178  if 
(namespaces.contains(table.getNamespaceAsString())) {
-179throw new 
ReplicationException("Table-cfs " + table + " is conflict with namespaces "
-180+ 
table.getNamespaceAsString() + " in peer config");
-181  }
-182}
-183  }
-184
-185  private void 
checkConfiguredWALEntryFilters(ReplicationPeerConfig peerConfig)
-186  throws IOException {
-187String filterCSV = 
peerConfig.getConfiguration().
-188
get(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY);
-189if (filterCSV != null && 
!filterCSV.isEmpty()){
-190  String [] filters = 
filterCSV.split(",");
-191  for (String filter : filters) {
-192try {
-193  Class clazz = 
Class.forName(filter);
-194  Object o = 
clazz.newInstance();
-195} catch (Exception e) {
-196  throw new 
DoNotRetryIOException("Configured WALEntryFilter " + filter +
-197  " could not be created. 
Failing add/update " + "peer operation.", e);
-198}
-199  }
-200}
-201  }
-202}
+140  if 
((peerConfig.getExcludeNamespaces() != null
+141  && 
!peerConfig.getExcludeNamespaces().isEmpty())
+142  || 
(peerConfig.getExcludeTableCFsMap() != null
+143  && 
!peerConfig.getExcludeTableCFsMap().isEmpty())) {
+144throw new ReplicationException(
+145"Need clean 
exclude-namespaces or exclude-table-cfs config firstly"
+146+ " when replicate_all 
flag is false");
+147  }
+148  
checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
+149peerConfig.getTableCFsMap());
+150}
+151
checkConfiguredWALEntryFilters(peerConfig);
+152  }
+153
+154  /**
+155   * Set a namespace in the peer config 
means that all tables in this namespace will be replicated
+156   * to the peer cluster.
+157   * 1. If peer config already has a 
namespace, then not allow set any table of this namespace
+158   *to the peer config.
+159   * 2. If peer config already has a 
table, then not allow set this table's namespace to the peer
+160   *config.
+161   *
+162   * Set a exclude namespace in the peer 
config means that all tables in this namespace can't be
+1
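The fix above makes the replicate_all guard inspect the exclude lists it actually validates. The companion namespace/table-cf conflict rule can be summarized in a few lines; a sketch mirroring the code shown, assuming the same Set<String> namespaces and Map<TableName, List<String>> tableCfs inputs:

    for (Map.Entry<TableName, List<String>> entry : tableCfs.entrySet()) {
      TableName table = entry.getKey();
      // A table-level entry may not share a namespace with a namespace-level entry.
      if (namespaces.contains(table.getNamespaceAsString())) {
        throw new ReplicationException("Table-cfs " + table + " is conflict with namespaces "
            + table.getNamespaceAsString() + " in peer config");
      }
    }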

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dad9a249/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.html 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.html
index 9da69f4..dae10fa 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-abstract class BufferedDataBlockEncoder
+abstract class BufferedDataBlockEncoder
 extends AbstractDataBlockEncoder
 Base class for all data block encoders that use a 
buffer.
 
@@ -359,7 +359,7 @@ extends 
 
 INITIAL_KEY_BUFFER_SIZE
-private static int INITIAL_KEY_BUFFER_SIZE
+private static int INITIAL_KEY_BUFFER_SIZE
 TODO: This datablockencoder is dealing in internals of 
hfileblocks. Purge reference to HFBs
 
 
@@ -377,7 +377,7 @@ extends 
 
 BufferedDataBlockEncoder
-BufferedDataBlockEncoder()
+BufferedDataBlockEncoder()
 
 
 
@@ -394,7 +394,7 @@ extends 
 
 decodeKeyValues
-public http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer decodeKeyValues(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true";
 title="class or interface in java.io">DataInputStream source,
+public http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer decodeKeyValues(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true";
 title="class or interface in java.io">DataInputStream source,
   HFileBlockDecodingContext blkDecodingCtx)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from 
interface: DataBlockEncoder
@@ -415,7 +415,7 @@ extends 
 
 compareCommonRowPrefix
-public static int compareCommonRowPrefix(Cell left,
+public static int compareCommonRowPrefix(Cell left,
  Cell right,
  int rowCommonPrefix)
 common prefixes
@@ -427,7 +427,7 @@ extends 
 
 compareCommonFamilyPrefix
-public static int compareCommonFamilyPrefix(Cell left,
+public static int compareCommonFamilyPrefix(Cell left,
 Cell right,
 int familyCommonPrefix)
 
@@ -438,7 +438,7 @@ extends 
 
 compareCommonQualifierPrefix
-public static int compareCommonQualifierPrefix(Cell left,
+public static int compareCommonQualifierPrefix(Cell left,
Cell right,
int qualCommonPrefix)
 
@@ -449,7 +449,7 @@ extends 
 
 afterEncodingKeyValue
-protected final int afterEncodingKeyValue(Cell cell,
+protected final int afterEncodingKeyValue(Cell cell,
   http://docs.oracle.com/javase/8/docs/api/java/io/DataOutputStream.html?is-external=true";
 title="class or interface in java.io">DataOutputStream out,
   HFileBlockDefaultEncodingContext encodingCtx)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -471,7 +471,7 @@ extends 
 
 afterDecodingKeyValue
-protected final void afterDecodingKeyValue(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true";
 title="class or interface in java.io">DataInputStream source,
+protected final void afterDecodingKeyValue(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true";
 title="class or interface in java.io">DataInputStream source,
http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer dest,
HFileBlockDefaultDecodingContext decodingCtx)
 throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -487,7 +487,7 @@ extends 
 
 internalDecodeKeyValues
-protected abstract http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer internalDecodeKeyValues(http://docs.oracle.com/javase/8/docs/api/java/io/DataInputStream.html?is-external=true";
 title="class or interface in java.io">DataInputStream sour

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.html 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.html
index 2ded650..d00b0f9 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.html
@@ -286,183 +286,189 @@
 278return CompareFilter.compare(this.op, 
compareResult);
 279  }
 280
-281  public boolean filterRow() {
-282// If column was found, return false 
if it was matched, true if it was not
-283// If column not found, return true 
if we filter if missing, false if not
-284return this.foundColumn? 
!this.matchedColumn: this.filterIfMissing;
-285  }
-286  
-287  public boolean hasFilterRow() {
-288return true;
-289  }
-290
-291  public void reset() {
-292foundColumn = false;
-293matchedColumn = false;
-294  }
-295
-296  /**
-297   * Get whether entire row should be 
filtered if column is not found.
-298   * @return true if row should be 
skipped if column not found, false if row
-299   * should be let through anyways
-300   */
-301  public boolean getFilterIfMissing() {
-302return filterIfMissing;
-303  }
-304
-305  /**
-306   * Set whether entire row should be 
filtered if column is not found.
-307   * 

-308 * If true, the entire row will be skipped if the column is not found. -309 *

-310 * If false, the row will pass if the column is not found. This is default. -311 * @param filterIfMissing flag -312 */ -313 public void setFilterIfMissing(boolean filterIfMissing) { -314this.filterIfMissing = filterIfMissing; -315 } -316 -317 /** -318 * Get whether only the latest version of the column value should be compared. -319 * If true, the row will be returned if only the latest version of the column -320 * value matches. If false, the row will be returned if any version of the -321 * column value matches. The default is true. -322 * @return return value -323 */ -324 public boolean getLatestVersionOnly() { -325return latestVersionOnly; -326 } -327 -328 /** -329 * Set whether only the latest version of the column value should be compared. -330 * If true, the row will be returned if only the latest version of the column -331 * value matches. If false, the row will be returned if any version of the -332 * column value matches. The default is true. -333 * @param latestVersionOnly flag -334 */ -335 public void setLatestVersionOnly(boolean latestVersionOnly) { -336this.latestVersionOnly = latestVersionOnly; -337 } -338 -339 public static Filter createFilterFromArguments(ArrayList filterArguments) { -340 Preconditions.checkArgument(filterArguments.size() == 4 || filterArguments.size() == 6, -341"Expected 4 or 6 but got: %s", filterArguments.size()); -342byte [] family = ParseFilter.removeQuotesFromByteArray(filterArguments.get(0)); -343byte [] qualifier = ParseFilter.removeQuotesFromByteArray(filterArguments.get(1)); -344CompareOperator op = ParseFilter.createCompareOperator(filterArguments.get(2)); -345 org.apache.hadoop.hbase.filter.ByteArrayComparable comparator = ParseFilter.createComparator( -346 ParseFilter.removeQuotesFromByteArray(filterArguments.get(3))); -347 -348if (comparator instanceof RegexStringComparator || -349comparator instanceof SubstringComparator) { -350 if (op != CompareOperator.EQUAL && -351 op != CompareOperator.NOT_EQUAL) { -352throw new IllegalArgumentException ("A regexstring comparator and substring comparator " + -353 "can only be used with EQUAL and NOT_EQUAL"); -354 } -355} -356 -357SingleColumnValueFilter filter = new SingleColumnValueFilter(family, qualifier, -358 op, comparator); +281 @Override +282 public boolean filterRow() { +283// If column was found, return false if it was matched, true if it was not +284// If column not found, return true if we filter if missing, false if not +285return this.foundColumn? !this.matchedColumn: this.filterIfMissing; +286 } +287 +288 @Override +289 public boolean hasFilterRow() { +290return true; +291 } +292 +293 @Override +294 public void reset() { +295foundColumn = false; +296matchedColumn = false; +297 } +298 +299 /** +300 * Get whether entire row should be filtered if column is not found. +301 * @return true if row should be skipped if column not found, false if row +302 * should be let through anyways +303 */ +304 public boolean getFilterIfMissing() { +305return fi

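For reference, the two flags documented above combine like this in client code; a usage sketch with illustrative column names:

    SingleColumnValueFilter f = new SingleColumnValueFilter(
        Bytes.toBytes("cf"), Bytes.toBytes("q"),
        CompareOperator.EQUAL, Bytes.toBytes("v"));
    f.setFilterIfMissing(true);   // skip rows that lack cf:q entirely
    f.setLatestVersionOnly(true); // compare only the newest version (the default)
    Scan scan = new Scan();
    scan.setFilter(f);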

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
index f1a2443..a469e93 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
@@ -1350,415 +1350,415 @@
 1342return delete;
 1343  }
 1344
-1345  public static Put 
makeBarrierPut(byte[] encodedRegionName, long seq, byte[] tableName) {
-1346byte[] seqBytes = 
Bytes.toBytes(seq);
-1347return new Put(encodedRegionName)
-1348
.addImmutable(HConstants.REPLICATION_BARRIER_FAMILY, seqBytes, seqBytes)
-1349
.addImmutable(HConstants.REPLICATION_META_FAMILY, tableNameCq, tableName);
-1350  }
-1351
-1352
-1353  public static Put 
makeDaughterPut(byte[] encodedRegionName, byte[] value) {
-1354return new 
Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1355daughterNameCq, value);
-1356  }
-1357
-1358  public static Put makeParentPut(byte[] 
encodedRegionName, byte[] value) {
-1359return new 
Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1360parentNameCq, value);
-1361  }
-1362
-1363  /**
-1364   * Adds split daughters to the Put
-1365   */
-1366  public static Put 
addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB) {
-1367if (splitA != null) {
-1368  put.addImmutable(
-1369HConstants.CATALOG_FAMILY, 
HConstants.SPLITA_QUALIFIER, RegionInfo.toByteArray(splitA));
-1370}
-1371if (splitB != null) {
-1372  put.addImmutable(
-1373HConstants.CATALOG_FAMILY, 
HConstants.SPLITB_QUALIFIER, RegionInfo.toByteArray(splitB));
-1374}
-1375return put;
-1376  }
-1377
-1378  /**
-1379   * Put the passed 
puts to the hbase:meta 
table.
-1380   * Non-atomic for multi puts.
-1381   * @param connection connection we're 
using
-1382   * @param puts Put to add to 
hbase:meta
-1383   * @throws IOException
-1384   */
-1385  public static void 
putToMetaTable(final Connection connection, final Put... puts)
-1386throws IOException {
-1387put(getMetaHTable(connection), 
Arrays.asList(puts));
-1388  }
-1389
-1390  /**
-1391   * @param t Table to use (will be 
closed when done).
-1392   * @param puts puts to make
-1393   * @throws IOException
-1394   */
-1395  private static void put(final Table t, 
final List puts) throws IOException {
-1396try {
-1397  if (METALOG.isDebugEnabled()) {
-1398
METALOG.debug(mutationsToString(puts));
-1399  }
-1400  t.put(puts);
-1401} finally {
-1402  t.close();
-1403}
-1404  }
-1405
-1406  /**
-1407   * Put the passed 
ps to the hbase:meta table.
-1408   * @param connection connection we're 
using
-1409   * @param ps Put to add to 
hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void 
putsToMetaTable(final Connection connection, final List ps)
-1413throws IOException {
-1414Table t = 
getMetaHTable(connection);
-1415try {
-1416  if (METALOG.isDebugEnabled()) {
-1417
METALOG.debug(mutationsToString(ps));
-1418  }
-1419  t.put(ps);
-1420} finally {
-1421  t.close();
-1422}
-1423  }
-1424
-1425  /**
-1426   * Delete the passed 
d from the hbase:meta 
table.
-1427   * @param connection connection we're 
using
-1428   * @param d Delete to add to 
hbase:meta
-1429   * @throws IOException
-1430   */
-1431  static void deleteFromMetaTable(final 
Connection connection, final Delete d)
-1432throws IOException {
-1433List dels = new 
ArrayList<>(1);
-1434dels.add(d);
-1435deleteFromMetaTable(connection, 
dels);
-1436  }
-1437
-1438  /**
-1439   * Delete the passed 
deletes from the hbase:meta 
table.
-1440   * @param connection connection we're 
using
-1441   * @param deletes Deletes to add to 
hbase:meta  This list should support #remove.
-1442   * @throws IOException
-1443   */
-1444  public static void 
deleteFromMetaTable(final Connection connection, final List 
deletes)
-1445throws IOException {
-1446Table t = 
getMetaHTable(connection);
-1447try {
-1448  if (METALOG.isDebugEnabled()) {
-1449
METALOG.debug(mutationsToString(deletes));
-1450  }
-1451  t.delete(deletes);
-1452} finally {
-1453  t.close();
-1454}
-1455  }
-1456
-1457  /**
-1458   * Deletes some replica columns 
corresponding to replicas for the passed rows
-1459   * @param metaRows rows in 
hbase:meta
-1460   * @param replicaIndexToDeleteFrom the 
replica ID we would start deleting from
-1461   * @param numReplicasToRemove how many 
replicas to remove
-1462   *

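The helpers above all funnel through the same put-then-close pattern; a minimal sketch of that pattern for an ordinary caller, with placeholder row and value (direct writes to hbase:meta are normally left to these helpers):

    try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
      Put put = new Put(Bytes.toBytes("encoded-region-name")); // placeholder row key
      put.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          Bytes.toBytes("serialized-region-info"));            // placeholder value
      meta.put(put); // Table is closed by try-with-resources, mirroring the finally blocks above
    }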
[17/51] [partial] hbase-site git commit: Published site at .

2017-12-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
index 7c59e27..c904c56 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html
@@ -119,4048 +119,4054 @@
 111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
-156import 

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html
index aacc485..69c1f6c 100644
--- a/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html
+++ b/devapidocs/org/apache/hadoop/hbase/coprocessor/Export.html
@@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.LimitedPrivate(value="Coprocesssor")
  @InterfaceStability.Evolving
-public class Export
+public class Export
 extends org.apache.hadoop.hbase.protobuf.generated.ExportProtos.ExportService
 implements RegionCoprocessor
 Export an HBase table. Writes content to sequence files up 
in HDFS. Use
@@ -393,7 +393,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -402,7 +402,7 @@ implements 
 
 DEFAULT_CODEC
-private static final http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class DEFAULT_CODEC
+private static final http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class DEFAULT_CODEC
 
 
 
@@ -411,7 +411,7 @@ implements 
 
 DEFAULT_TYPE
-private static 
final org.apache.hadoop.io.SequenceFile.CompressionType DEFAULT_TYPE
+private static 
final org.apache.hadoop.io.SequenceFile.CompressionType DEFAULT_TYPE
 
 
 
@@ -420,7 +420,7 @@ implements 
 
 env
-private RegionCoprocessorEnvironment env
+private RegionCoprocessorEnvironment env
 
 
 
@@ -429,7 +429,7 @@ implements 
 
 userProvider
-private UserProvider userProvider
+private UserProvider userProvider
 
 
 
@@ -446,7 +446,7 @@ implements 
 
 Export
-public Export()
+public Export()
 
 
 
@@ -463,7 +463,7 @@ implements 
 
 main
-public static void main(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] args)
+public static void main(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] args)
  throws http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable
 
 Throws:
@@ -477,7 +477,7 @@ implements 
 
 run
-static http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map run(org.apache.hadoop.conf.Configuration conf,
+static http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map run(org.apache.hadoop.conf.Configuration conf,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] args)
 throws http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true";
 title="class or interface in java.lang">Throwable
 
@@ -492,7 +492,7 @@ implements 
 
 run
-public static http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map run(org.apache.hadoop.conf.Configuration conf,
+public static http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map run(org.apache.hadoop.conf.Configuration conf,
   TableName tableName,
   Scan scan,
   
org.apache.hadoop.fs.Path dir)
@@ -509,7 +509,7 @@ implements 
 
 getCompression
-private static boolean getCompression(org.apache.hadoop.hbase.protobuf.generated.ExportProtos.ExportRequest request)
+private static boolean getCompression(org.apache.hadoop.hbase.protobuf.generated.ExportProtos.ExportRequest request)
 
 
 
@@ -518,7 +518,7 @@ implements 
 
 getCompressionType
-private 
static org.apache.hadoop.io.SequenceFile.CompressionType getCompressionType(org.apache.hadoop.hbase.protobuf.generated.ExportProtos.ExportRequest request)
+private 
static org.apache.hadoop.io.SequenceFile.CompressionType getCompressionType(org.apache.hadoop.hbase.protobuf.generated.ExportProtos.ExportRequest request)
 
 
 
@@ -527,7 +527,7 @@ implements 
 
 getCompressionCodec
-private 
static org.apache.hadoop.io.compress.CompressionCodec getCompressionCodec(org.apache.hadoop.conf.Configuration conf,
+private 
static org.apache.hadoop.io.compress.Co
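Putting the pieces above together, the endpoint can be driven from client code roughly as follows; table name and output path are illustrative, and the sketch assumes the Export coprocessor is deployed on the table:

    Configuration conf = HBaseConfiguration.create();
    // One sequence file per region lands under the output directory.
    Map<byte[], ?> results =
        Export.run(conf, TableName.valueOf("t1"), new Scan(), new Path("/export/t1"));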

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/SizeCachedKeyValue.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/SizeCachedKeyValue.html 
b/devapidocs/org/apache/hadoop/hbase/SizeCachedKeyValue.html
index 494edb6..5f59155 100644
--- a/devapidocs/org/apache/hadoop/hbase/SizeCachedKeyValue.html
+++ b/devapidocs/org/apache/hadoop/hbase/SizeCachedKeyValue.html
@@ -49,7 +49,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-Prev Class
+Prev Class
 Next Class
 
 
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Implemented Interfaces:
-http://docs.oracle.com/javase/8/docs/api/java/lang/Cloneable.html?is-external=true";
 title="class or interface in java.lang">Cloneable, Cell, ExtendedCell, HeapSize, RawCell, SettableSequenceId, SettableTimestamp
+http://docs.oracle.com/javase/8/docs/api/java/lang/Cloneable.html?is-external=true";
 title="class or interface in java.lang">Cloneable, Cell, ExtendedCell, HeapSize, RawCell
 
 
 Direct Known Subclasses:
@@ -254,7 +254,7 @@ extends KeyValue
-checkParameters,
 clone, 
create,
 create,
 createKeyOnly,
 deepClone,
 equals,
 getBuffer,
 getDelimiter,
 getDelimiterInReverse, getFamilyArray,
 getFamilyLength,
 getFamilyLength,
 getFamilyOffset,
 getKey, 
getKeyDataStructureSize,
 getKeyOffset,
 getKeyString,
 getKeyValueDataStructureSize,
 getKeyValueDataStructureSize,
 getKeyValueDataStructureSize,
 getLength,
 getOffset,
 getQualifierArray,
 getQualifierLength,
 getQualifierOffset,
 getRowArray,
 getRowOffset,
 getSeque
 nceId, getSerializedSize,
 getTags, 
getTagsArray,
 getTagsLength,
 getTagsOffset,
 getTimestamp,
 getTimestamp,
 getTimestampOffset,
 getTypeByte,
 getValueArray,
 getValueLength, getValueOffset,
 hashCode,
 humanReadableTimestamp,
 isLatestTimestamp,
 keyToString,
 keyToString,
 oswrite,
 setSequenceId,
 setTimestamp, setTimestamp,
 shallowCopy,
 toString,
 toStringMap,
 updateLatestStamp,
 write,
 write,
 write,
 writeByteArray
+checkParameters,
 clone, 
create,
 create,
 createKeyOnly,
 deepClone,
 equals,
 getBuffer,
 getDelimiter,
 getDelimiterInReverse, getFamilyArray,
 getFamilyLength,
 getFamilyLength,
 getFamilyOffset,
 getKey, 
getKeyDataStructureSize,
 getKeyOffset,
 getKeyString,
 getKeyValueDataStructureSize,
 getKeyValueDataStructureSize,
 getKeyValueDataStructureSize,
 getLength,
 getOffset,
 getQualifierArray,
 getQualifierLength,
 getQualifierOffset,
 getRowArray,
 getRowOffset,
 getSeque
 nceId, getSerializedSize,
 getTags, 
getTagsArray,
 getTagsLength,
 getTagsOffset,
 getTimestamp,
 getTimestamp,
 getTimestampOffset,
 getTypeByte,
 getValueArray,
 getValueLength, getValueOffset,
 hashCode,
 humanReadableTimestamp,
 isLatestTimestamp,
 keyToString,
 keyToString,
 oswrite,
 setSequenceId,
 se
 tTimestamp, setTimestamp,
 shallowCopy,
 toString,
 toStringMap,
 updateLatestStamp,
 write,
 write,
 write,
 writeByteArray
 
 
 
@@ -433,7 +433,7 @@ extends 
 
-Prev Class
+Prev Class
 Next Class
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.html 
b/devapidocs/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.html
index 104c423..b740274 100644
--- a/devapidocs/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.html
+++ b/devapidocs/org/apache/hadoop/hbase/SizeCachedNoTagsKeyValue.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 All Implemented Interfaces:
-http://docs.oracle.com/javase/8/docs/api/java/lang/Cloneable.html?is-external=true";
 title="class or interface in java.lang">Cloneable, Cell, ExtendedCell, HeapSize, RawCell, SettableSequenceId, SettableTimestamp
+http://docs.oracle.com/javase/8/docs/api/java/lang/Cloneable.html?is-external=true";
 title="class or interface in java.lang">Cloneable, Cell, ExtendedCell, HeapSize, RawCell
 
 
 
@@ -241,7 +241,7 @@ extends 
 
 Methods inherited from class org.apache.hadoop.hbase.KeyValue
-checkParameters,
 clone, 
create,
 create,
 createKeyOnly,
 deepClone,
 equals,
 getBuffer,
 getDelimiter,
 getDelimiterInReverse, getFamilyArray,
 getFamilyLength,
 getFamilyLength,
 getFamilyOffset,
 getKey, 
getKeyDataStructureSize,
 getKeyOffset,
 getKeyString,
 getKeyValueDataStructureSize,
 getKeyValueDataStructureSize,
 getKeyValueDataStructureSize,
 getLength,
 getOffset,
 getQualifierArray,
 getQualifierLength,
 getQualifierOffset,
 getRowArray,
 getRowOffset,
 getSeque
 nceId, getTags, 
getTagsArray,
 getTagsOffset,
 getTimestamp,
 getTimestamp,
 getTimestampOffset,
 getTypeByte,
 getValueArray,
 getValueLength,
 getValueOffset,
 hashCode, humanReadableTimestamp,
 isLatestTimestamp,
 keyToString,
 keyToString,
 oswrite,
 setSequenceId,
 setTimestamp,
 setTimestamp,
 shallowCopy, href="../../../../org/apache/hadoo

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-annotations/dependency-convergence.html
--
diff --git a/hbase-annotations/dependency-convergence.html 
b/hbase-annotations/dependency-convergence.html
index ecbd0c9..f699666 100644
--- a/hbase-annotations/dependency-convergence.html
+++ b/hbase-annotations/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations – Reactor Dependency 
Convergence
 
@@ -488,22 +488,22 @@
 3.4.10
 
 
-org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT+- org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|  +- org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)|  +- org.apache.zookeeper:zookeeper:jar:3.4.10:compile|  +- org.apache.hadoop:hadoop-common:jar:2.7.4:compile|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)|  +- org.apache.hadoop:hadoop-auth:jar:2.7.4:compile|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)|  \- org.apache.hadoop:hadoop-client:jar:2.7.4:compile| \- org.apache.had
 oop:hadoop-mapreduce-client-app:jar:2.7.4:compile|\- org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile|   \- org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-mapreduce:test-jar:tests:3.0.0-SNAPSHOT:test|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - 
version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-testing-util:jar:3.0.0-SNAP
 SHOT:test|  +- org.apache.hbase:hbase-server:test-jar:tests:3.0.0-SNAPSHOT:test|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test
 - version managed from 3.4.6; omitted for duplicate)|  \- org.apache.hadoop:hadoop-minicluster:jar:2.7.4:test| +- org.apache.hadoop:hadoop-common:test-jar:tests:2.7.4:test| |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test
 - version managed from 3.4.6; omitted for duplicate)| \- org.apache.hadoop:hadoop-yarn-server-tests:test-jar:tests:2.7.4:test|\- org.apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.4:test|   \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test
 - version managed from 3.4.6; omitted for dupli
 cate)+- org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)\- org.apache.hbase:hbase-rsgroup:jar:3.0.0-SNAPSHOT:compile   \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile 
- version managed from 3.4.6
 ; omitted for duplicate)
-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT+- org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile|  +- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)|  \- org.apache.hadoop:hadoop-auth:jar:2.7.4:compile| \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|  +- org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)|  +- org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for dup
 licate)|  +- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)|  \- org.apache.hadoop:hadoop-client:jar:2.7.4:compile| \- org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.4:compile|\- org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile|   \- org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-server:test-jar:tests:3.0.0-SNAPSHOT:

[17/51] [partial] hbase-site git commit: Published site at .

2017-12-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 3edfbef..9707b2c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -2459,5936 +2459,5935 @@
 2451  }
 2452
 2453  for (HStore s : storesToFlush) {
-2454MemStoreSize flushableSize = s.getFlushableSize();
-2455totalSizeOfFlushableStores.incMemStoreSize(flushableSize);
-2456storeFlushCtxs.put(s.getColumnFamilyDescriptor().getName(),
-2457  s.createFlushContext(flushOpSeqId, tracker));
-2458// for writing stores to WAL
-2459committedFiles.put(s.getColumnFamilyDescriptor().getName(), null);
-2460storeFlushableSize.put(s.getColumnFamilyDescriptor().getName(), flushableSize);
-2461  }
-2462
-2463  // write the snapshot start to WAL
-2464  if (wal != null && !writestate.readOnly) {
-2465FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
-2466getRegionInfo(), flushOpSeqId, committedFiles);
-2467// No sync. Sync is below where no updates lock and we do FlushAction.COMMIT_FLUSH
-2468WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false,
-2469mvcc);
-2470  }
-2471
-2472  // Prepare flush (take a snapshot)
-2473  for (StoreFlushContext flush : storeFlushCtxs.values()) {
-2474flush.prepare();
-2475  }
-2476} catch (IOException ex) {
-2477  doAbortFlushToWAL(wal, flushOpSeqId, committedFiles);
-2478  throw ex;
-2479} finally {
-2480  this.updatesLock.writeLock().unlock();
-2481}
-2482String s = "Finished memstore snapshotting " + this + ", syncing WAL and waiting on mvcc, " +
-2483"flushsize=" + totalSizeOfFlushableStores;
-2484status.setStatus(s);
-2485doSyncOfUnflushedWALChanges(wal, getRegionInfo());
-2486return new PrepareFlushResult(storeFlushCtxs, committedFiles, storeFlushableSize, startTime,
-2487flushOpSeqId, flushedSeqId, totalSizeOfFlushableStores);
-2488  }
-2489
-2490  /**
-2491   * Utility method broken out of internalPrepareFlushCache so that method is smaller.
-2492   */
-2493  private void logFatLineOnFlush(Collection<HStore> storesToFlush, long sequenceId) {
-2494if (!LOG.isInfoEnabled()) {
-2495  return;
-2496}
-2497// Log a fat line detailing what is being flushed.
-2498StringBuilder perCfExtras = null;
-2499if (!isAllFamilies(storesToFlush)) {
-2500  perCfExtras = new StringBuilder();
-2501  for (HStore store: storesToFlush) {
-2502perCfExtras.append("; ").append(store.getColumnFamilyName());
-2503perCfExtras.append("=")
-2504.append(StringUtils.byteDesc(store.getFlushableSize().getDataSize()));
-2505  }
-2506}
-2507LOG.info("Flushing " + storesToFlush.size() + "/" + stores.size() +
-2508" column families, memstore=" + StringUtils.byteDesc(this.memstoreDataSize.get()) +
-2509((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") +
-2510((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + sequenceId));
-2511  }
-2512
-2513  private void doAbortFlushToWAL(final WAL wal, final long flushOpSeqId,
-2514  final Map<byte[], List<Path>> committedFiles) {
-2515if (wal == null) return;
-2516try {
-2517  FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
-2518  getRegionInfo(), flushOpSeqId, committedFiles);
-2519  WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false,
-2520  mvcc);
-2521} catch (Throwable t) {
-2522  LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" +
-2523  StringUtils.stringifyException(t));
-2524  // ignore this since we will be aborting the RS with DSE.
-2525}
-2526// we have called wal.startCacheFlush(), now we have to abort it
-2527wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
-2528  }
-2529
-2530  /**
-2531   * Sync unflushed WAL changes. See HBASE-8208 for details
-2532   */
-2533  private static void doSyncOfUnflushedWALChanges(final WAL wal, final RegionInfo hri)
-2534  throws IOException {
-2535if (wal == null) {
-2536  return;
-2537}
-2538try {
-2539  wal.sync(); // ensure that flush marker is sync'ed
-2540} catch (IOException ioe) {
-2541  wal.abortCacheFlush(hri.getEncodedNameAsBytes());
-2542  throw ioe;
-2543}
-2544  }
-2545
-2546  /**
-2547   * @ret
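The ordering in the deleted block above is easy to lose in the diff noise: the START_FLUSH marker is appended, unsynced, and the memstore snapshots are prepared while the updates write lock is held; the WAL sync happens only after the lock is released; and the failure path writes an ABORT_FLUSH marker and aborts the cache flush before rethrowing. A toy, self-contained Java sketch of that ordering follows; the Log interface and every name in it are stand-ins, not HBase's real types.

import java.io.IOException;
import java.util.concurrent.locks.ReentrantReadWriteLock;

interface Log {
  void append(String marker);     // buffered append, no fsync
  void sync() throws IOException; // fsync everything appended so far
  void abortCacheFlush();         // undo the startCacheFlush bookkeeping
}

final class FlushPrepareSketch {
  private final ReentrantReadWriteLock updatesLock = new ReentrantReadWriteLock();
  private final Log wal;

  FlushPrepareSketch(Log wal) {
    this.wal = wal;
  }

  void prepareFlush() throws IOException {
    updatesLock.writeLock().lock();
    try {
      wal.append("START_FLUSH"); // marker written but NOT synced while writers are blocked
      takeMemstoreSnapshots();
    } catch (IOException ex) {
      wal.append("ABORT_FLUSH"); // best-effort abort marker
      wal.abortCacheFlush();
      throw ex;
    } finally {
      updatesLock.writeLock().unlock();
    }
    // Sync outside the lock so concurrent writers are not stalled on WAL I/O;
    // if the sync fails, the cache flush is aborted and the error propagates.
    try {
      wal.sync();
    } catch (IOException ioe) {
      wal.abortCacheFlush();
      throw ioe;
    }
  }

  private void takeMemstoreSnapshots() throws IOException {
    // snapshot each store's memstore here; may fail with IOException
  }
}

Deferring the sync until after the unlock is the design point: writers only pay for the in-memory marker append, never for disk I/O, while the marker still lands in the WAL ahead of any COMMIT_FLUSH.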

[17/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.Callback.html
@@ -25,563 +25,558 @@
 017 */
 018package org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
-022import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
-023import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
-024import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
-025import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
-026import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+020import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
+021import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
+022import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
+023import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
+024import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+025import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
+026import static org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
 027import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 028
-029
-030import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-031import org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-032import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-033import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler.Sharable;
-034import org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-035import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-036import org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-037import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-038import org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-039import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-040import org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-041import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-042import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-043import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseCombiner;
-044
-045import java.io.IOException;
-046import java.nio.ByteBuffer;
-047import java.util.ArrayDeque;
-048import java.util.Collection;
-049import java.util.Collections;
-050import java.util.Deque;
-051import java.util.IdentityHashMap;
-052import java.util.List;
-053import java.util.Set;
-054import java.util.concurrent.CompletableFuture;
-055import java.util.concurrent.TimeUnit;
-056import java.util.function.Supplier;
-057
-058import org.apache.hadoop.conf.Configuration;
-059import org.apache.hadoop.crypto.Encryptor;
-060import org.apache.hadoop.fs.Path;
+029import java.io.IOException;
+030import java.io.InterruptedIOException;
+031import java.nio.ByteBuffer;
+032import java.util.Collection;
+033import java.util.Collections;
+034import java.util.Iterator;
+035import java.util.List;
+036import java.util.Set;
+037import java.util.concurrent.CompletableFuture;
+038import java.util.concurrent.ConcurrentHashMap;
+039import java.util.concurrent.ConcurrentLinkedDeque;
+040import java.util.concurrent.ExecutionException;
+041import java.util.concurrent.TimeUnit;
+042import java.util.function.Supplier;
+043
+044import org.apache.hadoop.conf.Configuration;
+045import org.apache.hadoop.crypto.Encryptor;
+046import org.apache.hadoop.fs.Path;
+047import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose;
+048import org.apache.hadoop.hbase.util.CancelableProgressable;
+049import org.apache.hadoop.hbase.util.FSUtils;
+050import org.apache.hadoop.hdfs.DFSClient;
+051import org.apache.hadoop.hdfs.Distribute
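The static imports retained on both sides of this hunk (READER_IDLE, WRITER_IDLE, HEART_BEAT_SEQNO) point at netty's idle-state machinery, which this output class uses to keep its DataNode pipes alive. For readers unfamiliar with that machinery, here is an illustrative handler showing the usual READER_IDLE/WRITER_IDLE pattern. It is a sketch, not this class's actual code: it uses plain io.netty coordinates (the HBase build shades these under org.apache.hadoop.hbase.shaded.io.netty) and a hypothetical heartbeat payload.

import java.util.concurrent.TimeUnit;

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelPipeline;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.handler.timeout.IdleStateHandler;

public class IdleAwareHandler extends ChannelInboundHandlerAdapter {

  // Install an IdleStateHandler ahead of this handler: it fires READER_IDLE
  // after readTimeoutMs without reads and WRITER_IDLE after writeTimeoutMs
  // without writes (0 disables the all-idle event).
  public static void install(ChannelPipeline pipeline, long readTimeoutMs, long writeTimeoutMs) {
    pipeline.addLast(new IdleStateHandler(readTimeoutMs, writeTimeoutMs, 0, TimeUnit.MILLISECONDS));
    pipeline.addLast(new IdleAwareHandler());
  }

  @Override
  public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
    if (evt instanceof IdleStateEvent) {
      IdleState state = ((IdleStateEvent) evt).state();
      if (state == IdleState.WRITER_IDLE) {
        // Nothing written lately: push a heartbeat so the peer keeps the pipe open.
        ctx.writeAndFlush(ctx.alloc().buffer(4).writeInt(-1)); // hypothetical payload
      } else if (state == IdleState.READER_IDLE) {
        // Nothing read within the read timeout: give up on the connection.
        ctx.close();
      }
    } else {
      super.userEventTriggered(ctx, evt);
    }
  }
}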

[17/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index e6498a8..6cb32c4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -252,496 +252,500 @@
 244  }
 245
 246  @Override
-247  public CompletableFuture<Void> compact(TableName tableName) {
-248return wrap(rawAdmin.compact(tableName));
-249  }
-250
-251  @Override
-252  public CompletableFuture<Void> compact(TableName tableName, byte[] columnFamily) {
-253return wrap(rawAdmin.compact(tableName, columnFamily));
-254  }
-255
-256  @Override
-257  public CompletableFuture<Void> compactRegion(byte[] regionName) {
-258return wrap(rawAdmin.compactRegion(regionName));
-259  }
-260
-261  @Override
-262  public CompletableFuture<Void> compactRegion(byte[] regionName, byte[] columnFamily) {
-263return wrap(rawAdmin.compactRegion(regionName, columnFamily));
-264  }
-265
-266  @Override
-267  public CompletableFuture<Void> majorCompact(TableName tableName) {
-268return wrap(rawAdmin.majorCompact(tableName));
-269  }
-270
-271  @Override
-272  public CompletableFuture<Void> majorCompact(TableName tableName, byte[] columnFamily) {
-273return wrap(rawAdmin.majorCompact(tableName, columnFamily));
-274  }
-275
-276  @Override
-277  public CompletableFuture<Void> majorCompactRegion(byte[] regionName) {
-278return wrap(rawAdmin.majorCompactRegion(regionName));
-279  }
-280
-281  @Override
-282  public CompletableFuture<Void> majorCompactRegion(byte[] regionName, byte[] columnFamily) {
-283return wrap(rawAdmin.majorCompactRegion(regionName, columnFamily));
-284  }
-285
-286  @Override
-287  public CompletableFuture<Void> compactRegionServer(ServerName serverName) {
-288return wrap(rawAdmin.compactRegionServer(serverName));
-289  }
-290
-291  @Override
-292  public CompletableFuture<Void> majorCompactRegionServer(ServerName serverName) {
-293return wrap(rawAdmin.majorCompactRegionServer(serverName));
-294  }
-295
-296  @Override
-297  public CompletableFuture<Boolean> mergeSwitch(boolean on) {
-298return wrap(rawAdmin.mergeSwitch(on));
-299  }
-300
-301  @Override
-302  public CompletableFuture<Boolean> isMergeEnabled() {
-303return wrap(rawAdmin.isMergeEnabled());
-304  }
-305
-306  @Override
-307  public CompletableFuture<Boolean> splitSwitch(boolean on) {
-308return wrap(rawAdmin.splitSwitch(on));
-309  }
-310
-311  @Override
-312  public CompletableFuture<Boolean> isSplitEnabled() {
-313return wrap(rawAdmin.isSplitEnabled());
-314  }
-315
-316  @Override
-317  public CompletableFuture<Void> mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB,
-318  boolean forcible) {
-319return wrap(rawAdmin.mergeRegions(nameOfRegionA, nameOfRegionB, forcible));
-320  }
-321
-322  @Override
-323  public CompletableFuture<Void> split(TableName tableName) {
-324return wrap(rawAdmin.split(tableName));
-325  }
-326
-327  @Override
-328  public CompletableFuture<Void> split(TableName tableName, byte[] splitPoint) {
-329return wrap(rawAdmin.split(tableName, splitPoint));
-330  }
-331
-332  @Override
-333  public CompletableFuture<Void> splitRegion(byte[] regionName) {
-334return wrap(rawAdmin.splitRegion(regionName));
-335  }
-336
-337  @Override
-338  public CompletableFuture<Void> splitRegion(byte[] regionName, byte[] splitPoint) {
-339return wrap(rawAdmin.splitRegion(regionName, splitPoint));
-340  }
-341
-342  @Override
-343  public CompletableFuture<Void> assign(byte[] regionName) {
-344return wrap(rawAdmin.assign(regionName));
-345  }
-346
-347  @Override
-348  public CompletableFuture<Void> unassign(byte[] regionName, boolean forcible) {
-349return wrap(rawAdmin.unassign(regionName, forcible));
-350  }
-351
-352  @Override
-353  public CompletableFuture<Void> offline(byte[] regionName) {
-354return wrap(rawAdmin.offline(regionName));
-355  }
-356
-357  @Override
-358  public CompletableFuture<Void> move(byte[] regionName) {
-359return wrap(rawAdmin.move(regionName));
-360  }
-361
-362  @Override
-363  public CompletableFuture<Void> move(byte[] regionName, ServerName destServerName) {
-364return wrap(rawAdmin.move(regionName, destServerName));
-365  }
-366
-367  @Override
-368  public CompletableFuture<Void> setQuota(QuotaSettings quota) {
-369return wrap(rawAdmin.setQuota(quota));
-370  }
-371
-372  @Override
-373  public CompletableFuture<List<QuotaSettings>> getQuota(QuotaFilter filter) {
-374return wrap(rawAdmin.getQuota(filter));
-375  }
-376
-377  @Override
-378  public CompletableFuture
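Every override in this hunk is a one-line delegation: the CompletableFuture returned by the underlying rawAdmin is passed through wrap(...). As a sketch of the shape of that pattern only (an assumption, not the project's actual wrap implementation; the pool, class name, and main driver are all hypothetical), a wrapper can re-complete the raw future on a caller-facing executor so user callbacks never run on internal RPC threads:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class WrapSketch {
  private static final ExecutorService POOL = Executors.newCachedThreadPool();

  // Relay the outcome of rawFuture onto POOL so the caller's continuations
  // are decoupled from whatever thread completed it.
  static <T> CompletableFuture<T> wrap(CompletableFuture<T> rawFuture) {
    CompletableFuture<T> wrapped = new CompletableFuture<>();
    rawFuture.whenComplete((result, error) -> POOL.execute(() -> {
      if (error != null) {
        wrapped.completeExceptionally(error);
      } else {
        wrapped.complete(result);
      }
    }));
    return wrapped;
  }

  public static void main(String[] args) throws Exception {
    CompletableFuture<String> raw = CompletableFuture.supplyAsync(() -> "compacted");
    System.out.println(wrap(raw).get()); // prints "compacted"
    POOL.shutdown();
  }
}

The point of the indirection is isolation: the facade exposes the same async API while guaranteeing that user code never blocks, and is never blocked by, the internal threads that drive the RPCs.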
