[15/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index e763690..f66043c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -1301,610 +1301,613 @@
 1293    }
 1294    RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionInfo);
 1295    // Do not need to lock on regionNode, as we can make sure that before we finish loading
-1296    // meta, all the related procedures can not be executed. The only exception is formeta
+1296    // meta, all the related procedures can not be executed. The only exception is for meta
 1297    // region related operations, but here we do not load the informations for meta region.
 1298    regionNode.setState(localState);
 1299    regionNode.setLastHost(lastHost);
 1300    regionNode.setRegionLocation(regionLocation);
 1301    regionNode.setOpenSeqNum(openSeqNum);
 1302
-1303    if (localState == State.OPEN) {
-1304      assert regionLocation != null : "found null region location for " + regionNode;
-1305      regionStates.addRegionToServer(regionNode);
-1306    } else if (localState == State.OFFLINE || regionInfo.isOffline()) {
-1307      regionStates.addToOfflineRegions(regionNode);
-1308    }
-1309  }
-1310});
-1311
-1312    // every assignment is blocked until meta is loaded.
-1313    wakeMetaLoadedEvent();
-1314  }
-1315
-1316  /**
-1317   * Used to check if the meta loading is done.
-1318   * <p/>
-1319   * if not we throw PleaseHoldException since we are rebuilding the RegionStates
-1320   * @param hri region to check if it is already rebuild
-1321   * @throws PleaseHoldException if meta has not been loaded yet
-1322   */
-1323  private void checkMetaLoaded(RegionInfo hri) throws PleaseHoldException {
-1324    if (!isRunning()) {
-1325      throw new PleaseHoldException("AssignmentManager not running");
-1326    }
-1327    boolean meta = isMetaRegion(hri);
-1328    boolean metaLoaded = isMetaLoaded();
-1329    if (!meta && !metaLoaded) {
-1330      throw new PleaseHoldException(
-1331        "Master not fully online; hbase:meta=" + meta + ", metaLoaded=" + metaLoaded);
-1332    }
-1333  }
-1334
-1335  // ============================================================================================
-1336  //  TODO: Metrics
-1337  // ============================================================================================
-1338  public int getNumRegionsOpened() {
-1339    // TODO: Used by TestRegionPlacement.java and assume monotonically increasing value
-1340    return 0;
-1341  }
-1342
-1343  public long submitServerCrash(ServerName serverName, boolean shouldSplitWal) {
-1344    boolean carryingMeta;
-1345    long pid;
-1346    ServerStateNode serverNode = regionStates.getServerNode(serverName);
-1347    if(serverNode == null){
-1348      LOG.info("Skip to add SCP for {} since this server should be OFFLINE already", serverName);
-1349      return -1;
-1350    }
-1351    // we hold the write lock here for fencing on reportRegionStateTransition. Once we set the
-1352    // server state to CRASHED, we will no longer accept the reportRegionStateTransition call from
-1353    // this server. This is used to simplify the implementation for TRSP and SCP, where we can make
-1354    // sure that, the region list fetched by SCP will not be changed any more.
-1355    serverNode.writeLock().lock();
-1356    try {
-1357      ProcedureExecutor<MasterProcedureEnv> procExec = this.master.getMasterProcedureExecutor();
-1358      carryingMeta = isCarryingMeta(serverName);
-1359      if (!serverNode.isInState(ServerState.ONLINE)) {
-1360        LOG.info(
-1361          "Skip to add SCP for {} with meta= {}, " +
-1362          "since there should be a SCP is processing or already done for this server node",
-1363          serverName, carryingMeta);
-1364        return -1;
-1365      } else {
-1366        serverNode.setState(ServerState.CRASHED);
-1367        pid = procExec.submitProcedure(new ServerCrashProcedure(procExec.getEnvironment(),
-1368          serverName, shouldSplitWal, carryingMeta));
-1369        LOG.info(
-1370          "Added {} to dead servers which carryingMeta={}, submitted ServerCrashProcedure pid={}",
-1371          serverName, carryingMeta, pid);
-1372      }
-1373    } finally {
-1374      serverNode.writeLock().unlock();
-1375    }
-1376    return pid;
-1377  }
-1378
-1379  public void offlineRegion(final RegionInfo regionInfo) {
-1380
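The submitServerCrash hunk above hinges on a lock-based fence: once the server node's state flips to CRASHED under the write lock, reportRegionStateTransition calls from that server are rejected, so the region list fetched by the ServerCrashProcedure can no longer change. Below is a minimal standalone sketch of that idiom, with simplified stand-in names (ServerNodeSketch, markCrashed, acceptTransition) rather than the real HBase types.

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch of the write-lock fence used by submitServerCrash; names are
// illustrative, not the real AssignmentManager/RegionStates types.
final class ServerNodeSketch {
  enum State { ONLINE, CRASHED }

  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private State state = State.ONLINE;

  /** Fence the server: only the first caller wins and may submit an SCP. */
  boolean markCrashed() {
    lock.writeLock().lock();
    try {
      if (state != State.ONLINE) {
        return false; // an SCP is already processing or done for this node
      }
      state = State.CRASHED;
      return true;
    } finally {
      lock.writeLock().unlock();
    }
  }

  /** Report path: holds the read lock and refuses crashed servers. */
  boolean acceptTransition() {
    lock.readLock().lock();
    try {
      return state == State.ONLINE;
    } finally {
      lock.readLock().unlock();
    }
  }
}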

[15/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleProcedure.html
new file mode 100644
index 000..09dd917
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/SwitchRpcThrottleProcedure.html
@@ -0,0 +1,770 @@
+SwitchRpcThrottleProcedure (Apache HBase 3.0.0-SNAPSHOT API)
+org.apache.hadoop.hbase.master.procedure
+Class SwitchRpcThrottleProcedure
+
+java.lang.Object
+  org.apache.hadoop.hbase.procedure2.Procedure<TEnvironment>
+    org.apache.hadoop.hbase.procedure2.StateMachineProcedure<MasterProcedureEnv, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SwitchRpcThrottleState>
+      org.apache.hadoop.hbase.master.procedure.SwitchRpcThrottleProcedure
+
+All Implemented Interfaces:
+  Comparable<Procedure<MasterProcedureEnv>>, ServerProcedureInterface
+
+@InterfaceAudience.Private
+public class SwitchRpcThrottleProcedure
+extends StateMachineProcedure<MasterProcedureEnv, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SwitchRpcThrottleState>
+implements ServerProcedureInterface
+
+The procedure to switch rpc throttle.
+
+Nested Class Summary
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.StateMachineProcedure:
+  StateMachineProcedure.Flow
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.Procedure:
+  Procedure.LockState
+Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface:
+  ServerProcedureInterface.ServerOperationType
+
+Field Summary
+
+Modifier and Type                        Field and Description
+(package private) int                    attempts
+private static org.slf4j.Logger          LOG
+(package private) boolean                rpcThrottleEnabled
+(package private) RpcThrottleStorage     rpcThrottleStorage
+(package private) ServerName             serverName
+(package private) ProcedurePrepareLatch  syncLatch
+
+Fields inherited from class org.apache.hadoop.hbase.procedure2.StateMachineProcedure:
+  stateCount
+Fields inherited from class org.apache.hadoop.hbase.procedure2.Procedure:
+  NO_PROC_ID, NO_TIMEOUT
+
+Constructor Summary
+
+SwitchRpcThrottleProcedure()
+SwitchRpcThrottleProcedure(RpcThrottleStorage rpcThrottleStorage, boolean rpcThrottleEnabled,
+    ServerName serverName, ProcedurePrepareLatch syncLatch)
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+protected void
+deserializeStateData(ProcedureStateSerializerserializer)
+Called on store load to allow the user to decode the 
previously serialized
+ state.
+
+
+
+protected StateMachineProcedure.Flow
+executeFromState(MasterProcedureEnvenv,
+
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SwitchRpcThrottleStatestate)
+called to perform a single step of the specified 'state' of 
the procedure
+
+
+
+protected 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SwitchRpcThrottleState
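SwitchRpcThrottleProcedure is a StateMachineProcedure, so executeFromState is invoked once per state and returns a Flow value telling the executor whether more states remain. The sketch below is illustrative only: the enum constants and step bodies are invented stand-ins for the real MasterProcedureProtos.SwitchRpcThrottleState values, and only the execute-one-step-and-report-Flow shape mirrors the actual class.

// Illustrative-only sketch of the state-machine pattern this class follows.
final class ThrottleSwitchSketch {
  enum State { UPDATE_STORAGE, SWITCH_ON_EACH_RS, RELEASE_LATCH }
  enum Flow { HAS_MORE_STATE, NO_MORE_STATE }

  Flow executeFromState(State state) {
    switch (state) {
      case UPDATE_STORAGE:      // persist the desired throttle flag
        return Flow.HAS_MORE_STATE;
      case SWITCH_ON_EACH_RS:   // fan the switch out to the region servers
        return Flow.HAS_MORE_STATE;
      case RELEASE_LATCH:       // wake the client waiting on the prepare latch
        return Flow.NO_MORE_STATE;
      default:
        throw new UnsupportedOperationException("unhandled state " + state);
    }
  }
}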

[15/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.html
index 2e150bc..0b315b8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.html
@@ -25,22 +25,22 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
-021import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
-022import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+020import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+021import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
+022import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
 023
-024import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-025
-026import java.util.List;
-027import java.util.concurrent.CompletableFuture;
-028import java.util.concurrent.TimeUnit;
-029
-030import org.apache.hadoop.hbase.HRegionLocation;
-031import org.apache.hadoop.hbase.ServerName;
-032import org.apache.hadoop.hbase.TableName;
-033import org.apache.yetus.audience.InterfaceAudience;
-034import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-035import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+024import java.util.List;
+025import java.util.concurrent.CompletableFuture;
+026import java.util.concurrent.TimeUnit;
+027import org.apache.hadoop.hbase.HRegionLocation;
+028import org.apache.hadoop.hbase.ServerName;
+029import org.apache.hadoop.hbase.TableName;
+030import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+031import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+032import org.apache.yetus.audience.InterfaceAudience;
+033
+034import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+035
 036import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 037import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
 038
@@ -83,432 +83,441 @@
 075
 076    private RegionLocateType locateType = RegionLocateType.CURRENT;
 077
-078    public SingleRequestCallerBuilder<T> table(TableName tableName) {
-079      this.tableName = tableName;
-080      return this;
-081    }
-082
-083    public SingleRequestCallerBuilder<T> row(byte[] row) {
-084      this.row = row;
-085      return this;
-086    }
-087
-088    public SingleRequestCallerBuilder<T> action(
-089        AsyncSingleRequestRpcRetryingCaller.Callable<T> callable) {
-090      this.callable = callable;
-091      return this;
-092    }
-093
-094    public SingleRequestCallerBuilder<T> operationTimeout(long operationTimeout, TimeUnit unit) {
-095      this.operationTimeoutNs = unit.toNanos(operationTimeout);
-096      return this;
-097    }
-098
-099    public SingleRequestCallerBuilder<T> rpcTimeout(long rpcTimeout, TimeUnit unit) {
-100      this.rpcTimeoutNs = unit.toNanos(rpcTimeout);
-101      return this;
-102    }
-103
-104    public SingleRequestCallerBuilder<T> locateType(RegionLocateType locateType) {
-105      this.locateType = locateType;
-106      return this;
-107    }
-108
-109    public SingleRequestCallerBuilder<T> pause(long pause, TimeUnit unit) {
-110      this.pauseNs = unit.toNanos(pause);
-111      return this;
-112    }
-113
-114    public SingleRequestCallerBuilder<T> maxAttempts(int maxAttempts) {
-115      this.maxAttempts = maxAttempts;
-116      return this;
-117    }
-118
-119    public SingleRequestCallerBuilder<T> startLogErrorsCnt(int startLogErrorsCnt) {
-120      this.startLogErrorsCnt = startLogErrorsCnt;
-121      return this;
-122    }
-123
-124    public AsyncSingleRequestRpcRetryingCaller<T> build() {
-125      return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn,
-126          checkNotNull(tableName, "tableName is null"), checkNotNull(row, "row is null"),
-127          checkNotNull(locateType, "locateType is null"), checkNotNull(callable, "action is null"),
-128          pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
+078    private int replicaId = RegionReplicaUtil.DEFAULT_REPLICA_ID;
+079
+080    public SingleRequestCallerBuilder<T> table(TableName tableName) {
+081      this.tableName = tableName;
+082      return this;
+083    }
+084
+085    public SingleRequestCallerBuilder<T> row(byte[]
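The hunk above shows the fluent builder for single-request retrying callers. A hedged usage sketch follows, under assumptions: the factory variable stands for the enclosing AsyncRpcRetryingCallerFactory taken from the connection, single() and the three-argument shape of the action callable are inferred from this diff rather than verified against a release, and doGet() is a placeholder for the code that issues the actual RPC.

// Hedged, fragment-only sketch of using the builder shown in the diff.
CompletableFuture<Result> future = factory.<Result> single()
    .table(TableName.valueOf("t1"))            // invented table name
    .row(Bytes.toBytes("row-0"))               // invented row key
    .locateType(RegionLocateType.CURRENT)
    .operationTimeout(30, TimeUnit.SECONDS)
    .rpcTimeout(1, TimeUnit.SECONDS)
    .pause(100, TimeUnit.MILLISECONDS)
    .maxAttempts(3)
    .startLogErrorsCnt(5)
    .action((controller, loc, stub) -> doGet(controller, loc, stub))
    .build()
    .call();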

[15/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.IOErrorWithCause.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.IOErrorWithCause.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.IOErrorWithCause.html
deleted file mode 100644
index e692633..000
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.IOErrorWithCause.html
+++ /dev/null
@@ -1,2103 +0,0 @@
-001/*
-002 * Licensed to the Apache Software Foundation (ASF) under one
-003 * or more contributor license agreements.  See the NOTICE file
-004 * distributed with this work for additional information
-005 * regarding copyright ownership.  The ASF licenses this file
-006 * to you under the Apache License, Version 2.0 (the
-007 * "License"); you may not use this file except in compliance
-008 * with the License.  You may obtain a copy of the License at
-009 *
-010 *     http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or agreed to in writing, software
-013 * distributed under the License is distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-015 * See the License for the specific language governing permissions and
-016 * limitations under the License.
-017 */
-018
-019package org.apache.hadoop.hbase.thrift;
-020
-021import static org.apache.hadoop.hbase.util.Bytes.getBytes;
-022
-023import java.io.IOException;
-024import java.net.InetAddress;
-025import java.net.InetSocketAddress;
-026import java.net.UnknownHostException;
-027import java.nio.ByteBuffer;
-028import java.security.PrivilegedAction;
-029import java.util.ArrayList;
-030import java.util.Arrays;
-031import java.util.Collections;
-032import java.util.HashMap;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.TreeMap;
-036import java.util.concurrent.BlockingQueue;
-037import java.util.concurrent.ExecutorService;
-038import java.util.concurrent.LinkedBlockingQueue;
-039import java.util.concurrent.ThreadPoolExecutor;
-040import java.util.concurrent.TimeUnit;
-041
-042import javax.security.auth.callback.Callback;
-043import javax.security.auth.callback.UnsupportedCallbackException;
-044import javax.security.sasl.AuthorizeCallback;
-045import javax.security.sasl.SaslServer;
-046
-047import org.apache.commons.lang3.ArrayUtils;
-048import org.apache.hadoop.conf.Configuration;
-049import org.apache.hadoop.hbase.Cell.Type;
-050import org.apache.hadoop.hbase.CellBuilder;
-051import org.apache.hadoop.hbase.CellBuilderFactory;
-052import org.apache.hadoop.hbase.CellBuilderType;
-053import org.apache.hadoop.hbase.CellUtil;
-054import org.apache.hadoop.hbase.HBaseConfiguration;
-055import org.apache.hadoop.hbase.HColumnDescriptor;
-056import org.apache.hadoop.hbase.HConstants;
-057import org.apache.hadoop.hbase.HRegionLocation;
-058import org.apache.hadoop.hbase.HTableDescriptor;
-059import org.apache.hadoop.hbase.KeyValue;
-060import org.apache.hadoop.hbase.MetaTableAccessor;
-061import org.apache.hadoop.hbase.ServerName;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.TableNotFoundException;
-064import org.apache.hadoop.hbase.client.Admin;
-065import org.apache.hadoop.hbase.client.Append;
-066import org.apache.hadoop.hbase.client.Delete;
-067import org.apache.hadoop.hbase.client.Durability;
-068import org.apache.hadoop.hbase.client.Get;
-069import org.apache.hadoop.hbase.client.Increment;
-070import org.apache.hadoop.hbase.client.OperationWithAttributes;
-071import org.apache.hadoop.hbase.client.Put;
-072import org.apache.hadoop.hbase.client.RegionInfo;
-073import org.apache.hadoop.hbase.client.RegionLocator;
-074import org.apache.hadoop.hbase.client.Result;
-075import org.apache.hadoop.hbase.client.ResultScanner;
-076import org.apache.hadoop.hbase.client.Scan;
-077import org.apache.hadoop.hbase.client.Table;
-078import org.apache.hadoop.hbase.filter.Filter;
-079import org.apache.hadoop.hbase.filter.ParseFilter;
-080import org.apache.hadoop.hbase.filter.PrefixFilter;
-081import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-082import org.apache.hadoop.hbase.http.HttpServerUtil;
-083import org.apache.hadoop.hbase.log.HBaseMarkers;
-084import org.apache.hadoop.hbase.security.SaslUtil;
-085import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
-086import org.apache.hadoop.hbase.security.SecurityUtil;
-087import org.apache.hadoop.hbase.security.UserProvider;
-088import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
-089import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
-090import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
[15/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.html
index bf31c99..9096f68 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.html
@@ -661,6 +661,6 @@
-Copyright © 2007-2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007-2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/encoding/CompressionState.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/CompressionState.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/CompressionState.html
index c2c576c..4617f4c 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/CompressionState.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/CompressionState.html
@@ -494,6 +494,6 @@
-Copyright © 2007-2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007-2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.CopyKeyEncodingState.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.CopyKeyEncodingState.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.CopyKeyEncodingState.html
index dbaece4..956abd1 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.CopyKeyEncodingState.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.CopyKeyEncodingState.html
@@ -287,6 +287,6 @@
-Copyright © 2007-2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007-2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.html
index 28101e0..4f21742 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/CopyKeyDataBlockEncoder.html
@@ -473,6 +473,6 @@
-Copyright © 2007-2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007-2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.EncodedSeeker.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.EncodedSeeker.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.EncodedSeeker.html
index 4bba8b0..d2e72ac 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.EncodedSeeker.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.EncodedSeeker.html
@@ -397,6 +397,6 @@
-Copyright © 2007-2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007-2019 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.html
index 729e90c..02327ba 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.html
@@ -458,6 +458,6 @@
-Copyright © 2007-2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007-2019 <a href="https://www.apache.org/">The Apache

[15/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
index 0f5a095..50bf692 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
@@ -78,8712 +78,8714 @@
 070import java.util.concurrent.locks.ReadWriteLock;
 071import java.util.concurrent.locks.ReentrantReadWriteLock;
 072import java.util.function.Function;
-073import org.apache.hadoop.conf.Configuration;
-074import org.apache.hadoop.fs.FileStatus;
-075import org.apache.hadoop.fs.FileSystem;
-076import org.apache.hadoop.fs.LocatedFileStatus;
-077import org.apache.hadoop.fs.Path;
-078import org.apache.hadoop.hbase.Cell;
-079import org.apache.hadoop.hbase.CellBuilderType;
-080import org.apache.hadoop.hbase.CellComparator;
-081import org.apache.hadoop.hbase.CellComparatorImpl;
-082import org.apache.hadoop.hbase.CellScanner;
-083import org.apache.hadoop.hbase.CellUtil;
-084import org.apache.hadoop.hbase.CompareOperator;
-085import org.apache.hadoop.hbase.CompoundConfiguration;
-086import org.apache.hadoop.hbase.DoNotRetryIOException;
-087import org.apache.hadoop.hbase.DroppedSnapshotException;
-088import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-089import org.apache.hadoop.hbase.HConstants;
-090import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-091import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-092import org.apache.hadoop.hbase.KeyValue;
-093import org.apache.hadoop.hbase.KeyValueUtil;
-094import org.apache.hadoop.hbase.NamespaceDescriptor;
-095import org.apache.hadoop.hbase.NotServingRegionException;
-096import org.apache.hadoop.hbase.PrivateCellUtil;
-097import org.apache.hadoop.hbase.RegionTooBusyException;
-098import org.apache.hadoop.hbase.Tag;
-099import org.apache.hadoop.hbase.TagUtil;
-100import org.apache.hadoop.hbase.UnknownScannerException;
-101import org.apache.hadoop.hbase.client.Append;
-102import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-103import org.apache.hadoop.hbase.client.CompactionState;
-104import org.apache.hadoop.hbase.client.Delete;
-105import org.apache.hadoop.hbase.client.Durability;
-106import org.apache.hadoop.hbase.client.Get;
-107import org.apache.hadoop.hbase.client.Increment;
-108import org.apache.hadoop.hbase.client.IsolationLevel;
-109import org.apache.hadoop.hbase.client.Mutation;
-110import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-111import org.apache.hadoop.hbase.client.Put;
-112import org.apache.hadoop.hbase.client.RegionInfo;
-113import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-114import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-115import org.apache.hadoop.hbase.client.Result;
-116import org.apache.hadoop.hbase.client.RowMutations;
-117import org.apache.hadoop.hbase.client.Scan;
-118import org.apache.hadoop.hbase.client.TableDescriptor;
-119import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-120import org.apache.hadoop.hbase.conf.ConfigurationManager;
-121import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-122import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-123import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-124import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-125import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-126import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-127import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-128import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-129import org.apache.hadoop.hbase.filter.FilterWrapper;
-130import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-131import org.apache.hadoop.hbase.io.HFileLink;
-132import org.apache.hadoop.hbase.io.HeapSize;
-133import org.apache.hadoop.hbase.io.TimeRange;
-134import org.apache.hadoop.hbase.io.hfile.BlockCache;
-135import org.apache.hadoop.hbase.io.hfile.HFile;
-136import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import org.apache.hadoop.hbase.ipc.RpcCall;
-139import org.apache.hadoop.hbase.ipc.RpcServer;
-140import org.apache.hadoop.hbase.mob.MobFileCache;
-141import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-142import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-143import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-144import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-145import

[15/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.CombinedCacheStats.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.CombinedCacheStats.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.CombinedCacheStats.html
index 1e81ef0..ac83508 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.CombinedCacheStats.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.CombinedCacheStats.html
@@ -160,236 +160,248 @@
 152      this.bucketCacheStats = fcStats;
 153    }
 154
-155    @Override
-156    public long getDataMissCount() {
-157      return lruCacheStats.getDataMissCount() + bucketCacheStats.getDataMissCount();
-158    }
-159
-160    @Override
-161    public long getLeafIndexMissCount() {
-162      return lruCacheStats.getLeafIndexMissCount() + bucketCacheStats.getLeafIndexMissCount();
-163    }
-164
-165    @Override
-166    public long getBloomChunkMissCount() {
-167      return lruCacheStats.getBloomChunkMissCount() + bucketCacheStats.getBloomChunkMissCount();
-168    }
-169
-170    @Override
-171    public long getMetaMissCount() {
-172      return lruCacheStats.getMetaMissCount() + bucketCacheStats.getMetaMissCount();
-173    }
-174
-175    @Override
-176    public long getRootIndexMissCount() {
-177      return lruCacheStats.getRootIndexMissCount() + bucketCacheStats.getRootIndexMissCount();
-178    }
-179
-180    @Override
-181    public long getIntermediateIndexMissCount() {
-182      return lruCacheStats.getIntermediateIndexMissCount() +
-183          bucketCacheStats.getIntermediateIndexMissCount();
-184    }
-185
-186    @Override
-187    public long getFileInfoMissCount() {
-188      return lruCacheStats.getFileInfoMissCount() + bucketCacheStats.getFileInfoMissCount();
-189    }
-190
-191    @Override
-192    public long getGeneralBloomMetaMissCount() {
-193      return lruCacheStats.getGeneralBloomMetaMissCount() +
-194          bucketCacheStats.getGeneralBloomMetaMissCount();
-195    }
-196
-197    @Override
-198    public long getDeleteFamilyBloomMissCount() {
-199      return lruCacheStats.getDeleteFamilyBloomMissCount() +
-200          bucketCacheStats.getDeleteFamilyBloomMissCount();
-201    }
-202
-203    @Override
-204    public long getTrailerMissCount() {
-205      return lruCacheStats.getTrailerMissCount() + bucketCacheStats.getTrailerMissCount();
-206    }
-207
-208    @Override
-209    public long getDataHitCount() {
-210      return lruCacheStats.getDataHitCount() + bucketCacheStats.getDataHitCount();
-211    }
-212
-213    @Override
-214    public long getLeafIndexHitCount() {
-215      return lruCacheStats.getLeafIndexHitCount() + bucketCacheStats.getLeafIndexHitCount();
-216    }
-217
-218    @Override
-219    public long getBloomChunkHitCount() {
-220      return lruCacheStats.getBloomChunkHitCount() + bucketCacheStats.getBloomChunkHitCount();
-221    }
-222
-223    @Override
-224    public long getMetaHitCount() {
-225      return lruCacheStats.getMetaHitCount() + bucketCacheStats.getMetaHitCount();
-226    }
-227
-228    @Override
-229    public long getRootIndexHitCount() {
-230      return lruCacheStats.getRootIndexHitCount() + bucketCacheStats.getRootIndexHitCount();
-231    }
-232
-233    @Override
-234    public long getIntermediateIndexHitCount() {
-235      return lruCacheStats.getIntermediateIndexHitCount() +
-236          bucketCacheStats.getIntermediateIndexHitCount();
-237    }
-238
-239    @Override
-240    public long getFileInfoHitCount() {
-241      return lruCacheStats.getFileInfoHitCount() + bucketCacheStats.getFileInfoHitCount();
-242    }
-243
-244    @Override
-245    public long getGeneralBloomMetaHitCount() {
-246      return lruCacheStats.getGeneralBloomMetaHitCount() +
-247          bucketCacheStats.getGeneralBloomMetaHitCount();
-248    }
-249
-250    @Override
-251    public long getDeleteFamilyBloomHitCount() {
-252      return lruCacheStats.getDeleteFamilyBloomHitCount() +
-253          bucketCacheStats.getDeleteFamilyBloomHitCount();
-254    }
-255
-256    @Override
-257    public long getTrailerHitCount() {
-258      return lruCacheStats.getTrailerHitCount() + bucketCacheStats.getTrailerHitCount();
-259    }
-260
-261    @Override
-262    public long getRequestCount() {
-263      return lruCacheStats.getRequestCount()
-264          + bucketCacheStats.getRequestCount();
-265    }
-266
-267    @Override
-268    public long getRequestCachingCount() {
-269      return lruCacheStats.getRequestCachingCount()
-270          + bucketCacheStats.getRequestCachingCount();
-271    }
-272
-273    @Override
-274    public long getMissCount() {
-275      return lruCacheStats.getMissCount() + bucketCacheStats.getMissCount();
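Every CombinedCacheStats method above follows one delegation rule: each counter is the sum of the on-heap LRU cache's counter and the off-heap bucket cache's counter. A minimal self-contained sketch of that pattern (the interface and class names here are invented for illustration, not the real HBase types):

// Sum-of-two-delegates pattern behind CombinedCacheStats.
interface CacheStatsView {
  long getMissCount();
  long getHitCount();
}

final class CombinedCacheStatsSketch implements CacheStatsView {
  private final CacheStatsView lru;    // on-heap cache stats
  private final CacheStatsView bucket; // off-heap cache stats

  CombinedCacheStatsSketch(CacheStatsView lru, CacheStatsView bucket) {
    this.lru = lru;
    this.bucket = bucket;
  }

  @Override public long getMissCount() { return lru.getMissCount() + bucket.getMissCount(); }
  @Override public long getHitCount()  { return lru.getHitCount() + bucket.getHitCount(); }
}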

[15/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import org.apache.hadoop.hbase.wal.WALFactory;
 136import org.apache.hadoop.hbase.wal.WALSplitter;
-137import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import org.apache.hadoop.ipc.RemoteException;
-143import org.apache.hadoop.security.UserGroupInformation;
-144import org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import org.apache.hadoop.util.ToolRunner;
-147import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import org.apache.yetus.audience.InterfaceAudience;
-150import org.apache.yetus.audience.InterfaceStability;
-151import org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
-168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
-171 *
-172 * <p>
-173 * Region consistency checks verify that hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS (.regioninfo files) all are in
-175 * accordance.
-176 * <p>
-177 * Table integrity checks verify that all possible row keys resolve to exactly
-178 * one region of a table.  This means there are no individual degenerate
-179 * or backwards regions; no holes between regions; and that there are no
-180 * overlapping regions.
-181 * <p>
-182 * The general repair strategy works in two phases:
-183 * <ol>
-184 * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
-185 * <li> Repair Region Consistency with hbase:meta and assignments
-186 * </ol>
-187 * <p>
-188 * For table integrity repairs, the tables' region directories are scanned
-189 * for .regioninfo files.  Each table's integrity is then verified.  If there
-190 * are any orphan regions (regions with no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
-193 * a new region is created and all data is merged into the new region.
-194 * <p>
-195 * Table integrity repairs deal solely with HDFS and could potentially be done
-196 * offline -- the hbase region servers or master do not need to be running.
-197 * This phase can eventually be used to completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * <p>
-200 * Region consistency requires three conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at the regionserver that was assigned to
-203 * with proper state in the master.
-204 * <p>
-205 * Region consistency repairs require hbase to be online so that hbck can
-206 * contact the HBase master and region servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of the region consistency information
-208 * is transient and less risky to repair.
-209 * <p>
-210 * If hbck is run from the command line, there are a handful of arguments that
-211 * can be used
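The javadoc above defines table integrity as: no degenerate (endkey == startkey) regions, no backwards (endkey < startkey) regions, no holes between regions, and no overlaps. A hedged sketch of those checks over (startKey, endKey) pairs sorted by start key; real hbck derives the spans from .regioninfo files and compares byte[] keys, so the String keys and the empty-endKey-means-end-of-table convention below are simplifications.

import java.util.ArrayList;
import java.util.List;

// Sketch of the four table-integrity rules, under the assumptions above.
final class TableIntegritySketch {
  static List<String> check(List<String[]> sortedSpans) {
    List<String> problems = new ArrayList<>();
    String expectedStart = ""; // the first region must start at the empty key
    for (String[] span : sortedSpans) {
      String start = span[0], end = span[1];
      boolean lastRegion = end.isEmpty(); // empty end key closes the table
      if (!lastRegion && end.compareTo(start) < 0) {
        problems.add("backwards region " + start + ".." + end);
      } else if (!lastRegion && end.equals(start)) {
        problems.add("degenerate region at " + start);
      }
      if (start.compareTo(expectedStart) > 0) {
        problems.add("hole in key space before " + start);   // gap
      } else if (start.compareTo(expectedStart) < 0) {
        problems.add("overlapping region at " + start);      // double claim
      }
      expectedStart = end;
    }
    return problems;
  }
}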

[15/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index d992e5c..6e007de 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -114,7 +114,7 @@
 @InterfaceAudience.Private
-public class HRegion
+public class HRegion
 extends java.lang.Object
 implements HeapSize, PropagatingConfigurationObserver, Region
 Regions store data for a certain region of a table.  It stores all columns
@@ -2386,7 +2386,7 @@
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
@@ -2395,7 +2395,7 @@
 LOAD_CFS_ON_DEMAND_CONFIG_KEY
-public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY
+public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY
 See Also: Constant Field Values
@@ -2408,7 +2408,7 @@
 HBASE_MAX_CELL_SIZE_KEY
-public static final String HBASE_MAX_CELL_SIZE_KEY
+public static final String HBASE_MAX_CELL_SIZE_KEY
 See Also: Constant Field Values
@@ -2421,7 +2421,7 @@
 DEFAULT_MAX_CELL_SIZE
-public static final int DEFAULT_MAX_CELL_SIZE
+public static final int DEFAULT_MAX_CELL_SIZE
 See Also: Constant Field Values
@@ -2434,7 +2434,7 @@
 DEFAULT_DURABILITY
-private static final Durability DEFAULT_DURABILITY
+private static final Durability DEFAULT_DURABILITY
 This is the global default value for durability. All tables/mutations not
 defining a durability or using USE_DEFAULT will default to this value.
@@ -2445,7 +2445,7 @@
 HBASE_REGIONSERVER_MINIBATCH_SIZE
-public static final String HBASE_REGIONSERVER_MINIBATCH_SIZE
+public static final String HBASE_REGIONSERVER_MINIBATCH_SIZE
 See Also: Constant Field Values
@@ -2458,7 +2458,7 @@
 DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE
-public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE
+public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE
 See Also: Constant Field Values
@@ -2471,7 +2471,7 @@
 WAL_HSYNC_CONF_KEY
-public static final String WAL_HSYNC_CONF_KEY
+public static final String WAL_HSYNC_CONF_KEY
 See Also: Constant Field Values
@@ -2484,7 +2484,7 @@
 DEFAULT_WAL_HSYNC
-public static final boolean DEFAULT_WAL_HSYNC
+public static final boolean DEFAULT_WAL_HSYNC
 See Also: Constant Field Values
@@ -2497,7 +2497,7 @@
 closed
-final java.util.concurrent.atomic.AtomicBoolean closed
+final java.util.concurrent.atomic.AtomicBoolean closed
@@ -2506,7 +2506,7 @@
 closing
-final java.util.concurrent.atomic.AtomicBoolean closing
+final java.util.concurrent.atomic.AtomicBoolean closing
@@ -2515,7 +2515,7 @@
 maxFlushedSeqId
-private volatile long maxFlushedSeqId
+private volatile long maxFlushedSeqId
 The max sequence id of flushed data on this region. There is no edit in memory that is
 less that this sequence id.
@@ -2526,7 +2526,7 @@
 lastFlushOpSeqId
-private volatile long

[15/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TablePermission.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TablePermission.html b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TablePermission.html
index d00864b..0eb1c04 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TablePermission.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TablePermission.html
@@ -39,7 +39,7 @@
 031/**
 032 * Represents an authorization for access for the given actions, optionally
 033 * restricted to the given column family or column qualifier, over the
-034 * given table.  If the family property is <code>null</code>, it implies
+034 * given table. If the family property is <code>null</code>, it implies
 035 * full table access.
 036 */
 037@InterfaceAudience.Private
@@ -49,393 +49,278 @@
 041  private byte[] family;
 042  private byte[] qualifier;
 043
-044  //TODO refactor this class
-045  //we need to refacting this into three classes (Global, Table, Namespace)
-046  private String namespace;
-047
-048  /** Nullary constructor for Writable, do not use */
-049  public TablePermission() {
-050    super();
-051  }
-052
-053  /**
-054   * Create a new permission for the given table and (optionally) column family,
-055   * allowing the given actions.
-056   * @param table the table
-057   * @param family the family, can be null if a global permission on the table
-058   * @param assigned the list of allowed actions
-059   */
-060  public TablePermission(TableName table, byte[] family, Action... assigned) {
-061    this(table, family, null, assigned);
-062  }
-063
-064  /**
-065   * Creates a new permission for the given table, restricted to the given
-066   * column family and qualifier, allowing the assigned actions to be performed.
-067   * @param table the table
-068   * @param family the family, can be null if a global permission on the table
-069   * @param assigned the list of allowed actions
-070   */
-071  public TablePermission(TableName table, byte[] family, byte[] qualifier,
-072      Action... assigned) {
-073    super(assigned);
-074    this.table = table;
-075    this.family = family;
-076    this.qualifier = qualifier;
-077  }
-078
-079  /**
-080   * Creates a new permission for the given table, family and column qualifier,
-081   * allowing the actions matching the provided byte codes to be performed.
-082   * @param table the table
-083   * @param family the family, can be null if a global permission on the table
-084   * @param actionCodes the list of allowed action codes
-085   */
-086  public TablePermission(TableName table, byte[] family, byte[] qualifier,
-087      byte[] actionCodes) {
-088    super(actionCodes);
-089    this.table = table;
-090    this.family = family;
-091    this.qualifier = qualifier;
-092  }
-093
-094  /**
-095   * Creates a new permission for the given namespace or table, restricted to the given
-096   * column family and qualifier, allowing the assigned actions to be performed.
-097   * @param namespace
-098   * @param table the table
-099   * @param family the family, can be null if a global permission on the table
-100   * @param assigned the list of allowed actions
-101   */
-102  public TablePermission(String namespace, TableName table, byte[] family, byte[] qualifier,
-103      Action... assigned) {
-104    super(assigned);
-105    this.namespace = namespace;
-106    this.table = table;
-107    this.family = family;
-108    this.qualifier = qualifier;
-109  }
-110
-111  /**
-112   * Creates a new permission for the given namespace or table, family and column qualifier,
-113   * allowing the actions matching the provided byte codes to be performed.
-114   * @param namespace
-115   * @param table the table
-116   * @param family the family, can be null if a global permission on the table
-117   * @param actionCodes the list of allowed action codes
-118   */
-119  public TablePermission(String namespace, TableName table, byte[] family, byte[] qualifier,
-120      byte[] actionCodes) {
-121    super(actionCodes);
-122    this.namespace = namespace;
+044  /** Nullary constructor for Writable, do not use */
+045  public TablePermission() {
+046    super();
+047    this.scope = Scope.EMPTY;
+048  }
+049
+050  /**
+051   * Construct a table permission.
+052   * @param table table name
+053   * @param assigned assigned actions
+054   */
+055  public TablePermission(TableName table, Action... assigned) {
+056    this(table, null, null, assigned);
+057  }
+058
+059  /**
+060   * Construct a table:family permission.
+061   * @param table table name
+062   * @param family family name
+063   * @param assigned assigned actions
+064   */
+065  public TablePermission(TableName table, byte[] family, Action... assigned) {
+066
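The refactor above replaces the namespace-aware constructors with narrower table and table:family forms. A hedged usage sketch follows; the table and family values are invented, and Action is assumed to be the Permission.Action enum inherited from the superclass.

// Fragment-only sketch of the refactored constructors.
TablePermission tableWide = new TablePermission(
    TableName.valueOf("ns", "t1"), Permission.Action.READ);
TablePermission familyScoped = new TablePermission(
    TableName.valueOf("ns", "t1"), Bytes.toBytes("cf"),
    Permission.Action.READ, Permission.Action.WRITE);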

[15/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html b/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
index cb9a240a..947abfd 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
@@ -197,7 +197,7 @@ extends org.apache.hadoop.hbase.regionserver.HRegionServer
 Fields inherited from class org.apache.hadoop.hbase.regionserver.HRegionServer
-cacheConfig, cacheFlusher, clusterConnection, clusterId, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, leases, lock, MASTER_HOSTNAME_KEY, metaTableLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper
+ABORT_TIMEOUT, ABORT_TIMEOUT_TASK, cacheConfig, cacheFlusher, clusterConnection, clusterId, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, leases, lock, MASTER_HOSTNAME_KEY, metaTableLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/TestClientOperationTimeout.DelayedRegionServer.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/TestClientOperationTimeout.DelayedRegionServer.html b/testdevapidocs/org/apache/hadoop/hbase/TestClientOperationTimeout.DelayedRegionServer.html
index 8c65ae8..01404c1 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/TestClientOperationTimeout.DelayedRegionServer.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/TestClientOperationTimeout.DelayedRegionServer.html
@@ -180,7 +180,7 @@ extends

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html b/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
index 9409d05..4ef4f56 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/TestLocalHBaseCluster.MyHMaster.html
@@ -189,7 +189,7 @@ extends org.apache.hadoop.hbase.master.HMaster
 Fields inherited from class org.apache.hadoop.hbase.regionserver.HRegionServer
-cacheConfig, cacheFlusher, clusterConnection, clusterId, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, leases, lock, MASTER_HOSTNAME_KEY, metaTableLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper
+ABORT_TIMEOUT, ABORT_TIMEOUT_TASK, cacheConfig, cacheFlusher, clusterConnection, clusterId, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, leases, lock, MASTER_HOSTNAME_KEY, metaTableLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper
[15/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
index c7d99b2..9d1542c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.TableSkewCostFunction.html
@@ -382,1357 +382,1365 @@
 374    for (int i = 0; i < this.curFunctionCosts.length; i++) {
 375      curFunctionCosts[i] = tempFunctionCosts[i];
 376    }
-377    LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-378        + functionCost());
+377    double initCost = currentCost;
+378    double newCost = currentCost;
 379
-380    double initCost = currentCost;
-381    double newCost = currentCost;
-382
-383    long computedMaxSteps;
-384    if (runMaxSteps) {
-385      computedMaxSteps = Math.max(this.maxSteps,
-386          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-387    } else {
-388      computedMaxSteps = Math.min(this.maxSteps,
-389          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-390    }
-391    // Perform a stochastic walk to see if we can get a good fit.
-392    long step;
-393
-394    for (step = 0; step < computedMaxSteps; step++) {
-395      Cluster.Action action = nextAction(cluster);
-396
-397      if (action.type == Type.NULL) {
-398        continue;
-399      }
-400
-401      cluster.doAction(action);
-402      updateCostsWithAction(cluster, action);
-403
-404      newCost = computeCost(cluster, currentCost);
-405
-406      // Should this be kept?
-407      if (newCost < currentCost) {
-408        currentCost = newCost;
-409
-410        // save for JMX
-411        curOverallCost = currentCost;
-412        for (int i = 0; i < this.curFunctionCosts.length; i++) {
-413          curFunctionCosts[i] = tempFunctionCosts[i];
-414        }
-415      } else {
-416        // Put things back the way they were before.
-417        // TODO: undo by remembering old values
-418        Action undoAction = action.undoAction();
-419        cluster.doAction(undoAction);
-420        updateCostsWithAction(cluster, undoAction);
-421      }
-422
-423      if (EnvironmentEdgeManager.currentTime() - startTime > maxRunningTime) {
-424        break;
-425      }
-426    }
-427    long endTime = EnvironmentEdgeManager.currentTime();
-428
-429
-430    metricsBalancer.balanceCluster(endTime - startTime);
-431
-432    // update costs metrics
-433    updateStochasticCosts(tableName, curOverallCost, curFunctionCosts);
-434    if (initCost > currentCost) {
-435      plans = createRegionPlans(cluster);
-436      LOG.info("Finished computing new load balance plan. Computation took {}" +
-437        " to try {} different iterations.  Found a solution that moves " +
-438        "{} regions; Going from a computed cost of {}" +
-439        " to a new cost of {}", java.time.Duration.ofMillis(endTime - startTime),
-440        step, plans.size(), initCost, currentCost);
-441      return plans;
-442    }
-443    LOG.info("Could not find a better load balance plan.  Tried {} different configurations in " +
-444      "{}, and did not find anything with a computed cost less than {}", step,
-445      java.time.Duration.ofMillis(endTime - startTime), initCost);
-446    return null;
-447  }
-448
-449  /**
-450   * update costs to JMX
-451   */
-452  private void updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) {
-453    if (tableName == null) return;
-454
-455    // check if the metricsBalancer is MetricsStochasticBalancer before casting
-456    if (metricsBalancer instanceof MetricsStochasticBalancer) {
-457      MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer;
-458      // overall cost
-459      balancer.updateStochasticCost(tableName.getNameAsString(),
-460        "Overall", "Overall cost", overall);
-461
-462      // each cost function
-463      for (int i = 0; i < costFunctions.length; i++) {
-464        CostFunction costFunction = costFunctions[i];
-465        String costFunctionName = costFunction.getClass().getSimpleName();
-466        Double costPercent = (overall == 0) ? 0 : (subCosts[i] / overall);
-467        // TODO: cost function may need a specific description
-468        balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-469          "The percent of " + costFunctionName, costPercent);
-470      }
-471    }
-472  }
-473
-474

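The hunk above is the heart of the balancer: a bounded stochastic walk that applies a candidate action, keeps it when the computed cost drops, and undoes it otherwise. A minimal, self-contained sketch of that accept-or-undo loop, using illustrative stand-in names (Move, propose, cost) rather than HBase API:

import java.util.function.DoubleSupplier;
import java.util.function.Supplier;

// Minimal sketch of the accept-or-undo stochastic walk shown in the hunk above.
final class StochasticWalk {
  interface Move {
    void apply();
    void undo();
  }

  static double walk(DoubleSupplier cost, Supplier<Move> propose, long maxSteps,
      long maxRunningTimeMs) {
    long start = System.currentTimeMillis();
    double current = cost.getAsDouble();
    for (long step = 0; step < maxSteps; step++) {
      Move move = propose.get();
      move.apply();                         // tentatively mutate the state
      double candidate = cost.getAsDouble();
      if (candidate < current) {
        current = candidate;                // keep the improvement
      } else {
        move.undo();                        // put things back the way they were
      }
      if (System.currentTimeMillis() - start > maxRunningTimeMs) {
        break;                              // wall-clock bound, like maxRunningTime above
      }
    }
    return current;
  }
}

As in the diff, the walk is bounded twice: by a step budget derived from cluster size (computedMaxSteps) and by elapsed time, so a poor proposal generator cannot stall the caller indefinitely.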
[15/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
index 0c894de..8729895 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
@@ -179,4145 +179,4146 @@
 171 * avoiding port contention if another local HBase instance is already running).
 172 * <p>To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
 173 * setting it to true.
-174 */
-175@InterfaceAudience.Public
-176@SuppressWarnings("deprecation")
-177public class HBaseTestingUtility extends HBaseZKTestingUtility {
-178
-179  /**
-180   * System property key to get test directory value. Name is as it is because mini dfs has
-181   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property
-182   * used in mini dfs.
-183   * @deprecated can be used only with mini dfs
-184   */
-185  @Deprecated
-186  private static final String TEST_DIRECTORY_KEY = "test.build.data";
-187
-188  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
-189  /**
-190   * The default number of regions per regionserver when creating a pre-split
-191   * table.
-192   */
-193  public static final int DEFAULT_REGIONS_PER_SERVER = 3;
-194
+174 * Trigger pre commit.
+175 */
+176@InterfaceAudience.Public
+177@SuppressWarnings("deprecation")
+178public class HBaseTestingUtility extends HBaseZKTestingUtility {
+179
+180  /**
+181   * System property key to get test directory value. Name is as it is because mini dfs has
+182   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property
+183   * used in mini dfs.
+184   * @deprecated can be used only with mini dfs
+185   */
+186  @Deprecated
+187  private static final String TEST_DIRECTORY_KEY = "test.build.data";
+188
+189  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
+190  /**
+191   * The default number of regions per regionserver when creating a pre-split
+192   * table.
+193   */
+194  public static final int DEFAULT_REGIONS_PER_SERVER = 3;
 195
-196  public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
-197  public static final boolean PRESPLIT_TEST_TABLE = true;
-198
-199  private MiniDFSCluster dfsCluster = null;
-200
-201  private volatile HBaseCluster hbaseCluster = null;
-202  private MiniMRCluster mrCluster = null;
-203
-204  /** If there is a mini cluster running for this testing utility instance. */
-205  private volatile boolean miniClusterRunning;
-206
-207  private String hadoopLogDir;
-208
-209  /** Directory on test filesystem where we put the data for this instance of
-210* HBaseTestingUtility*/
-211  private Path dataTestDirOnTestFS = null;
-212
-213  /**
-214   * Shared cluster connection.
-215   */
-216  private volatile Connection connection;
-217
-218  /** Filesystem URI used for map-reduce mini-cluster setup */
-219  private static String FS_URI;
-220
-221  /** This is for unit tests parameterized with a single boolean. */
-222  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
-223
-224  /**
-225   * Checks to see if a specific port is available.
-226   *
-227   * @param port the port number to check for availability
-228   * @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
-229   */
-230  public static boolean available(int port) {
-231ServerSocket ss = null;
-232DatagramSocket ds = null;
-233try {
-234  ss = new ServerSocket(port);
-235  ss.setReuseAddress(true);
-236  ds = new DatagramSocket(port);
-237  ds.setReuseAddress(true);
-238  return true;
-239} catch (IOException e) {
-240  // Do nothing
-241} finally {
-242  if (ds != null) {
-243ds.close();
-244  }
-245
-246  if (ss != null) {
-247try {
-248  ss.close();
-249} catch (IOException e) {
-250  /* should not be thrown */
-251}
-252  }
-253}
-254
-255return false;
-256  }
-257
-258  /**
-259   * Create all combinations of Bloom filters and compression algorithms for
-260   * testing.
-261   */
-262  private static List<Object[]> bloomAndCompressionCombinations() {
-263List<Object[]> configurations = new ArrayList<>();
-264for (Compression.Algorithm comprAlgo :
-265 HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
-266  for (BloomType bloomType : BloomType.values()) {
-267configurations.add(new 

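The available(int) method above deliberately binds both a TCP and a UDP socket, since either protocol alone can hold the port. A standalone variant of the same probe using try-with-resources (assuming the caller is happy for the sockets to be closed immediately after the probe):

import java.io.IOException;
import java.net.DatagramSocket;
import java.net.ServerSocket;

// Same probe as HBaseTestingUtility.available(int): the port counts as free
// only if both a TCP ServerSocket and a UDP DatagramSocket can bind it.
public final class PortProbe {
  public static boolean available(int port) {
    try (ServerSocket ss = new ServerSocket(port);
        DatagramSocket ds = new DatagramSocket(port)) {
      ss.setReuseAddress(true);
      ds.setReuseAddress(true);
      return true;
    } catch (IOException e) {
      return false;  // something is already bound to the port
    }
  }

  public static void main(String[] args) {
    System.out.println("port 21010 free? " + available(21010));
  }
}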
[15/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/procedure2/Procedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/Procedure.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/Procedure.html
index 8f64d70..3444b27 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/Procedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/Procedure.html
@@ -230,8 +230,7 @@ implements Comparable
 
 private boolean
 bypass
-Used for force complete of the procedure without
- actually doing any logic in the procedure.
+Used for override complete of the procedure without actually doing any logic in the procedure.
 
 
 
@@ -377,12 +376,9 @@ implements Comparable
 
 
 
-(package private) void
-bypass()
-set the bypass to true
- Only called in ProcedureExecutor.bypassProcedure(long, long, boolean) for now,
- DO NOT use this method alone, since we can't just bypass
- one single procedure.
+protected void
+bypass(TEnvironment env)
+Set the bypass to true.
 
 
 
@@ -535,7 +531,7 @@ implements Comparable
 hasException()
 
 
-protected boolean
+boolean
 hasLock()
 This is used in conjunction with holdLock(Object).
 
@@ -1016,13 +1012,17 @@ implements Comparable
 
 
 bypass
-private volatile boolean bypass
-Used for force complete of the procedure without
- actually doing any logic in the procedure.
+private volatile boolean bypass
+Used for override complete of the procedure without actually doing any logic in the procedure.
  If bypass is set to true, when executing it will return null when
- doExecute(Object) to finish the procedure and releasing any locks
- it may currently hold.
- Bypassing a procedure is not like aborting. Aborting a procedure will trigger
+ doExecute(Object) is called to finish the procedure and release any locks
+ it may currently hold. The bypass does cleanup around the Procedure as far as the
+ Procedure framework is concerned. It does not clean any internal state that the
+ Procedures themselves may have set. That is for the Procedures to do themselves
+ when bypass is called. They should override bypass and do their cleanup in the
+ overridden bypass method (be sure to call the parent bypass to ensure proper
+ processing).
+ Bypassing a procedure is not like aborting. Aborting a procedure will trigger
  a rollback. And since the abort(Object) method is overridable,
  some procedures may have chosen to ignore the aborting.
 
@@ -1033,7 +1033,7 @@ implements Comparable
 
 
 persist
-private boolean persist
+private boolean persist
 Indicate whether we need to persist the procedure to ProcedureStore after execution. Default to
  true, and the implementation can call skipPersistence() to let the framework skip the
  persistence of the procedure.
@@ -1077,20 +1077,28 @@ implements Comparable
 
 
 isBypass
-public boolean isBypass()
+public boolean isBypass()
 
 
-
+
+
+
 
 
 
 
 bypass
-void bypass()
-set the bypass to true
- Only called in ProcedureExecutor.bypassProcedure(long, long, boolean) for now,
- DO NOT use this method alone, since we can't just bypass
- one single procedure. We need to bypass its ancestor too. So making it package private
+protected void bypass(TEnvironment env)
+Set the bypass to true.
+ Only called in ProcedureExecutor.bypassProcedure(long, long, boolean, boolean) for now.
+ DO NOT use this method alone, since we can't just bypass one single procedure. We need to
+ bypass its ancestor too. If your Procedure has set state, it needs to undo it in here.
+
+Parameters:
+env - Current environment. May be null because of context; e.g. pretty-printing
+procedure WALs where there is no 'environment' (and where Procedures that require
+an 'environment' won't be run).
+
 
 
 
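The contract spelled out above implies a subclass pattern: undo your own state, then delegate to the parent so the framework bookkeeping still runs. A hypothetical sketch (cleanupMyOwnState() is an illustrative stand-in, not HBase API; the remaining abstract-method stubs are minimal):

import java.io.IOException;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;

public class BypassAwareProcedure extends Procedure<MasterProcedureEnv> {

  private void cleanupMyOwnState() {
    // undo whatever internal state execute() recorded outside the framework
  }

  @Override
  protected void bypass(MasterProcedureEnv env) {
    cleanupMyOwnState();
    super.bypass(env);  // be sure to call the parent bypass, as documented above
  }

  @Override
  protected Procedure<MasterProcedureEnv>[] execute(MasterProcedureEnv env)
      throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
    return null;  // done; no sub-procedures
  }

  @Override
  protected void rollback(MasterProcedureEnv env) throws IOException, InterruptedException {
  }

  @Override
  protected boolean abort(MasterProcedureEnv env) {
    return false;  // this sketch ignores abort requests
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
  }
}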
@@ -1099,7 +1107,7 @@ implements Comparable
 
 
 needPersistence
-boolean needPersistence()
+boolean needPersistence()
 
 
 
@@ -1108,7 +1116,7 @@ implements Comparable
 
 
 resetPersistence
-void resetPersistence()
+void resetPersistence()
 
 
 
@@ -1117,7 +1125,7 @@ implements Comparable
 
 
 skipPersistence
-protected final void skipPersistence()
+protected final void skipPersistence()
 
 
 
@@ -1128,7 +1136,7 @@ implements Comparable
 
 
 execute
-protected abstract Procedure<TEnvironment>[] execute(TEnvironment env)
+protected abstract Procedure<TEnvironment>[] execute(TEnvironment env)
   throws 

[15/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
index ddbe86e..9365340 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.LoadCounter.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class ProcedureTestingUtility.LoadCounter
+public static class ProcedureTestingUtility.LoadCounter
 extends Object
 implements org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader
 
@@ -277,7 +277,7 @@ implements org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader
 
 
 corrupted
-private final ArrayList<org.apache.hadoop.hbase.procedure2.Procedure> corrupted
+private final ArrayList<org.apache.hadoop.hbase.procedure2.Procedure> corrupted
 
 
 
@@ -286,7 +286,7 @@ implements org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader
 
 
 completed
-private final ArrayList<org.apache.hadoop.hbase.procedure2.Procedure> completed
+private final ArrayList<org.apache.hadoop.hbase.procedure2.Procedure> completed
 
 
 
@@ -295,7 +295,7 @@ implements org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader
 
 
 runnable
-private final ArrayList<org.apache.hadoop.hbase.procedure2.Procedure> runnable
+private final ArrayList<org.apache.hadoop.hbase.procedure2.Procedure> runnable
 
 
 
@@ -304,7 +304,7 @@ implements org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader
 
 
 procIds
-private Set<Long> procIds
+private Set<Long> procIds
 
 
 
@@ -313,7 +313,7 @@ implements org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader
 
 
 maxProcId
-private long maxProcId
+private long maxProcId
 
 
 
@@ -330,7 +330,7 @@ implements org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader
 
 
 LoadCounter
-public LoadCounter()
+public LoadCounter()
 
 
 
@@ -339,7 +339,7 @@ implements org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader
 
 
 LoadCounter
-public LoadCounter(Set<Long> procIds)
+public LoadCounter(Set<Long> procIds)
 
 
 
@@ -356,7 +356,7 @@ implements org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader
 
 
 reset
-public void reset()
+public void reset()
 
 
 
@@ -365,7 +365,7 @@ implements org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureLoader
 
 
 reset
-public void reset(Set<Long> procIds)
+public void reset(Set<Long> procIds)

[15/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
index e1b183b..b456cd2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.SyncMetrics.html
@@ -53,1338 +53,1354 @@
 045import org.apache.hadoop.hbase.log.HBaseMarkers;
 046import org.apache.hadoop.hbase.procedure2.Procedure;
 047import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-048import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase;
-049import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
-050import org.apache.hadoop.hbase.procedure2.util.ByteSlot;
-051import org.apache.hadoop.hbase.procedure2.util.StringUtils;
-052import org.apache.hadoop.hbase.util.CommonFSUtils;
-053import org.apache.hadoop.hbase.util.Threads;
-054import org.apache.hadoop.ipc.RemoteException;
-055import org.apache.yetus.audience.InterfaceAudience;
-056import org.slf4j.Logger;
-057import org.slf4j.LoggerFactory;
-058
-059import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-060import org.apache.hbase.thirdparty.org.apache.commons.collections4.queue.CircularFifoQueue;
-061
-062import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureWALHeader;
-063
-064/**
-065 * WAL implementation of the ProcedureStore.
-066 * <p/>
-067 * When starting, the upper layer will first call {@link #start(int)}, then {@link #recoverLease()},
-068 * then {@link #load(ProcedureLoader)}.
-069 * <p/>
-070 * In {@link #recoverLease()}, we will get the lease by closing all the existing wal files (by
-071 * calling recoverFileLease), and creating a new wal writer. And we will also get the list of all
-072 * the old wal files.
-073 * <p/>
-074 * FIXME: notice that the current recover lease implementation is problematic, it can not deal with
-075 * the races if there are two masters that both want to acquire the lease...
-076 * <p/>
-077 * In the {@link #load(ProcedureLoader)} method, we will load all the active procedures. See the
-078 * comments of this method for more details.
-079 * <p/>
-080 * The actual logging is a bit like our FileSystem based WAL implementation at the RS side. There is
-081 * a {@link #slots}, which is more like a ring buffer, and in the insert, update and delete
-082 * methods we will put things into the {@link #slots} and wait. And there is a background sync
-083 * thread (see the {@link #syncLoop()} method) which gets data from the {@link #slots}, writes it
-084 * to the FileSystem, and notifies the caller that we have finished.
-085 * <p/>
-086 * TODO: try using disruptor to increase performance and simplify the logic?
-087 * <p/>
-088 * The {@link #storeTracker} keeps track of the modified procedures in the newest wal file, which is
-089 * also the one being written currently. And the deleted bits in it are for all the procedures, not
-090 * only the ones in the newest wal file. And when rolling a log, we will first store it in the
-091 * trailer of the current wal file, and then reset its modified bits, so that it can start to track
-092 * the modified procedures for the new wal file.
-093 * <p/>
-094 * The {@link #holdingCleanupTracker} is used to test whether we are safe to delete the oldest wal
-095 * file. When there is log rolling and there is more than 1 wal file, we will make use of it. It
-096 * will first be initialized to the oldest file's tracker (which is stored in the trailer), using the
-097 * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge it
-098 * with the tracker of every newer wal file, using
-099 * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we find out
-100 * that all the modified procedures for the oldest wal file are modified or deleted in newer wal
-101 * files, then we can delete it.
-102 * @see ProcedureWALPrettyPrinter for printing content of a single WAL.
-103 * @see #main(String[]) to parse a directory of MasterWALProcs.
-104 */
-105@InterfaceAudience.Private
-106public class WALProcedureStore extends ProcedureStoreBase {
-107  private static final Logger LOG = LoggerFactory.getLogger(WALProcedureStore.class);
-108  public static final String LOG_PREFIX = "pv2-";
-109  /** Used to construct the name of the log directory for master procedures */
-110  public static final String MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
-111
-112
-113  public interface LeaseRecovery {
-114void 

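The class comment above describes a classic group-commit shape: writers park a payload in the slots, a single sync thread drains whatever has accumulated, performs one durable write, and then wakes every waiter. A generic sketch of that shape (all names illustrative; this is not the WALProcedureStore implementation):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;

final class SyncLoopSketch {
  static final class Slot {
    final byte[] payload;
    final CompletableFuture<Void> done = new CompletableFuture<>();
    Slot(byte[] payload) { this.payload = payload; }
  }

  private final BlockingQueue<Slot> slots = new ArrayBlockingQueue<>(1024);

  // Writer side: insert/update/delete would all funnel through something like this.
  CompletableFuture<Void> append(byte[] payload) throws InterruptedException {
    Slot slot = new Slot(payload);
    slots.put(slot);
    return slot.done;  // caller blocks on this future until the batch is synced
  }

  // Sync side: batch whatever is queued, write once, then notify all waiters.
  void syncLoop() throws InterruptedException {
    List<Slot> batch = new ArrayList<>();
    while (!Thread.currentThread().isInterrupted()) {
      batch.add(slots.take());  // wait for at least one entry
      slots.drainTo(batch);     // grab anything else already queued
      syncToFileSystem(batch);  // one write + sync for the whole batch
      for (Slot s : batch) {
        s.done.complete(null);
      }
      batch.clear();
    }
  }

  private void syncToFileSystem(List<Slot> batch) {
    // stand-in for the actual append + sync against the FileSystem
  }
}

Batching amortizes the expensive sync across concurrent writers; that hand-off, not the write itself, is also why the TODO above floats a disruptor-based queue.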
[15/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.html
index 0d97a1c..297bc43 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.html
@@ -29,331 +29,335 @@
 021import java.io.InputStream;
 022import java.lang.reflect.Constructor;
 023import java.lang.reflect.Modifier;
-024import org.apache.hadoop.hbase.HConstants;
-025import org.apache.yetus.audience.InterfaceAudience;
-026import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-027import org.apache.hbase.thirdparty.com.google.protobuf.Any;
-028import org.apache.hbase.thirdparty.com.google.protobuf.Internal;
-029import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-030import org.apache.hbase.thirdparty.com.google.protobuf.Message;
-031import org.apache.hbase.thirdparty.com.google.protobuf.Parser;
-032import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-033import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
-034import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-035import org.apache.hadoop.hbase.util.NonceKey;
-036
-037/**
-038 * Helper to convert to/from ProcedureProtos
-039 */
-040@InterfaceAudience.Private
-041public final class ProcedureUtil {
-042  private ProcedureUtil() { }
-043
-044  // ==
-045  //  Reflection helpers to create/validate a Procedure object
-046  // ==
-047  private static Procedure<?> newProcedure(String className) throws BadProcedureException {
-048try {
-049  Class<?> clazz = Class.forName(className);
-050  if (!Modifier.isPublic(clazz.getModifiers())) {
-051throw new Exception("the " + clazz + " class is not public");
-052  }
-053
-054  @SuppressWarnings("rawtypes")
-055  Constructor<? extends Procedure> ctor = clazz.asSubclass(Procedure.class).getConstructor();
-056  assert ctor != null : "no constructor found";
-057  if (!Modifier.isPublic(ctor.getModifiers())) {
-058throw new Exception("the " + clazz + " constructor is not public");
-059  }
-060  return ctor.newInstance();
-061} catch (Exception e) {
-062  throw new BadProcedureException(
-063"The procedure class " + className + " must be accessible and have an empty constructor",
-064e);
-065}
-066  }
-067
-068  static void validateClass(Procedure<?> proc) throws BadProcedureException {
-069try {
-070  Class<?> clazz = proc.getClass();
-071  if (!Modifier.isPublic(clazz.getModifiers())) {
-072throw new Exception("the " + clazz + " class is not public");
-073  }
-074
-075  Constructor<?> ctor = clazz.getConstructor();
-076  assert ctor != null;
-077  if (!Modifier.isPublic(ctor.getModifiers())) {
-078throw new Exception("the " + clazz + " constructor is not public");
-079  }
-080} catch (Exception e) {
-081  throw new BadProcedureException("The procedure class " + proc.getClass().getName() +
-082" must be accessible and have an empty constructor", e);
-083}
-084  }
-085
-086  // ==
-087  //  convert to and from Procedure object
-088  // ==
-089
-090  /**
-091   * A serializer for our Procedures. Instead of the previous serializer, it
-092   * uses the stateMessage list to store the internal state of the Procedures.
-093   */
-094  private static class StateSerializer implements ProcedureStateSerializer {
-095private final ProcedureProtos.Procedure.Builder builder;
-096private int deserializeIndex;
-097
-098public StateSerializer(ProcedureProtos.Procedure.Builder builder) {
-099  this.builder = builder;
-100}
-101
-102@Override
-103public void serialize(Message message) throws IOException {
-104  Any packedMessage = Any.pack(message);
-105  builder.addStateMessage(packedMessage);
-106}
-107
-108@Override
-109public <M extends Message> M deserialize(Class<M> clazz)
-110throws IOException {
-111  if (deserializeIndex >= builder.getStateMessageCount()) {
-112throw new IOException("Invalid state message index: " + deserializeIndex);
-113  }
-114
-115  try {
-116Any packedMessage = builder.getStateMessage(deserializeIndex++);
-117return packedMessage.unpack(clazz);
-118  } catch 

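The StateSerializer above leans on protobuf's Any to store arbitrary state messages and recover them positionally. A minimal round-trip with the stock protobuf-java API (HBase uses its shaded copy; StringValue here is just a convenient well-known type for the demo):

import com.google.protobuf.Any;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.StringValue;

public final class AnyRoundTrip {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    StringValue state = StringValue.of("resume-at=step-3");
    Any packed = Any.pack(state);              // records a type URL + serialized bytes
    StringValue restored = packed.unpack(StringValue.class);
    System.out.println(restored.getValue());   // resume-at=step-3
  }
}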
[15/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
index 9925f48..adf4e7a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
@@ -765,45 +765,47 @@
 757  }
 758
 759  private void writeMaxSequenceIdFile(MasterProcedureEnv env) throws IOException {
-760FileSystem fs = env.getMasterServices().getMasterFileSystem().getFileSystem();
+760FileSystem walFS = env.getMasterServices().getMasterWalManager().getFileSystem();
 761long maxSequenceId = -1L;
 762for (RegionInfo region : regionsToMerge) {
 763  maxSequenceId =
-764Math.max(maxSequenceId, WALSplitter.getMaxRegionSequenceId(fs, getRegionDir(env, region)));
-765}
-766if (maxSequenceId > 0) {
-767  WALSplitter.writeRegionSequenceIdFile(fs, getRegionDir(env, mergedRegion), maxSequenceId);
-768}
-769  }
-770
-771  /**
-772   * The procedure could be restarted from a different machine. If the variable is null, we need to
-773   * retrieve it.
-774   * @return traceEnabled
-775   */
-776  private Boolean isTraceEnabled() {
-777if (traceEnabled == null) {
-778  traceEnabled = LOG.isTraceEnabled();
-779}
-780return traceEnabled;
-781  }
-782
-783  /**
-784   * @return The merged region. May be null if called too early or we failed.
-785   */
-786  @VisibleForTesting
-787  public RegionInfo getMergedRegion() {
-788return this.mergedRegion;
-789  }
-790
-791  @Override
-792  protected boolean abort(MasterProcedureEnv env) {
-793// Abort means rollback. We can't rollback all steps. HBASE-18018 added abort to all
-794// Procedures. Here is a Procedure that has a PONR and cannot be aborted once it enters this
-795// range of steps; what do we do for these should an operator want to cancel them? HBASE-20022.
-796return isRollbackSupported(getCurrentState())? super.abort(env): false;
-797  }
-798}
+764Math.max(maxSequenceId, WALSplitter.getMaxRegionSequenceId(
+765walFS, getWALRegionDir(env, region)));
+766}
+767if (maxSequenceId > 0) {
+768  WALSplitter.writeRegionSequenceIdFile(walFS, getWALRegionDir(env, mergedRegion),
+769  maxSequenceId);
+770}
+771  }
+772
+773  /**
+774   * The procedure could be restarted from a different machine. If the variable is null, we need to
+775   * retrieve it.
+776   * @return traceEnabled
+777   */
+778  private Boolean isTraceEnabled() {
+779if (traceEnabled == null) {
+780  traceEnabled = LOG.isTraceEnabled();
+781}
+782return traceEnabled;
+783  }
+784
+785  /**
+786   * @return The merged region. May be null if called too early or we failed.
+787   */
+788  @VisibleForTesting
+789  public RegionInfo getMergedRegion() {
+790return this.mergedRegion;
+791  }
+792
+793  @Override
+794  protected boolean abort(MasterProcedureEnv env) {
+795// Abort means rollback. We can't rollback all steps. HBASE-18018 added abort to all
+796// Procedures. Here is a Procedure that has a PONR and cannot be aborted once it enters this
+797// range of steps; what do we do for these should an operator want to cancel them? HBASE-20022.
+798return isRollbackSupported(getCurrentState())? super.abort(env): false;
+799  }
+800}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
index 207ebcc..c3ef37e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStateStore.RegionStateVisitor.html
@@ -28,330 +28,332 @@
 020import java.io.IOException;
 021import java.util.Collections;
 022import java.util.List;
-023import org.apache.hadoop.hbase.Cell;
-024import org.apache.hadoop.hbase.CellBuilderFactory;
-025import org.apache.hadoop.hbase.CellBuilderType;
-026import org.apache.hadoop.hbase.HConstants;
-027import org.apache.hadoop.hbase.HRegionLocation;
-028import org.apache.hadoop.hbase.MetaTableAccessor;
-029import org.apache.hadoop.hbase.RegionLocations;
-030import org.apache.hadoop.hbase.ServerName;

[15/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 8cc5add..34858d6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -2188,1428 +2188,1428 @@
 2180  }
 2181
 2182  @Override
-2183  public void postOpenDeployTasks(final PostOpenDeployContext context)
-2184  throws KeeperException, IOException {
-2185HRegion r = context.getRegion();
-2186long masterSystemTime = context.getMasterSystemTime();
-2187rpcServices.checkOpen();
-2188LOG.info("Post open deploy tasks for " + r.getRegionInfo().getRegionNameAsString());
-2189// Do checks to see if we need to compact (references or too many files)
-2190for (HStore s : r.stores.values()) {
-2191  if (s.hasReferences() || s.needsCompaction()) {
-2192this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
-2193  }
-2194}
-2195long openSeqNum = r.getOpenSeqNum();
-2196if (openSeqNum == HConstants.NO_SEQNUM) {
-2197  // If we opened a region, we should have read some sequence number from it.
-2198  LOG.error("No sequence number found when opening " +
-2199r.getRegionInfo().getRegionNameAsString());
-2200  openSeqNum = 0;
-2201}
-2202
-2203// Notify master
-2204if (!reportRegionStateTransition(new RegionStateTransitionContext(
-2205TransitionCode.OPENED, openSeqNum, masterSystemTime, r.getRegionInfo()))) {
-2206  throw new IOException("Failed to report opened region to master: "
-2207+ r.getRegionInfo().getRegionNameAsString());
-2208}
-2209
-2210triggerFlushInPrimaryRegion(r);
-2211
-2212LOG.debug("Finished post open deploy task for " + r.getRegionInfo().getRegionNameAsString());
-2213  }
-2214
-2215  @Override
-2216  public boolean reportRegionStateTransition(final RegionStateTransitionContext context) {
-2217TransitionCode code = context.getCode();
-2218long openSeqNum = context.getOpenSeqNum();
-2219long masterSystemTime = context.getMasterSystemTime();
-2220RegionInfo[] hris = context.getHris();
-2221
-2222if (TEST_SKIP_REPORTING_TRANSITION) {
-2223  // This is for testing only in case there is no master
-2224  // to handle the region transition report at all.
-2225  if (code == TransitionCode.OPENED) {
-2226Preconditions.checkArgument(hris != null && hris.length == 1);
-2227if (hris[0].isMetaRegion()) {
-2228  try {
-2229MetaTableLocator.setMetaLocation(getZooKeeper(), serverName,
-2230hris[0].getReplicaId(), State.OPEN);
-2231  } catch (KeeperException e) {
-2232LOG.info("Failed to update meta location", e);
-2233return false;
-2234  }
-2235} else {
-2236  try {
-2237MetaTableAccessor.updateRegionLocation(clusterConnection,
-2238  hris[0], serverName, openSeqNum, masterSystemTime);
-2239  } catch (IOException e) {
-2240LOG.info("Failed to update meta", e);
-2241return false;
-2242  }
-2243}
-2244  }
-2245  return true;
-2246}
-2247
-2248ReportRegionStateTransitionRequest.Builder builder =
-2249  ReportRegionStateTransitionRequest.newBuilder();
-2250builder.setServer(ProtobufUtil.toServerName(serverName));
-2251RegionStateTransition.Builder transition = builder.addTransitionBuilder();
-2252transition.setTransitionCode(code);
-2253if (code == TransitionCode.OPENED && openSeqNum >= 0) {
-2254  transition.setOpenSeqNum(openSeqNum);
-2255}
-2256for (RegionInfo hri: hris) {
-2257  transition.addRegionInfo(ProtobufUtil.toRegionInfo(hri));
-2258}
-2259ReportRegionStateTransitionRequest request = builder.build();
-2260int tries = 0;
-2261long pauseTime = INIT_PAUSE_TIME_MS;
-2262// Keep looping till we get an error. We want to send reports even though server is going down.
-2263// Only go down if clusterConnection is null. It is set to null almost as last thing as the
-2264// HRegionServer goes down.
-2265while (this.clusterConnection != null && !this.clusterConnection.isClosed()) {
-2266  RegionServerStatusService.BlockingInterface rss = rssStub;
-2267  try {
-2268if (rss == null) {
-2269  createRegionServerStatusStub();
-2270  continue;
-2271}
-2272ReportRegionStateTransitionResponse response =
-2273  rss.reportRegionStateTransition(null, request);
-2274if (response.hasErrorMessage()) {

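reportRegionStateTransition above is a retry loop keyed on connection liveness: when the stub is missing it is rebuilt, and a failure simply drops the stub for the next iteration. A generic sketch of that discipline (names are illustrative stand-ins, not HBase API):

import java.util.function.BooleanSupplier;

final class RetryingReporter<S, R> {
  interface StubFactory<S> {
    S create() throws Exception;
  }

  interface Call<S, R> {
    R invoke(S stub) throws Exception;
  }

  R report(BooleanSupplier connectionLive, StubFactory<S> factory, Call<S, R> call)
      throws Exception {
    S stub = null;
    while (connectionLive.getAsBoolean()) {
      if (stub == null) {
        stub = factory.create();   // like createRegionServerStatusStub()
        continue;
      }
      try {
        return call.invoke(stub);  // like rss.reportRegionStateTransition(null, request)
      } catch (Exception e) {
        stub = null;               // drop the stub; retry with a fresh one
      }
    }
    return null;                   // connection closed: the server is done reporting
  }
}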
[15/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
index df4d2d2..20442d4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html
@@ -552,1331 +552,1334 @@
 544}
 545  }
 546
-547  public void assign(RegionInfo regionInfo, ServerName sn) throws IOException {
-548// TODO: should we use getRegionStateNode?
-549RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionInfo);
-550TransitRegionStateProcedure proc;
-551regionNode.lock();
-552try {
-553  preTransitCheck(regionNode, STATES_EXPECTED_ON_ASSIGN);
-554  proc = TransitRegionStateProcedure.assign(getProcedureEnvironment(), regionInfo, sn);
-555  regionNode.setProcedure(proc);
-556} finally {
-557  regionNode.unlock();
-558}
-559ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), proc);
-560  }
-561
-562  public void assign(RegionInfo regionInfo) throws IOException {
-563assign(regionInfo, null);
-564  }
-565
-566  public void unassign(RegionInfo regionInfo) throws IOException {
-567RegionStateNode regionNode = regionStates.getRegionStateNode(regionInfo);
-568if (regionNode == null) {
-569  throw new UnknownRegionException("No RegionState found for " + regionInfo.getEncodedName());
-570}
-571TransitRegionStateProcedure proc;
-572regionNode.lock();
-573try {
-574  preTransitCheck(regionNode, STATES_EXPECTED_ON_UNASSIGN_OR_MOVE);
-575  proc = TransitRegionStateProcedure.unassign(getProcedureEnvironment(), regionInfo);
-576  regionNode.setProcedure(proc);
-577} finally {
-578  regionNode.unlock();
-579}
-580ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), proc);
-581  }
-582
-583  private TransitRegionStateProcedure createMoveRegionProcedure(RegionInfo regionInfo,
-584  ServerName targetServer) throws HBaseIOException {
-585RegionStateNode regionNode = this.regionStates.getRegionStateNode(regionInfo);
-586if (regionNode == null) {
-587  throw new UnknownRegionException("No RegionState found for " + regionInfo.getEncodedName());
-588}
-589TransitRegionStateProcedure proc;
-590regionNode.lock();
-591try {
-592  preTransitCheck(regionNode, STATES_EXPECTED_ON_UNASSIGN_OR_MOVE);
-593  regionNode.checkOnline();
-594  proc = TransitRegionStateProcedure.move(getProcedureEnvironment(), regionInfo, targetServer);
-595  regionNode.setProcedure(proc);
-596} finally {
-597  regionNode.unlock();
-598}
-599return proc;
-600  }
-601
-602  public void move(RegionInfo regionInfo) throws IOException {
-603TransitRegionStateProcedure proc = createMoveRegionProcedure(regionInfo, null);
-604ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), proc);
-605  }
-606
-607  public Future<byte[]> moveAsync(RegionPlan regionPlan) throws HBaseIOException {
-608TransitRegionStateProcedure proc =
-609  createMoveRegionProcedure(regionPlan.getRegionInfo(), regionPlan.getDestination());
-610return ProcedureSyncWait.submitProcedure(master.getMasterProcedureExecutor(), proc);
-611  }
-612
-613  // ============================================================
-614  //  RegionTransition procedures helpers
-615  // ============================================================
-616
-617  /**
-618   * Create round-robin assigns. Use on table creation to distribute out regions across cluster.
-619   * @return AssignProcedures made out of the passed in <code>hris</code> and a call to the balancer
-620   * to populate the assigns with targets chosen using round-robin (default balancer
-621   * scheme). If at assign-time, the target chosen is no longer up, that's fine, the
-622   * AssignProcedure will ask the balancer for a new target, and so on.
-623   */
-624  public TransitRegionStateProcedure[] createRoundRobinAssignProcedures(List<RegionInfo> hris,
-625  List<ServerName> serversToExclude) {
-626if (hris.isEmpty()) {
-627  return new TransitRegionStateProcedure[0];
-628}
-629
-630if (serversToExclude != null
-631 && this.master.getServerManager().getOnlineServersList().size() == 1) {
-632  LOG.debug("Only one region server found and hence going ahead with the 

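Each of assign/unassign/move above follows the same discipline: take the per-region lock only long enough to validate state and attach the procedure, then submit and wait outside the lock. A sketch of that pattern (RegionNode and the runnables are illustrative stand-ins, not HBase API):

import java.util.concurrent.locks.ReentrantLock;

final class TransitionSketch {
  static final class RegionNode {
    final ReentrantLock lock = new ReentrantLock();
    Object procedure;
  }

  static void transit(RegionNode node, Object proc, Runnable preTransitCheck,
      Runnable submitAndWait) {
    node.lock.lock();
    try {
      preTransitCheck.run();  // may throw if the region is in the wrong state
      node.procedure = proc;  // publish the owning procedure while locked
    } finally {
      node.lock.unlock();     // never hold the region lock across the long wait
    }
    submitAndWait.run();      // like ProcedureSyncWait.submitAndWaitProcedure(...)
  }
}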
[15/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
index 2f5e86e..475f5b2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.StoreFileSplitter.html
@@ -30,837 +30,841 @@
 022import java.util.ArrayList;
 023import java.util.Arrays;
 024import java.util.Collection;
-025import java.util.HashMap;
-026import java.util.List;
-027import java.util.Map;
-028import java.util.concurrent.Callable;
-029import 
java.util.concurrent.ExecutionException;
-030import 
java.util.concurrent.ExecutorService;
-031import java.util.concurrent.Executors;
-032import java.util.concurrent.Future;
-033import java.util.concurrent.TimeUnit;
-034import java.util.stream.Stream;
-035import 
org.apache.hadoop.conf.Configuration;
-036import org.apache.hadoop.fs.FileSystem;
-037import org.apache.hadoop.fs.Path;
-038import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-039import 
org.apache.hadoop.hbase.HConstants;
-040import 
org.apache.hadoop.hbase.ServerName;
-041import 
org.apache.hadoop.hbase.TableName;
-042import 
org.apache.hadoop.hbase.UnknownRegionException;
-043import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-044import 
org.apache.hadoop.hbase.client.MasterSwitchType;
-045import 
org.apache.hadoop.hbase.client.Mutation;
-046import 
org.apache.hadoop.hbase.client.RegionInfo;
-047import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-048import 
org.apache.hadoop.hbase.client.TableDescriptor;
-049import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-050import 
org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-051import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-052import 
org.apache.hadoop.hbase.master.RegionState.State;
-053import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-054import 
org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure;
-055import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-056import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-057import 
org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-058import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-059import 
org.apache.hadoop.hbase.quotas.QuotaExceededException;
-060import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-061import 
org.apache.hadoop.hbase.regionserver.HStore;
-062import 
org.apache.hadoop.hbase.regionserver.HStoreFile;
-063import 
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
-064import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-065import 
org.apache.hadoop.hbase.util.Bytes;
-066import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-067import 
org.apache.hadoop.hbase.util.FSUtils;
-068import 
org.apache.hadoop.hbase.util.Pair;
-069import 
org.apache.hadoop.hbase.util.Threads;
-070import 
org.apache.hadoop.hbase.wal.WALSplitter;
-071import 
org.apache.hadoop.util.ReflectionUtils;
-072import 
org.apache.yetus.audience.InterfaceAudience;
-073import org.slf4j.Logger;
-074import org.slf4j.LoggerFactory;
-075
-076import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-077
-078import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-079import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-080import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-081import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState;
-082
-083/**
-084 * The procedure to split a region in a 
table.
-085 * Takes lock on the parent region.
-086 * It holds the lock for the life of the 
procedure.
-087 * pThrows exception on 
construction if determines context hostile to spllt (cluster going
-088 * down or master is shutting down or 
table is disabled)./p
-089 */
-090@InterfaceAudience.Private
-091public class SplitTableRegionProcedure
-092extends 
AbstractStateMachineRegionProcedureSplitTableRegionState {
-093  private static final Logger LOG = 
LoggerFactory.getLogger(SplitTableRegionProcedure.class);
-094  private Boolean traceEnabled = null;
-095  private RegionInfo daughter_1_RI;
-096  private RegionInfo daughter_2_RI;
-097  private byte[] bestSplitRow;
-098  private RegionSplitPolicy 
splitPolicy;
-099
-100  public SplitTableRegionProcedure() {
-101// Required by the Procedure 
framework to create the procedure on replay
-102  }
-103
-104  public 

[15/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
index 3cfacfc..c081310 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.LockState.html
@@ -153,858 +153,901 @@
 145  private boolean lockedWhenLoading = false;
 146
 147  /**
-148   * The main code of the procedure. It must be idempotent since execute()
-149   * may be called multiple times in case of machine failure in the middle
-150   * of the execution.
-151   * @param env the environment passed to the ProcedureExecutor
-152   * @return a set of sub-procedures to run or ourselves if there is more work to do or null if the
-153   * procedure is done.
-154   * @throws ProcedureYieldException the procedure will be added back to the queue and retried later.
-155   * @throws InterruptedException the procedure will be added back to the queue and retried later.
-156   * @throws ProcedureSuspendedException Signal to the executor that Procedure has suspended itself and
-157   * has set itself up waiting for an external event to wake it back up again.
-158   */
-159  protected abstract Procedure<TEnvironment>[] execute(TEnvironment env)
-160    throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException;
-161
-162  /**
-163   * The code to undo what was done by the execute() code.
-164   * It is called when the procedure or one of the sub-procedures failed or an
-165   * abort was requested. It should cleanup all the resources created by
-166   * the execute() call. The implementation must be idempotent since rollback()
-167   * may be called multiple times in case of machine failure in the middle
-168   * of the execution.
-169   * @param env the environment passed to the ProcedureExecutor
-170   * @throws IOException temporary failure, the rollback will retry later
-171   * @throws InterruptedException the procedure will be added back to the queue and retried later
-172   */
-173  protected abstract void rollback(TEnvironment env)
-174    throws IOException, InterruptedException;
-175
-176  /**
-177   * The abort() call is asynchronous and each procedure must decide how to deal
-178   * with it, if they want to be abortable. The simplest implementation
-179   * is to have an AtomicBoolean set in the abort() method and then the execute()
-180   * will check if the abort flag is set or not.
-181   * abort() may be called multiple times from the client, so the implementation
-182   * must be idempotent.
-183   *
-184   * <p>NOTE: abort() is not like Thread.interrupt(). It is just a notification
-185   * that allows the procedure implementor to abort.
-186   */
-187  protected abstract boolean abort(TEnvironment env);
-188
-189  /**
-190   * The user-level code of the procedure may have some state to
-191   * persist (e.g. input arguments or current position in the processing state) to
-192   * be able to resume on failure.
-193   * @param serializer stores the serializable state
-194   */
-195  protected abstract void serializeStateData(ProcedureStateSerializer serializer)
-196    throws IOException;
-197
-198  /**
-199   * Called on store load to allow the user to decode the previously serialized
-200   * state.
-201   * @param serializer contains the serialized state
-202   */
-203  protected abstract void deserializeStateData(ProcedureStateSerializer serializer)
-204    throws IOException;
-205
-206  /**
-207   * The {@link #doAcquireLock(Object, ProcedureStore)} will be split into two steps, first, it will
-208   * call us to determine whether we need to wait for initialization, second, it will call
-209   * {@link #acquireLock(Object)} to actually handle the lock for this procedure.
-210   * <p/>
-211   * This is because, when the master restarts, we need to restore the lock state for all the
-212   * procedures to not break the semantic if {@link #holdLock(Object)} is true. But the
-213   * {@link ProcedureExecutor} will be started before the master finishes initialization (as it is part
-214   * of the initialization!), so we need to split the code into two steps, and when restoring, we just
-215   * restore the lock part and ignore the waitInitialized part. Otherwise there will be a deadlock.
-216   * @return true means we need to wait until the environment has been initialized, otherwise false.
-217   */
-218  protected boolean waitInitialized(TEnvironment env) {
-219return false;
-220  }
-221
-222  /**
-223   * The user should override this method if they need a lock on an Entity. A lock can be anything,
-224   * and it is up to the implementor. 

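The javadoc above makes idempotency the central contract: execute() and rollback() can both rerun after a crash, so each must check what has already happened. A hypothetical minimal subclass honoring that contract (the 'copied' flag and comments are illustrative; the persistence plumbing is elided):

import java.io.IOException;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;

public class CopyFileProcedure extends Procedure<Void> {
  private boolean copied;  // would be persisted via serializeStateData below

  @Override
  protected Procedure<Void>[] execute(Void env)
      throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
    if (!copied) {  // safe to re-enter after a crash: skip work already done
      // ... perform the copy exactly once ...
      copied = true;
    }
    return null;    // null tells the executor this procedure is done
  }

  @Override
  protected void rollback(Void env) throws IOException, InterruptedException {
    if (copied) {   // rollback may also run more than once
      // ... remove the copy ...
      copied = false;
    }
  }

  @Override
  protected boolean abort(Void env) {
    return false;   // abort is only a notification; this sketch ignores it
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // persist 'copied' so a restarted executor resumes at the right point
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // restore 'copied'
  }
}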
[15/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index eea5b6d..64a510f 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -881,6 +881,8 @@
 
 admin - Variable in class org.apache.hadoop.hbase.client.TestCloneSnapshotFromClient
 
+admin - Variable in class org.apache.hadoop.hbase.client.TestHbck
+
 admin - Variable in class org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClient
 
 admin - Variable in class org.apache.hadoop.hbase.client.TestServerLoadDurability
@@ -2827,6 +2829,8 @@
 
 build() - Method in class org.apache.hadoop.hbase.chaos.factories.UnbalanceMonkeyFactory
 
+build() - Method in class org.apache.hadoop.hbase.StartMiniClusterOption.Builder
+
 BUILD_WEBAPPS_DIR - Static variable in class org.apache.hadoop.hbase.http.HttpServerFunctionalTest
 
 expected location of the test.build.webapps dir: "src/main/resources/hbase-webapps"
@@ -2851,6 +2855,10 @@
 
 builder - Variable in class org.apache.hadoop.hbase.snapshot.TestSnapshotManifest
 
+builder() - Static method in class org.apache.hadoop.hbase.StartMiniClusterOption
+
+Builder() - Constructor for class org.apache.hadoop.hbase.StartMiniClusterOption.Builder
+
 BuilderStyleTest - Class in org.apache.hadoop.hbase.util
 
 Utility class to check whether a given class conforms to builder-style:
@@ -4327,6 +4335,8 @@
 
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestHBaseAdminNoCluster
 
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestHbck
+
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestHTableMultiplexer
 
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.client.TestHTableMultiplexerFlushCache
@@ -10017,6 +10027,14 @@
 
 Same as HBaseTestingUtility.createRootDir(boolean create)
  except that create flag is false.
 
+createRootDir - Variable in class org.apache.hadoop.hbase.StartMiniClusterOption.Builder
+
+createRootDir(boolean) - Method in class org.apache.hadoop.hbase.StartMiniClusterOption.Builder
+
+createRootDir - Variable in class org.apache.hadoop.hbase.StartMiniClusterOption
+
+Whether to create a new root or data directory path.
+
 createRow(char) - Method in class org.apache.hadoop.hbase.filter.TestPrefixFilter
 
 createRpcClient(Configuration) - Method in class org.apache.hadoop.hbase.ipc.AbstractTestIPC
@@ -10680,6 +10698,14 @@
 HBaseTestingUtility.createWal(Configuration, Path, RegionInfo) because that method
  doesn't play nicely with FaultyFileSystem.
 
+createWALDir - Variable in class org.apache.hadoop.hbase.StartMiniClusterOption.Builder
+
+createWALDir(boolean) - Method in class org.apache.hadoop.hbase.StartMiniClusterOption.Builder
+
+createWALDir - Variable in class org.apache.hadoop.hbase.StartMiniClusterOption
+
+Whether to create a new WAL directory.
+
 createWALEdit(byte[], byte[], EnvironmentEdge, int) - Method in class org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay
 
 createWALEntry(byte[], byte[]) - Method in class org.apache.hadoop.hbase.replication.master.TestRecoverStandbyProcedure
@@ -11094,6 +11120,14 @@
 
 dataGenerator - Variable in class org.apache.hadoop.hbase.util.MultiThreadedAction
 
+dataNodeHosts - Variable in class org.apache.hadoop.hbase.StartMiniClusterOption.Builder
+
+dataNodeHosts(String[]) - Method in class org.apache.hadoop.hbase.StartMiniClusterOption.Builder
+
+dataNodeHosts - Variable in class org.apache.hadoop.hbase.StartMiniClusterOption
+
+The hostnames of DataNodes to run on.
+
 dataTestDir - Variable in class org.apache.hadoop.hbase.HBaseCommonTestingUtility
 
 Directory where we put the data for this instance of HBaseTestingUtility
@@ -14527,8 +14561,6 @@
 
 filterStringSoTableNameSafe(String) - Static method in class org.apache.hadoop.hbase.client.TestIncrementsFromClientSide
 
-filterTableRegions(TableName, List<HRegionInfo>) - Method in class org.apache.hadoop.hbase.client.TestEnableTable
-
 FilterTestingCluster - Class in org.apache.hadoop.hbase.filter
 
 By using this class as the super class of a set of tests you will have a HBase testing
@@ -16303,6 +16335,8 @@
 
 getDataMissCount() - Method in class org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapperStub
 
+getDataNodeHosts() - Method in class org.apache.hadoop.hbase.StartMiniClusterOption
+
 getDataNodes() - Method in class org.apache.hadoop.hbase.chaos.actions.RestartRandomDataNodeAction
 
 getDataTestDir() - Method in class org.apache.hadoop.hbase.HBaseCommonTestingUtility
@@ -16656,6 +16690,10 @@
 
 getHBaseIntegrationTestingUtility() - Method in class org.apache.hadoop.hbase.chaos.actions.Action.ActionContext
 
+getHbck() - Method in class org.apache.hadoop.hbase.HBaseTestingUtility
+
+Returns an Hbck instance.
+
 getHDFSBlockDistribution() - 

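The new index entries above all belong to StartMiniClusterOption and its Builder. A hypothetical wiring of those entries (method names match the index: builder(), createRootDir(boolean), createWALDir(boolean), dataNodeHosts(String[]), build(); treat the exact semantics and defaults as assumptions):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterOptionExample {
  public static void main(String[] args) throws Exception {
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .createRootDir(true)   // create a new root or data directory path
        .createWALDir(true)    // create a new WAL directory
        .dataNodeHosts(new String[] { "host1", "host2", "host3" })
        .build();
    HBaseTestingUtility util = new HBaseTestingUtility();
    // util.startMiniCluster(option) would consume the option; omitted here.
    System.out.println(option);
  }
}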
[15/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferExtendedCell.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferExtendedCell.html
 
b/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferExtendedCell.html
index b91abdd..96c4516 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferExtendedCell.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferExtendedCell.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class KeyOnlyFilter.KeyOnlyByteBufferExtendedCell
+static class KeyOnlyFilter.KeyOnlyByteBufferExtendedCell
 extends ByteBufferExtendedCell
 
 
@@ -425,7 +425,7 @@ extends 
 
 FIXED_OVERHEAD
-public static finalint FIXED_OVERHEAD
+public static finalint FIXED_OVERHEAD
 
 
 
@@ -434,7 +434,7 @@ extends 
 
 cell
-privateByteBufferExtendedCell cell
+privateByteBufferExtendedCell cell
 
 
 
@@ -443,7 +443,7 @@ extends 
 
 lenAsVal
-privateboolean lenAsVal
+privateboolean lenAsVal
 
 
 
@@ -460,7 +460,7 @@ extends 
 
 KeyOnlyByteBufferExtendedCell
-publicKeyOnlyByteBufferExtendedCell(ByteBufferExtendedCellc,
+publicKeyOnlyByteBufferExtendedCell(ByteBufferExtendedCellc,
  booleanlenAsVal)
 
 
@@ -478,7 +478,7 @@ extends 
 
 getRowArray
-publicbyte[]getRowArray()
+publicbyte[]getRowArray()
 Description copied from 
interface:Cell
 Contiguous raw bytes that may start at any index in the 
containing array. Max length is
  Short.MAX_VALUE which is 32,767 bytes.
@@ -494,7 +494,7 @@ extends 
 
 getRowOffset
-publicintgetRowOffset()
+publicintgetRowOffset()
 
 Returns:
 Array index of first row byte
@@ -507,7 +507,7 @@ extends 
 
 getRowLength
-publicshortgetRowLength()
+publicshortgetRowLength()
 
 Returns:
 Number of row bytes. Must be  rowArray.length - offset.
@@ -520,7 +520,7 @@ extends 
 
 getFamilyArray
-publicbyte[]getFamilyArray()
+publicbyte[]getFamilyArray()
 Description copied from 
interface:Cell
 Contiguous bytes composed of legal HDFS filename characters 
which may start at any index in the
  containing array. Max length is Byte.MAX_VALUE, which is 127 bytes.
@@ -536,7 +536,7 @@ extends 
 
 getFamilyOffset
-public int getFamilyOffset()
+public int getFamilyOffset()
 
 Returns:
 Array index of first family byte
@@ -549,7 +549,7 @@ extends 
 
 getFamilyLength
-public byte getFamilyLength()
+public byte getFamilyLength()
 
 Returns:
 Number of family bytes.  Must be < familyArray.length - offset.
@@ -562,7 +562,7 @@ extends 
 
 getQualifierArray
-public byte[] getQualifierArray()
+public byte[] getQualifierArray()
 Description copied from 
interface: Cell
 Contiguous raw bytes that may start at any index in the 
containing array.
 
@@ -577,7 +577,7 @@ extends 
 
 getQualifierOffset
-public int getQualifierOffset()
+public int getQualifierOffset()
 
 Returns:
 Array index of first qualifier byte
@@ -590,7 +590,7 @@ extends 
 
 getQualifierLength
-public int getQualifierLength()
+public int getQualifierLength()
 
 Returns:
 Number of qualifier bytes.  Must be < qualifierArray.length - offset.
@@ -603,7 +603,7 @@ extends 
 
 getTimestamp
-public long getTimestamp()
+public long getTimestamp()
 
 Returns:
 Long value representing time at which this cell was "Put" into the row.  
Typically
@@ -617,7 +617,7 @@ extends 
 
 getTypeByte
-public byte getTypeByte()
+public byte getTypeByte()
 
 Returns:
 The byte representation of the KeyValue.TYPE of this cell: one of Put, 
Delete, etc
@@ -630,7 +630,7 @@ extends 
 
 setSequenceId
-public void setSequenceId(long seqId)
+public void setSequenceId(long seqId)
 throws IOException
 Description copied from 
interface: ExtendedCell
 Sets with the given seqId.
@@ -648,7 +648,7 @@ extends 
 
 setTimestamp
-public void setTimestamp(long ts)
+public void setTimestamp(long ts)
   throws IOException
 Description copied from 
interface: ExtendedCell
 Sets with the given timestamp.
@@ -666,7 +666,7 @@ extends 
 
 setTimestamp
-public void setTimestamp(byte[] ts)
+public void setTimestamp(byte[] ts)
   throws IOException
 Description copied from 
interface: ExtendedCell
 Sets with the given timestamp.
@@ -684,7 +684,7 @@ extends 
 
 getSequenceId
-public long getSequenceId()
+public long getSequenceId()
 Description copied from 
interface: ExtendedCell
 A region-specific unique monotonically increasing sequence 
ID given to each Cell. It always
  exists for cells in the 
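KeyOnlyByteBufferExtendedCell is the cell wrapper KeyOnlyFilter produces for ByteBuffer-backed cells. A short, hedged usage sketch of the public filter itself (scan setup is illustrative):

  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.filter.KeyOnlyFilter;

  Scan scan = new Scan();
  // lenAsVal = true replaces each value with a 4-byte int holding the original
  // value length; false (the default) replaces it with an empty byte array.
  scan.setFilter(new KeyOnlyFilter(true));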

[15/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index 3559952..bd7445a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -359,2396 +359,2401 @@
 351switch (inMemoryCompaction) {
 352  case NONE:
 353ms = 
ReflectionUtils.newInstance(DefaultMemStore.class,
-354new Object[]{conf, 
this.comparator});
-355break;
-356  default:
-357Class<? extends CompactingMemStore> clz = conf.getClass(MEMSTORE_CLASS_NAME,
-358CompactingMemStore.class, CompactingMemStore.class);
-359ms = 
ReflectionUtils.newInstance(clz, new Object[]{conf, this.comparator, this,
-360
this.getHRegion().getRegionServicesForStores(), inMemoryCompaction});
-361}
-362return ms;
-363  }
-364
-365  /**
-366   * Creates the cache config.
-367   * @param family The current column 
family.
-368   */
-369  protected void createCacheConf(final 
ColumnFamilyDescriptor family) {
-370this.cacheConf = new 
CacheConfig(conf, family);
-371  }
-372
-373  /**
-374   * Creates the store engine configured 
for the given Store.
-375   * @param store The store. An 
unfortunate dependency needed due to it
-376   *  being passed to 
coprocessors via the compactor.
-377   * @param conf Store configuration.
-378   * @param kvComparator KVComparator for 
storeFileManager.
-379   * @return StoreEngine to use.
-380   */
-381  protected StoreEngine<?, ?, ?, ?> createStoreEngine(HStore store, Configuration conf,
-382  CellComparator kvComparator) throws IOException {
-383return StoreEngine.create(store, 
conf, comparator);
-384  }
-385
-386  /**
-387   * @param family
-388   * @return TTL in seconds of the 
specified family
-389   */
-390  public static long 
determineTTLFromFamily(final ColumnFamilyDescriptor family) {
-391// HCD.getTimeToLive returns ttl in 
seconds.  Convert to milliseconds.
-392long ttl = family.getTimeToLive();
-393if (ttl == HConstants.FOREVER) {
-394  // Default is unlimited ttl.
-395  ttl = Long.MAX_VALUE;
-396} else if (ttl == -1) {
-397  ttl = Long.MAX_VALUE;
-398} else {
-399  // Second - ms adjust for user 
data
-400  ttl *= 1000;
-401}
-402return ttl;
-403  }
-404
-405  @Override
-406  public String getColumnFamilyName() {
-407return 
this.family.getNameAsString();
-408  }
-409
-410  @Override
-411  public TableName getTableName() {
-412return 
this.getRegionInfo().getTable();
-413  }
-414
-415  @Override
-416  public FileSystem getFileSystem() {
-417return this.fs.getFileSystem();
-418  }
-419
-420  public HRegionFileSystem 
getRegionFileSystem() {
-421return this.fs;
-422  }
-423
-424  /* Implementation of 
StoreConfigInformation */
-425  @Override
-426  public long getStoreFileTtl() {
-427// TTL only applies if there's no 
MIN_VERSIONs setting on the column.
-428return 
(this.scanInfo.getMinVersions() == 0) ? this.scanInfo.getTtl() : 
Long.MAX_VALUE;
-429  }
-430
-431  @Override
-432  public long getMemStoreFlushSize() {
-433// TODO: Why is this in here?  The 
flushsize of the region rather than the store?  St.Ack
-434return 
this.region.memstoreFlushSize;
-435  }
-436
-437  @Override
-438  public MemStoreSize getFlushableSize() 
{
-439return 
this.memstore.getFlushableSize();
-440  }
-441
-442  @Override
-443  public MemStoreSize getSnapshotSize() 
{
-444return 
this.memstore.getSnapshotSize();
-445  }
-446
-447  @Override
-448  public long 
getCompactionCheckMultiplier() {
-449return 
this.compactionCheckMultiplier;
-450  }
-451
-452  @Override
-453  public long getBlockingFileCount() {
-454return blockingFileCount;
-455  }
-456  /* End implementation of 
StoreConfigInformation */
-457
-458  /**
-459   * Returns the configured 
bytesPerChecksum value.
-460   * @param conf The configuration
-461   * @return The bytesPerChecksum that is 
set in the configuration
-462   */
-463  public static int 
getBytesPerChecksum(Configuration conf) {
-464return 
conf.getInt(HConstants.BYTES_PER_CHECKSUM,
-465   
HFile.DEFAULT_BYTES_PER_CHECKSUM);
-466  }
-467
-468  /**
-469   * Returns the configured checksum 
algorithm.
-470   * @param conf The configuration
-471   * @return The checksum algorithm that 
is set in the configuration
-472   */
-473  public static ChecksumType 
getChecksumType(Configuration conf) {
-474String checksumName = 
conf.get(HConstants.CHECKSUM_TYPE_NAME);
-475if (checksumName == null) {
-476  
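determineTTLFromFamily earlier in this hunk converts the column-family TTL from seconds to milliseconds, mapping both HConstants.FOREVER and -1 to Long.MAX_VALUE. A hedged sketch of setting that TTL from the client side (the family name is a placeholder):

  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes("cf"))
      .setTimeToLive(86400) // seconds; HStore multiplies by 1000 internally
      .build();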

[15/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
index 2e18704..51a8e3c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.html
@@ -190,10 +190,6 @@ extends private TableName
 tableName
 
-
-private Boolean
-traceEnabled
-
 
 
 
@@ -299,15 +295,15 @@ extends 
 protected boolean
-isRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DisableTableState state)
-Used by the default implementation of abort() to know if 
the current state can be aborted
- and rollback can be triggered.
+holdLock(MasterProcedureEnv env)
+Used to keep the procedure lock even when the procedure is 
yielding or suspended.
 
 
 
-private Boolean
-isTraceEnabled()
-The procedure could be restarted from a different 
machine.
+protected boolean
+isRollbackSupported(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DisableTableState state)
+Used by the default implementation of abort() to know if 
the current state can be aborted
+ and rollback can be triggered.
 
 
 
@@ -359,7 +355,7 @@ extends 
-protected static void
+private static void
setTableStateToDisabling(MasterProcedureEnv env,
TableName tableName)
 Mark table state to Disabling
@@ -385,7 +381,7 @@ extends Procedure
-addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode, getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 holdLock, 
incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner, setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout, setTimeoutFailure,
 shouldWaitClientAck,
 toString,
 toStringClass,
 toStringDetails,
 toStringSimpleSB,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
+addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode, getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner,
 setParentProcId, setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 toString,
 toStringClass,
 toStringDetails,
 toStringSimpleSB,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
 
 
 
@@ -429,21 +425,12 @@ extends 
 
 
-
+
 
 skipTableStateCheck
private boolean skipTableStateCheck
 
 
-
-
-
-
-
-traceEnabled
-private Boolean traceEnabled
-
-
 
 
 
@@ -458,7 +445,7 @@ extends 
 
 DisableTableProcedure
-public DisableTableProcedure()
+public DisableTableProcedure()
 
 
 
@@ -467,7 +454,7 @@ extends 
 
 DisableTableProcedure
-public DisableTableProcedure(MasterProcedureEnv env,
+public DisableTableProcedure(MasterProcedureEnv env,
  TableName tableName,
  boolean skipTableStateCheck)
   throws HBaseIOException
@@ -488,7 +475,7 @@ extends 
 
 DisableTableProcedure
-public DisableTableProcedure(MasterProcedureEnv env,
+public DisableTableProcedure(MasterProcedureEnv env,
 TableName tableName,
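For reference, the usual client-side entry point that ends up scheduling this procedure is Admin#disableTable; a minimal sketch (connection setup reduced to a default configuration):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  Configuration conf = HBaseConfiguration.create();
  try (Connection conn = ConnectionFactory.createConnection(conf);
       Admin admin = conn.getAdmin()) {
    // Internally submits a DisableTableProcedure to the master and waits on it.
    admin.disableTable(TableName.valueOf("t1"));
  }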

[15/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName of the directory 
used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir) && fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for 
that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Map<byte[], Long> maxSeqIdInStores =
-1536  regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayList<Cell> keptCells = new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List<Entry> entries = buffer.entryBuffer;
-1570  if (entries.isEmpty()) {
-1571
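The filterCellByStore logic above boils down to: keep a cell unless its store's maximum flushed sequence id already covers it. A standalone sketch of that rule, under the simplifying assumption that the per-store map is keyed by family name as a String (the real code keys on byte[]):

  import java.util.ArrayList;
  import java.util.List;
  import java.util.Map;

  static List<String> keptFamilies(List<String> families,
      Map<String, Long> maxSeqIdInStores, long entrySeqId) {
    List<String> kept = new ArrayList<>(families.size());
    for (String family : families) {
      Long maxSeqId = maxSeqIdInStores.get(family);
      // A null maxSeqId means we cannot prove the edit was flushed, so keep it.
      if (maxSeqId == null || maxSeqId < entrySeqId) {
        kept.add(family);
      }
    }
    return kept;
  }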

[15/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
index bd3c59e..21e240a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
@@ -33,62 +33,62 @@
 025import java.io.FileNotFoundException;
 026import java.io.FileOutputStream;
 027import java.io.IOException;
-028import java.io.ObjectInputStream;
-029import java.io.ObjectOutputStream;
-030import java.io.Serializable;
-031import java.nio.ByteBuffer;
-032import java.util.ArrayList;
-033import java.util.Comparator;
-034import java.util.HashSet;
-035import java.util.Iterator;
-036import java.util.List;
-037import java.util.Map;
-038import java.util.NavigableSet;
-039import java.util.PriorityQueue;
-040import java.util.Set;
-041import 
java.util.concurrent.ArrayBlockingQueue;
-042import 
java.util.concurrent.BlockingQueue;
-043import 
java.util.concurrent.ConcurrentHashMap;
-044import 
java.util.concurrent.ConcurrentMap;
-045import 
java.util.concurrent.ConcurrentSkipListSet;
-046import java.util.concurrent.Executors;
-047import 
java.util.concurrent.ScheduledExecutorService;
-048import java.util.concurrent.TimeUnit;
-049import 
java.util.concurrent.atomic.AtomicInteger;
-050import 
java.util.concurrent.atomic.AtomicLong;
-051import 
java.util.concurrent.atomic.LongAdder;
-052import java.util.concurrent.locks.Lock;
-053import 
java.util.concurrent.locks.ReentrantLock;
-054import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-055import 
org.apache.hadoop.conf.Configuration;
-056import 
org.apache.hadoop.hbase.HBaseConfiguration;
-057import 
org.apache.hadoop.hbase.io.HeapSize;
-058import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-063import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-064import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-066import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-068import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-069import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-070import 
org.apache.hadoop.hbase.nio.ByteBuff;
-071import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-072import 
org.apache.hadoop.hbase.util.HasThread;
-073import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-075import 
org.apache.hadoop.hbase.util.UnsafeAvailChecker;
-076import 
org.apache.hadoop.util.StringUtils;
-077import 
org.apache.yetus.audience.InterfaceAudience;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080
-081import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-082import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-083import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+028import java.io.Serializable;
+029import java.nio.ByteBuffer;
+030import java.util.ArrayList;
+031import java.util.Comparator;
+032import java.util.HashSet;
+033import java.util.Iterator;
+034import java.util.List;
+035import java.util.Map;
+036import java.util.NavigableSet;
+037import java.util.PriorityQueue;
+038import java.util.Set;
+039import 
java.util.concurrent.ArrayBlockingQueue;
+040import 
java.util.concurrent.BlockingQueue;
+041import 
java.util.concurrent.ConcurrentHashMap;
+042import 
java.util.concurrent.ConcurrentMap;
+043import 
java.util.concurrent.ConcurrentSkipListSet;
+044import java.util.concurrent.Executors;
+045import 
java.util.concurrent.ScheduledExecutorService;
+046import java.util.concurrent.TimeUnit;
+047import 
java.util.concurrent.atomic.AtomicInteger;
+048import 
java.util.concurrent.atomic.AtomicLong;
+049import 
java.util.concurrent.atomic.LongAdder;
+050import java.util.concurrent.locks.Lock;
+051import 
java.util.concurrent.locks.ReentrantLock;
+052import 
java.util.concurrent.locks.ReentrantReadWriteLock;
+053import 
org.apache.hadoop.conf.Configuration;
+054import 
org.apache.hadoop.hbase.HBaseConfiguration;
+055import 
org.apache.hadoop.hbase.io.HeapSize;
+056import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
+057import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+058import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;

[15/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
index 7666b0b..776026f 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/class-use/Procedure.html
@@ -151,7 +151,7 @@
 
 
 private long
-ClusterSchemaServiceImpl.submitProcedure(Procedure<?> procedure,
+ClusterSchemaServiceImpl.submitProcedure(Procedure<MasterProcedureEnv> procedure,
NonceKey nonceKey)
 
 
@@ -574,16 +574,16 @@
 
 static byte[]
 ProcedureSyncWait.submitAndWaitProcedure(ProcedureExecutor<MasterProcedureEnv> procExec,
-  Procedure<?> proc)
+  Procedure<MasterProcedureEnv> proc)
 
 
 protected long
-MasterProcedureUtil.NonceProcedureRunnable.submitProcedure(Procedure<?> proc)
+MasterProcedureUtil.NonceProcedureRunnable.submitProcedure(Procedure<MasterProcedureEnv> proc)
 
 
 static Future<byte[]>
 ProcedureSyncWait.submitProcedure(ProcedureExecutor<MasterProcedureEnv> procExec,
-   Procedure<?> proc)
+   Procedure<MasterProcedureEnv> proc)
 
 
 private void
@@ -906,7 +906,7 @@
 
 
 
-private Procedure<?>
+private Procedure<TEnvironment>
 ProcedureExecutor.WorkerThread.activeProcedure
 
 
@@ -918,7 +918,7 @@
 LockedResource.exclusiveLockOwnerProcedure
 
 
-private Procedure<?>
+private Procedure<TEnvironment>
 ProcedureExecutor.CompletedProcedureRetainer.procedure
 
 
@@ -931,7 +931,7 @@
 
 
 
-private ConcurrentHashMap<Long, Procedure>
+private ConcurrentHashMap<Long, Procedure<TEnvironment>>
 ProcedureExecutor.procedures
 Helper map to lookup the live procedures by ID.
 
@@ -941,11 +941,11 @@
 StateMachineProcedure.subProcList
 
 
-private Set<Procedure>
+private Set<Procedure<TEnvironment>>
 RootProcedureState.subprocs
 
 
-private ArrayList<Procedure>
+private ArrayList<Procedure<TEnvironment>>
 RootProcedureState.subprocStack
 
 
@@ -968,7 +968,7 @@
 
 
 
-<T extends Procedure> T
+<T extends Procedure<TEnvironment>> T
 ProcedureExecutor.getProcedure(Class<T> clazz,
 long procId)
 
@@ -1039,26 +1039,26 @@
 LockedResource.getExclusiveLockOwnerProcedure()
 
 
-Procedure<?>
+Procedure<TEnvironment>
 ProcedureExecutor.CompletedProcedureRetainer.getProcedure()
 
 
-Procedure
+Procedure<TEnvironment>
 ProcedureExecutor.getProcedure(long procId)
 
 
-Procedure
+Procedure<TEnvironment>
 ProcedureExecutor.getResult(long procId)
 
 
-Procedure
+Procedure<TEnvironment>
 ProcedureExecutor.getResultOrProcedure(long procId)
 
 
-private Procedure[]
-ProcedureExecutor.initializeChildren(RootProcedureState procStack,
-  Procedure procedure,
-  Procedure[] subprocs)
+private Procedure<TEnvironment>[]
+ProcedureExecutor.initializeChildren(RootProcedureState<TEnvironment> procStack,
+  Procedure<TEnvironment> procedure,
+  Procedure<TEnvironment>[] subprocs)
 
 
 static Procedure
@@ -1091,8 +1091,8 @@
 TimeUnit unit)
 
 
-private Procedure
-ProcedureExecutor.prepareProcedure(Procedure proc)
+private Procedure<TEnvironment>
+ProcedureExecutor.prepareProcedure(Procedure<TEnvironment> proc)
 
 
 Procedure<?>
@@ -1112,13 +1112,13 @@
 LockAndQueue.filterWaitingQueue(https://docs.oracle.com/javase/8/docs/api/java/util/function/Predicate.html?is-external=true;
 title="class or interface 
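The net effect of this diff is that Procedure's TEnvironment type parameter is now threaded through ProcedureExecutor, RootProcedureState and friends instead of raw or wildcard Procedure types. A hedged illustration of what the typed API buys on the master side (procId is a placeholder):

  import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
  import org.apache.hadoop.hbase.procedure2.Procedure;
  import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

  ProcedureExecutor<MasterProcedureEnv> exec = master.getMasterProcedureExecutor();
  // Lookups now come back as Procedure<MasterProcedureEnv> rather than Procedure<?>.
  Procedure<MasterProcedureEnv> proc = exec.getProcedure(procId);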

[15/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionCountSkewCostFunction.html
@@ -540,1205 +540,1204 @@
 532  
sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533Deque<BalancerRegionLoad> rLoads = oldLoads.get(Bytes.toString(regionName));
 534if (rLoads == null) {
-535  // There was nothing there
-536  rLoads = new 
ArrayDeque();
-537} else if (rLoads.size() = 
numRegionLoadsToRemember) {
-538  rLoads.remove();
-539}
-540rLoads.add(new 
BalancerRegionLoad(rm));
-541
loads.put(Bytes.toString(regionName), rLoads);
-542  });
-543});
-544
-545for(CostFromRegionLoadFunction cost : 
regionLoadFunctions) {
-546  cost.setLoads(loads);
-547}
-548  }
-549
-550  protected void initCosts(Cluster 
cluster) {
-551for (CostFunction c:costFunctions) 
{
-552  c.init(cluster);
-553}
-554  }
-555
-556  protected void 
updateCostsWithAction(Cluster cluster, Action action) {
-557for (CostFunction c : costFunctions) 
{
-558  c.postAction(action);
-559}
-560  }
-561
-562  /**
-563   * Get the names of the cost 
functions
-564   */
-565  public String[] getCostFunctionNames() 
{
-566if (costFunctions == null) return 
null;
-567String[] ret = new 
String[costFunctions.length];
-568for (int i = 0; i  
costFunctions.length; i++) {
-569  CostFunction c = 
costFunctions[i];
-570  ret[i] = 
c.getClass().getSimpleName();
-571}
-572
-573return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It 
will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be 
combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the 
cluster
-581   * @param previousCost the previous 
cost. This is used as an early out.
-582   * @return a double of a cost 
associated with the proposed cluster state.  This cost is an
-583   * aggregate of all individual 
cost functions.
-584   */
-585  protected double computeCost(Cluster 
cluster, double previousCost) {
-586double total = 0;
-587
-588for (int i = 0; i  
costFunctions.length; i++) {
-589  CostFunction c = 
costFunctions[i];
-590  this.tempFunctionCosts[i] = 0.0;
-591
-592  if (c.getMultiplier() <= 0) {
-593continue;
-594  }
-595
-596  Float multiplier = 
c.getMultiplier();
-597  Double cost = c.cost();
-598
-599  this.tempFunctionCosts[i] = 
multiplier*cost;
-600  total += 
this.tempFunctionCosts[i];
-601
-602  if (total > previousCost) {
-603break;
-604  }
-605}
-606
-607return total;
-608  }
-609
-610  /** Generates a candidate action to be 
applied to the cluster for cost function search */
-611  abstract static class 
CandidateGenerator {
-612abstract Cluster.Action 
generate(Cluster cluster);
-613
-614/**
-615 * From a list of regions pick a 
random one. Null can be returned which
-616 * {@link 
StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region 
move
-617 * rather than swap.
-618 *
-619 * @param clusterThe state of 
the cluster
-620 * @param server index of the 
server
-621 * @param chanceOfNoSwap Chance that 
this will decide to try a move rather
-622 *   than a 
swap.
-623 * @return a random {@link 
RegionInfo} or null if an asymmetrical move is
-624 * suggested.
-625 */
-626protected int 
pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627  // Check to see if this is just a 
move.
-628  if (cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < chanceOfNoSwap) {
-629// signal a move only.
-630return -1;
-631  }
-632  int rand = 
RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633  return 
cluster.regionsPerServer[server][rand];
-634
-635}
-636protected int 
pickRandomServer(Cluster cluster) {
-637  if (cluster.numServers < 1) {
-638return -1;
-639  }
-640
-641  return 
RANDOM.nextInt(cluster.numServers);
-642}
-643
-644protected int pickRandomRack(Cluster 
cluster) {
-645  if (cluster.numRacks < 1) {
-646
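computeCost above is a weighted sum with an early out once the candidate state is already worse than the incumbent. The same pattern, reduced to its essentials as a sketch (costFunctions and previousCost as in the source):

  double total = 0;
  for (int i = 0; i < costFunctions.length; i++) {
    float multiplier = costFunctions[i].getMultiplier();
    if (multiplier <= 0) {
      continue; // disabled cost functions contribute nothing
    }
    total += multiplier * costFunctions[i].cost();
    if (total > previousCost) {
      break; // already worse than the incumbent; no need to finish the sum
    }
  }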

[15/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/TableInfoMissingException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/TableInfoMissingException.html 
b/apidocs/org/apache/hadoop/hbase/TableInfoMissingException.html
index 91c0977..31b337d 100644
--- a/apidocs/org/apache/hadoop/hbase/TableInfoMissingException.html
+++ b/apidocs/org/apache/hadoop/hbase/TableInfoMissingException.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -20,38 +20,38 @@
 //-->
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+PrevClass
+NextClass
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 

[15/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/Size.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/Size.html 
b/apidocs/org/apache/hadoop/hbase/Size.html
index cae789f..848a61c 100644
--- a/apidocs/org/apache/hadoop/hbase/Size.html
+++ b/apidocs/org/apache/hadoop/hbase/Size.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
-var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var tabs = {65535:["t0","所有方法"],2:["t2","实例方法"],8:["t4","具体方法"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-PrevClass
-NextClass
+上一个类
+下一个类
 
 
-Frames
-NoFrames
+框架
+无框架
 
 
-AllClasses
+所有类
 
 
 
 
org.apache.hadoop.hbase
-

Class Size

+

类 Size


[15/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

2018-07-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.PBHelper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.PBHelper.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.PBHelper.html
index 05e032c..40ef9f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.PBHelper.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.PBHelper.html
@@ -25,767 +25,805 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+020import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+021import static 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
 022
-023import 
org.apache.hbase.thirdparty.com.google.common.base.Charsets;
-024import 
org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-025import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
-026import 
org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-027import 
com.google.protobuf.CodedOutputStream;
-028
-029import 
org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
-030import 
org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream;
-031import 
org.apache.hbase.thirdparty.io.netty.buffer.CompositeByteBuf;
-032import 
org.apache.hbase.thirdparty.io.netty.buffer.Unpooled;
-033import 
org.apache.hbase.thirdparty.io.netty.channel.Channel;
-034import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler;
-035import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
-036import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandlerAdapter;
-037import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline;
-038import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise;
-039import 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler;
-040import 
org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder;
-041import 
org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToByteEncoder;
-042import 
org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufDecoder;
-043import 
org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-044import 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent;
-045import 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
-046import 
org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
-047
-048import java.io.IOException;
-049import java.lang.reflect.Field;
-050import 
java.lang.reflect.InvocationTargetException;
-051import java.lang.reflect.Method;
-052import java.net.InetAddress;
-053import java.net.InetSocketAddress;
-054import java.nio.ByteBuffer;
-055import 
java.security.GeneralSecurityException;
-056import java.util.Arrays;
-057import java.util.Collections;
-058import java.util.List;
-059import java.util.Map;
-060import java.util.Set;
-061import java.util.concurrent.TimeUnit;
-062import 
java.util.concurrent.atomic.AtomicBoolean;
-063
-064import 
javax.security.auth.callback.Callback;
-065import 
javax.security.auth.callback.CallbackHandler;
-066import 
javax.security.auth.callback.NameCallback;
-067import 
javax.security.auth.callback.PasswordCallback;
-068import 
javax.security.auth.callback.UnsupportedCallbackException;
-069import 
javax.security.sasl.RealmCallback;
-070import 
javax.security.sasl.RealmChoiceCallback;
-071import javax.security.sasl.Sasl;
-072import javax.security.sasl.SaslClient;
-073import 
javax.security.sasl.SaslException;
-074
-075import 
org.apache.commons.codec.binary.Base64;
-076import 
org.apache.commons.lang3.StringUtils;
-077import 
org.apache.hadoop.conf.Configuration;
-078import 
org.apache.hadoop.crypto.CipherOption;
-079import 
org.apache.hadoop.crypto.CipherSuite;
-080import 
org.apache.hadoop.crypto.CryptoCodec;
-081import 
org.apache.hadoop.crypto.Decryptor;
-082import 
org.apache.hadoop.crypto.Encryptor;
-083import 
org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
-084import 
org.apache.hadoop.fs.FileEncryptionInfo;
-085import 
org.apache.yetus.audience.InterfaceAudience;
-086import org.slf4j.Logger;
-087import org.slf4j.LoggerFactory;
-088
-089import com.google.protobuf.ByteString;
-090import 
org.apache.hadoop.hdfs.DFSClient;
-091import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-092import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-093import 

[15/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

2018-06-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
index c10cfbf..a3e2f4a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html
@@ -3371,7 +3371,7 @@
 3363private V result = null;
 3364
 3365private final HBaseAdmin admin;
-3366private final Long procId;
+3366protected final Long procId;
 3367
 3368public ProcedureFuture(final 
HBaseAdmin admin, final Long procId) {
 3369  this.admin = admin;
@@ -3653,653 +3653,651 @@
 3645 * @return a description of the 
operation
 3646 */
 3647protected String getDescription() 
{
-3648  return "Operation: " + 
getOperationType() + ", "
-3649  + "Table Name: " + 
tableName.getNameWithNamespaceInclAsString();
-3650
-3651}
-3652
-3653protected abstract class 
TableWaitForStateCallable implements WaitForStateCallable {
-3654  @Override
-3655  public void 
throwInterruptedException() throws InterruptedIOException {
-3656throw new 
InterruptedIOException("Interrupted while waiting for operation: "
-3657+ getOperationType() + " on 
table: " + tableName.getNameWithNamespaceInclAsString());
-3658  }
-3659
-3660  @Override
-3661  public void 
throwTimeoutException(long elapsedTime) throws TimeoutException {
-3662throw new TimeoutException("The 
operation: " + getOperationType() + " on table: " +
-3663tableName.getNameAsString() 
+ " has not completed after " + elapsedTime + "ms");
-3664  }
-3665}
-3666
-3667@Override
-3668protected V 
postOperationResult(final V result, final long deadlineTs)
-3669throws IOException, 
TimeoutException {
-3670  LOG.info(getDescription() + " 
completed");
-3671  return 
super.postOperationResult(result, deadlineTs);
-3672}
-3673
-3674@Override
-3675protected V 
postOperationFailure(final IOException exception, final long deadlineTs)
-3676throws IOException, 
TimeoutException {
-3677  LOG.info(getDescription() + " 
failed with " + exception.getMessage());
-3678  return 
super.postOperationFailure(exception, deadlineTs);
-3679}
-3680
-3681protected void 
waitForTableEnabled(final long deadlineTs)
-3682throws IOException, 
TimeoutException {
-3683  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3684@Override
-3685public boolean checkState(int 
tries) throws IOException {
-3686  try {
-3687if 
(getAdmin().isTableAvailable(tableName)) {
-3688  return true;
-3689}
-3690  } catch 
(TableNotFoundException tnfe) {
-3691LOG.debug("Table " + 
tableName.getNameWithNamespaceInclAsString()
-3692+ " was not enabled, 
sleeping. tries=" + tries);
-3693  }
-3694  return false;
-3695}
-3696  });
-3697}
-3698
-3699protected void 
waitForTableDisabled(final long deadlineTs)
-3700throws IOException, 
TimeoutException {
-3701  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3702@Override
-3703public boolean checkState(int 
tries) throws IOException {
-3704  return 
getAdmin().isTableDisabled(tableName);
-3705}
-3706  });
-3707}
-3708
-3709protected void 
waitTableNotFound(final long deadlineTs)
-3710throws IOException, 
TimeoutException {
-3711  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3712@Override
-3713public boolean checkState(int 
tries) throws IOException {
-3714  return 
!getAdmin().tableExists(tableName);
-3715}
-3716  });
-3717}
-3718
-3719protected void 
waitForSchemaUpdate(final long deadlineTs)
-3720throws IOException, 
TimeoutException {
-3721  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3722@Override
-3723public boolean checkState(int 
tries) throws IOException {
-3724  return 
getAdmin().getAlterStatus(tableName).getFirst() == 0;
-3725}
-3726  });
-3727}
-3728
-3729protected void 
waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
-3730throws IOException, 
TimeoutException {
-3731  final TableDescriptor desc = 
getTableDescriptor();
-3732  final AtomicInteger actualRegCount 
= new AtomicInteger(0);
-3733  final MetaTableAccessor.Visitor 
visitor = new MetaTableAccessor.Visitor() {
-3734@Override
-3735public boolean visit(Result 
rowResult) throws IOException {
-3736  
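These TableFuture helpers back the Future-returning admin calls: get() polls the master-side procedure and, for table creation, additionally waits until all regions are online (see waitForAllRegionsOnline above). A hedged usage sketch, with tableDescriptor, splitKeys and the timeout as placeholders:

  import java.util.concurrent.Future;
  import java.util.concurrent.TimeUnit;

  Future<Void> f = admin.createTableAsync(tableDescriptor, splitKeys);
  f.get(5, TimeUnit.MINUTES); // blocks until the CreateTableFuture completes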

[15/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

2018-06-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
index a312363..bb1360e 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":9,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":9,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":9,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":9,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":9,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":9,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":9};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":9,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":9,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":9,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":9,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109"
 
:10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":9,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":9,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.LimitedPrivate(value="Tools")
-public class HMaster
+public class HMaster
 extends HRegionServer
 implements MasterServices
 HMaster is the "master server" for HBase. An HBase cluster 
has one active
@@ -456,10 +456,14 @@ implements splitPlanCount
 
 
+private SyncReplicationReplayWALManager
+syncReplicationReplayWALManager
+
+
 private TableStateManager
 tableStateManager
 
-
+
 private MasterWalManager
 walManager
 
@@ -753,7 +757,7 @@ implements 
 private long
-executePeerProcedure(ModifyPeerProcedure procedure)
+executePeerProcedure(AbstractPeerProcedure<?> procedure)
 
 
 private static void
@@ -1038,10 +1042,16 @@ implements getSplitPlanCount()
 
 
+SyncReplicationReplayWALManager
+getSyncReplicationReplayWALManager()
+Returns the SyncReplicationReplayWALManager.
+
+
+
 TableDescriptors
 getTableDescriptors()
 
-
+
 private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface 

[15/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

2018-06-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 4609108..5a71e17 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -2426,7 +2426,7 @@ protected static finalhttps://docs.oracle.com/javase/8/docs/api/j
 
 
 movedRegions
-protected Map<String, HRegionServer.MovedRegionInfo> movedRegions
+protected Map<String, HRegionServer.MovedRegionInfo> movedRegions
 
 
 
@@ -2435,7 +2435,7 @@ protected static finalhttps://docs.oracle.com/javase/8/docs/api/j
 
 
 TIMEOUT_REGION_MOVED
-private static final int TIMEOUT_REGION_MOVED
+private static final int TIMEOUT_REGION_MOVED
 
 See Also:
 Constant
 Field Values
@@ -3058,7 +3058,7 @@ protected static finalhttps://docs.oracle.com/javase/8/docs/api/j
 
 
 createRegionLoad
-public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad createRegionLoad(String encodedRegionName)
+public org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad createRegionLoad(String encodedRegionName)
   throws IOException
 
 Parameters:
@@ -3076,7 +3076,7 @@ protected static finalhttps://docs.oracle.com/javase/8/docs/api/j
 
 
 isOnline
-public boolean isOnline()
+public boolean isOnline()
 Report the status of the server. A server is online once 
all the startup is
  completed (setting up filesystem, starting executorService threads, etc.). 
This
  method is designed mostly to be useful in tests.
@@ -3092,7 +3092,7 @@ protected static finalhttps://docs.oracle.com/javase/8/docs/api/j
 
 
 setupWALAndReplication
-private void setupWALAndReplication()
+private void setupWALAndReplication()
  throws IOException
 Setup WAL log and replication if enabled. Replication setup 
is done in here because it wants to
  be hooked up to WAL.
@@ -3108,7 +3108,7 @@ protected static finalhttps://docs.oracle.com/javase/8/docs/api/j
 
 
 startReplicationService
-private void startReplicationService()
+private void startReplicationService()
   throws IOException
 Start up replication source and sink handlers.
 
@@ -3123,7 +3123,7 @@ protected static finalhttps://docs.oracle.com/javase/8/docs/api/j
 
 
 getRegionServerMetrics
-public MetricsRegionServer getRegionServerMetrics()
+public MetricsRegionServer getRegionServerMetrics()
 
 
 
@@ -3132,7 +3132,7 @@ protected static finalhttps://docs.oracle.com/javase/8/docs/api/j
 
 
 getMasterAddressTracker
-public MasterAddressTracker getMasterAddressTracker()
+public MasterAddressTracker getMasterAddressTracker()
 
 Returns:
 Master address tracker instance.
@@ -3145,7 +3145,7 @@ protected static finalhttps://docs.oracle.com/javase/8/docs/api/j
 
 
 startServices
-private void startServices()
+private void startServices()
 throws IOException
 
 Throws:
@@ -3159,7 +3159,7 @@ protected static finalhttps://docs.oracle.com/javase/8/docs/api/j
 
 
 initializeThreads
-private void initializeThreads()
+private void initializeThreads()
 throws IOException
 
 Throws:
@@ -3173,7 +3173,7 @@ protected static finalhttps://docs.oracle.com/javase/8/docs/api/j
 
 
 registerConfigurationObservers
-private void registerConfigurationObservers()
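isOnline() above is explicitly aimed at tests. A typical wait loop, as a sketch (regionServer is a placeholder for an HRegionServer under test):

  while (!regionServer.isOnline()) {
    Thread.sleep(100); // startup: filesystem, WAL, executor threads, ...
  }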

[15/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

2018-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
index b6e7636..592c2cc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.html
@@ -356,3901 +356,3924 @@
 348  public Future<Void> modifyTableAsync(TableDescriptor td) throws IOException {
 349ModifyTableResponse response = 
executeCallable(
 350  new MasterCallable<ModifyTableResponse>(getConnection(), getRpcControllerFactory()) {
-351@Override
-352protected ModifyTableResponse 
rpcCall() throws Exception {
-353  
setPriority(td.getTableName());
-354  ModifyTableRequest request = 
RequestConverter.buildModifyTableRequest(
-355td.getTableName(), td, 
ng.getNonceGroup(), ng.newNonce());
-356  return 
master.modifyTable(getRpcController(), request);
-357}
-358  });
-359return new ModifyTableFuture(this, 
td.getTableName(), response);
-360  }
-361
-362  @Override
-363  public List<TableDescriptor> listTableDescriptorsByNamespace(byte[] name) throws IOException {
-364return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-365getRpcControllerFactory()) {
-366  @Override
-367  protected List<TableDescriptor> rpcCall() throws Exception {
-368return 
master.listTableDescriptorsByNamespace(getRpcController(),
-369
ListTableDescriptorsByNamespaceRequest.newBuilder()
-370  
.setNamespaceName(Bytes.toString(name)).build())
-371.getTableSchemaList()
-372.stream()
-373
.map(ProtobufUtil::toTableDescriptor)
-374
.collect(Collectors.toList());
-375  }
-376});
-377  }
-378
-379  @Override
-380  public List<TableDescriptor> listTableDescriptors(List<TableName> tableNames) throws IOException {
-381return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-382getRpcControllerFactory()) {
-383  @Override
-384  protected List<TableDescriptor> rpcCall() throws Exception {
-385GetTableDescriptorsRequest req 
=
-386
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-387  return 
ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-388  req));
-389  }
-390});
-391  }
-392
-393  @Override
-394  public List<RegionInfo> getRegions(final ServerName sn) throws IOException {
-395AdminService.BlockingInterface admin 
= this.connection.getAdmin(sn);
-396// TODO: There is no timeout on this 
controller. Set one!
-397HBaseRpcController controller = 
rpcControllerFactory.newController();
-398return 
ProtobufUtil.getOnlineRegions(controller, admin);
-399  }
-400
-401  @Override
-402  public List<RegionInfo> getRegions(TableName tableName) throws IOException {
-403if 
(TableName.isMetaTableName(tableName)) {
-404  return 
Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-405} else {
-406  return 
MetaTableAccessor.getTableRegions(connection, tableName, true);
-407}
-408  }
-409
-410  private static class AbortProcedureFuture extends ProcedureFuture<Boolean> {
-411private boolean isAbortInProgress;
-412
-413public AbortProcedureFuture(
-414final HBaseAdmin admin,
-415final Long procId,
-416final Boolean abortProcResponse) 
{
-417  super(admin, procId);
-418  this.isAbortInProgress = 
abortProcResponse;
-419}
-420
-421@Override
-422public Boolean get(long timeout, 
TimeUnit unit)
-423throws InterruptedException, 
ExecutionException, TimeoutException {
-424  if (!this.isAbortInProgress) {
-425return false;
-426  }
-427  super.get(timeout, unit);
-428  return true;
-429}
-430  }
-431
-432  /** @return Connection used by this 
object. */
-433  @Override
-434  public Connection getConnection() {
-435return connection;
-436  }
-437
-438  @Override
-439  public boolean tableExists(final 
TableName tableName) throws IOException {
-440return executeCallable(new 
RpcRetryingCallable<Boolean>() {
-441  @Override
-442  protected Boolean rpcCall(int 
callTimeout) throws Exception {
-443return 
MetaTableAccessor.tableExists(connection, tableName);
-444  }
-445});
-446  }
-447
-448  @Override
-449  public HTableDescriptor[] listTables() 
throws IOException {
-450return listTables((Pattern)null, 
false);
-451  }
-452
-453  @Override
-454  public HTableDescriptor[] 
listTables(Pattern pattern) throws IOException {
-455return listTables(pattern, 
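
Every admin method in this hunk wraps its RPC in the same retrying-callable shape: an anonymous callable supplies rpcCall(), and executeCallable drives the retries. Below is a minimal standalone sketch of that shape; RpcCall and executeCallable here are hypothetical stand-ins, not HBase's internal MasterCallable/executeCallable pair.

import java.io.IOException;

public class RetryingCallableSketch {
  // Stand-in for the rpcCall() hook the anonymous subclasses above override.
  interface RpcCall<V> {
    V rpcCall() throws Exception;
  }

  // Stand-in for executeCallable: retry the RPC a few times before failing.
  static <V> V executeCallable(RpcCall<V> call, int maxAttempts) throws IOException {
    Exception last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return call.rpcCall();
      } catch (Exception e) {
        last = e; // a real client would back off and check retryability here
      }
    }
    throw new IOException("RPC failed after " + maxAttempts + " attempts", last);
  }

  public static void main(String[] args) throws IOException {
    String result = executeCallable(() -> "table-schema", 3);
    System.out.println(result);
  }
}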

[15/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

2018-06-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index 0b11000..02beab3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = 
"3.0.0-SNAPSHOT";
-011  public static final String revision = 
"78da0e366970cf9bab6add9ba9fd74ca37c75be3";
+011  public static final String revision = 
"9101fc246f86445006bfbcdfda5cc495016dc280";
 012  public static final String user = 
"jenkins";
-013  public static final String date = "Tue 
Jun 19 00:28:02 UTC 2018";
+013  public static final String date = "Wed 
Jun 20 04:15:39 UTC 2018";
 014  public static final String url = 
"git://jenkins-websites1.apache.org/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum 
= "72ef87fb3fa82a7c510def4b3609400b";
+015  public static final String srcChecksum 
= "bc4bb89ad408bf39c503a300c7481e65";
 016}
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/CatalogJanitor.SplitParentFirstComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/CatalogJanitor.SplitParentFirstComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/CatalogJanitor.SplitParentFirstComparator.html
index 4229646..1814633 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/CatalogJanitor.SplitParentFirstComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/CatalogJanitor.SplitParentFirstComparator.html
@@ -119,358 +119,355 @@
 111  protected void chore() {
 112try {
 113  AssignmentManager am = 
this.services.getAssignmentManager();
-114      if (this.enabled.get()
-115          && !this.services.isInMaintenanceMode()
-116          && am != null
-117          && am.isFailoverCleanupDone()
-118          && !am.hasRegionsInTransition()) {
-119scan();
-120  } else {
-121LOG.warn("CatalogJanitor is 
disabled! Enabled=" + this.enabled.get() +
-122", maintenanceMode=" + 
this.services.isInMaintenanceMode() +
-123        ", am=" + am + ", failoverCleanupDone=" + (am != null && am.isFailoverCleanupDone()) +
-124        ", hasRIT=" + (am != null && am.hasRegionsInTransition()));
-125  }
-126} catch (IOException e) {
-127  LOG.warn("Failed scan of catalog 
table", e);
-128}
-129  }
-130
-131  /**
-132   * Scans hbase:meta and returns a 
number of scanned rows, and a map of merged
-133   * regions, and an ordered map of split 
parents.
-134   * @return triple of scanned rows, map 
of merged regions and map of split
-135   * parent regioninfos
-136   * @throws IOException
-137   */
-138  Triple<Integer, Map<RegionInfo, Result>, Map<RegionInfo, Result>>
-139    getMergedRegionsAndSplitParents() throws IOException {
-140return 
getMergedRegionsAndSplitParents(null);
-141  }
-142
-143  /**
-144   * Scans hbase:meta and returns a 
number of scanned rows, and a map of merged
-145   * regions, and an ordered map of split parents. If the given table name is
-146   * null, returns merged regions and split parents of all tables; otherwise only those of the
-147   * specified table.
-148   * @param tableName null represents all 
tables
-149   * @return triple of scanned rows, and 
map of merged regions, and map of split
-150   * parent regioninfos
-151   * @throws IOException
-152   */
-153  Triple<Integer, Map<RegionInfo, Result>, Map<RegionInfo, Result>>
-154    getMergedRegionsAndSplitParents(final TableName tableName) throws IOException {
-155final boolean isTableSpecified = 
(tableName != null);
-156// TODO: Only works with single 
hbase:meta region currently.  Fix.
-157final AtomicInteger count = new 
AtomicInteger(0);
-158// Keep Map of found split parents.  
There are candidates for cleanup.
-159// Use a comparator that has split 
parents come before its daughters.
-160    final Map<RegionInfo, Result> splitParents = new TreeMap<>(new SplitParentFirstComparator());
-161    final Map<RegionInfo, Result> mergedRegions = new TreeMap<>(RegionInfo.COMPARATOR);
-162// This visitor collects split 
parents and counts rows in the hbase:meta table
-163
-164MetaTableAccessor.Visitor visitor = 
new MetaTableAccessor.Visitor() {
-165  @Override
-166  public boolean visit(Result r) 
throws IOException {
-167if (r == null || r.isEmpty()) 
return true;
-168count.incrementAndGet();
-169RegionInfo info = 
MetaTableAccessor.getRegionInfo(r);
-170if (info 
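
The janitor's TreeMap ordering does the real work here: SplitParentFirstComparator sorts a split parent ahead of the daughters it spans, so cleanup can visit parents first. A self-contained illustration of that kind of ordering, using string ranges instead of RegionInfo; the tie-break on the wider end key is my assumption about the comparator's behavior, not a copy of it.

import java.util.Comparator;
import java.util.TreeMap;

public class ParentFirstOrderingDemo {
  public static void main(String[] args) {
    // Order [start, end) ranges by start key; on equal starts the wider
    // range (the split parent) sorts first.
    Comparator<String[]> parentFirst = Comparator
        .comparing((String[] r) -> r[0])
        .thenComparing((String[] r) -> r[1], Comparator.reverseOrder());

    TreeMap<String[], String> regions = new TreeMap<>(parentFirst);
    regions.put(new String[] { "a", "m" }, "daughter A");
    regions.put(new String[] { "a", "z" }, "parent");
    regions.put(new String[] { "m", "z" }, "daughter B");

    // Prints: parent, daughter A, daughter B
    regions.values().forEach(System.out::println);
  }
}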

[15/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

2018-06-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.html
index 64099d9..2cafba9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogWriter.html
@@ -55,82 +55,81 @@
 047
 048  @Override
 049  public void append(Entry entry) throws 
IOException {
-050
entry.setCompressionContext(compressionContext);
-051
entry.getKey().getBuilder(compressor).
-052
setFollowingKvCount(entry.getEdit().size()).build().writeDelimitedTo(output);
-053for (Cell cell : 
entry.getEdit().getCells()) {
-054  // cellEncoder must assume little 
about the stream, since we write PB and cells in turn.
-055  cellEncoder.write(cell);
-056}
-057length.set(output.getPos());
-058  }
-059
-060  @Override
-061  public void close() throws IOException 
{
-062if (this.output != null) {
-063  try {
-064if (!trailerWritten) 
writeWALTrailer();
-065this.output.close();
-066  } catch (NullPointerException npe) 
{
-067// Can get a NPE coming up from 
down in DFSClient$DFSOutputStream#close
-068LOG.warn(npe.toString(), npe);
-069  }
-070  this.output = null;
-071}
-072  }
-073
-074  @Override
-075  public void sync(boolean forceSync) 
throws IOException {
-076FSDataOutputStream fsdos = 
this.output;
-077if (fsdos == null) return; // Presume 
closed
-078fsdos.flush();
-079if (forceSync) {
-080  fsdos.hsync();
-081} else {
-082  fsdos.hflush();
-083}
-084  }
-085
-086  public FSDataOutputStream getStream() 
{
-087return this.output;
-088  }
-089
-090  @SuppressWarnings("deprecation")
-091  @Override
-092  protected void initOutput(FileSystem 
fs, Path path, boolean overwritable, int bufferSize,
-093  short replication, long blockSize) 
throws IOException, StreamLacksCapabilityException {
-094this.output = 
fs.createNonRecursive(path, overwritable, bufferSize, replication, blockSize,
-095  null);
-096if 
(fs.getConf().getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true)) 
{
-097  if 
(!CommonFSUtils.hasCapability(output, "hflush")) {
-098throw new 
StreamLacksCapabilityException("hflush");
-099  }
-100  if 
(!CommonFSUtils.hasCapability(output, "hsync")) {
-101throw new 
StreamLacksCapabilityException("hsync");
-102  }
-103}
-104  }
-105
-106  @Override
-107  protected long 
writeMagicAndWALHeader(byte[] magic, WALHeader header) throws IOException {
-108output.write(magic);
-109header.writeDelimitedTo(output);
-110return output.getPos();
-111  }
-112
-113  @Override
-114  protected OutputStream 
getOutputStreamForCellEncoder() {
-115return this.output;
-116  }
-117
-118  @Override
-119  protected long 
writeWALTrailerAndMagic(WALTrailer trailer, byte[] magic) throws IOException 
{
-120trailer.writeTo(output);
-121
output.writeInt(trailer.getSerializedSize());
-122output.write(magic);
-123return output.getPos();
-124  }
-125}
+050
entry.getKey().getBuilder(compressor).
+051
setFollowingKvCount(entry.getEdit().size()).build().writeDelimitedTo(output);
+052for (Cell cell : 
entry.getEdit().getCells()) {
+053  // cellEncoder must assume little 
about the stream, since we write PB and cells in turn.
+054  cellEncoder.write(cell);
+055}
+056length.set(output.getPos());
+057  }
+058
+059  @Override
+060  public void close() throws IOException 
{
+061if (this.output != null) {
+062  try {
+063if (!trailerWritten) 
writeWALTrailer();
+064this.output.close();
+065  } catch (NullPointerException npe) 
{
+066// Can get a NPE coming up from 
down in DFSClient$DFSOutputStream#close
+067LOG.warn(npe.toString(), npe);
+068  }
+069  this.output = null;
+070}
+071  }
+072
+073  @Override
+074  public void sync(boolean forceSync) 
throws IOException {
+075FSDataOutputStream fsdos = 
this.output;
+076if (fsdos == null) return; // Presume 
closed
+077fsdos.flush();
+078if (forceSync) {
+079  fsdos.hsync();
+080} else {
+081  fsdos.hflush();
+082}
+083  }
+084
+085  public FSDataOutputStream getStream() 
{
+086return this.output;
+087  }
+088
+089  @SuppressWarnings("deprecation")
+090  @Override
+091  protected void initOutput(FileSystem 
fs, Path path, boolean overwritable, int bufferSize,
+092  short replication, long blockSize) 
throws IOException, StreamLacksCapabilityException {
+093this.output = 
fs.createNonRecursive(path, overwritable, bufferSize, 
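
The sync(boolean) method above is the interesting part of this writer: hflush() only guarantees the written data is visible to new readers, while hsync() additionally forces it to disk on the datanodes. Restated as a standalone helper against the standard Hadoop FSDataOutputStream API:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;

public final class WalSyncHelper {
  private WalSyncHelper() {
  }

  public static void sync(FSDataOutputStream out, boolean forceSync) throws IOException {
    if (out == null) {
      return; // presume the writer was already closed
    }
    out.flush();
    if (forceSync) {
      out.hsync();  // durability: persist to disk on the datanodes
    } else {
      out.hflush(); // visibility: new readers see the data, it may not be on disk yet
    }
  }
}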

[15/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerReportEvent.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements Comparable<ServerStateNode> {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
Set<RegionStateNode> regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339    public ProcedureEvent<?> 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements Comparable<ServerStateNode> {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
Set<RegionStateNode> regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346        for (int i = 0; i < 
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361    public Set<RegionStateNode> 
getRegions() {
-362  return regions;
+361    public ProcedureEvent<?> 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369    public ArrayList<RegionInfo> getRegionInfoList() {
-370      ArrayList<RegionInfo> hris = new ArrayList<RegionInfo>(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372        for (int i = 0; i < 
expected.length; ++i) {
+373  expectedState |= (state == 
expected[i]);
+374}
+375  }
+376  return 
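
The refactor above is mostly documentation, but the varargs isInState(...) membership check is worth seeing in isolation together with the ONLINE/SPLITTING/OFFLINE lifecycle the new javadoc describes. A standalone sketch of the same bookkeeping; the names mirror the diff, but this is not the HBase class itself.

public class ServerStateSketch {
  enum ServerState { ONLINE, SPLITTING, OFFLINE }

  private volatile ServerState state = ServerState.ONLINE;

  boolean isInState(ServerState... expected) {
    boolean expectedState = false;
    if (expected != null) {
      for (int i = 0; i < expected.length; ++i) {
        expectedState |= (state == expected[i]);
      }
    }
    return expectedState;
  }

  public static void main(String[] args) {
    ServerStateSketch node = new ServerStateSketch();
    node.state = ServerState.SPLITTING; // server crashed, WAL split in progress
    System.out.println(node.isInState(ServerState.SPLITTING, ServerState.OFFLINE)); // true
  }
}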

[15/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

2018-05-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 3da432b..d30fa8f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -928,7690 +928,7698 @@
 920      Collection<HStore> stores = this.stores.values();
 921  try {
 922// update the stores that we are 
replaying
-923
stores.forEach(HStore::startReplayingFromWAL);
-924// Recover any edits if 
available.
-925maxSeqId = Math.max(maxSeqId,
-926  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-927// Make sure mvcc is up to max.
-928this.mvcc.advanceTo(maxSeqId);
-929  } finally {
-930// update the stores that we are 
done replaying
-931
stores.forEach(HStore::stopReplayingFromWAL);
-932  }
-933}
-934this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-935
-936
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-937this.writestate.flushRequested = 
false;
-938this.writestate.compacting.set(0);
-939
-940if (this.writestate.writesEnabled) 
{
-941  // Remove temporary data left over 
from old regions
-942  status.setStatus("Cleaning up 
temporary data from old regions");
-943  fs.cleanupTempDir();
-944}
-945
-946if (this.writestate.writesEnabled) 
{
-947  status.setStatus("Cleaning up 
detritus from prior splits");
-948  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-949  // these directories here on open.  
We may be opening a region that was
-950  // being split but we crashed in 
the middle of it all.
-951  fs.cleanupAnySplitDetritus();
-952  fs.cleanupMergesDir();
-953}
-954
-955// Initialize split policy
-956this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-957
-958// Initialize flush policy
-959this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-960
-961long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-962for (HStore store: stores.values()) 
{
-963  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-964}
-965
-966// Use maximum of log sequenceid or 
that which was found in stores
-967// (particularly if no recovered 
edits, seqid will be -1).
-968long maxSeqIdFromFile =
-969  
WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
-970long nextSeqId = Math.max(maxSeqId, 
maxSeqIdFromFile) + 1;
-971if (writestate.writesEnabled) {
-972  
WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), 
nextSeqId - 1);
-973}
-974
-975LOG.info("Opened {}; next 
sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
-976
-977// A region can be reopened if failed 
a split; reset flags
-978this.closing.set(false);
-979this.closed.set(false);
-980
-981if (coprocessorHost != null) {
-982  status.setStatus("Running 
coprocessor post-open hooks");
-983  coprocessorHost.postOpen();
-984}
+923LOG.debug("replaying wal for " + 
this.getRegionInfo().getEncodedName());
+924
stores.forEach(HStore::startReplayingFromWAL);
+925// Recover any edits if 
available.
+926maxSeqId = Math.max(maxSeqId,
+927  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+928// Make sure mvcc is up to max.
+929this.mvcc.advanceTo(maxSeqId);
+930  } finally {
+931LOG.debug("stopping wal replay 
for " + this.getRegionInfo().getEncodedName());
+932// update the stores that we are 
done replaying
+933
stores.forEach(HStore::stopReplayingFromWAL);
+934  }
+935}
+936this.lastReplayedOpenRegionSeqId = 
maxSeqId;
+937
+938
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+939this.writestate.flushRequested = 
false;
+940this.writestate.compacting.set(0);
+941
+942if (this.writestate.writesEnabled) 
{
+943  LOG.debug("Cleaning up temporary 
data for " + this.getRegionInfo().getEncodedName());
+944  // Remove temporary data left over 
from old regions
+945  status.setStatus("Cleaning up 
temporary data from old regions");
+946  fs.cleanupTempDir();
+947}
+948
+949if (this.writestate.writesEnabled) 
{
+950  status.setStatus("Cleaning up 
detritus from prior splits");
+951  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
+952  // these directories here on open.  
We may be opening a region that was
+953  // being split but we crashed in 
the 
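
The change in this hunk brackets WAL replay with debug logging and, via the existing finally block, guarantees every store leaves replay mode even when recovering edits throws. The shape, reduced to a sketch with stand-in types (Store, recoverEdits are hypothetical):

import java.util.List;

public class ReplayBracketSketch {
  interface Store {
    void startReplayingFromWAL();
    void stopReplayingFromWAL();
  }

  static void replay(String regionName, List<Store> stores, Runnable recoverEdits) {
    try {
      System.out.println("replaying wal for " + regionName);
      stores.forEach(Store::startReplayingFromWAL);
      recoverEdits.run();
    } finally {
      // Runs even when recovery throws, so no store is left in replay mode.
      System.out.println("stopping wal replay for " + regionName);
      stores.forEach(Store::stopReplayingFromWAL);
    }
  }
}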

[15/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

2018-05-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.html
index 78aac51..d08a687 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.html
@@ -299,421 +299,377 @@
 291// Hold a lock on the manager 
instance while moving servers to prevent
 292// another writer changing our state 
while we are working.
 293synchronized (rsGroupInfoManager) {
-294  if 
(master.getMasterCoprocessorHost() != null) {
-295
master.getMasterCoprocessorHost().preMoveServers(servers, targetGroupName);
-296  }
-297  // Presume first server's source 
group. Later ensure all servers are from this group.
-298  Address firstServer = 
servers.iterator().next();
-299  RSGroupInfo srcGrp = 
rsGroupInfoManager.getRSGroupOfServer(firstServer);
-300  if (srcGrp == null) {
-301// Be careful. This exception 
message is tested for in TestRSGroupsBase...
-302throw new 
ConstraintException("Source RSGroup for server " + firstServer
-303+ " does not exist.");
-304  }
-305  if 
(srcGrp.getName().equals(targetGroupName)) {
-306throw new 
ConstraintException("Target RSGroup " + targetGroupName +
-307" is same as source " + 
srcGrp + " RSGroup.");
-308  }
-309  // Only move online servers (when 
moving from 'default') or servers from other
-310  // groups. This prevents bogus 
servers from entering groups
-311  if 
(RSGroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) {
-312        if (srcGrp.getServers().size() <= servers.size()) {
-313  throw new 
ConstraintException(KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE);
-314}
-315
checkOnlineServersOnly(servers);
-316  }
-317  // Ensure all servers are of same 
rsgroup.
-318  for (Address server: servers) {
-319String tmpGroup = 
rsGroupInfoManager.getRSGroupOfServer(server).getName();
-320if 
(!tmpGroup.equals(srcGrp.getName())) {
-321  throw new 
ConstraintException("Move server request should only come from one source " +
-322  "RSGroup. Expecting only " 
+ srcGrp.getName() + " but contains " + tmpGroup);
-323}
-324  }
-325      if (srcGrp.getServers().size() <= servers.size() && srcGrp.getTables().size() > 0) {
-326throw new 
ConstraintException("Cannot leave a RSGroup " + srcGrp.getName() +
-327" that contains tables 
without servers to host them.");
-328  }
-329
-330      // MovedServers may be < passed in 'servers'.
-331      Set<Address> movedServers = 
rsGroupInfoManager.moveServers(servers, srcGrp.getName(),
-332  targetGroupName);
-333      List<Address> editableMovedServers = Lists.newArrayList(movedServers);
-334  boolean foundRegionsToMove;
-335  do {
-336foundRegionsToMove = false;
-337        for (Iterator<Address> iter 
= editableMovedServers.iterator(); iter.hasNext();) {
-338  Address rs = iter.next();
-339  // Get regions that are 
associated with this server.
-340          List<RegionInfo> regions 
= getRegions(rs);
+294  // Presume first server's source 
group. Later ensure all servers are from this group.
+295  Address firstServer = 
servers.iterator().next();
+296  RSGroupInfo srcGrp = 
rsGroupInfoManager.getRSGroupOfServer(firstServer);
+297  if (srcGrp == null) {
+298// Be careful. This exception 
message is tested for in TestRSGroupsBase...
+299throw new 
ConstraintException("Source RSGroup for server " + firstServer
+300+ " does not exist.");
+301  }
+302  if 
(srcGrp.getName().equals(targetGroupName)) {
+303throw new 
ConstraintException("Target RSGroup " + targetGroupName +
+304" is same as source " + 
srcGrp + " RSGroup.");
+305  }
+306  // Only move online servers (when 
moving from 'default') or servers from other
+307  // groups. This prevents bogus 
servers from entering groups
+308  if 
(RSGroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) {
+309        if (srcGrp.getServers().size() <= servers.size()) {
+310  throw new 
ConstraintException(KEEP_ONE_SERVER_IN_DEFAULT_ERROR_MESSAGE);
+311}
+312
checkOnlineServersOnly(servers);
+313  }
+314  // Ensure all servers are of same 
rsgroup.
+315  for (Address server: servers) {
+316String tmpGroup = 
rsGroupInfoManager.getRSGroupOfServer(server).getName();
+317if 
(!tmpGroup.equals(srcGrp.getName())) {
+318  throw new 
ConstraintException("Move server request should only come from one source " +
+319  
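
The validation in this method hinges on one invariant: every server in a move request must resolve to the same source RSGroup as the first one. Extracted into a sketch, with groupOf standing in for rsGroupInfoManager.getRSGroupOfServer(...):

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.function.Function;

public class SourceGroupCheckSketch {
  static String requireSingleSourceGroup(Set<String> servers, Function<String, String> groupOf) {
    // Presume the first server's group; every other server must match it.
    String srcGroup = groupOf.apply(servers.iterator().next());
    for (String server : servers) {
      String group = groupOf.apply(server);
      if (!group.equals(srcGroup)) {
        throw new IllegalArgumentException("Move server request should only come from one "
            + "source RSGroup. Expecting only " + srcGroup + " but contains " + group);
      }
    }
    return srcGroup;
  }

  public static void main(String[] args) {
    Set<String> servers = new LinkedHashSet<>(Arrays.asList("rs1:16020", "rs2:16020"));
    System.out.println(requireSingleSourceGroup(servers, server -> "default"));
  }
}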

[15/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html
index 93820c0..d375e8c 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.FailingSplitMasterObserver.html
@@ -230,7 +230,7 @@ implements 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessor, org.apache.had
 
 
 Methods inherited from 
interface org.apache.hadoop.hbase.coprocessor.MasterObserver
-postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, 
postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, 
postCloneSnapshot, postCompletedCreateTableAction, 
postCompletedDeleteTableAction, postCompletedDisableTableAction, 
postCompletedEnableTableAction, postCompletedMergeRegionsAction, 
postCompletedModifyTableAction, postCompletedSplitRegionAction, 
postCompletedTruncateTableAction, postCreateNamespace, postCreateTable, 
postDecommissionRegionServers, postDeleteNamespace, postDeleteSnapshot, 
postDeleteTable, postDisableReplicationPeer, postDisableTable, 
postEnableReplicationPeer, postEnableTable, postGetClusterMetrics, 
postGetLocks, postGetNamespaceDescriptor, postGetProcedures, 
postGetReplicationPeerConfig, postGetTableDescriptors, postGetTableNames, 
postListDecommissionedRegionServers, postListNamespaceDescriptors, 
postListReplicationPeers, postListSnapshot, postLockHeartbeat, 
postMergeRegions, postMergeRegionsCommitAction, postModifyNam
 espace, postModifyTable, postMove, postMoveServers, postMoveServersAndTables, 
postMoveTables, postRecommissionRegionServer, postRegionOffline, 
postRemoveReplicationPeer, postRemoveRSGroup, postRemoveServers, 
postRequestLock, postRestoreSnapshot, postRollBackMergeRegionsAction, 
postRollBackSplitRegionAction, postSetNamespaceQuota, 
postSetSplitOrMergeEnabled, postSetTableQuota, postSetUserQuota, 
postSetUserQuota, postSetUserQuota, postSnapshot, postStartMaster, 
postTableFlush, postTruncateTable, postUnassign, 
postUpdateReplicationPeerConfig, preAbortProcedure, preAddReplicationPeer, 
preAddRSGroup, preAssign, preBalance, preBalanceRSGroup, preBalanceSwitch, 
preClearDeadServers, preCloneSnapshot, preCreateNamespace, preCreateTable, 
preCreateTableAction, preDecommissionRegionServers, preDeleteNamespace, 
preDeleteSnapshot, preDeleteTable, preDeleteTableAction, 
preDisableReplicationPeer, preDisableTable, preDisableTableAction, 
preEnableReplicationPeer, preEnableTable, preEnableTableAction,
  preGetClusterMetrics, preGetLocks, preGetNamespaceDescriptor, 
preGetProcedures, preGetReplicationPeerConfig, preGetTableDescriptors, 
preGetTableNames, preListDecommissionedRegionServers, 
preListNamespaceDescriptors, preListReplicationPeers, preListSnapshot, 
preLockHeartbeat, preMasterInitialization, preMergeRegions, 
preMergeRegionsAction, preMergeRegionsCommitAction, preModifyNamespace, 
preModifyTable, preModifyTableAction, preMove, preMoveServers, 
preMoveServersAndTables, preMoveTables, preRecommissionRegionServer, 
preRegionOffline, preRemoveReplicationPeer, preRemoveRSGroup, preRemoveServers, 
preRequestLock, preRestoreSnapshot, preSetNamespaceQuota, 
preSetSplitOrMergeEnabled, preSetTableQuota, preSetUserQuota, preSetUserQuota, 
preSetUserQuota, preShutdown, preSnapshot, preSplitRegion, 
preSplitRegionAction, preSplitRegionAfterMETAAction, preStopMaster, 
preTableFlush, preTruncateTable, preTruncateTableAction, preUnassign, 
preUpdateReplicationPeerConfig
+postAbortProcedure, postAddReplicationPeer, postAddRSGroup, postAssign, 
postBalance, postBalanceRSGroup, postBalanceSwitch, postClearDeadServers, 
postCloneSnapshot, postCompletedCreateTableAction, 
postCompletedDeleteTableAction, postCompletedDisableTableAction, 
postCompletedEnableTableAction, postCompletedMergeRegionsAction, 
postCompletedModifyTableAction, postCompletedModifyTableAction, 
postCompletedSplitRegionAction, postCompletedTruncateTableAction, 
postCreateNamespace, postCreateTable, postDecommissionRegionServers, 
postDeleteNamespace, postDeleteSnapshot, postDeleteTable, 
postDisableReplicationPeer, postDisableTable, postEnableReplicationPeer, 
postEnableTable, postGetClusterMetrics, postGetLocks, 
postGetNamespaceDescriptor, postGetProcedures, postGetReplicationPeerConfig, 
postGetTableDescriptors, postGetTableNames, 
postListDecommissionedRegionServers, postListNamespaceDescriptors, 
postListReplicationPeers, postListSnapshot, postLockHeartbeat, 
postMergeRegions, postMergeRe
 gionsCommitAction, 

[15/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

2018-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 4a879bb..7d27402 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -300,7 +300,7 @@
 292  private Map<String, 
com.google.protobuf.Service coprocessorServiceHandlers = 
Maps.newHashMap();
 293
 294  // Track data size in all memstores
-295  private final MemStoreSizing 
memStoreSize = new MemStoreSizing();
+295  private final MemStoreSizing 
memStoreSizing = new ThreadSafeMemStoreSizing();
 296  private final RegionServicesForStores 
regionServicesForStores = new RegionServicesForStores(this);
 297
 298  // Debug possible data loss due to WAL 
off
@@ -1218,7389 +1218,7399 @@
 1210   * Increase the size of mem store in 
this region and the size of global mem
 1211   * store
 1212   */
-1213  public void 
incMemStoreSize(MemStoreSize memStoreSize) {
-1214if (this.rsAccounting != null) {
-1215  
rsAccounting.incGlobalMemStoreSize(memStoreSize);
-1216}
-1217long dataSize;
-1218synchronized (this.memStoreSize) {
-1219  
this.memStoreSize.incMemStoreSize(memStoreSize);
-1220  dataSize = 
this.memStoreSize.getDataSize();
-1221}
-1222
checkNegativeMemStoreDataSize(dataSize, memStoreSize.getDataSize());
-1223  }
-1224
-1225  public void 
decrMemStoreSize(MemStoreSize memStoreSize) {
-1226if (this.rsAccounting != null) {
-1227  
rsAccounting.decGlobalMemStoreSize(memStoreSize);
-1228}
-1229long size;
-1230synchronized (this.memStoreSize) {
-1231  
this.memStoreSize.decMemStoreSize(memStoreSize);
-1232  size = 
this.memStoreSize.getDataSize();
+1213  void incMemStoreSize(MemStoreSize mss) 
{
+1214incMemStoreSize(mss.getDataSize(), 
mss.getHeapSize(), mss.getOffHeapSize());
+1215  }
+1216
+1217  void incMemStoreSize(long 
dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1218if (this.rsAccounting != null) {
+1219  
rsAccounting.incGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, 
offHeapSizeDelta);
+1220}
+1221long dataSize =
+1222
this.memStoreSizing.incMemStoreSize(dataSizeDelta, heapSizeDelta, 
offHeapSizeDelta);
+1223
checkNegativeMemStoreDataSize(dataSize, dataSizeDelta);
+1224  }
+1225
+1226  void decrMemStoreSize(MemStoreSize 
mss) {
+1227decrMemStoreSize(mss.getDataSize(), 
mss.getHeapSize(), mss.getOffHeapSize());
+1228  }
+1229
+1230  void decrMemStoreSize(long 
dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1231if (this.rsAccounting != null) {
+1232  
rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, 
offHeapSizeDelta);
 1233}
-1234checkNegativeMemStoreDataSize(size, 
-memStoreSize.getDataSize());
-1235  }
-1236
-1237  private void 
checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) {
-1238// This is extremely bad if we make 
memStoreSize negative. Log as much info on the offending
-1239// caller as possible. (memStoreSize 
might be a negative value already -- freeing memory)
-1240    if (memStoreDataSize < 0) {
-1241  LOG.error("Asked to modify this 
region's (" + this.toString()
-1242  + ") memStoreSize to a 
negative value which is incorrect. Current memStoreSize="
-1243  + (memStoreDataSize - delta) + 
", delta=" + delta, new Exception());
-1244}
-1245  }
-1246
-1247  @Override
-1248  public RegionInfo getRegionInfo() {
-1249return this.fs.getRegionInfo();
-1250  }
-1251
-1252  /**
-1253   * @return Instance of {@link 
RegionServerServices} used by this HRegion.
-1254   * Can be null.
-1255   */
-1256  RegionServerServices 
getRegionServerServices() {
-1257return this.rsServices;
-1258  }
-1259
-1260  @Override
-1261  public long getReadRequestsCount() {
-1262return readRequestsCount.sum();
-1263  }
-1264
-1265  @Override
-1266  public long 
getFilteredReadRequestsCount() {
-1267return 
filteredReadRequestsCount.sum();
-1268  }
-1269
-1270  @Override
-1271  public long getWriteRequestsCount() 
{
-1272return writeRequestsCount.sum();
-1273  }
-1274
-1275  @Override
-1276  public long getMemStoreDataSize() {
-1277return memStoreSize.getDataSize();
-1278  }
-1279
-1280  @Override
-1281  public long getMemStoreHeapSize() {
-1282return memStoreSize.getHeapSize();
-1283  }
-1284
-1285  @Override
-1286  public long getMemStoreOffHeapSize() 
{
-1287return 
memStoreSize.getOffHeapSize();
-1288  }
-1289
-1290  /** @return store services for this 
region, to access services required by store level needs */
-1291  public RegionServicesForStores 
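
The diff replaces synchronized blocks on a shared MemStoreSizing with a thread-safe implementation whose increment returns the new data size, so the negative-size check needs no lock. A sketch of that accounting style using AtomicLong; this is an illustration of the pattern, not HBase's ThreadSafeMemStoreSizing itself.

import java.util.concurrent.atomic.AtomicLong;

public class ThreadSafeSizingSketch {
  private final AtomicLong dataSize = new AtomicLong();
  private final AtomicLong heapSize = new AtomicLong();
  private final AtomicLong offHeapSize = new AtomicLong();

  /** Applies the deltas atomically and returns the resulting data size. */
  long incMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta) {
    heapSize.addAndGet(heapDelta);
    offHeapSize.addAndGet(offHeapDelta);
    return dataSize.addAndGet(dataDelta);
  }

  public static void main(String[] args) {
    ThreadSafeSizingSketch sizing = new ThreadSafeSizingSketch();
    long newDataSize = sizing.incMemStoreSize(128, 256, 0);
    if (newDataSize < 0) {
      throw new IllegalStateException("memstore data size went negative");
    }
    System.out.println("dataSize=" + newDataSize);
  }
}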

[15/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

2018-05-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
index 2510283..418c60c 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
@@ -77,77 +77,77 @@
 069import 
org.apache.hadoop.hbase.client.RowMutations;
 070import 
org.apache.hadoop.hbase.client.Scan;
 071import 
org.apache.hadoop.hbase.client.Table;
-072import 
org.apache.hadoop.hbase.filter.BinaryComparator;
-073import 
org.apache.hadoop.hbase.filter.Filter;
-074import 
org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import 
org.apache.hadoop.hbase.filter.FilterList;
-076import 
org.apache.hadoop.hbase.filter.PageFilter;
-077import 
org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import 
org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import 
org.apache.hadoop.hbase.io.compress.Compression;
-080import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import 
org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import 
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import 
org.apache.hadoop.hbase.regionserver.BloomType;
-084import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import 
org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import 
org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import 
org.apache.hadoop.hbase.trace.TraceUtil;
-088import 
org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import 
org.apache.hadoop.hbase.util.Bytes;
-090import 
org.apache.hadoop.hbase.util.Hash;
-091import 
org.apache.hadoop.hbase.util.MurmurHash;
-092import 
org.apache.hadoop.hbase.util.Pair;
-093import 
org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import 
org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import 
org.apache.hadoop.mapreduce.Mapper;
-098import 
org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import 
org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import 
org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import 
org.apache.hadoop.util.ToolRunner;
-103import 
org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import 
org.apache.htrace.core.TraceScope;
-106import 
org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import 
org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used to evaluate HBase performance and scalability. Runs an HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the <a
-121 * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation 
extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
-132  static final String RANDOM_READ = 
"randomRead";
-133  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-135  static {
-136
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = 
"TestTable";
-140  public static final byte[] FAMILY_NAME 
= Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-142  public static final byte [] 
QUALIFIER_NAME = COLUMN_ZERO;
+072import 
org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import 
org.apache.hadoop.hbase.filter.BinaryComparator;
+074import 
org.apache.hadoop.hbase.filter.Filter;
+075import 
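
Since PerformanceEvaluation extends Configured and implements Tool, it can be driven programmatically as well as from the command line. A minimal launcher sketch, assuming HBase and its test jar are on the classpath and that the Configuration-taking constructor shown in the class is available; args would be something like "--nomapred randomRead 1".

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.util.ToolRunner;

public class PeLauncher {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ToolRunner parses generic Hadoop options, then hands the rest to the tool.
    int exitCode = ToolRunner.run(conf, new PerformanceEvaluation(conf), args);
    System.exit(exitCode);
  }
}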

[15/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

2018-05-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
index e1bc325..63e7421 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.html
@@ -66,5125 +66,5224 @@
 058import 
java.util.concurrent.TimeoutException;
 059import 
java.util.concurrent.atomic.AtomicBoolean;
 060import 
java.util.concurrent.atomic.AtomicInteger;
-061import org.apache.commons.io.IOUtils;
-062import 
org.apache.commons.lang3.RandomStringUtils;
-063import 
org.apache.commons.lang3.StringUtils;
-064import 
org.apache.hadoop.conf.Configuration;
-065import 
org.apache.hadoop.conf.Configured;
-066import 
org.apache.hadoop.fs.FSDataOutputStream;
-067import org.apache.hadoop.fs.FileStatus;
-068import org.apache.hadoop.fs.FileSystem;
-069import org.apache.hadoop.fs.Path;
-070import 
org.apache.hadoop.fs.permission.FsAction;
-071import 
org.apache.hadoop.fs.permission.FsPermission;
-072import 
org.apache.hadoop.hbase.Abortable;
-073import org.apache.hadoop.hbase.Cell;
-074import 
org.apache.hadoop.hbase.CellUtil;
-075import 
org.apache.hadoop.hbase.ClusterMetrics;
-076import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-077import 
org.apache.hadoop.hbase.HBaseConfiguration;
-078import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-079import 
org.apache.hadoop.hbase.HConstants;
-080import 
org.apache.hadoop.hbase.HRegionInfo;
-081import 
org.apache.hadoop.hbase.HRegionLocation;
-082import 
org.apache.hadoop.hbase.KeyValue;
-083import 
org.apache.hadoop.hbase.MasterNotRunningException;
-084import 
org.apache.hadoop.hbase.MetaTableAccessor;
-085import 
org.apache.hadoop.hbase.RegionLocations;
-086import 
org.apache.hadoop.hbase.ServerName;
-087import 
org.apache.hadoop.hbase.TableName;
-088import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-089import 
org.apache.hadoop.hbase.client.Admin;
-090import 
org.apache.hadoop.hbase.client.ClusterConnection;
-091import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-092import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-093import 
org.apache.hadoop.hbase.client.Connection;
-094import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-095import 
org.apache.hadoop.hbase.client.Delete;
-096import 
org.apache.hadoop.hbase.client.Get;
-097import 
org.apache.hadoop.hbase.client.Put;
-098import 
org.apache.hadoop.hbase.client.RegionInfo;
-099import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-100import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-101import 
org.apache.hadoop.hbase.client.Result;
-102import 
org.apache.hadoop.hbase.client.RowMutations;
-103import 
org.apache.hadoop.hbase.client.Table;
-104import 
org.apache.hadoop.hbase.client.TableDescriptor;
-105import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-106import 
org.apache.hadoop.hbase.client.TableState;
-107import 
org.apache.hadoop.hbase.io.FileLink;
-108import 
org.apache.hadoop.hbase.io.HFileLink;
-109import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-110import 
org.apache.hadoop.hbase.io.hfile.HFile;
-111import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-112import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-113import 
org.apache.hadoop.hbase.master.RegionState;
-114import 
org.apache.hadoop.hbase.regionserver.HRegion;
-115import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-116import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-117import 
org.apache.hadoop.hbase.replication.ReplicationException;
-118import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-119import 
org.apache.hadoop.hbase.security.UserProvider;
-120import 
org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-121import 
org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
-122import 
org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
-123import 
org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
-124import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
-125import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-126import org.apache.hadoop.hbase.wal.WAL;
-127import 
org.apache.hadoop.hbase.wal.WALFactory;
-128import 
org.apache.hadoop.hbase.wal.WALSplitter;
-129import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-130import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-131import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-132import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-133import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-134import 
org.apache.hadoop.ipc.RemoteException;
-135import 
org.apache.hadoop.security.UserGroupInformation;
-136import 
org.apache.hadoop.util.ReflectionUtils;
-137import 

[15/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
index 8302e28..c370eb9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html
@@ -2113,3031 +2113,3033 @@
 2105
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106tableName + " unable to 
delete dangling table state " + tableState);
 2107  }
-2108} else {
-2109  
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110  tableName + " has dangling 
table state " + tableState);
-2111}
-2112  }
-2113}
-2114// check that all tables have 
states
-2115for (TableName tableName : 
tablesInfo.keySet()) {
-2116      if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-2117if (fixMeta) {
-2118  
MetaTableAccessor.updateTableState(connection, tableName, 
TableState.State.ENABLED);
-2119  TableState newState = 
MetaTableAccessor.getTableState(connection, tableName);
-2120  if (newState == null) {
-2121
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122"Unable to change state 
for table " + tableName + " in meta ");
-2123  }
-2124} else {
-2125  
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126  tableName + " has no state 
in meta ");
-2127}
-2128  }
-2129}
-2130  }
-2131
-2132  private void preCheckPermission() 
throws IOException, AccessDeniedException {
-2133if 
(shouldIgnorePreCheckPermission()) {
-2134  return;
-2135}
-2136
-2137Path hbaseDir = 
FSUtils.getRootDir(getConf());
-2138FileSystem fs = 
hbaseDir.getFileSystem(getConf());
-2139UserProvider userProvider = 
UserProvider.instantiate(getConf());
-2140UserGroupInformation ugi = 
userProvider.getCurrent().getUGI();
-2141FileStatus[] files = 
fs.listStatus(hbaseDir);
-2142for (FileStatus file : files) {
-2143  try {
-2144FSUtils.checkAccess(ugi, file, 
FsAction.WRITE);
-2145  } catch (AccessDeniedException 
ace) {
-2146LOG.warn("Got 
AccessDeniedException when preCheckPermission ", ace);
-2147
errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + 
ugi.getUserName()
-2148  + " does not have write perms 
to " + file.getPath()
-2149  + ". Please rerun hbck as hdfs 
user " + file.getOwner());
-2150throw ace;
-2151  }
-2152}
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo 
hi) throws IOException {
-2159
deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] 
metaKey) throws IOException {
-2166Delete d = new Delete(metaKey);
-2167meta.delete(d);
-2168LOG.info("Deleted " + 
Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info 
in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo 
hi) throws IOException {
-2175RowMutations mutations = new 
RowMutations(hi.metaEntry.getRegionName());
-2176Delete d = new 
Delete(hi.metaEntry.getRegionName());
-2177
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179mutations.add(d);
-2180
-2181RegionInfo hri = 
RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182.setOffline(false)
-2183.setSplit(false)
-2184.build();
-2185Put p = 
MetaTableAccessor.makePutFromRegionInfo(hri, 
EnvironmentEdgeManager.currentTime());
-2186mutations.add(p);
-2187
-2188meta.mutateRow(mutations);
-2189LOG.info("Reset split parent " + 
hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This is a backwards-compatibility wrapper for permanently offlining a region
-2194   * that should not be alive.  If the 
region server does not support the
-2195   * "offline" method, it will use the 
closest unassign method instead.  This
-2196   * will basically work until one 
attempts to disable or delete the affected
-2197   * table.  The problem has to do with 
in-memory only master state, so
-2198   * restarting the HMaster or failing 
over to another should fix this.
-2199   */
-2200  private void offline(byte[] 
regionName) throws IOException {
-2201String regionString = 
Bytes.toStringBinary(regionName);
-2202if (!rsSupportsOffline) {
-2203  LOG.warn("Using 
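
resetSplitParent() above leans on RowMutations: the Delete of the SPLITA/SPLITB columns and the Put of the rewritten RegionInfo commit atomically against the same meta row. The same pattern against an ordinary table, with made-up table, row, and column names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicRowUpdateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Table table = conn.getTable(TableName.valueOf("demo"))) {
      byte[] row = Bytes.toBytes("row-1");
      RowMutations mutations = new RowMutations(row);

      // Remove the stale column...
      Delete delete = new Delete(row);
      delete.addColumn(Bytes.toBytes("info"), Bytes.toBytes("stale"));
      mutations.add(delete);

      // ...and write the replacement, in the same atomic unit.
      Put put = new Put(row);
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("fresh"), Bytes.toBytes("v1"));
      mutations.add(put);

      table.mutateRow(mutations); // both apply together, or neither does
    }
  }
}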

[15/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

2018-03-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index d7aa8b1..98a45a0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -680,1330 +680,1333 @@
 672}
 673    List<HRegionLocation> locations = new ArrayList<>();
 674for (RegionInfo regionInfo : regions) 
{
-675  RegionLocations list = 
locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
-676  if (list != null) {
-677for (HRegionLocation loc : 
list.getRegionLocations()) {
-678  if (loc != null) {
-679locations.add(loc);
-680  }
-681}
-682  }
-683}
-684return locations;
-685  }
-686
-687  @Override
-688  public HRegionLocation 
locateRegion(final TableName tableName, final byte[] row)
-689  throws IOException {
-690RegionLocations locations = 
locateRegion(tableName, row, true, true);
-691return locations == null ? null : 
locations.getRegionLocation();
-692  }
-693
-694  @Override
-695  public HRegionLocation 
relocateRegion(final TableName tableName, final byte[] row)
-696  throws IOException {
-697RegionLocations locations =
-698  relocateRegion(tableName, row, 
RegionReplicaUtil.DEFAULT_REPLICA_ID);
-699return locations == null ? null
-700  : 
locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
-701  }
-702
-703  @Override
-704  public RegionLocations 
relocateRegion(final TableName tableName,
-705  final byte [] row, int replicaId) 
throws IOException{
-706// Since this is an explicit request 
not to use any caching, finding
-707// disabled tables should not be 
desirable.  This will ensure that an exception is thrown when
-708// the first time a disabled table is 
interacted with.
-709if 
(!tableName.equals(TableName.META_TABLE_NAME) && 
isTableDisabled(tableName)) {
-710  throw new 
TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
-711}
-712
-713return locateRegion(tableName, row, 
false, true, replicaId);
-714  }
+675  if 
(!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
+676continue;
+677  }
+678  RegionLocations list = 
locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
+679  if (list != null) {
+680for (HRegionLocation loc : 
list.getRegionLocations()) {
+681  if (loc != null) {
+682locations.add(loc);
+683  }
+684}
+685  }
+686}
+687return locations;
+688  }
+689
+690  @Override
+691  public HRegionLocation 
locateRegion(final TableName tableName, final byte[] row)
+692  throws IOException {
+693RegionLocations locations = 
locateRegion(tableName, row, true, true);
+694return locations == null ? null : 
locations.getRegionLocation();
+695  }
+696
+697  @Override
+698  public HRegionLocation 
relocateRegion(final TableName tableName, final byte[] row)
+699  throws IOException {
+700RegionLocations locations =
+701  relocateRegion(tableName, row, 
RegionReplicaUtil.DEFAULT_REPLICA_ID);
+702return locations == null ? null
+703  : 
locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
+704  }
+705
+706  @Override
+707  public RegionLocations 
relocateRegion(final TableName tableName,
+708  final byte [] row, int replicaId) 
throws IOException{
+709// Since this is an explicit request 
not to use any caching, finding
+710// disabled tables should not be 
desirable.  This will ensure that an exception is thrown when
+711// the first time a disabled table is 
interacted with.
+712if 
(!tableName.equals(TableName.META_TABLE_NAME) && 
isTableDisabled(tableName)) {
+713  throw new 
TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
+714}
 715
-716  @Override
-717  public RegionLocations 
locateRegion(final TableName tableName, final byte[] row, boolean useCache,
-718  boolean retry) throws IOException 
{
-719return locateRegion(tableName, row, 
useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-720  }
-721
-722  @Override
-723  public RegionLocations 
locateRegion(final TableName tableName, final byte[] row, boolean useCache,
-724  boolean retry, int replicaId) 
throws IOException {
-725checkClosed();
-726if (tableName == null || 
tableName.getName().length == 0) {
-727  throw new 
IllegalArgumentException("table name cannot be null or zero length");
-728}
-729if 
(tableName.equals(TableName.META_TABLE_NAME)) {
-730  return locateMeta(tableName, 
useCache, replicaId);
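
The plus side of this hunk adds one guard: only the default replica of each region is used when collecting locations, so secondary read replicas no longer produce duplicate entries. The filtering, isolated with a stand-in type (RegionStub is hypothetical; replica id 0 mirrors RegionReplicaUtil.isDefaultReplica):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class DefaultReplicaFilterSketch {
  static class RegionStub {
    final String name;
    final int replicaId;

    RegionStub(String name, int replicaId) {
      this.name = name;
      this.replicaId = replicaId;
    }
  }

  static List<RegionStub> defaultReplicasOnly(List<RegionStub> regions) {
    List<RegionStub> result = new ArrayList<>();
    for (RegionStub region : regions) {
      if (region.replicaId != 0) {
        continue; // skip secondary replicas
      }
      result.add(region);
    }
    return result;
  }

  public static void main(String[] args) {
    List<RegionStub> regions = Arrays.asList(
        new RegionStub("r1", 0), new RegionStub("r1", 1), new RegionStub("r2", 0));
    System.out.println(defaultReplicasOnly(regions).size()); // 2
  }
}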

[15/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

2018-03-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/class-use/RegionMetrics.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/RegionMetrics.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/RegionMetrics.html
index 41e5ed9..646ab55 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/RegionMetrics.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/RegionMetrics.html
@@ -185,9 +185,7 @@
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Map<byte[],RegionMetrics>
-ServerLoad.getRegionMetrics()
-Deprecated.
-
+ServerMetricsBuilder.ServerMetricsImpl.getRegionMetrics()
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Map<byte[],RegionMetrics>
@@ -195,7 +193,9 @@
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Map<byte[],RegionMetrics>
-ServerMetricsBuilder.ServerMetricsImpl.getRegionMetrics()
+ServerLoad.getRegionMetrics()
+Deprecated.
+
 
 
 static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<RegionMetrics>
@@ -282,54 +282,54 @@
 ServerNameserverName)
 
 
-https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionMetrics
-AsyncHBaseAdmin.getRegionMetrics(ServerNameserverName)
-
-
 default https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionMetrics
 Admin.getRegionMetrics(ServerNameserverName)
 Get RegionMetrics of 
all regions hosted on a regionserver.
 
 
-
-https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionMetrics
-AsyncAdmin.getRegionMetrics(ServerNameserverName)
-Get a list of RegionMetrics of 
all regions hosted on a region server.
-
-
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionMetrics
 RawAsyncHBaseAdmin.getRegionMetrics(ServerNameserverName)
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionMetrics
-AsyncHBaseAdmin.getRegionMetrics(ServerNameserverName,
-TableNametableName)
+AsyncHBaseAdmin.getRegionMetrics(ServerNameserverName)
 
 
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionMetrics
+AsyncAdmin.getRegionMetrics(ServerNameserverName)
+Get a list of RegionMetrics of 
all regions hosted on a region seerver.
+
+
+
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionMetrics
 Admin.getRegionMetrics(ServerNameserverName,
 TableNametableName)
 Get RegionMetrics of 
all regions hosted on a regionserver for a table.
 
 
+
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionMetrics
+RawAsyncHBaseAdmin.getRegionMetrics(ServerNameserverName,
+TableNametableName)
+
 
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionMetrics
+AsyncHBaseAdmin.getRegionMetrics(ServerNameserverName,
+TableNametableName)
+
+
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 

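As a usage note for the class-use table above, a hedged sketch of the synchronous Admin.getRegionMetrics(ServerName) entry point; connection setup and a live cluster are assumed:

    import java.util.List;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionMetrics;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RegionMetricsDump {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Enumerate live regionservers, then fetch per-region metrics for each.
          for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
            List<RegionMetrics> perRegion = admin.getRegionMetrics(sn);
            perRegion.forEach(m -> System.out.println(sn + " " + m.getNameAsString()));
          }
        }
      }
    }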
[15/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

2018-03-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
index 7edabda..08add92 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
@@ -137,9 +137,7 @@
 
 
 CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots()
-List completed snapshots.
-
+AsyncHBaseAdmin.listSnapshots()


 List<SnapshotDescription>
@@ -148,22 +146,22 @@


-CompletableFuture<List<SnapshotDescription>>
-RawAsyncHBaseAdmin.listSnapshots()
-
-
 List<SnapshotDescription>
 HBaseAdmin.listSnapshots()

+
+CompletableFuture<List<SnapshotDescription>>
+AsyncAdmin.listSnapshots()
+List completed snapshots.
+
+

 CompletableFuture<List<SnapshotDescription>>
-AsyncHBaseAdmin.listSnapshots()
+RawAsyncHBaseAdmin.listSnapshots()


 CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots(Pattern pattern)
-List all the completed snapshots matching the given pattern.
-
+AsyncHBaseAdmin.listSnapshots(Pattern pattern)


 List<SnapshotDescription>
@@ -172,16 +170,18 @@


-CompletableFuture<List<SnapshotDescription>>
-RawAsyncHBaseAdmin.listSnapshots(Pattern pattern)
-
-
 List<SnapshotDescription>
 HBaseAdmin.listSnapshots(Pattern pattern)

+
+CompletableFuture<List<SnapshotDescription>>
+AsyncAdmin.listSnapshots(Pattern pattern)
+List all the completed snapshots matching the given pattern.
+
+

 CompletableFuture<List<SnapshotDescription>>

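A hedged sketch of the pattern-filtered AsyncAdmin.listSnapshots call from the table above; the snapshot naming scheme "nightly-.*" is illustrative:

    import java.util.List;
    import java.util.regex.Pattern;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListNightlySnapshots {
      public static void main(String[] args) throws Exception {
        try (AsyncConnection conn =
            ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          // The async admin returns a CompletableFuture; get() blocks for the demo.
          List<SnapshotDescription> snaps =
              conn.getAdmin().listSnapshots(Pattern.compile("nightly-.*")).get();
          snaps.forEach(s -> System.out.println(s.getName() + " of " + s.getTableNameAsString()));
        }
      }
    }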
[15/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

2018-03-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
index 4d6f99a..00622a7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
@@ -126,15 +126,15 @@
 
 
 private RpcRetryingCallerFactory
-RegionCoprocessorRpcChannel.rpcCallerFactory
+ConnectionImplementation.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-ConnectionImplementation.rpcCallerFactory
+HTable.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-HTable.rpcCallerFactory
+RegionCoprocessorRpcChannel.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
@@ -155,21 +155,21 @@
 
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
-
-
-RpcRetryingCallerFactory
 ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
 Returns a new RpcRetryingCallerFactory from the given Configuration.


+
+RpcRetryingCallerFactory
+ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
+
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getRpcRetryingCallerFactory()
+ClusterConnection.getRpcRetryingCallerFactory()
 
 
 RpcRetryingCallerFactory
-ClusterConnection.getRpcRetryingCallerFactory()
+ConnectionImplementation.getRpcRetryingCallerFactory()
 
 
 static RpcRetryingCallerFactory

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
index f5a73bc..d833faa 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
@@ -283,27 +283,27 @@ service.
 
 
 private Scan
-ScannerCallableWithReplicas.scan
+AsyncScanSingleRegionRpcRetryingCaller.scan
 
 
 protected Scan
-ClientScanner.scan
+ScannerCallable.scan
 
 
 private Scan
-AsyncClientScanner.scan
+ScannerCallableWithReplicas.scan
 
 
-private Scan
-AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.scan
+protected Scan
+ClientScanner.scan
 
 
 private Scan
-AsyncScanSingleRegionRpcRetryingCaller.scan
+AsyncClientScanner.scan
 
 
-protected Scan
-ScannerCallable.scan
+private Scan
+AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.scan
 
 
 private Scan
@@ -339,11 +339,11 @@ service.
 
 
 protected Scan
-ClientScanner.getScan()
+ScannerCallable.getScan()
 
 
 protected Scan
-ScannerCallable.getScan()
+ClientScanner.getScan()
 
 
 Scan
@@ -638,29 +638,29 @@ service.
 
 
 ResultScanner
-AsyncTable.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
-
+RawAsyncTableImpl.getScanner(Scan scan)


 ResultScanner
-Table.getScanner(Scan scan)
-Returns a scanner on the current table as specified by the Scan object.
+HTable.getScanner(Scan scan)
+The underlying HTable must not be closed.



 ResultScanner
-AsyncTableImpl.getScanner(Scan scan)
+Table.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the Scan object.
+


 ResultScanner
-RawAsyncTableImpl.getScanner(Scan scan)
+AsyncTableImpl.getScanner(Scan scan)


 ResultScanner
-HTable.getScanner(Scan scan)
-The underlying HTable must not be closed.
+AsyncTable.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the Scan object.



@@ -703,9 +703,7 @@ service.


 CompletableFuture<List<Result>>
-AsyncTable.scanAll(Scan scan)
-Return all the results that match the given scan object.
-
+RawAsyncTableImpl.scanAll(Scan scan)


 CompletableFuture<List<Result>>
@@ -713,7 +711,9 @@ service.


 CompletableFuture<List<Result>>

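A hedged sketch of the classic synchronous scan pattern behind Table.getScanner(Scan) from the table above; the table name and row range are illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("t1"))) {
          Scan scan = new Scan().withStartRow(Bytes.toBytes("a"))
                                .withStopRow(Bytes.toBytes("b"));
          // Close the scanner promptly; it holds server-side resources.
          try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result r : scanner) {
              System.out.println(Bytes.toString(r.getRow()));
            }
          }
        }
      }
    }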
[15/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

2018-03-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
index 9ee12ef..4c42811 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushRegionEntry.html
@@ -51,889 +51,893 @@
 043import org.apache.hadoop.hbase.HConstants;
 044import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 045import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
-046import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-047import org.apache.hadoop.hbase.trace.TraceUtil;
-048import org.apache.hadoop.hbase.util.Bytes;
-049import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import org.apache.hadoop.hbase.util.HasThread;
-051import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-052import org.apache.hadoop.hbase.util.Threads;
-053import org.apache.hadoop.ipc.RemoteException;
-054import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-055import org.apache.htrace.core.TraceScope;
-056import org.apache.yetus.audience.InterfaceAudience;
-057import org.slf4j.Logger;
-058import org.slf4j.LoggerFactory;
-059
-060/**
-061 * Thread that flushes cache on request
-062 *
-063 * NOTE: This class extends Thread rather than Chore because the sleep time
-064 * can be interrupted when there is something to do, rather than the Chore
-065 * sleep time which is invariant.
-066 *
-067 * @see FlushRequester
-068 */
-069@InterfaceAudience.Private
-070class MemStoreFlusher implements FlushRequester {
-071  private static final Logger LOG = LoggerFactory.getLogger(MemStoreFlusher.class);
-072
-073  private Configuration conf;
-074  // These two data members go together.  Any entry in the one must have
-075  // a corresponding entry in the other.
-076  private final BlockingQueue<FlushQueueEntry> flushQueue = new DelayQueue<>();
-077  private final Map<Region, FlushRegionEntry> regionsInQueue = new HashMap<>();
-078  private AtomicBoolean wakeupPending = new AtomicBoolean();
-079
-080  private final long threadWakeFrequency;
-081  private final HRegionServer server;
-082  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-083  private final Object blockSignal = new Object();
-084
-085  private long blockingWaitTime;
-086  private final LongAdder updatesBlockedMsHighWater = new LongAdder();
-087
-088  private final FlushHandler[] flushHandlers;
-089  private List<FlushRequestListener> flushRequestListeners = new ArrayList<>(1);
-090
-091  private FlushType flushType;
-092
-093  /**
-094   * Singleton instance inserted into flush queue used for signaling.
-095   */
-096  private static final FlushQueueEntry WAKEUPFLUSH_INSTANCE = new FlushQueueEntry() {
-097    @Override
-098    public long getDelay(TimeUnit unit) {
-099      return 0;
-100    }
-101
-102    @Override
-103    public int compareTo(Delayed o) {
-104      return -1;
-105    }
-106
-107    @Override
-108    public boolean equals(Object obj) {
-109      return obj == this;
-110    }
-111
-112    @Override
-113    public int hashCode() {
-114      return 42;
-115    }
-116  };
+046import org.apache.hadoop.hbase.trace.TraceUtil;
+047import org.apache.hadoop.hbase.util.Bytes;
+048import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import org.apache.hadoop.hbase.util.HasThread;
+050import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+051import org.apache.hadoop.hbase.util.Threads;
+052import org.apache.hadoop.ipc.RemoteException;
+053import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
+054import org.apache.htrace.core.TraceScope;
+055import org.apache.yetus.audience.InterfaceAudience;
+056import org.slf4j.Logger;
+057import org.slf4j.LoggerFactory;
+058
+059/**
+060 * Thread that flushes cache on request
+061 *
+062 * NOTE: This class extends Thread rather than Chore because the sleep time
+063 * can be interrupted when there is something to do, rather than the Chore
+064 * sleep time which is invariant.
+065 *
+066 * @see FlushRequester
+067 */
+068@InterfaceAudience.Private
+069class MemStoreFlusher implements FlushRequester {
+070  private static final Logger LOG = LoggerFactory.getLogger(MemStoreFlusher.class);
+071
+072  private Configuration conf;
+073  // These two data members go together.  Any entry in the one must have
+074  // a corresponding entry in the other.
+075  private final BlockingQueue<FlushQueueEntry> flushQueue = new DelayQueue<>();
+076  private final Map<Region, FlushRegionEntry> regionsInQueue = new HashMap<>();
+077

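The fields above suggest the wakeup pattern MemStoreFlusher uses: a DelayQueue of flush requests plus a zero-delay sentinel entry that jumps the queue and unblocks the consumer. A hedged, self-contained sketch of that pattern (only DelayQueue/Delayed are JDK APIs; everything else is illustrative):

    import java.util.concurrent.DelayQueue;
    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    public class FlushQueueDemo {
      interface Entry extends Delayed {}

      // Zero-delay sentinel: becomes immediately available, waking the poller.
      static final Entry WAKEUP = new Entry() {
        @Override public long getDelay(TimeUnit unit) { return 0; }
        @Override public int compareTo(Delayed o) { return -1; }
      };

      public static void main(String[] args) throws InterruptedException {
        DelayQueue<Entry> queue = new DelayQueue<>();
        Thread poller = new Thread(() -> {
          try {
            Entry e = queue.take();   // blocks until some entry's delay expires
            System.out.println("woken by " + (e == WAKEUP ? "sentinel" : "real entry"));
          } catch (InterruptedException ignored) { }
        });
        poller.start();
        queue.add(WAKEUP);            // signal: wake the poller right away
        poller.join();
      }
    }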
[15/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
index b75d90b..6b7e383 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.html
@@ -72,103 +72,114 @@
 064    super(env, plan.getRegionInfo());
 065    this.plan = plan;
 066    preflightChecks(env, true);
-067  }
-068
-069  @Override
-070  protected Flow executeFromState(final MasterProcedureEnv env, final MoveRegionState state)
-071      throws InterruptedException {
-072    if (LOG.isTraceEnabled()) {
-073      LOG.trace(this + " execute state=" + state);
-074    }
-075    switch (state) {
-076      case MOVE_REGION_UNASSIGN:
-077        addChildProcedure(new UnassignProcedure(plan.getRegionInfo(), plan.getSource(),
-078            plan.getDestination(), true));
-079        setNextState(MoveRegionState.MOVE_REGION_ASSIGN);
-080        break;
-081      case MOVE_REGION_ASSIGN:
-082        AssignProcedure assignProcedure = plan.getDestination() == null ?
-083            new AssignProcedure(plan.getRegionInfo()):
-084            new AssignProcedure(plan.getRegionInfo(), plan.getDestination());
-085        addChildProcedure(assignProcedure);
-086        return Flow.NO_MORE_STATE;
-087      default:
-088        throw new UnsupportedOperationException("unhandled state=" + state);
-089    }
-090    return Flow.HAS_MORE_STATE;
-091  }
-092
-093  @Override
-094  protected void rollbackState(final MasterProcedureEnv env, final MoveRegionState state)
-095      throws IOException {
-096    // no-op
-097  }
-098
-099  @Override
-100  public boolean abort(final MasterProcedureEnv env) {
-101    return false;
+067    checkOnline(env, plan.getRegionInfo());
+068  }
+069
+070  @Override
+071  protected Flow executeFromState(final MasterProcedureEnv env, final MoveRegionState state)
+072      throws InterruptedException {
+073    if (LOG.isTraceEnabled()) {
+074      LOG.trace(this + " execute state=" + state);
+075    }
+076    switch (state) {
+077      case MOVE_REGION_PREPARE:
+078        // Check context again and that region is online; do it here after we have lock on region.
+079        try {
+080          preflightChecks(env, true);
+081          checkOnline(env, this.plan.getRegionInfo());
+082        } catch (HBaseIOException e) {
+083          LOG.warn(this.toString() + " FAILED because " + e.toString());
+084          return Flow.NO_MORE_STATE;
+085        }
+086        break;
+087      case MOVE_REGION_UNASSIGN:
+088        addChildProcedure(new UnassignProcedure(plan.getRegionInfo(), plan.getSource(),
+089            plan.getDestination(), true));
+090        setNextState(MoveRegionState.MOVE_REGION_ASSIGN);
+091        break;
+092      case MOVE_REGION_ASSIGN:
+093        AssignProcedure assignProcedure = plan.getDestination() == null ?
+094            new AssignProcedure(plan.getRegionInfo()):
+095            new AssignProcedure(plan.getRegionInfo(), plan.getDestination());
+096        addChildProcedure(assignProcedure);
+097        return Flow.NO_MORE_STATE;
+098      default:
+099        throw new UnsupportedOperationException("unhandled state=" + state);
+100    }
+101    return Flow.HAS_MORE_STATE;
 102  }
 103
 104  @Override
-105  public void toStringClassDetails(final StringBuilder sb) {
-106    sb.append(getClass().getSimpleName());
-107    sb.append(" ");
-108    sb.append(plan);
-109  }
-110
-111  @Override
-112  protected MoveRegionState getInitialState() {
-113    return MoveRegionState.MOVE_REGION_UNASSIGN;
-114  }
-115
-116  @Override
-117  protected int getStateId(final MoveRegionState state) {
-118    return state.getNumber();
-119  }
-120
-121  @Override
-122  protected MoveRegionState getState(final int stateId) {
-123    return MoveRegionState.valueOf(stateId);
-124  }
-125
-126  @Override
-127  public TableName getTableName() {
-128    return plan.getRegionInfo().getTable();
-129  }
-130
-131  @Override
-132  public TableOperationType getTableOperationType() {
-133    return TableOperationType.REGION_EDIT;
-134  }
-135
-136  @Override
-137  protected void serializeStateData(ProcedureStateSerializer serializer)
-138      throws IOException {
-139    super.serializeStateData(serializer);
-140
-141    final MoveRegionStateData.Builder state = MoveRegionStateData.newBuilder()
-142        // No need to serialize the RegionInfo. The super class has the region.
-143        .setSourceServer(ProtobufUtil.toServerName(plan.getSource()));
-144    if (plan.getDestination() != null) {
-145

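The diff above adds a MOVE_REGION_PREPARE step that re-validates the region before the UNASSIGN/ASSIGN states run. A hedged, framework-free sketch of that three-state flow (the enum and driver are illustrative, not the ProcedureV2 framework itself):

    public class MoveFlowDemo {
      enum State { PREPARE, UNASSIGN, ASSIGN, DONE }

      public static void main(String[] args) {
        State state = State.PREPARE;
        boolean online = true;                 // stand-in for checkOnline(...)
        while (state != State.DONE) {
          switch (state) {
            case PREPARE:
              if (!online) { System.out.println("FAILED: region offline"); return; }
              state = State.UNASSIGN;          // preflight passed, proceed
              break;
            case UNASSIGN:
              System.out.println("unassign from source server");
              state = State.ASSIGN;
              break;
            case ASSIGN:
              System.out.println("assign on destination server");
              state = State.DONE;              // the Flow.NO_MORE_STATE equivalent
              break;
          }
        }
      }
    }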
[15/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

2018-03-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
index b99f924..2bb6cea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionEnvironment.html
@@ -37,1779 +37,1734 @@
 029import java.util.UUID;
 030import java.util.concurrent.ConcurrentHashMap;
 031import java.util.concurrent.ConcurrentMap;
-032import java.util.regex.Matcher;
-033
-034import org.apache.commons.collections4.map.AbstractReferenceMap;
-035import org.apache.commons.collections4.map.ReferenceMap;
-036import org.apache.hadoop.conf.Configuration;
-037import org.apache.hadoop.fs.FileSystem;
-038import org.apache.hadoop.fs.Path;
-039import org.apache.hadoop.hbase.Cell;
-040import org.apache.hadoop.hbase.CompareOperator;
-041import org.apache.hadoop.hbase.Coprocessor;
-042import org.apache.hadoop.hbase.HBaseConfiguration;
-043import org.apache.hadoop.hbase.HConstants;
-044import org.apache.hadoop.hbase.RawCellBuilder;
-045import org.apache.hadoop.hbase.RawCellBuilderFactory;
-046import org.apache.hadoop.hbase.ServerName;
-047import org.apache.hadoop.hbase.SharedConnection;
-048import org.apache.hadoop.hbase.client.Append;
-049import org.apache.hadoop.hbase.client.Connection;
-050import org.apache.hadoop.hbase.client.Delete;
-051import org.apache.hadoop.hbase.client.Durability;
-052import org.apache.hadoop.hbase.client.Get;
-053import org.apache.hadoop.hbase.client.Increment;
-054import org.apache.hadoop.hbase.client.Mutation;
-055import org.apache.hadoop.hbase.client.Put;
-056import org.apache.hadoop.hbase.client.RegionInfo;
-057import org.apache.hadoop.hbase.client.Result;
-058import org.apache.hadoop.hbase.client.Scan;
-059import org.apache.hadoop.hbase.client.TableDescriptor;
-060import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
-061import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
-062import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
-063import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-064import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
-065import org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity;
-066import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
-067import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
-068import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
-069import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
-070import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-071import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
-072import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-073import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-074import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-075import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-076import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-077import org.apache.hadoop.hbase.io.Reference;
-078import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-079import org.apache.hadoop.hbase.metrics.MetricRegistry;
-080import org.apache.hadoop.hbase.regionserver.Region.Operation;
-081import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-082import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-083import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
-084import org.apache.hadoop.hbase.security.User;
-085import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-086import org.apache.hadoop.hbase.util.Bytes;
-087import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
-088import org.apache.hadoop.hbase.util.Pair;
-089import org.apache.hadoop.hbase.wal.WALEdit;
-090import org.apache.hadoop.hbase.wal.WALKey;
-091import org.apache.yetus.audience.InterfaceAudience;
-092import org.slf4j.Logger;
-093import org.slf4j.LoggerFactory;
-094
-095/**
-096 * Implements the coprocessor environment and runtime support for coprocessors
-097 * loaded within a {@link Region}.
-098 */
-099@InterfaceAudience.Private
-100public class RegionCoprocessorHost
-101    extends CoprocessorHost<RegionCoprocessor, RegionCoprocessorEnvironment> {
-102
-103  private static final Logger LOG = LoggerFactory.getLogger(RegionCoprocessorHost.class);
-104  // The shared data map
-105  private static final ReferenceMap<String, ConcurrentMap<String, Object>> SHARED_DATA_MAP =
-106      new

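For context on what this host loads and runs: a hedged sketch of a RegionObserver coprocessor. The preGetOp hook and getRegionObserver accessor are the real RegionObserver/RegionCoprocessor API; the logging body is illustrative:

    import java.util.List;
    import java.util.Optional;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.RegionObserver;

    public class AuditingObserver implements RegionCoprocessor, RegionObserver {
      @Override
      public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);   // expose the observer hooks to the host
      }

      @Override
      public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx,
          Get get, List<Cell> results) {
        // Runs inside the region before every Get served by this region.
        System.out.println("GET on region "
            + ctx.getEnvironment().getRegionInfo().getEncodedName());
      }
    }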
[15/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

2018-03-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index d363e3a..5066d3e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class HRegion.RegionScannerImpl
+class HRegion.RegionScannerImpl
 extends Object
 implements RegionScanner, Shipper, RpcCallback
 RegionScannerImpl is used to combine scanners from multiple Stores (aka column families).
@@ -425,7 +425,7 @@ implements

 storeHeap
-KeyValueHeap storeHeap
+KeyValueHeap storeHeap

@@ -434,7 +434,7 @@ implements

 joinedHeap
-KeyValueHeap joinedHeap
+KeyValueHeap joinedHeap
 Heap of key-values that are not essential for the provided filters and are thus read
  on demand, if on-demand column family loading is enabled.

@@ -445,7 +445,7 @@ implements

 joinedContinuationRow
-protected Cell joinedContinuationRow
+protected Cell joinedContinuationRow
 If the joined heap data gathering is interrupted due to scan limits, this will
  contain the row for which we are populating the values.

@@ -456,7 +456,7 @@ implements

 filterClosed
-private boolean filterClosed
+private boolean filterClosed

@@ -465,7 +465,7 @@ implements

 stopRow
-protected final byte[] stopRow
+protected final byte[] stopRow

@@ -474,7 +474,7 @@ implements

 includeStopRow
-protected final boolean includeStopRow
+protected final boolean includeStopRow

@@ -483,7 +483,7 @@ implements

 region
-protected final HRegion region
+protected final HRegion region

@@ -492,7 +492,7 @@ implements

 comparator
-protected final CellComparator comparator
+protected final CellComparator comparator

@@ -501,7 +501,7 @@ implements

 readPt
-private final long readPt
+private final long readPt

@@ -510,7 +510,7 @@ implements

 maxResultSize
-private final long maxResultSize
+private final long maxResultSize

@@ -519,7 +519,7 @@ implements

 defaultScannerContext
-private final ScannerContext defaultScannerContext
+private final ScannerContext defaultScannerContext

@@ -528,7 +528,7 @@ implements

 filter
-private final FilterWrapper filter
+private final FilterWrapper filter

@@ -545,7 +545,7 @@ implements

 RegionScannerImpl
-RegionScannerImpl(Scan scan,
+RegionScannerImpl(Scan scan,
                   List<KeyValueScanner> additionalScanners,
                   HRegion region)
            throws IOException
@@ -561,7 +561,7 @@ implements

 RegionScannerImpl
-RegionScannerImpl(Scan scan,
+RegionScannerImpl(Scan scan,
                   List<KeyValueScanner> additionalScanners,
                   HRegion region,
                   long nonceGroup,
@@ -587,7 +587,7 @@ implements

 getRegionInfo
-public RegionInfo getRegionInfo()
+public RegionInfo getRegionInfo()

 Specified by:
 getRegionInfo in interface RegionScanner
@@ -602,7 +602,7 @@ implements

 initializeScanners
-protected void initializeScanners(Scan scan,
+protected void initializeScanners(Scan scan,
                   List<KeyValueScanner> additionalScanners)
            throws IOException

@@ -617,7 +617,7 @@ implements

 initializeKVHeap
-protected void initializeKVHeap(List<KeyValueScanner> scanners,
+protected void initializeKVHeap(List<KeyValueScanner> scanners,
                 List<KeyValueScanner> joinedScanners,
                 HRegion region)
          throws

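RegionScannerImpl's "combine scanners from multiple Stores" is a k-way merge: each store scanner is keyed by its current cell and the smallest is emitted next. A hedged, JDK-only sketch of that idea (strings stand in for cells; none of this is the HBase KeyValueHeap code):

    import java.util.AbstractMap.SimpleEntry;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;
    import java.util.Map;
    import java.util.PriorityQueue;

    public class KWayMergeDemo {
      public static void main(String[] args) {
        List<Iterator<String>> scanners = Arrays.asList(
            Arrays.asList("a", "d").iterator(),
            Arrays.asList("b", "e").iterator(),
            Arrays.asList("c").iterator());
        // Heap ordered by each scanner's current ("peeked") element.
        PriorityQueue<Map.Entry<String, Iterator<String>>> heap =
            new PriorityQueue<>(Map.Entry.<String, Iterator<String>>comparingByKey());
        for (Iterator<String> s : scanners) {
          if (s.hasNext()) heap.add(new SimpleEntry<>(s.next(), s));
        }
        StringBuilder out = new StringBuilder();
        while (!heap.isEmpty()) {
          Map.Entry<String, Iterator<String>> top = heap.poll();
          out.append(top.getKey()).append(' ');          // emits a b c d e
          Iterator<String> src = top.getValue();
          if (src.hasNext()) heap.add(new SimpleEntry<>(src.next(), src));
        }
        System.out.println(out.toString().trim());
      }
    }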
[15/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

2018-03-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
index 49b5557..338b7a4 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
@@ -144,17 +144,15 @@
 
 
 
-static HColumnDescriptor
-HColumnDescriptor.parseFrom(byte[] bytes)
-Deprecated.
-
-
-
 static HTableDescriptor
 HTableDescriptor.parseFrom(byte[] bytes)
 Deprecated.


+
+static ClusterId
+ClusterId.parseFrom(byte[] bytes)
+

 static HRegionInfo
 HRegionInfo.parseFrom(byte[] bytes)
@@ -165,8 +163,10 @@



-static ClusterId
-ClusterId.parseFrom(byte[] bytes)
+static HColumnDescriptor
+HColumnDescriptor.parseFrom(byte[] bytes)
+Deprecated.
+


 static SplitLogTask
@@ -220,17 +220,17 @@
 TableDescriptorBuilder.ModifyableTableDescriptor.parseFrom(byte[] bytes)


+static RegionInfo
+RegionInfo.parseFrom(byte[] bytes)
+
+
 static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.parseFrom(byte[] pbBytes)

-
+
 private static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.parseFrom(byte[] bytes)

-
-static RegionInfo
-RegionInfo.parseFrom(byte[] bytes)
-

 static RegionInfo
 RegionInfo.parseFrom(byte[] bytes,
@@ -305,153 +305,153 @@
 ByteArrayComparable.parseFrom(byte[] pbBytes)


-static ColumnPrefixFilter
-ColumnPrefixFilter.parseFrom(byte[] pbBytes)
+static SingleColumnValueExcludeFilter
+SingleColumnValueExcludeFilter.parseFrom(byte[] pbBytes)


-static ColumnCountGetFilter
-ColumnCountGetFilter.parseFrom(byte[] pbBytes)
+static ValueFilter
+ValueFilter.parseFrom(byte[] pbBytes)


-static RowFilter
-RowFilter.parseFrom(byte[] pbBytes)
+static SkipFilter
+SkipFilter.parseFrom(byte[] pbBytes)


-static FuzzyRowFilter
-FuzzyRowFilter.parseFrom(byte[] pbBytes)
+static FamilyFilter
+FamilyFilter.parseFrom(byte[] pbBytes)


-static BinaryComparator
-BinaryComparator.parseFrom(byte[] pbBytes)
+static BinaryPrefixComparator
+BinaryPrefixComparator.parseFrom(byte[] pbBytes)


-static RegexStringComparator
-RegexStringComparator.parseFrom(byte[] pbBytes)
+static NullComparator
+NullComparator.parseFrom(byte[] pbBytes)


-static Filter
-Filter.parseFrom(byte[] pbBytes)
-Concrete implementers can signal a failure condition in their code by throwing an
- IOException.
-
+static BigDecimalComparator
+BigDecimalComparator.parseFrom(byte[] pbBytes)


-static RandomRowFilter
-RandomRowFilter.parseFrom(byte[] pbBytes)
+static ColumnPrefixFilter
+ColumnPrefixFilter.parseFrom(byte[] pbBytes)


-static FirstKeyOnlyFilter
-FirstKeyOnlyFilter.parseFrom(byte[] pbBytes)
+static PageFilter
+PageFilter.parseFrom(byte[] pbBytes)


-static SkipFilter
-SkipFilter.parseFrom(byte[] pbBytes)
+static BitComparator
+BitComparator.parseFrom(byte[] pbBytes)


-static BinaryPrefixComparator
-BinaryPrefixComparator.parseFrom(byte[] pbBytes)
+static RowFilter
+RowFilter.parseFrom(byte[] pbBytes)


-static TimestampsFilter
-TimestampsFilter.parseFrom(byte[] pbBytes)
+static ColumnRangeFilter
+ColumnRangeFilter.parseFrom(byte[] pbBytes)


-static ValueFilter
-ValueFilter.parseFrom(byte[] pbBytes)
+static ColumnCountGetFilter
+ColumnCountGetFilter.parseFrom(byte[] pbBytes)


-static KeyOnlyFilter
-KeyOnlyFilter.parseFrom(byte[] pbBytes)
+static SubstringComparator
+SubstringComparator.parseFrom(byte[] pbBytes)


-static FamilyFilter
-FamilyFilter.parseFrom(byte[] pbBytes)
+static MultipleColumnPrefixFilter
+MultipleColumnPrefixFilter.parseFrom(byte[] pbBytes)


-static QualifierFilter
-QualifierFilter.parseFrom(byte[] pbBytes)
+static ColumnPaginationFilter
+ColumnPaginationFilter.parseFrom(byte[] pbBytes)


-static FilterList
-FilterList.parseFrom(byte[] pbBytes)
+static DependentColumnFilter
+DependentColumnFilter.parseFrom(byte[] pbBytes)


-static BigDecimalComparator
-BigDecimalComparator.parseFrom(byte[] pbBytes)
+static BinaryComparator
+BinaryComparator.parseFrom(byte[] pbBytes)


-static ColumnRangeFilter
-ColumnRangeFilter.parseFrom(byte[] pbBytes)
+static InclusiveStopFilter
+InclusiveStopFilter.parseFrom(byte[] pbBytes)


-static ColumnPaginationFilter
-ColumnPaginationFilter.parseFrom(byte[] pbBytes)
+static KeyOnlyFilter
+KeyOnlyFilter.parseFrom(byte[] pbBytes)


-static SubstringComparator
-SubstringComparator.parseFrom(byte[] pbBytes)
+static MultiRowRangeFilter
+MultiRowRangeFilter.parseFrom(byte[] pbBytes)


-static WhileMatchFilter
-WhileMatchFilter.parseFrom(byte[] pbBytes)
+static Filter
+Filter.parseFrom(byte[] pbBytes)

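All the filter and comparator classes listed above follow the same serialization contract: toByteArray() produces the protobuf wire form and a static parseFrom(byte[]) reconstructs it (throwing DeserializationException on bad input). A hedged round-trip sketch using KeyOnlyFilter, one concrete class from the list:

    import org.apache.hadoop.hbase.filter.Filter;
    import org.apache.hadoop.hbase.filter.KeyOnlyFilter;

    public class FilterRoundTrip {
      public static void main(String[] args) throws Exception {
        Filter original = new KeyOnlyFilter();
        byte[] wire = original.toByteArray();         // protobuf-encoded form
        Filter copy = KeyOnlyFilter.parseFrom(wire);  // static factory, per the table above
        System.out.println(copy.getClass().getSimpleName());
      }
    }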
[15/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

2018-03-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
index 08add92..7edabda 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
@@ -137,7 +137,9 @@
 
 
 CompletableFuture<List<SnapshotDescription>>
-AsyncHBaseAdmin.listSnapshots()
+AsyncAdmin.listSnapshots()
+List completed snapshots.
+


 List<SnapshotDescription>
@@ -146,22 +148,22 @@



-List<SnapshotDescription>
-HBaseAdmin.listSnapshots()
+CompletableFuture<List<SnapshotDescription>>
+RawAsyncHBaseAdmin.listSnapshots()


-CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots()
-List completed snapshots.
-
+List<SnapshotDescription>
+HBaseAdmin.listSnapshots()


 CompletableFuture<List<SnapshotDescription>>
-RawAsyncHBaseAdmin.listSnapshots()
+AsyncHBaseAdmin.listSnapshots()


 CompletableFuture<List<SnapshotDescription>>
-AsyncHBaseAdmin.listSnapshots(Pattern pattern)
+AsyncAdmin.listSnapshots(Pattern pattern)
+List all the completed snapshots matching the given pattern.
+


 List<SnapshotDescription>
@@ -170,18 +172,16 @@



-List<SnapshotDescription>
-HBaseAdmin.listSnapshots(Pattern pattern)
+CompletableFuture<List<SnapshotDescription>>
+RawAsyncHBaseAdmin.listSnapshots(Pattern pattern)


-CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots(Pattern pattern)
-List all the completed snapshots matching the given pattern.
-

[15/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

2018-03-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
index df5fa53..8fffb89 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.DefaultVisitorBase.html
@@ -42,1927 +42,2060 @@
 034import java.util.TreeMap;
 035import java.util.regex.Matcher;
 036import java.util.regex.Pattern;
-037import org.apache.hadoop.conf.Configuration;
-038import org.apache.hadoop.hbase.Cell.Type;
-039import org.apache.hadoop.hbase.client.Connection;
-040import org.apache.hadoop.hbase.client.ConnectionFactory;
-041import org.apache.hadoop.hbase.client.Consistency;
-042import org.apache.hadoop.hbase.client.Delete;
-043import org.apache.hadoop.hbase.client.Get;
-044import org.apache.hadoop.hbase.client.Mutation;
-045import org.apache.hadoop.hbase.client.Put;
-046import org.apache.hadoop.hbase.client.RegionInfo;
-047import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-048import org.apache.hadoop.hbase.client.RegionLocator;
-049import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-050import org.apache.hadoop.hbase.client.RegionServerCallable;
-051import org.apache.hadoop.hbase.client.Result;
-052import org.apache.hadoop.hbase.client.ResultScanner;
-053import org.apache.hadoop.hbase.client.Scan;
-054import org.apache.hadoop.hbase.client.Table;
-055import org.apache.hadoop.hbase.client.TableState;
-056import org.apache.hadoop.hbase.exceptions.DeserializationException;
-057import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-058import org.apache.hadoop.hbase.master.RegionState;
-059import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-060import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-061import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-062import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-063import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
-064import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-065import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
-066import org.apache.hadoop.hbase.util.Bytes;
-067import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-068import org.apache.hadoop.hbase.util.ExceptionUtil;
-069import org.apache.hadoop.hbase.util.Pair;
-070import org.apache.hadoop.hbase.util.PairOfSameType;
-071import org.apache.yetus.audience.InterfaceAudience;
-072import org.slf4j.Logger;
-073import org.slf4j.LoggerFactory;
-074
-075import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-076
-077/**
-078 * <p>
-079 * Read/write operations on region and assignment information stored in <code>hbase:meta</code>.
-080 * </p>
+037import java.util.stream.Collectors;
+038import java.util.stream.Stream;
+039import org.apache.hadoop.conf.Configuration;
+040import org.apache.hadoop.hbase.Cell.Type;
+041import org.apache.hadoop.hbase.client.Connection;
+042import org.apache.hadoop.hbase.client.ConnectionFactory;
+043import org.apache.hadoop.hbase.client.Consistency;
+044import org.apache.hadoop.hbase.client.Delete;
+045import org.apache.hadoop.hbase.client.Get;
+046import org.apache.hadoop.hbase.client.Mutation;
+047import org.apache.hadoop.hbase.client.Put;
+048import org.apache.hadoop.hbase.client.RegionInfo;
+049import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+050import org.apache.hadoop.hbase.client.RegionLocator;
+051import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+052import org.apache.hadoop.hbase.client.RegionServerCallable;
+053import org.apache.hadoop.hbase.client.Result;
+054import org.apache.hadoop.hbase.client.ResultScanner;
+055import org.apache.hadoop.hbase.client.Scan;
+056import org.apache.hadoop.hbase.client.Table;
+057import org.apache.hadoop.hbase.client.TableState;
+058import org.apache.hadoop.hbase.exceptions.DeserializationException;
+059import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+060import org.apache.hadoop.hbase.master.RegionState;
+061import org.apache.hadoop.hbase.master.RegionState.State;
+062import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+063import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+064import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
+065import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+066import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+067import

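MetaTableAccessor wraps read/write access to the hbase:meta catalog table. A hedged sketch of the plain-client equivalent of a meta read; the info:regioninfo column is the well-known meta layout, but treat the null handling and parse step as assumptions:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScan {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table meta = conn.getTable(TableName.META_TABLE_NAME);
             ResultScanner rs = meta.getScanner(new Scan().addFamily(Bytes.toBytes("info")))) {
          for (Result r : rs) {
            byte[] cell = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"));
            if (cell == null) continue;                    // skip non-region rows
            RegionInfo info = RegionInfo.parseFrom(cell);  // value carries a protobuf payload
            System.out.println(info.getRegionNameAsString());
          }
        }
      }
    }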
[15/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

2018-03-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html 
b/apidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
index df7925c..d0d02d9 100644
--- a/apidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
+++ b/apidocs/org/apache/hadoop/hbase/client/HTableMultiplexer.html
@@ -97,7 +97,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html java.lang.Object
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html java.lang.Object


 org.apache.hadoop.hbase.client.HTableMultiplexer
@@ -111,7 +111,7 @@ var activeTableTab = "activeTableTab";

 @InterfaceAudience.Public
 public class HTableMultiplexer
-extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html Object
+extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html Object
 HTableMultiplexer provides a thread-safe non-blocking PUT API across all the tables.
  Each put will be sharded into different buffer queues based on its destination region server.
  So each region server buffer queue will only have the puts which share the same destination.
@@ -164,15 +164,15 @@ extends Object
 Field and Description

-static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html String
+static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html String
 TABLE_MULTIPLEXER_FLUSH_PERIOD_MS

-static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html String
+static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html String
 TABLE_MULTIPLEXER_INIT_THREADS

-static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html String
+static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html String
 TABLE_MULTIPLEXER_MAX_RETRIES_IN_QUEUE

@@ -224,9 +224,9 @@ extends Object
 getHTableMultiplexerStatus()

-http://docs.oracle.com/javase/8/docs/api/java/util/List.html List<Put>
+https://docs.oracle.com/javase/8/docs/api/java/util/List.html List<Put>
 put(byte[] tableName,
-    http://docs.oracle.com/javase/8/docs/api/java/util/List.html List<Put> puts)
+    https://docs.oracle.com/javase/8/docs/api/java/util/List.html List<Put> puts)
 Deprecated.
 Use put(TableName, List) instead.

@@ -252,9 +252,9 @@ extends Object


-http://docs.oracle.com/javase/8/docs/api/java/util/List.html List<Put>
+https://docs.oracle.com/javase/8/docs/api/java/util/List.html List<Put>
 put(TableName tableName,
-    http://docs.oracle.com/javase/8/docs/api/java/util/List.html List<Put> puts)
+    https://docs.oracle.com/javase/8/docs/api/java/util/List.html List<Put> puts)
 The puts request will be buffered by their corresponding buffer queue.

@@ -278,8 +278,8 @@ extends Object


-Methods inherited from class java.lang.Object (http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html)
-clone, equals, finalize, getClass,

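A hedged sketch of the fire-and-forget usage the HTableMultiplexer class comment describes; the buffer size, table, and cell coordinates are illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HTableMultiplexer;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MultiplexerDemo {
      public static void main(String[] args) throws Exception {
        // Second argument bounds each regionserver's buffer queue.
        HTableMultiplexer mux =
            new HTableMultiplexer(HBaseConfiguration.create(), 10_000);
        Put put = new Put(Bytes.toBytes("row1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        // Non-blocking: returns false instead of waiting when the queue is full.
        boolean queued = mux.put(TableName.valueOf("t1"), put);
        System.out.println(queued ? "buffered" : "queue full, not accepted");
      }
    }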
[15/51] [partial] hbase-site git commit: Published site at .

2018-02-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
index 93f650f..d7aa8b1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
@@ -546,1472 +546,1464 @@
 538    return this.conf;
 539  }
 540
-541  /**
-542   * @return true if the master is running, throws an exception otherwise
-543   * @throws org.apache.hadoop.hbase.MasterNotRunningException - if the master is not running
-544   * @deprecated this has been deprecated without a replacement
-545   */
-546  @Deprecated
-547  @Override
-548  public boolean isMasterRunning()
-549      throws MasterNotRunningException, ZooKeeperConnectionException {
-550    // When getting the master connection, we check it's running,
-551    // so if there is no exception, it means we've been able to get a
-552    // connection on a running master
-553    MasterKeepAliveConnection m = getKeepAliveMasterService();
-554    m.close();
-555    return true;
-556  }
-557
-558  @Override
-559  public HRegionLocation getRegionLocation(final TableName tableName,
-560      final byte [] row, boolean reload)
-561      throws IOException {
-562    return reload ? relocateRegion(tableName, row) : locateRegion(tableName, row);
-563  }
-564
-565
-566  @Override
-567  public boolean isTableEnabled(TableName tableName) throws IOException {
-568    return getTableState(tableName).inStates(TableState.State.ENABLED);
-569  }
-570
-571  @Override
-572  public boolean isTableDisabled(TableName tableName) throws IOException {
-573    return getTableState(tableName).inStates(TableState.State.DISABLED);
-574  }
-575
-576  @Override
-577  public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys)
-578      throws IOException {
-579    if (this.closed) {
-580      throw new IOException(toString() + " closed");
-581    }
-582    try {
-583      if (!isTableEnabled(tableName)) {
-584        LOG.debug("Table " + tableName + " not enabled");
-585        return false;
-586      }
-587      List<Pair<RegionInfo, ServerName>> locations =
-588        MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true);
-589
-590      int notDeployed = 0;
-591      int regionCount = 0;
-592      for (Pair<RegionInfo, ServerName> pair : locations) {
-593        RegionInfo info = pair.getFirst();
-594        if (pair.getSecond() == null) {
-595          if (LOG.isDebugEnabled()) {
-596            LOG.debug("Table " + tableName + " has not deployed region " + pair.getFirst()
-597                .getEncodedName());
-598          }
-599          notDeployed++;
-600        } else if (splitKeys != null
-601            && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
-602          for (byte[] splitKey : splitKeys) {
-603            // Just check if the splitkey is available
-604            if (Bytes.equals(info.getStartKey(), splitKey)) {
-605              regionCount++;
-606              break;
-607            }
-608          }
-609        } else {
-610          // Always empty start row should be counted
-611          regionCount++;
-612        }
-613      }
-614      if (notDeployed > 0) {
-615        if (LOG.isDebugEnabled()) {
-616          LOG.debug("Table " + tableName + " has " + notDeployed + " regions");
-617        }
-618        return false;
-619      } else if (splitKeys != null && regionCount != splitKeys.length + 1) {
-620        if (LOG.isDebugEnabled()) {
-621          LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1)
-622              + " regions, but only " + regionCount + " available");
-623        }
-624        return false;
-625      } else {
-626        if (LOG.isDebugEnabled()) {
-627          LOG.debug("Table " + tableName + " should be available");
-628        }
-629        return true;
-630      }
-631    } catch (TableNotFoundException tnfe) {
-632      LOG.warn("Table " + tableName + " not enabled, it does not exist");
-633      return false;
-634    }
-635  }
-636
-637  @Override
-638  public HRegionLocation locateRegion(final byte[] regionName) throws IOException {
-639    RegionLocations locations = locateRegion(RegionInfo.getTable(regionName),
-640      RegionInfo.getStartKey(regionName), false, true);
-641    return locations == null ? null : locations.getRegionLocation();
+541  private void checkClosed() throws DoNotRetryIOException {
+542    if (this.closed)

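The isTableAvailable logic above counts deployed regions and, when split keys are supplied, expects exactly splitKeys.length + 1 regions. A hedged sketch of the public Admin entry point that funnels into it; the table name and split key are illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AvailabilityCheck {
      public static void main(String[] args) throws Exception {
        byte[][] splitKeys = { Bytes.toBytes("m") };   // one split => two regions expected
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // True only when every expected region is deployed on some regionserver.
          System.out.println(admin.isTableAvailable(TableName.valueOf("t1"), splitKeys));
        }
      }
    }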
[15/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 802b925..a3e80ab 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -73,229 +73,229 @@
 065import java.util.concurrent.TimeoutException;
 066import java.util.concurrent.atomic.AtomicBoolean;
 067import java.util.concurrent.atomic.AtomicInteger;
-068import java.util.concurrent.atomic.AtomicLong;
-069import java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import java.util.concurrent.locks.ReadWriteLock;
-072import java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import org.apache.hadoop.hbase.CellBuilderType;
-081import org.apache.hadoop.hbase.CellComparator;
-082import org.apache.hadoop.hbase.CellComparatorImpl;
-083import org.apache.hadoop.hbase.CellScanner;
-084import org.apache.hadoop.hbase.CellUtil;
-085import org.apache.hadoop.hbase.CompareOperator;
-086import org.apache.hadoop.hbase.CompoundConfiguration;
-087import org.apache.hadoop.hbase.DoNotRetryIOException;
-088import org.apache.hadoop.hbase.DroppedSnapshotException;
-089import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import org.apache.hadoop.hbase.HConstants;
-091import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import org.apache.hadoop.hbase.HRegionInfo;
-094import org.apache.hadoop.hbase.KeyValue;
-095import org.apache.hadoop.hbase.KeyValueUtil;
-096import org.apache.hadoop.hbase.NamespaceDescriptor;
-097import org.apache.hadoop.hbase.NotServingRegionException;
-098import org.apache.hadoop.hbase.PrivateCellUtil;
-099import org.apache.hadoop.hbase.RegionTooBusyException;
-100import org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import org.apache.hadoop.hbase.UnknownScannerException;
-104import org.apache.hadoop.hbase.client.Append;
-105import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import org.apache.hadoop.hbase.client.CompactionState;
-107import org.apache.hadoop.hbase.client.Delete;
-108import org.apache.hadoop.hbase.client.Durability;
-109import org.apache.hadoop.hbase.client.Get;
-110import org.apache.hadoop.hbase.client.Increment;
-111import org.apache.hadoop.hbase.client.IsolationLevel;
-112import org.apache.hadoop.hbase.client.Mutation;
-113import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import org.apache.hadoop.hbase.client.Put;
-115import org.apache.hadoop.hbase.client.RegionInfo;
-116import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import org.apache.hadoop.hbase.client.Result;
-118import org.apache.hadoop.hbase.client.RowMutations;
-119import org.apache.hadoop.hbase.client.Scan;
-120import org.apache.hadoop.hbase.client.TableDescriptor;
-121import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import org.apache.hadoop.hbase.filter.FilterWrapper;
-131import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import org.apache.hadoop.hbase.io.HFileLink;
-133import org.apache.hadoop.hbase.io.HeapSize;
-134import org.apache.hadoop.hbase.io.TimeRange;
-135import org.apache.hadoop.hbase.io.hfile.HFile;
-136import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import org.apache.hadoop.hbase.ipc.RpcCall;
-139import org.apache.hadoop.hbase.ipc.RpcServer;
-140import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import

[15/51] [partial] hbase-site git commit: Published site at .

2018-02-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
index bd13b53..802b925 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
@@ -900,7600 +900,7598 @@
 892    if (this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
 893      status.setStatus("Writing region info on filesystem");
 894      fs.checkRegionInfoOnFilesystem();
-895    } else {
-896      if (LOG.isDebugEnabled()) {
-897        LOG.debug("Skipping creation of .regioninfo file for " + this.getRegionInfo());
-898      }
-899    }
-900
-901    // Initialize all the HStores
-902    status.setStatus("Initializing all the Stores");
-903    long maxSeqId = initializeStores(reporter, status);
-904    this.mvcc.advanceTo(maxSeqId);
-905    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-906      Collection<HStore> stores = this.stores.values();
-907      try {
-908        // update the stores that we are replaying
-909        stores.forEach(HStore::startReplayingFromWAL);
-910        // Recover any edits if available.
-911        maxSeqId = Math.max(maxSeqId,
-912          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
-913        // Make sure mvcc is up to max.
-914        this.mvcc.advanceTo(maxSeqId);
-915      } finally {
-916        // update the stores that we are done replaying
-917        stores.forEach(HStore::stopReplayingFromWAL);
-918      }
-919    }
-920    this.lastReplayedOpenRegionSeqId = maxSeqId;
+895    }
+896
+897    // Initialize all the HStores
+898    status.setStatus("Initializing all the Stores");
+899    long maxSeqId = initializeStores(reporter, status);
+900    this.mvcc.advanceTo(maxSeqId);
+901    if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+902      Collection<HStore> stores = this.stores.values();
+903      try {
+904        // update the stores that we are replaying
+905        stores.forEach(HStore::startReplayingFromWAL);
+906        // Recover any edits if available.
+907        maxSeqId = Math.max(maxSeqId,
+908          replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
+909        // Make sure mvcc is up to max.
+910        this.mvcc.advanceTo(maxSeqId);
+911      } finally {
+912        // update the stores that we are done replaying
+913        stores.forEach(HStore::stopReplayingFromWAL);
+914      }
+915    }
+916    this.lastReplayedOpenRegionSeqId = maxSeqId;
+917
+918    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+919    this.writestate.flushRequested = false;
+920    this.writestate.compacting.set(0);
 921
-922    this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-923    this.writestate.flushRequested = false;
-924    this.writestate.compacting.set(0);
-925
-926    if (this.writestate.writesEnabled) {
-927      // Remove temporary data left over from old regions
-928      status.setStatus("Cleaning up temporary data from old regions");
-929      fs.cleanupTempDir();
-930    }
-931
-932    if (this.writestate.writesEnabled) {
-933      status.setStatus("Cleaning up detritus from prior splits");
-934      // Get rid of any splits or merges that were lost in-progress.  Clean out
-935      // these directories here on open.  We may be opening a region that was
-936      // being split but we crashed in the middle of it all.
-937      fs.cleanupAnySplitDetritus();
-938      fs.cleanupMergesDir();
-939    }
-940
-941    // Initialize split policy
-942    this.splitPolicy = RegionSplitPolicy.create(this, conf);
-943
-944    // Initialize flush policy
-945    this.flushPolicy = FlushPolicyFactory.create(this, conf);
-946
-947    long lastFlushTime = EnvironmentEdgeManager.currentTime();
-948    for (HStore store: stores.values()) {
-949      this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-950    }
-951
-952    // Use maximum of log sequenceid or that which was found in stores
-953    // (particularly if no recovered edits, seqid will be -1).
-954    long nextSeqid = maxSeqId;
-955    if (this.writestate.writesEnabled) {
-956      nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
-957          this.fs.getRegionDir(), nextSeqid, 1);
-958    } else {
-959      nextSeqid++;
-960    }
-961
-962    LOG.info("Onlined " + this.getRegionInfo().getShortNameToLog() +
-963      "; next sequenceid=" + nextSeqid);
+922    if
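The hunk above folds WAL-edit replay into the region's sequence-id bookkeeping; a toy Java illustration of the rule (plain Java with stand-in values, not HBase API):

    // Toy illustration of the open-sequence rule in the hunk above: a reopened
    // region must resume above both the highest seqid flushed to its stores
    // and the highest seqid replayed from recovered WAL edits.
    static long nextSequenceId() {
      long maxFlushedSeqId = 41L;   // hypothetical max seqid found in store files
      long maxReplayedSeqId = 57L;  // hypothetical max seqid from recovered edits
      long maxSeqId = Math.max(maxFlushedSeqId, maxReplayedSeqId);
      return maxSeqId + 1;          // first seqid handed to new writes: 58
    }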

[15/51] [partial] hbase-site git commit: Published site at .

2018-02-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
index 570fb68..b8ce496 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
@@ -168,39 +168,27 @@
 
 
 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)
-
-
-void
 BlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)
 Add block to cache (defaults to not in-memory).
 
 
-
+
 void
 LruBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)
 Cache the block with the specified name and buffer.
 
 
-
-void
-MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)
-
 
 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory)
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)
 
 
 void
-InclusiveCombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory)
+MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf)
 
 
 void
@@ -220,6 +208,18 @@
 
 
 void
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory)
+
+
+void
+InclusiveCombinedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory)
+
+
+void
 MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory)
@@ -232,21 +232,21 @@
 
 
 boolean
-CombinedBlockCache.evictBlock(BlockCacheKey cacheKey)
+BlockCache.evictBlock(BlockCacheKey cacheKey)
+Evict block from cache.
+
 
 
 boolean
-InclusiveCombinedBlockCache.evictBlock(BlockCacheKey cacheKey)
+LruBlockCache.evictBlock(BlockCacheKey cacheKey)
 
 
 boolean
-BlockCache.evictBlock(BlockCacheKey cacheKey)
-Evict block from cache.
-
+CombinedBlockCache.evictBlock(BlockCacheKey cacheKey)
 
 
 boolean
-LruBlockCache.evictBlock(BlockCacheKey cacheKey)
+InclusiveCombinedBlockCache.evictBlock(BlockCacheKey cacheKey)
 
 
 boolean
@@ -254,35 +254,35 @@
 
 
 Cacheable
-CombinedBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
+BlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
+Fetch block from cache.
+
 
 
 Cacheable
-InclusiveCombinedBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
+LruBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
+Get the buffer of the block with the specified name.
+
 
 
 Cacheable
-BlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
-Fetch block from cache.
-
+CombinedBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
 
 
 Cacheable
-LruBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
-Get the buffer of the block with the specified name.
-
+InclusiveCombinedBlockCache.getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics)
 
 
 Cacheable
@@ -308,11 +308,6 @@
 CombinedBlockCache.getRefCount(BlockCacheKey cacheKey)
 
 
-void
-CombinedBlockCache.returnBlock(BlockCacheKey cacheKey, Cacheable block)
-
-
 default void
 BlockCache.returnBlock(BlockCacheKey cacheKey, Cacheable block)
@@ -320,6 +315,11 @@
  is over.
 
 
+
+void
+CombinedBlockCache.returnBlock(BlockCacheKey cacheKey, Cacheable block)
+
 
 
 
@@ -497,14 +497,14 @@
 
 
 void
-BucketCache.BucketEntryGroup.add(Map.Entry<BlockCacheKey, BucketCache.BucketEntry> block)
-
-
-void
 CachedEntryQueue.add(Map.Entry<BlockCacheKey, BucketCache.BucketEntry> entry)
 Attempt to add the specified entry to this queue.
 
 
+
+void
+BucketCache.BucketEntryGroup.add(Map.Entry<BlockCacheKey, BucketCache.BucketEntry> block)
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockType.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockType.html
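All of the reordered rows above belong to the BlockCache cache/fetch/evict surface; a minimal caller-side sketch, assuming an already-constructed cache and block (the file name and offset used in the key are hypothetical):

    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
    import org.apache.hadoop.hbase.io.hfile.Cacheable;

    // Minimal sketch, assuming `cache` (e.g. an LruBlockCache) and `block`
    // already exist.
    static void demo(BlockCache cache, Cacheable block) {
      BlockCacheKey key = new BlockCacheKey("hypothetical-hfile", 0L); // name + offset
      cache.cacheBlock(key, block);                           // default: not in-memory priority
      Cacheable hit = cache.getBlock(key, true, false, true); // caching, !repeat, update metrics
      if (hit != null) {
        cache.evictBlock(key);                                // drop it again
      }
    }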

[15/51] [partial] hbase-site git commit: Published site at .

2018-02-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
index a07a830..80108a2 100644
--- a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
+++ b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
@@ -144,14 +144,16 @@
 
 
 
-static HTableDescriptor
-HTableDescriptor.parseFrom(byte[] bytes)
+static HColumnDescriptor
+HColumnDescriptor.parseFrom(byte[] bytes)
 Deprecated.
 
 
 
-static ClusterId
-ClusterId.parseFrom(byte[] bytes)
+static HTableDescriptor
+HTableDescriptor.parseFrom(byte[] bytes)
+Deprecated.
+
 
 
 static HRegionInfo
@@ -163,10 +165,8 @@
 
 
 
-static HColumnDescriptor
-HColumnDescriptor.parseFrom(byte[] bytes)
-Deprecated.
-
+static ClusterId
+ClusterId.parseFrom(byte[] bytes)
 
 
 static SplitLogTask
@@ -220,17 +220,17 @@
 TableDescriptorBuilder.ModifyableTableDescriptor.parseFrom(byte[] bytes)
 
 
-static RegionInfo
-RegionInfo.parseFrom(byte[] bytes)
-
-
 static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.parseFrom(byte[] pbBytes)
 
-
+
 private static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.parseFrom(byte[] bytes)
 
+
+static RegionInfo
+RegionInfo.parseFrom(byte[] bytes)
+
 
 static RegionInfo
 RegionInfo.parseFrom(byte[] bytes,
@@ -305,111 +305,111 @@
 ByteArrayComparable.parseFrom(byte[] pbBytes)
 
 
-static SingleColumnValueExcludeFilter
-SingleColumnValueExcludeFilter.parseFrom(byte[] pbBytes)
+static ColumnPrefixFilter
+ColumnPrefixFilter.parseFrom(byte[] pbBytes)
 
 
-static ValueFilter
-ValueFilter.parseFrom(byte[] pbBytes)
+static ColumnCountGetFilter
+ColumnCountGetFilter.parseFrom(byte[] pbBytes)
 
 
-static SkipFilter
-SkipFilter.parseFrom(byte[] pbBytes)
+static RowFilter
+RowFilter.parseFrom(byte[] pbBytes)
 
 
-static FamilyFilter
-FamilyFilter.parseFrom(byte[] pbBytes)
+static FuzzyRowFilter
+FuzzyRowFilter.parseFrom(byte[] pbBytes)
 
 
-static BinaryPrefixComparator
-BinaryPrefixComparator.parseFrom(byte[] pbBytes)
+static BinaryComparator
+BinaryComparator.parseFrom(byte[] pbBytes)
 
 
-static NullComparator
-NullComparator.parseFrom(byte[] pbBytes)
+static RegexStringComparator
+RegexStringComparator.parseFrom(byte[] pbBytes)
 
 
-static BigDecimalComparator
-BigDecimalComparator.parseFrom(byte[] pbBytes)
+static Filter
+Filter.parseFrom(byte[] pbBytes)
+Concrete implementers can signal a failure condition in their code by throwing an IOException.
+
 
 
-static ColumnPrefixFilter
-ColumnPrefixFilter.parseFrom(byte[] pbBytes)
+static RandomRowFilter
+RandomRowFilter.parseFrom(byte[] pbBytes)
 
 
-static PageFilter
-PageFilter.parseFrom(byte[] pbBytes)
+static FirstKeyOnlyFilter
+FirstKeyOnlyFilter.parseFrom(byte[] pbBytes)
 
 
-static BitComparator
-BitComparator.parseFrom(byte[] pbBytes)
+static SkipFilter
+SkipFilter.parseFrom(byte[] pbBytes)
 
 
-static RowFilter
-RowFilter.parseFrom(byte[] pbBytes)
+static BinaryPrefixComparator
+BinaryPrefixComparator.parseFrom(byte[] pbBytes)
 
 
-static ColumnRangeFilter
-ColumnRangeFilter.parseFrom(byte[] pbBytes)
+static TimestampsFilter
+TimestampsFilter.parseFrom(byte[] pbBytes)
 
 
-static ColumnCountGetFilter
-ColumnCountGetFilter.parseFrom(byte[] pbBytes)
+static ValueFilter
+ValueFilter.parseFrom(byte[] pbBytes)
 
 
-static SubstringComparator
-SubstringComparator.parseFrom(byte[] pbBytes)
+static KeyOnlyFilter
+KeyOnlyFilter.parseFrom(byte[] pbBytes)
 
 
-static MultipleColumnPrefixFilter
-MultipleColumnPrefixFilter.parseFrom(byte[] pbBytes)
+static FamilyFilter
+FamilyFilter.parseFrom(byte[] pbBytes)
 
 
-static ColumnPaginationFilter
-ColumnPaginationFilter.parseFrom(byte[] pbBytes)
+static QualifierFilter
+QualifierFilter.parseFrom(byte[] pbBytes)
 
 
-static DependentColumnFilter
-DependentColumnFilter.parseFrom(byte[] pbBytes)
+static FilterList
+FilterList.parseFrom(byte[] pbBytes)
 
 
-static BinaryComparator
-BinaryComparator.parseFrom(byte[] pbBytes)
+static BigDecimalComparator
+BigDecimalComparator.parseFrom(byte[] pbBytes)
 
 
-static InclusiveStopFilter
-InclusiveStopFilter.parseFrom(byte[] pbBytes)
+static ColumnRangeFilter
+ColumnRangeFilter.parseFrom(byte[] pbBytes)
 
 
-static KeyOnlyFilter
-KeyOnlyFilter.parseFrom(byte[] pbBytes)
+static ColumnPaginationFilter
+ColumnPaginationFilter.parseFrom(byte[] pbBytes)
 
 
-static MultiRowRangeFilter
-MultiRowRangeFilter.parseFrom(byte[] pbBytes)
+static SubstringComparator
+SubstringComparator.parseFrom(byte[] pbBytes)
 
 
-static Filter
-Filter.parseFrom(byte[] pbBytes)
-Concrete implementers can signal a
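Each parseFrom listed above is the protobuf-deserialization half of a filter or comparator wire format; a round-trip sketch using PrefixFilter (the prefix value is arbitrary):

    import org.apache.hadoop.hbase.filter.PrefixFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    // Serialize a filter to its protobuf bytes, then rebuild it with the
    // static parseFrom counterpart listed above; parseFrom throws
    // DeserializationException on malformed input.
    public static PrefixFilter roundTrip() throws Exception {
      PrefixFilter original = new PrefixFilter(Bytes.toBytes("row-prefix")); // arbitrary prefix
      byte[] pbBytes = original.toByteArray();
      return PrefixFilter.parseFrom(pbBytes);
    }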

[15/51] [partial] hbase-site git commit: Published site at .

2018-02-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
index 6e37f0b..49f85aa 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
@@ -162,11 +162,11 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 ImmutableBytesWritable
-TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
+TableRecordReader.createKey()
 
 
 ImmutableBytesWritable
-TableRecordReader.createKey()
+TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
 
 
 ImmutableBytesWritable
@@ -183,9 +183,11 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable, Result>
-TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
     org.apache.hadoop.mapred.JobConf job,
-    org.apache.hadoop.mapred.Reporter reporter)
+    org.apache.hadoop.mapred.Reporter reporter)
+Builds a TableRecordReader.
+
 
 
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable, Result>
@@ -195,11 +197,9 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable, Result>
-TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
     org.apache.hadoop.mapred.JobConf job,
-    org.apache.hadoop.mapred.Reporter reporter)
-Builds a TableRecordReader.
-
+    org.apache.hadoop.mapred.Reporter reporter)
 
 
 
@@ -218,10 +218,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-    Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+    Result value,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable, Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
+    org.apache.hadoop.mapred.Reporter reporter)
+Pass the key, value to reduce
+
 
 
 void
@@ -234,21 +236,19 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-IdentityTableMap.map(ImmutableBytesWritable key,
-    Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+    Result values,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable, Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+    org.apache.hadoop.mapred.Reporter reporter)
 
 
 boolean
-TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
+TableRecordReader.next(ImmutableBytesWritable key,
     Result value)
 
 
 boolean
-TableRecordReader.next(ImmutableBytesWritable key,
+TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
     Result value)
 
 
@@ -281,10 +281,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-    Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+    Result value,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable, Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
+    org.apache.hadoop.mapred.Reporter reporter)
+Pass the key, value to reduce
+
 
 
 void
@@ -297,12 +299,10 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-IdentityTableMap.map(ImmutableBytesWritable key,
-    Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+    Result values,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable, Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+    org.apache.hadoop.mapred.Reporter reporter)
 
 
 void
@@ -349,7 +349,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 private ImmutableBytesWritable
-TableRecordReaderImpl.key
+MultithreadedTableMapper.SubMapRecordReader.key
 
 
 private ImmutableBytesWritable
@@ -357,7 +357,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 private ImmutableBytesWritable
-MultithreadedTableMapper.SubMapRecordReader.key
+TableRecordReaderImpl.key
 
 
 (package private) ImmutableBytesWritable
@@ -427,33 +427,33 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 ImmutableBytesWritable
-TableSnapshotInputFormat.TableSnapshotRegionRecordReader.getCurrentKey()
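The map(...) rows being reshuffled above all implement the old org.apache.hadoop.mapred table-mapper contract; a sketch of a pass-through mapper against that interface (the class name is hypothetical, mirroring what IdentityTableMap does):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapred.TableMap;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reporter;

    // Hypothetical pass-through mapper: emits each row key and Result unchanged.
    public class PassThroughTableMap extends MapReduceBase
        implements TableMap<ImmutableBytesWritable, Result> {
      @Override
      public void map(ImmutableBytesWritable key, Result value,
          OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter)
          throws IOException {
        output.collect(key, value); // pass the key, value straight to reduce
      }
    }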

[15/51] [partial] hbase-site git commit: Published site at .

2018-02-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
index 98104cb..56a2ea1 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
@@ -449,13 +449,13 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 TableDescriptor
-Table.getDescriptor()
-Gets the table descriptor for this table.
-
+HTable.getDescriptor()
 
 
 TableDescriptor
-HTable.getDescriptor()
+Table.getDescriptor()
+Gets the table descriptor for this table.
+
 
 
 TableDescriptor
@@ -509,52 +509,52 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 CompletableFuture<TableDescriptor>
-AsyncHBaseAdmin.getDescriptor(TableName tableName)
-
-
-CompletableFuture<TableDescriptor>
 AsyncAdmin.getDescriptor(TableName tableName)
 Method for getting the tableDescriptor
 
 
-
+
 CompletableFuture<TableDescriptor>
 RawAsyncHBaseAdmin.getDescriptor(TableName tableName)
 
+
+CompletableFuture<TableDescriptor>
+AsyncHBaseAdmin.getDescriptor(TableName tableName)
+
 
 private CompletableFuture<List<TableDescriptor>>
 RawAsyncHBaseAdmin.getTableDescriptors(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest request)
 
 
-List<TableDescriptor>
-Admin.listTableDescriptors()
+default CompletableFuture<List<TableDescriptor>>
+AsyncAdmin.listTableDescriptors()
 List all the userspace tables.
 
 
 
 List<TableDescriptor>
-HBaseAdmin.listTableDescriptors()
-
-
-default CompletableFuture<List<TableDescriptor>>
-AsyncAdmin.listTableDescriptors()
+Admin.listTableDescriptors()
 List all the userspace tables.
 
 
-
-CompletableFuture<List<TableDescriptor>>
-AsyncHBaseAdmin.listTableDescriptors(boolean includeSysTables)
-
 
+List<TableDescriptor>
+HBaseAdmin.listTableDescriptors()
+
+
 CompletableFuture<List<TableDescriptor>>
 AsyncAdmin.listTableDescriptors(boolean includeSysTables)
 List all the tables.
 
 
-
+
 CompletableFuture<List<TableDescriptor>>
 RawAsyncHBaseAdmin.listTableDescriptors(boolean includeSysTables)
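A short sketch of the async admin calls reordered above (assumes an already-open AsyncConnection; error handling omitted):

    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;

    // Minimal sketch, assuming `conn` is an open AsyncConnection.
    static void listUserTables(AsyncConnection conn) {
      AsyncAdmin admin = conn.getAdmin();
      admin.listTableDescriptors() // userspace tables only, as documented above
          .thenAccept(tds -> tds.forEach(td -> System.out.println(td.getTableName())));
    }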
 

[15/51] [partial] hbase-site git commit: Published site at .

2018-02-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
index 6e37f0b..49f85aa 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
@@ -162,11 +162,11 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 ImmutableBytesWritable
-TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
+TableRecordReader.createKey()
 
 
 ImmutableBytesWritable
-TableRecordReader.createKey()
+TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
 
 
 ImmutableBytesWritable
@@ -183,9 +183,11 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable, Result>
-TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
     org.apache.hadoop.mapred.JobConf job,
-    org.apache.hadoop.mapred.Reporter reporter)
+    org.apache.hadoop.mapred.Reporter reporter)
+Builds a TableRecordReader.
+
 
 
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable, Result>
@@ -195,11 +197,9 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable, Result>
-TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
     org.apache.hadoop.mapred.JobConf job,
-    org.apache.hadoop.mapred.Reporter reporter)
-Builds a TableRecordReader.
-
+    org.apache.hadoop.mapred.Reporter reporter)
 
 
 
@@ -218,10 +218,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-    Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+    Result value,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable, Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
+    org.apache.hadoop.mapred.Reporter reporter)
+Pass the key, value to reduce
+
 
 
 void
@@ -234,21 +236,19 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-IdentityTableMap.map(ImmutableBytesWritable key,
-    Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+    Result values,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable, Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+    org.apache.hadoop.mapred.Reporter reporter)
 
 
 boolean
-TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
+TableRecordReader.next(ImmutableBytesWritable key,
     Result value)
 
 
 boolean
-TableRecordReader.next(ImmutableBytesWritable key,
+TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
     Result value)
 
 
@@ -281,10 +281,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-    Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+    Result value,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable, Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
+    org.apache.hadoop.mapred.Reporter reporter)
+Pass the key, value to reduce
+
 
 
 void
@@ -297,12 +299,10 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 void
-IdentityTableMap.map(ImmutableBytesWritable key,
-    Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+    Result values,
     org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable, Result> output,
-    org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+    org.apache.hadoop.mapred.Reporter reporter)
 
 
 void
@@ -349,7 +349,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 private ImmutableBytesWritable
-TableRecordReaderImpl.key
+MultithreadedTableMapper.SubMapRecordReader.key
 
 
 private ImmutableBytesWritable
@@ -357,7 +357,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 private ImmutableBytesWritable
-MultithreadedTableMapper.SubMapRecordReader.key
+TableRecordReaderImpl.key
 
 
 (package private) ImmutableBytesWritable
@@ -427,33 +427,33 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 ImmutableBytesWritable
-TableSnapshotInputFormat.TableSnapshotRegionRecordReader.getCurrentKey()

[15/51] [partial] hbase-site git commit: Published site at .

2018-02-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
index 7161108..fe5ef34 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
@@ -166,27 +166,27 @@
 
 
 DataBlockEncoder.EncodedSeeker
-CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator,
+RowIndexCodecV1.createSeeker(CellComparator comparator,
     HFileBlockDecodingContext decodingCtx)
 
 
 DataBlockEncoder.EncodedSeeker
-PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator,
+CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator,
     HFileBlockDecodingContext decodingCtx)
 
 
 DataBlockEncoder.EncodedSeeker
-FastDiffDeltaEncoder.createSeeker(CellComparator comparator,
+DiffKeyDeltaEncoder.createSeeker(CellComparator comparator,
     HFileBlockDecodingContext decodingCtx)
 
 
 DataBlockEncoder.EncodedSeeker
-DiffKeyDeltaEncoder.createSeeker(CellComparator comparator,
+FastDiffDeltaEncoder.createSeeker(CellComparator comparator,
     HFileBlockDecodingContext decodingCtx)
 
 
 DataBlockEncoder.EncodedSeeker
-RowIndexCodecV1.createSeeker(CellComparator comparator,
+PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator,
     HFileBlockDecodingContext decodingCtx)
 
 
@@ -198,13 +198,13 @@
 
 
 ByteBuffer
-BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source,
-    HFileBlockDecodingContext blkDecodingCtx)
+RowIndexCodecV1.decodeKeyValues(DataInputStream source,
+    HFileBlockDecodingContext decodingCtx)
 
 
 ByteBuffer
-RowIndexCodecV1.decodeKeyValues(DataInputStream source,
-    HFileBlockDecodingContext decodingCtx)
+BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source,
+    HFileBlockDecodingContext blkDecodingCtx)
 
 
 
@@ -279,17 +279,17 @@
 
 
 HFileBlockDecodingContext
-NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
 
 
 HFileBlockDecodingContext
-HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
-create an encoder specific decoding context for reading.
-
+NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)
 
 
 HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
+HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
+create an encoder specific decoding context for reading.
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
index 79b047f..66443b9 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
@@ -116,36 +116,36 @@
     HFileBlockDefaultDecodingContext decodingCtx)
 
 
-protected abstract ByteBuffer
-BufferedDataBlockEncoder.internalDecodeKeyValues(DataInputStream source,
+protected ByteBuffer

[15/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
index 77fb9b5..c4e8c8b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperation.html
@@ -165,520 +165,519 @@
 157        E env = checkAndLoadInstance(implClass, priority, conf);
 158        if (env != null) {
 159          this.coprocEnvironments.add(env);
-160          LOG.info(
-161              "System coprocessor " + className + " was loaded " + "successfully with priority (" + priority + ").");
-162          ++priority;
-163        }
-164      } catch (Throwable t) {
-165        // We always abort if system coprocessors cannot be loaded
-166        abortServer(className, t);
-167      }
-168    }
-169  }
-170
-171  /**
-172   * Load a coprocessor implementation into the host
-173   * @param path path to implementation jar
-174   * @param className the main class name
-175   * @param priority chaining priority
-176   * @param conf configuration for coprocessor
-177   * @throws java.io.IOException Exception
-178   */
-179  public E load(Path path, String className, int priority,
-180      Configuration conf) throws IOException {
-181    String[] includedClassPrefixes = null;
-182    if (conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null){
-183      String prefixes = conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY);
-184      includedClassPrefixes = prefixes.split(";");
-185    }
-186    return load(path, className, priority, conf, includedClassPrefixes);
-187  }
-188
-189  /**
-190   * Load a coprocessor implementation into the host
-191   * @param path path to implementation jar
-192   * @param className the main class name
-193   * @param priority chaining priority
-194   * @param conf configuration for coprocessor
-195   * @param includedClassPrefixes class name prefixes to include
-196   * @throws java.io.IOException Exception
-197   */
-198  public E load(Path path, String className, int priority,
-199      Configuration conf, String[] includedClassPrefixes) throws IOException {
-200    Class<?> implClass;
-201    LOG.debug("Loading coprocessor class " + className + " with path " +
-202        path + " and priority " + priority);
-203
-204    ClassLoader cl = null;
-205    if (path == null) {
-206      try {
-207        implClass = getClass().getClassLoader().loadClass(className);
-208      } catch (ClassNotFoundException e) {
-209        throw new IOException("No jar path specified for " + className);
-210      }
-211    } else {
-212      cl = CoprocessorClassLoader.getClassLoader(
-213        path, getClass().getClassLoader(), pathPrefix, conf);
-214      try {
-215        implClass = ((CoprocessorClassLoader)cl).loadClass(className, includedClassPrefixes);
-216      } catch (ClassNotFoundException e) {
-217        throw new IOException("Cannot load external coprocessor class " + className, e);
-218      }
-219    }
-220
-221    //load custom code for coprocessor
-222    Thread currentThread = Thread.currentThread();
-223    ClassLoader hostClassLoader = currentThread.getContextClassLoader();
-224    try{
-225      // switch temporarily to the thread classloader for custom CP
-226      currentThread.setContextClassLoader(cl);
-227      E cpInstance = checkAndLoadInstance(implClass, priority, conf);
-228      return cpInstance;
-229    } finally {
-230      // restore the fresh (host) classloader
-231      currentThread.setContextClassLoader(hostClassLoader);
-232    }
-233  }
-234
-235  @VisibleForTesting
-236  public void load(Class<? extends C> implClass, int priority, Configuration conf)
-237      throws IOException {
-238    E env = checkAndLoadInstance(implClass, priority, conf);
-239    coprocEnvironments.add(env);
-240  }
-241
-242  /**
-243   * @param implClass Implementation class
-244   * @param priority priority
-245   * @param conf configuration
-246   * @throws java.io.IOException Exception
-247   */
-248  public E checkAndLoadInstance(Class<?> implClass, int priority, Configuration conf)
-249      throws IOException {
-250    // create the instance
-251    C impl;
-252    try {
-253      impl = checkAndGetInstance(implClass);
-254      if (impl == null) {
-255        LOG.error("Cannot load coprocessor " + implClass.getSimpleName());
-256        return null;
-257      }
-258    } catch (InstantiationException|IllegalAccessException e) {
-259      throw new IOException(e);
-260    }
-261    // create the environment
-262    E env = createEnvironment(impl,
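For context on what load(...) above resolves: coprocessors are usually named in configuration or on a table descriptor; a sketch of the descriptor route (the table name and observer class are hypothetical):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch: naming a (hypothetical) observer on a table descriptor; the
    // region server's CoprocessorHost later resolves and loads that class
    // through the load(...) path shown above.
    static TableDescriptor withObserver() throws Exception {
      return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table")) // hypothetical
          .setCoprocessor("com.example.MyRegionObserver")                       // hypothetical class
          .build();
    }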

[15/51] [partial] hbase-site git commit: Published site at .

2018-02-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
index b7c24d7..eecd2f9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
@@ -44,792 +44,792 @@
 036import java.util.List;
 037import java.util.Map;
 038import java.util.NavigableSet;
-039import java.util.Objects;
-040import java.util.PriorityQueue;
-041import java.util.Set;
-042import java.util.concurrent.ArrayBlockingQueue;
-043import java.util.concurrent.BlockingQueue;
-044import java.util.concurrent.ConcurrentHashMap;
-045import java.util.concurrent.ConcurrentMap;
-046import java.util.concurrent.ConcurrentSkipListSet;
-047import java.util.concurrent.Executors;
-048import java.util.concurrent.ScheduledExecutorService;
-049import java.util.concurrent.TimeUnit;
-050import java.util.concurrent.atomic.AtomicInteger;
-051import java.util.concurrent.atomic.AtomicLong;
-052import java.util.concurrent.atomic.LongAdder;
-053import java.util.concurrent.locks.Lock;
-054import java.util.concurrent.locks.ReentrantLock;
-055import java.util.concurrent.locks.ReentrantReadWriteLock;
-056import org.apache.hadoop.conf.Configuration;
-057import org.apache.hadoop.hbase.HBaseConfiguration;
-058import org.apache.hadoop.hbase.io.HeapSize;
-059import org.apache.hadoop.hbase.io.hfile.BlockCache;
-060import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-061import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-062import org.apache.hadoop.hbase.io.hfile.BlockPriority;
-063import org.apache.hadoop.hbase.io.hfile.BlockType;
-064import org.apache.hadoop.hbase.io.hfile.CacheStats;
-065import org.apache.hadoop.hbase.io.hfile.Cacheable;
-066import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-067import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-068import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-069import org.apache.hadoop.hbase.io.hfile.CachedBlock;
-070import org.apache.hadoop.hbase.io.hfile.HFileBlock;
-071import org.apache.hadoop.hbase.nio.ByteBuff;
-072import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import org.apache.hadoop.hbase.util.HasThread;
-074import org.apache.hadoop.hbase.util.IdReadWriteLock;
-075import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-076import org.apache.hadoop.util.StringUtils;
-077import org.apache.yetus.audience.InterfaceAudience;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080
-081import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-082import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-083import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-084
-085/**
-086 * BucketCache uses {@link BucketAllocator} to allocate/free blocks, and uses
-087 * BucketCache#ramCache and BucketCache#backingMap in order to
-088 * determine if a given element is in the cache. The bucket cache can use on-heap or
-089 * off-heap memory {@link ByteBufferIOEngine} or in a file {@link FileIOEngine} to
-090 * store/read the block data.
-091 *
-092 * <p>Eviction is via a similar algorithm as used in
-093 * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
-094 *
-095 * <p>BucketCache can be used as mainly a block cache (see
-096 * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
-097 * LruBlockCache to decrease CMS GC and heap fragmentation.
-098 *
-099 * <p>It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
-100 * blocks) to enlarge cache space via
-101 * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache#setVictimCache}
-102 */
-103@InterfaceAudience.Private
-104public class BucketCache implements BlockCache, HeapSize {
-105  private static final Logger LOG = LoggerFactory.getLogger(BucketCache.class);
-106
-107  /** Priority buckets config */
-108  static final String SINGLE_FACTOR_CONFIG_NAME = "hbase.bucketcache.single.factor";
-109  static final String MULTI_FACTOR_CONFIG_NAME = "hbase.bucketcache.multi.factor";
-110  static final String MEMORY_FACTOR_CONFIG_NAME = "hbase.bucketcache.memory.factor";
-111  static final String EXTRA_FREE_FACTOR_CONFIG_NAME = "hbase.bucketcache.extrafreefactor";
-112  static final String ACCEPT_FACTOR_CONFIG_NAME = "hbase.bucketcache.acceptfactor";
-113  static final String MIN_FACTOR_CONFIG_NAME = "hbase.bucketcache.minfactor";
-114
-115  /** Priority buckets */
-116  @VisibleForTesting
-117  static final
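The factor keys above split BucketCache capacity between single-access, multi-access, and in-memory blocks; a sketch of reading them, with defaults of 0.25/0.50/0.25 assumed here rather than taken from the diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Reading the priority-bucket factors declared above; the fallback values
    // used here are assumptions, the authoritative defaults live in
    // BucketCache itself. The three factors partition the cache and are
    // expected to sum to 1.0.
    static void readBucketFactors() {
      Configuration conf = HBaseConfiguration.create();
      float singleFactor = conf.getFloat("hbase.bucketcache.single.factor", 0.25f);
      float multiFactor  = conf.getFloat("hbase.bucketcache.multi.factor", 0.50f);
      float memoryFactor = conf.getFloat("hbase.bucketcache.memory.factor", 0.25f);
      System.out.println(singleFactor + " / " + multiFactor + " / " + memoryFactor);
    }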

[15/51] [partial] hbase-site git commit: Published site at .

2018-01-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
index c356a32..723896c 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.html
@@ -75,13 +75,13 @@ var activeTableTab = "activeTableTab";
 
 Summary:
 Nested |
-Field |
+Field |
 Constr |
 Method
 
 
 Detail:
-Field |
+Field |
 Constr |
 Method
 
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestAsyncRegionAdminApi
+public class TestAsyncRegionAdminApi
 extends TestAsyncAdminBase
 Class to test asynchronous region admin operations.
 
@@ -129,6 +129,17 @@ extends
+Fields
+
+Modifier and Type
+Field and Description
+
+
+static HBaseClassTestRule
+CLASS_RULE
+
+
 
 
 
@@ -291,6 +302,23 @@ extends
 
 
+
+Field Detail
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+
 
 
 
@@ -303,7 +331,7 @@ extends
 
 TestAsyncRegionAdminApi
-public TestAsyncRegionAdminApi()
+public TestAsyncRegionAdminApi()
 
 
 
@@ -320,7 +348,7 @@ extends
 
 testGetRegionLocation
-public void testGetRegionLocation()
+public void testGetRegionLocation()
                            throws Exception
 
 Throws:
@@ -334,7 +362,7 @@ extends
 
 testAssignRegionAndUnassignRegion
-public void testAssignRegionAndUnassignRegion()
+public void testAssignRegionAndUnassignRegion()
                                        throws Exception
 
 Throws:
@@ -348,7 +376,7 @@ extends
 
 createTableAndGetOneRegion
-org.apache.hadoop.hbase.client.RegionInfo createTableAndGetOneRegion(org.apache.hadoop.hbase.TableName tableName)
+org.apache.hadoop.hbase.client.RegionInfo createTableAndGetOneRegion(org.apache.hadoop.hbase.TableName tableName)
                                                               throws IOException,
                                                                      InterruptedException,
                                                                      ExecutionException
@@ -366,7 +394,7 @@ extends
 
 testGetRegionByStateOfTable
-public void testGetRegionByStateOfTable()
+public void testGetRegionByStateOfTable()
                                  throws Exception
 
 Throws:
@@ -380,7 +408,7 @@ extends
 
 testMoveRegion
-public void testMoveRegion()
+public void testMoveRegion()
                     throws Exception
 
 Throws:
@@ -394,7 +422,7 @@ extends
 
 testGetOnlineRegions
-public void testGetOnlineRegions()
+public void testGetOnlineRegions()
                           throws Exception
 
 Throws:
@@ -408,7 +436,7 @@ extends
 
 testFlushTableAndRegion
-public void testFlushTableAndRegion()
+public void testFlushTableAndRegion()
                              throws Exception
 
 Throws:
@@ -422,7 +450,7 @@ extends
 
 testSplitSwitch
-public void testSplitSwitch()
+public void testSplitSwitch()
                      throws Exception
 
 Throws:
@@ -436,7 +464,7 @@ extends
 
 testMergeSwitch
-public void testMergeSwitch()
+public void testMergeSwitch()
                      throws Exception
 
 Throws:
@@ -450,7 +478,7 @@ extends
 
 initSplitMergeSwitch
-private void initSplitMergeSwitch()
+private void initSplitMergeSwitch()
                            throws
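The CLASS_RULE field appearing in this regenerated test page follows the standard HBaseClassTestRule pattern; a sketch of the source-side change (method bodies elided):

    import org.apache.hadoop.hbase.HBaseClassTestRule;
    import org.junit.ClassRule;

    public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
      // Enforces per-class timeout and test-category rules; this is the field
      // surfacing as CLASS_RULE in the regenerated page above.
      @ClassRule
      public static final HBaseClassTestRule CLASS_RULE =
          HBaseClassTestRule.forClass(TestAsyncRegionAdminApi.class);
      // ... test methods unchanged ...
    }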

[15/51] [partial] hbase-site git commit: Published site at .

2018-01-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
index 3f74159..3445980 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.WALItem.html
@@ -97,809 +97,809 @@
 089 * value = backupId and full WAL file name</li>
 090 * </ul></p>
 091 */
-092
-093@InterfaceAudience.Private
-094public final class BackupSystemTable implements Closeable {
-095  private static final Logger LOG = LoggerFactory.getLogger(BackupSystemTable.class);
-096
-097  static class WALItem {
-098    String backupId;
-099    String walFile;
-100    String backupRoot;
-101
-102    WALItem(String backupId, String walFile, String backupRoot) {
-103      this.backupId = backupId;
-104      this.walFile = walFile;
-105      this.backupRoot = backupRoot;
-106    }
-107
-108    public String getBackupId() {
-109      return backupId;
-110    }
-111
-112    public String getWalFile() {
-113      return walFile;
-114    }
-115
-116    public String getBackupRoot() {
-117      return backupRoot;
-118    }
-119
-120    @Override
-121    public String toString() {
-122      return Path.SEPARATOR + backupRoot + Path.SEPARATOR + backupId + Path.SEPARATOR + walFile;
-123    }
+092@InterfaceAudience.Private
+093public final class BackupSystemTable implements Closeable {
+094  private static final Logger LOG = LoggerFactory.getLogger(BackupSystemTable.class);
+095
+096  static class WALItem {
+097    String backupId;
+098    String walFile;
+099    String backupRoot;
+100
+101    WALItem(String backupId, String walFile, String backupRoot) {
+102      this.backupId = backupId;
+103      this.walFile = walFile;
+104      this.backupRoot = backupRoot;
+105    }
+106
+107    public String getBackupId() {
+108      return backupId;
+109    }
+110
+111    public String getWalFile() {
+112      return walFile;
+113    }
+114
+115    public String getBackupRoot() {
+116      return backupRoot;
+117    }
+118
+119    @Override
+120    public String toString() {
+121      return Path.SEPARATOR + backupRoot + Path.SEPARATOR + backupId + Path.SEPARATOR + walFile;
+122    }
+123  }
 124
-125  }
-126
-127  /**
-128   * Backup system table (main) name
-129   */
-130  private TableName tableName;
-131
-132  /**
-133   * Backup System table name for bulk loaded files.
-134   * We keep all bulk loaded file references in a separate table
-135   * because we have to isolate general backup operations: create, merge etc
-136   * from activity of RegionObserver, which controls process of a bulk loading
-137   * {@link org.apache.hadoop.hbase.backup.BackupObserver}
-138   */
-139
-140  private TableName bulkLoadTableName;
-141
-142  /**
-143   * Stores backup sessions (contexts)
-144   */
-145  final static byte[] SESSIONS_FAMILY = "session".getBytes();
-146  /**
-147   * Stores other meta
-148   */
-149  final static byte[] META_FAMILY = "meta".getBytes();
-150  final static byte[] BULK_LOAD_FAMILY = "bulk".getBytes();
-151  /**
-152   * Connection to HBase cluster, shared among all instances
-153   */
-154  private final Connection connection;
-155
-156  private final static String BACKUP_INFO_PREFIX = "session:";
-157  private final static String START_CODE_ROW = "startcode:";
-158  private final static byte[] ACTIVE_SESSION_ROW = "activesession:".getBytes();
-159  private final static byte[] ACTIVE_SESSION_COL = "c".getBytes();
+125  /**
+126   * Backup system table (main) name
+127   */
+128  private TableName tableName;
+129
+130  /**
+131   * Backup System table name for bulk loaded files.
+132   * We keep all bulk loaded file references in a separate table
+133   * because we have to isolate general backup operations: create, merge etc
+134   * from activity of RegionObserver, which controls process of a bulk loading
+135   * {@link org.apache.hadoop.hbase.backup.BackupObserver}
+136   */
+137  private TableName bulkLoadTableName;
+138
+139  /**
+140   * Stores backup sessions (contexts)
+141   */
+142  final static byte[] SESSIONS_FAMILY = "session".getBytes();
+143  /**
+144   * Stores other meta
+145   */
+146  final static byte[] META_FAMILY = "meta".getBytes();
+147  final static byte[] BULK_LOAD_FAMILY = "bulk".getBytes();
+148  /**
+149   * Connection to HBase cluster, shared among all instances
+150   */
+151  private final Connection connection;
+152
+153  private final static String BACKUP_INFO_PREFIX = "session:";
+154  private final static String START_CODE_ROW = "startcode:";
+155  private final static byte[] ACTIVE_SESSION_ROW =

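For orientation, a minimal sketch of the slash-joined path that the WALItem#toString shown above produces. The backupRoot/backupId/walFile values here are invented for illustration; only the Path.SEPARATOR join comes from the fragment itself.

import org.apache.hadoop.fs.Path;

// Hypothetical example values; WALItem#toString joins them with Path.SEPARATOR ("/").
public class WALItemPathDemo {
  public static void main(String[] args) {
    String backupRoot = "backupUT";
    String backupId = "backup_1546300800000";
    String walFile = "wal.1546300800001";
    String rendered = Path.SEPARATOR + backupRoot
        + Path.SEPARATOR + backupId + Path.SEPARATOR + walFile;
    System.out.println(rendered); // -> /backupUT/backup_1546300800000/wal.1546300800001
  }
}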
[15/51] [partial] hbase-site git commit: Published site at .

2018-01-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.html
index 5d20201..4c1b44c 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestRawAsyncScanCursor.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestRawAsyncScanCursor
+public class TestRawAsyncScanCursor
 extends AbstractTestScanCursor
 
 
@@ -251,7 +251,7 @@ extends AbstractTestScanCursor

 CONN
-private static org.apache.hadoop.hbase.client.AsyncConnection CONN
+private static org.apache.hadoop.hbase.client.AsyncConnection CONN

@@ -268,7 +268,7 @@ extends AbstractTestScanCursor

 TestRawAsyncScanCursor
-public TestRawAsyncScanCursor()
+public TestRawAsyncScanCursor()

@@ -285,7 +285,7 @@ extends AbstractTestScanCursor

 setUpBeforeClass
-public static void setUpBeforeClass()
+public static void setUpBeforeClass()
  throws Exception

 Throws:

@@ -299,7 +299,7 @@ extends AbstractTestScanCursor

 tearDownAfterClass
-public static void tearDownAfterClass()
+public static void tearDownAfterClass()
 throws Exception

 Throws:

@@ -313,7 +313,7 @@ extends AbstractTestScanCursor

 doTest
-private void doTest(boolean reversed)
+private void doTest(boolean reversed)
  throws InterruptedException, ExecutionException, IOException

@@ -331,7 +331,7 @@ extends AbstractTestScanCursor

 testHeartbeatWithSparseFilter
-public void testHeartbeatWithSparseFilter()
+public void testHeartbeatWithSparseFilter()
 throws IOException, InterruptedException, ExecutionException

@@ -349,7 +349,7 @@ extends AbstractTestScanCursor

 testHeartbeatWithSparseFilterReversed
-public void testHeartbeatWithSparseFilterReversed()
+public void testHeartbeatWithSparseFilterReversed()
 throws IOException, InterruptedException, ExecutionException

@@ -367,7 +367,7 @@ extends AbstractTestScanCursor

 testSizeLimit
-public void testSizeLimit()
+public void testSizeLimit()
 throws InterruptedException, ExecutionException
 
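A hedged sketch of the scan-cursor behavior these heartbeat tests exercise: with a sparse filter the server can send heartbeats carrying a cursor instead of rows, so the client sees progress. This uses the synchronous client API (the tests above drive the same behavior through the raw async scanner); the table name "demo" is invented.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ScanCursorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("demo"));
         ResultScanner scanner = table.getScanner(new Scan().setNeedCursorResult(true))) {
      for (Result result : scanner) {
        if (result.isCursor()) {
          // Heartbeat: no cells yet, but the server reports how far it has scanned.
          System.out.println("scanned up to " + result.getCursor());
          continue;
        }
        System.out.println("row: " + result);
      }
    }
  }
}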

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index b7bb4ff..7580e5c 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -158,8 +158,8 @@
 
java.lang.Enum

[15/51] [partial] hbase-site git commit: Published site at .

2018-01-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html
index 733dac1..4cc62fd 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.OpenRegionRemoteCall.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class RSProcedureDispatcher.OpenRegionRemoteCall
+private final class RSProcedureDispatcher.OpenRegionRemoteCall
 extends RSProcedureDispatcher.AbstractRSRemoteCall
Compatibility class used by RSProcedureDispatcher.CompatRemoteProcedureResolver
 to open regions using the old
 AdminService#openRegion(RpcController, OpenRegionRequest, RpcCallback) RPC.
@@ -230,7 +230,7 @@ extends RSProcedureDispatcher.AbstractRSRemoteCall

 operations
-private final java.util.List<RSProcedureDispatcher.RegionOpenOperation> operations
+private final java.util.List<RSProcedureDispatcher.RegionOpenOperation> operations

@@ -247,7 +247,7 @@ extends RSProcedureDispatcher.AbstractRSRemoteCall

 OpenRegionRemoteCall
-public OpenRegionRemoteCall(ServerName serverName,
+public OpenRegionRemoteCall(ServerName serverName,
   java.util.List<RSProcedureDispatcher.RegionOpenOperation> operations)

@@ -265,7 +265,7 @@ extends RSProcedureDispatcher.AbstractRSRemoteCall

 call
-public Void call()
+public Void call()

 Specified by:
 call in interface java.util.concurrent.Callable<Void>

@@ -280,7 +280,7 @@ extends RSProcedureDispatcher.AbstractRSRemoteCall

 sendRequest
-private org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse sendRequest(ServerName serverName,
+private org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse sendRequest(ServerName serverName,
   org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest request)
   throws java.io.IOException

@@ -295,7 +295,7 @@ extends RSProcedureDispatcher.AbstractRSRemoteCall

 remoteCallFailed
-private void remoteCallFailed(MasterProcedureEnv env,
+private void remoteCallFailed(MasterProcedureEnv env,
   java.io.IOException e)
 
 

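A hedged, simplified sketch of the pattern the javadoc above describes (not the actual HBase implementation): a per-server remote call that batches region-open operations and dispatches them through a blocking Callable. The String stand-ins for ServerName and RegionOpenOperation are hypothetical.

import java.io.IOException;
import java.util.List;
import java.util.concurrent.Callable;

final class OpenRegionCallSketch implements Callable<Void> {
  private final String serverName;          // stands in for ServerName
  private final List<String> regionsToOpen; // stands in for RegionOpenOperation

  OpenRegionCallSketch(String serverName, List<String> regionsToOpen) {
    this.serverName = serverName;
    this.regionsToOpen = regionsToOpen;
  }

  @Override
  public Void call() throws IOException {
    try {
      sendRequest(serverName, regionsToOpen); // one RPC carrying all operations
    } catch (IOException e) {
      remoteCallFailed(e);                    // surface the failure to the dispatcher
      throw e;
    }
    return null;
  }

  private void sendRequest(String server, List<String> regions) throws IOException {
    // In HBase this builds an OpenRegionRequest protobuf and calls
    // AdminService#openRegion on the target region server.
  }

  private void remoteCallFailed(IOException e) {
    // In HBase this hands the exception back to the master procedure environment.
  }
}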
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html
index 88c6085..695b8f8 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.RegionCloseOperation.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class RSProcedureDispatcher.RegionCloseOperation
+public static class RSProcedureDispatcher.RegionCloseOperation
 extends RSProcedureDispatcher.RegionOperation
 
 
@@ -243,7 +243,7 @@ extends 
 
 destinationServer
-private final ServerName destinationServer
+private final ServerName destinationServer
 
 
 
@@ -252,7 +252,7 @@ extends 
 
 closed
-private boolean closed
+private boolean closed
 
 
 
@@ -269,7 +269,7 @@ extends 
 
 RegionCloseOperation

[15/51] [partial] hbase-site git commit: Published site at .

2018-01-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
index 1636aa6..f79f186 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.MiniHBaseClusterRegionServer.html
@@ -86,811 +86,824 @@
 078   */
 079  public MiniHBaseCluster(Configuration 
conf, int numMasters, int numRegionServers)
 080  throws IOException, 
InterruptedException {
-081this(conf, numMasters, 
numRegionServers, null, null, null);
+081this(conf, numMasters, 
numRegionServers, null, null);
 082  }
 083
 084  /**
-085   * @param rsPorts Ports that 
RegionServer should use; pass ports if you want to test cluster
-086   *   restart where for sure the 
regionservers come up on same address+port (but
-087   *   just with different startcode); by 
default mini hbase clusters choose new
-088   *   arbitrary ports on each cluster 
start.
-089   * @throws IOException
-090   * @throws InterruptedException
-091   */
-092  public MiniHBaseCluster(Configuration 
conf, int numMasters, int numRegionServers,
-093 List<Integer> rsPorts,
-094 Class<? extends HMaster> masterClass,
-095 Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
-096  throws IOException, 
InterruptedException {
-097super(conf);
-098conf.set(HConstants.MASTER_PORT, 
"0");
-099if 
(conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1) {
-100  
conf.set(HConstants.MASTER_INFO_PORT, "0");
-101}
-102
-103// Hadoop 2
-104
CompatibilityFactory.getInstance(MetricsAssertHelper.class).init();
-105
-106init(numMasters, numRegionServers, 
rsPorts, masterClass, regionserverClass);
-107this.initialClusterStatus = 
getClusterStatus();
-108  }
-109
-110  public Configuration getConfiguration() 
{
-111return this.conf;
-112  }
-113
-114  /**
-115   * Subclass so we can get at protected methods (none at the moment). Also creates
-116   * a FileSystem instance per instantiation, and adds a shutdown hook for its own FileSystem
-117   * on the way out. Shuts down its own FileSystem only, not all filesystems, as
-118   * the FileSystem system exit hook does.
-119   */
-120  public static class 
MiniHBaseClusterRegionServer extends HRegionServer {
-121private Thread shutdownThread = 
null;
-122private User user = null;
-123/**
-124 * List of RegionServers killed so 
far. ServerName also comprises startCode of a server,
-125 * so any restarted instances of the 
same server will have different ServerName and will not
-126 * coincide with past dead ones. So there's no need to clean up this list.
-127 */
-128static Set<ServerName> killedServers = new HashSet<>();
-129
-130public 
MiniHBaseClusterRegionServer(Configuration conf)
-131throws IOException, 
InterruptedException {
-132  super(conf);
-133  this.user = User.getCurrent();
-134}
-135
-136/*
-137 * @param c
-138 * @param currentfs We return this if 
we did not make a new one.
-139 * @param uniqueName Same name used 
to help identify the created fs.
-140 * @return A new fs instance if we are up on DistributedFileSystem.
-141 * @throws IOException
-142 */
-143
-144@Override
-145protected void 
handleReportForDutyResponse(
-146final RegionServerStartupResponse 
c) throws IOException {
-147  
super.handleReportForDutyResponse(c);
-148  // Run this thread to shutdown our 
filesystem on way out.
-149  this.shutdownThread = new 
SingleFileSystemShutdownThread(getFileSystem());
-150}
-151
-152@Override
-153public void run() {
-154  try {
-155this.user.runAs(new PrivilegedAction<Object>(){
-156  public Object run() {
-157runRegionServer();
-158return null;
-159  }
-160});
-161  } catch (Throwable t) {
-162LOG.error("Exception in run", 
t);
-163  } finally {
-164// Run this on the way out.
-165if (this.shutdownThread != null) 
{
-166  this.shutdownThread.start();
-167  
Threads.shutdown(this.shutdownThread, 3);
-168}
-169  }
-170}
-171
-172private void runRegionServer() {
-173  super.run();
-174}
-175
-176@Override
-177protected void kill() {
-178  
killedServers.add(getServerName());
-179  super.kill();
-180}
-181
-182@Override
-183public void abort(final String 
reason, final Throwable cause) {
-184  this.user.runAs(new PrivilegedAction<Object>() {
-185public Object run() 

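The constructor change above drops one argument from the delegating call. For orientation, a hedged sketch of standing up a mini cluster directly through the three-argument constructor shown at line 079 (most tests go through HBaseTestingUtility instead, and exact signatures vary across HBase versions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MiniHBaseCluster;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // 1 master, 3 region servers; ports are chosen arbitrarily per the javadoc above.
    MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 1, 3);
    try {
      System.out.println("initial status: " + cluster.getClusterStatus());
    } finally {
      cluster.shutdown(); // tear the cluster down and release its FileSystem
    }
  }
}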
[15/51] [partial] hbase-site git commit: Published site at .

2018-01-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepAndFailFirstTime.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepAndFailFirstTime.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepAndFailFirstTime.html
new file mode 100644
index 000..f020af6
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepAndFailFirstTime.html
@@ -0,0 +1,125 @@
+Uses of Class org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepAndFailFirstTime (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+No usage of org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepAndFailFirstTime
+
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepCoprocessor.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepCoprocessor.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepCoprocessor.html
new file mode 100644
index 000..f3fe2fd
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.SleepCoprocessor.html
@@ -0,0 +1,125 @@
+Uses of Class org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepCoprocessor (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+No usage of org.apache.hadoop.hbase.client.AbstractTestCITimeout.SleepCoprocessor
+
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/AbstractTestCITimeout.html
new file mode 100644
index 000..9a0672a
--- /dev/null
+++ 

[15/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index d446a14..97f60fb 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -2218,7 +2218,7 @@

 hbaseAdmin
-private org.apache.hadoop.hbase.client.HBaseAdmin hbaseAdmin
+private org.apache.hadoop.hbase.client.HBaseAdmin hbaseAdmin

@@ -2227,7 +2227,7 @@

 random
-private static java.util.Random random
+private static java.util.Random random

@@ -2236,7 +2236,7 @@

 portAllocator
-private static final HBaseTestingUtility.PortAllocator portAllocator
+private static final HBaseTestingUtility.PortAllocator portAllocator

@@ -4746,7 +4746,7 @@

 createRegionAndWAL
-public static org.apache.hadoop.hbase.regionserver.HRegion createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info,
+public static org.apache.hadoop.hbase.regionserver.HRegion createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info,
   org.apache.hadoop.fs.Path rootDir,
   org.apache.hadoop.conf.Configuration conf,
   org.apache.hadoop.hbase.client.TableDescriptor htd)

@@ -4765,7 +4765,7 @@

 createRegionAndWAL
-public static org.apache.hadoop.hbase.regionserver.HRegion createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info,
+public static org.apache.hadoop.hbase.regionserver.HRegion createRegionAndWAL(org.apache.hadoop.hbase.client.RegionInfo info,
   org.apache.hadoop.fs.Path rootDir,
   org.apache.hadoop.conf.Configuration conf,
   org.apache.hadoop.hbase.client.TableDescriptor htd,

@@ -4785,7 +4785,7 @@

 getMetaTableRows
-public java.util.List<byte[]> getMetaTableRows()
+public java.util.List<byte[]> getMetaTableRows()
   throws java.io.IOException
 Returns all rows from the hbase:meta table.

@@ -4800,7 +4800,7 @@

 getMetaTableRows
-public java.util.List<byte[]> getMetaTableRows(org.apache.hadoop.hbase.TableName tableName)
+public java.util.List<byte[]> getMetaTableRows(org.apache.hadoop.hbase.TableName tableName)
   throws java.io.IOException
 Returns all rows from the hbase:meta table for a given user table.

@@ -4815,7 +4815,7 @@

 getOtherRegionServer
-public org.apache.hadoop.hbase.regionserver.HRegionServer getOtherRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer rs)
+public org.apache.hadoop.hbase.regionserver.HRegionServer getOtherRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer rs)

@@ -4824,7 +4824,7 @@

 getRSForFirstRegionInTable
-public org.apache.hadoop.hbase.regionserver.HRegionServer getRSForFirstRegionInTable(org.apache.hadoop.hbase.TableName tableName)

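A hedged sketch of the typical flow these HBaseTestingUtility javadocs document: spin up a mini cluster, create a table, then inspect hbase:meta rows with getMetaTableRows(tableName). The table name "demo" and family "cf" are invented.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class TestingUtilitySketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      TableName tn = TableName.valueOf("demo");
      util.createTable(tn, Bytes.toBytes("cf"));
      // One hbase:meta row per region of the table.
      int regions = util.getMetaTableRows(tn).size();
      System.out.println("regions in meta for demo: " + regions);
    } finally {
      util.shutdownMiniCluster();
    }
  }
}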
[15/51] [partial] hbase-site git commit: Published site at .

2018-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html 
b/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
index 4144086..62d0f3f 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMaster.RedirectServlet.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class HMaster.RedirectServlet
+public static class HMaster.RedirectServlet
 extends javax.servlet.http.HttpServlet
 
 See Also:
@@ -243,7 +243,7 @@ extends javax.servlet.http.HttpServlet

 serialVersionUID
-private static final long serialVersionUID
+private static final long serialVersionUID

 See Also:
 Constant Field Values

@@ -256,7 +256,7 @@ extends javax.servlet.http.HttpServlet

 regionServerInfoPort
-private final int regionServerInfoPort
+private final int regionServerInfoPort

@@ -265,7 +265,7 @@ extends javax.servlet.http.HttpServlet

 regionServerHostname
-private final java.lang.String regionServerHostname
+private final java.lang.String regionServerHostname

@@ -282,7 +282,7 @@ extends javax.servlet.http.HttpServlet

 RedirectServlet
-public RedirectServlet(InfoServer infoServer,
+public RedirectServlet(InfoServer infoServer,
   java.lang.String hostname)

 Parameters:

@@ -305,7 +305,7 @@ extends javax.servlet.http.HttpServlet

 doGet
-public void doGet(javax.servlet.http.HttpServletRequest request,
+public void doGet(javax.servlet.http.HttpServletRequest request,
   javax.servlet.http.HttpServletResponse response)
 throws javax.servlet.ServletException,
   java.io.IOException

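A hedged, simplified sketch of what a redirect servlet like HMaster.RedirectServlet does: bounce a request hitting the master's info port over to the region server info port. Field names follow the javadoc above; the real implementation has more validation, and this constructor shape is illustrative only.

import java.io.IOException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public class RedirectServletSketch extends HttpServlet {
  private static final long serialVersionUID = 1L;
  private final int regionServerInfoPort;
  private final String regionServerHostname; // may be null: fall back to the request host

  public RedirectServletSketch(int regionServerInfoPort, String regionServerHostname) {
    this.regionServerInfoPort = regionServerInfoPort;
    this.regionServerHostname = regionServerHostname;
  }

  @Override
  public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
    String host = regionServerHostname != null ? regionServerHostname : request.getServerName();
    // Send the client to the same URI on the region server info port.
    response.sendRedirect(request.getScheme() + "://" + host + ":" + regionServerInfoPort
        + request.getRequestURI());
  }
}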


[15/51] [partial] hbase-site git commit: Published site at .

2018-01-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
index c4c5327..ce948d2 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MasterRpcServices
+public class MasterRpcServices
 extends RSRpcServices
 implements 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService.BlockingInterface,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService.BlockingInterface,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService.BlockingInterface
 Implements the master RPC services.
@@ -779,7 +779,7 @@

 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG

@@ -788,7 +788,7 @@

 master
-private final HMaster master
+private final HMaster master

@@ -805,7 +805,7 @@

 MasterRpcServices
-public MasterRpcServices(HMaster m)
+public MasterRpcServices(HMaster m)
   throws java.io.IOException

 Throws:

@@ -827,7 +827,7 @@

 createConfigurationSubset
-private org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder createConfigurationSubset()
+private org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder createConfigurationSubset()

 Returns:
 Subset of configuration to pass initializing regionservers: e.g.

@@ -841,7 +841,7 @@

 addConfig
-private org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder addConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder resp,
+private org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder addConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse.Builder resp,
   java.lang.String key)

@@ -851,7 +851,7 @@

 createRpcServer
-protected RpcServerInterface createRpcServer(Server server,
+protected RpcServerInterface createRpcServer(Server server,
   org.apache.hadoop.conf.Configuration conf,
   RpcSchedulerFactory rpcSchedulerFactory,
   java.net.InetSocketAddress bindAddress,

@@ -871,7 +871,7 @@

 createPriority
-protected PriorityFunction createPriority()
+protected PriorityFunction createPriority()

 Overrides:
 createPriority in class RSRpcServices

@@ -884,7 +884,7 @@

 switchBalancer
-boolean switchBalancer(boolean b,
+boolean switchBalancer(boolean b,
   MasterRpcServices.BalanceSwitchMode mode)
 throws java.io.IOException
 Assigns balancer switch according to BalanceSwitchMode

@@ -905,7 +905,7 @@

 synchronousBalanceSwitch
-boolean synchronousBalanceSwitch(boolean b)
+boolean synchronousBalanceSwitch(boolean b)
   throws java.io.IOException

 Throws:
@@ -919,7 +919,7 @@ implements 

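A hedged sketch of the client-side counterpart to the balancer-switch RPCs above. In the 2.x Admin API, balancerSwitch(on, synchronous) flips the master's load balancer and returns the previous state; the synchronous flag corresponds to the SYNC BalanceSwitchMode handled by MasterRpcServices. Signatures may differ in other versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BalancerSwitchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      boolean previous = admin.balancerSwitch(false, true); // turn balancer off, synchronously
      System.out.println("balancer was previously " + (previous ? "on" : "off"));
      admin.balancerSwitch(previous, true);                 // restore the old state
    }
  }
}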
[15/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index 5b3b750..a1f3f7e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-135import 

[15/51] [partial] hbase-site git commit: Published site at .

2018-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
index 9c3c289..c3851cb 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.LeaseManager.html
@@ -173,6 +173,6 @@
 
 
 
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
index f669520..100ff48 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.NameNodeException.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
index d0872ec..3a02b25 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.PBHelper.html
@@ -173,6 +173,6 @@
 
 
 
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
index a8bcc93..f2dcb53 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.PipelineAckStatusGetter.html
@@ -181,6 +181,6 @@
 
 
 
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
index fe0f388..87ebdfc 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/class-use/FanOutOneBlockAsyncDFSOutputHelper.StorageTypeSetter.html
@@ -173,6 +173,6 @@
 
 
 
-Copyright © 2007–2017 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
 
 


[15/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
index 203fe62..79572f8 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestHRegion
+public class TestHRegion
 extends java.lang.Object
 Basic stand-alone testing of HRegion.  No clusters!
 
@@ -152,9 +152,24 @@
 TestHRegion.GetTillDoneOrException
 
 
+static class
+TestHRegion.HRegionForTesting
+The same as the HRegion class; the only difference is that instantiateHStore
+ creates a different HStore - HStoreForTesting.
+
+
+
 (package private) static class
 TestHRegion.HRegionWithSeqId
 
+
+static class
+TestHRegion.HStoreForTesting
+HStoreForTesting is the same as HStore, except that its doCompaction method
+ contains a checkpoint on "hbase.hstore.compaction.complete" which, when set to
+ false, doesn't let the hstore compaction complete.
+
+
 
 private static class
 TestHRegion.Incrementer
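A hedged, simplified sketch of the HStoreForTesting idea just described (not the real TestHRegion inner class, whose HStore#doCompaction override has a version-specific signature): gate the commit step of a compaction on a test-only configuration switch.

import java.util.Properties;

class CheckpointedCompactionSketch {
  static final String COMPLETE_KEY = "hbase.hstore.compaction.complete";
  private final Properties conf;

  CheckpointedCompactionSketch(Properties conf) {
    this.conf = conf;
  }

  boolean doCompaction(Runnable compactionWork) {
    compactionWork.run(); // write the compacted output files
    if (!Boolean.parseBoolean(conf.getProperty(COMPLETE_KEY, "true"))) {
      return false; // checkpoint hit: leave the compaction uncommitted for the test
    }
    // Otherwise commit: swap the compacted files into the store (elided here).
    return true;
  }
}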
@@ -995,7 +1010,7 @@

 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG

@@ -1004,7 +1019,7 @@

 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name

@@ -1013,7 +1028,7 @@

 timeout
-public static final org.junit.rules.TestRule timeout
+public static final org.junit.rules.TestRule timeout

@@ -1022,7 +1037,7 @@

 thrown
-public final org.junit.rules.ExpectedException thrown
+public final org.junit.rules.ExpectedException thrown

@@ -1031,7 +1046,7 @@

 COLUMN_FAMILY
-private static final java.lang.String COLUMN_FAMILY
+private static final java.lang.String COLUMN_FAMILY

 See Also:
 Constant Field Values

@@ -1044,7 +1059,7 @@

 COLUMN_FAMILY_BYTES
-private static final byte[] COLUMN_FAMILY_BYTES
+private static final byte[] COLUMN_FAMILY_BYTES

@@ -1053,7 +1068,7 @@

 region
-org.apache.hadoop.hbase.regionserver.HRegion region
+org.apache.hadoop.hbase.regionserver.HRegion region

@@ -1062,7 +1077,7 @@

 TEST_UTIL
-protected static HBaseTestingUtility TEST_UTIL
+protected static HBaseTestingUtility TEST_UTIL

@@ -1071,7 +1086,7 @@

 CONF
-public static org.apache.hadoop.conf.Configuration CONF
+public static org.apache.hadoop.conf.Configuration CONF

@@ -1080,7 +1095,7 @@

 dir
-private java.lang.String dir
+private java.lang.String dir

@@ -1089,7 +1104,7 @@

 FILESYSTEM
-private static org.apache.hadoop.fs.FileSystem FILESYSTEM
+private static org.apache.hadoop.fs.FileSystem FILESYSTEM

@@ -1098,7 +1113,7 @@

 MAX_VERSIONS
-private final int MAX_VERSIONS
+private final int MAX_VERSIONS

 See Also:
 Constant Field Values

@@ -1111,7 +1126,7 @@

 tableName
-protected org.apache.hadoop.hbase.TableName tableName
+protected org.apache.hadoop.hbase.TableName tableName

@@ -1120,7 +1135,7 @@

 method
-protected java.lang.String

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/ipc/SimpleRpcServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/SimpleRpcServer.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/SimpleRpcServer.html
index dab22e7..aaefcd2 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/SimpleRpcServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/SimpleRpcServer.html
@@ -282,19 +282,19 @@ extends 
 
 
-Pair<org.apache.hadoop.hbase.shaded.com.google.protobuf.Message,CellScanner>
-call(org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService service,
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor md,
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.Message param,
+Pair<org.apache.hbase.thirdparty.com.google.protobuf.Message,CellScanner>
+call(org.apache.hbase.thirdparty.com.google.protobuf.BlockingService service,
+    org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor md,
+    org.apache.hbase.thirdparty.com.google.protobuf.Message param,
     CellScanner cellScanner,
     long receiveTime,
     MonitoredRPCHandler status)

-Pair<org.apache.hadoop.hbase.shaded.com.google.protobuf.Message,CellScanner>
-call(org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService service,
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor md,
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.Message param,
+Pair<org.apache.hbase.thirdparty.com.google.protobuf.Message,CellScanner>
+call(org.apache.hbase.thirdparty.com.google.protobuf.BlockingService service,
+    org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor md,
+    org.apache.hbase.thirdparty.com.google.protobuf.Message param,
     CellScanner cellScanner,
     long receiveTime,
     MonitoredRPCHandler status,
@@ -362,7 +362,7 @@ extends 
 
 Methods inherited from class org.apache.hadoop.hbase.ipc.RpcServer
-addCallSize, allocateByteBuffToReadInto, authorize, call, channelRead, createSecretManager, getConf, getCurrentCall, getErrorHandler, getMetrics, getMinSizeForReservoirUse, getRemoteAddress, getRemoteIp, getRequestUser, getRequestUserName, getScheduler, getSecretManager, getService, getServiceAndInterface, getServiceInterface, getStatus, initReconfigurable, isInRpcCallContext, isStarted, logResponse, onConfigurationChange, refreshAuthManager, setErrorHandler, setRsRpcServices, setSecretManager
+addCallSize, allocateByteBuffToReadInto, authorize, call, channelRead, createSecretManager, getConf, getCurrentCall, getErrorHandler, getMetrics, getMinSizeForReservoirUse, getRemoteAddress, getRemoteIp, getRequestUser, getRequestUserName, getScheduler, getSecretManager, getService, getServiceAndInterface, getServiceInterface, getStatus, initReconfigurable, isInRpcCallContext, isStarted, logResponse, onConfigurationChange, refreshAuthManager, setErrorHandler, setRsRpcServices, setSecretManager
 
 
 
@@ -591,40 +591,40 @@ extends 
 
 
-
+
 
 
 
 
 call
-public Pair<org.apache.hadoop.hbase.shaded.com.google.protobuf.Message,CellScanner> call(org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService service,
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor md,
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.Message param,
-    CellScanner cellScanner,
-    long receiveTime,
-    MonitoredRPCHandler status)
-    throws java.io.IOException
+public Pair<org.apache.hbase.thirdparty.com.google.protobuf.Message,CellScanner> call(org.apache.hbase.thirdparty.com.google.protobuf.BlockingService service,
+    org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor md,
+    org.apache.hbase.thirdparty.com.google.protobuf.Message param,
+    CellScanner cellScanner,
+    long receiveTime,
[15/51] [partial] hbase-site git commit: Published site at .

2017-12-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferExtendedCell.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferExtendedCell.html
 
b/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferExtendedCell.html
new file mode 100644
index 000..8f9614d
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferExtendedCell.html
@@ -0,0 +1,1073 @@
+KeyOnlyFilter.KeyOnlyByteBufferExtendedCell (Apache HBase 3.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase.filter
+Class KeyOnlyFilter.KeyOnlyByteBufferExtendedCell
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.ByteBufferExtendedCell
+
+
+org.apache.hadoop.hbase.filter.KeyOnlyFilter.KeyOnlyByteBufferExtendedCell
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+http://docs.oracle.com/javase/8/docs/api/java/lang/Cloneable.html?is-external=true;
 title="class or interface in java.lang">Cloneable, Cell, ExtendedCell, HeapSize, RawCell
+
+
+Enclosing class:
+KeyOnlyFilter
+
+
+
+static class KeyOnlyFilter.KeyOnlyByteBufferExtendedCell
+extends ByteBufferExtendedCell
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
interfaceorg.apache.hadoop.hbase.Cell
+Cell.Type
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private ByteBufferExtendedCell
+cell
+
+
+static int
+FIXED_OVERHEAD
+
+
+private boolean
+lenAsVal
+
+
+
+
+
+
+Fields inherited from interfaceorg.apache.hadoop.hbase.ExtendedCell
+CELL_NOT_BASED_ON_CHUNK
+
+
+
+
+
+Fields inherited from interfaceorg.apache.hadoop.hbase.RawCell
+MAX_TAGS_LENGTH
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+KeyOnlyByteBufferExtendedCell(ByteBufferExtendedCellc,
+ booleanlenAsVal)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods | Instance Methods | Concrete Methods
+
+Modifier and Type
+Method and Description
+
+
+byte[]
+getFamilyArray()
+Contiguous bytes composed of legal HDFS filename characters 
which may start at any index in the
+ containing array.
+
+
+
+java.nio.ByteBuffer
+getFamilyByteBuffer()
+
+
+byte
+getFamilyLength()
+
+
+int
+getFamilyOffset()
+
+
+int
+getFamilyPosition()
+
+
+byte[]
+getQualifierArray()
+Contiguous raw bytes that may start at any index in the 
containing array.
+
+
+
+java.nio.ByteBuffer
+getQualifierByteBuffer()
+
+
+int
+getQualifierLength()
+
+
+int
+getQualifierOffset()
+
+
+int
+getQualifierPosition()
+
+
+byte[]
+getRowArray()
+Contiguous raw bytes that may start at any index in the 
containing array.
+
+
+
+java.nio.ByteBuffer
+getRowByteBuffer()
+
+
+short
+getRowLength()
+
+
+int
+getRowOffset()
+
+
+int
+getRowPosition()
+
+
+long
+getSequenceId()
+A region-specific unique monotonically increasing sequence 
ID given to each Cell.
+
+
+
+java.util.Optional

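A hedged sketch of the filter this new page documents: KeyOnlyFilter strips cell values server-side so a scan returns key-only cells, and constructing it with lenAsVal=true (the field shown above) replaces each value with a 4-byte int holding the original value length instead of dropping it entirely.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;

public class KeyOnlyScanSketch {
  // Build a scan that returns keys only; pass lenAsVal=true to keep value lengths.
  public static Scan keyOnlyScan(boolean lenAsVal) {
    return new Scan().setFilter(new KeyOnlyFilter(lenAsVal));
  }
}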
[15/51] [partial] hbase-site git commit: Published site at .

2017-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.SamePrefixComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.SamePrefixComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.SamePrefixComparator.html
index f8eace7..66b6656 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.SamePrefixComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.SamePrefixComparator.html
@@ -27,2569 +27,2540 @@
 019 */
 020package org.apache.hadoop.hbase;
 021
-022import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
-023import static 
org.apache.hadoop.hbase.util.Bytes.len;
-024
-025import java.io.DataInput;
-026import java.io.DataOutput;
-027import java.io.IOException;
-028import java.io.OutputStream;
-029import java.nio.ByteBuffer;
-030import java.util.ArrayList;
-031import java.util.Arrays;
-032import java.util.HashMap;
-033import java.util.Iterator;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Optional;
-037
-038import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
-039import 
org.apache.hadoop.hbase.util.Bytes;
-040import 
org.apache.hadoop.hbase.util.ClassSize;
-041import 
org.apache.hadoop.io.RawComparator;
-042import 
org.apache.yetus.audience.InterfaceAudience;
-043import org.slf4j.Logger;
-044import org.slf4j.LoggerFactory;
-045
-046import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-047
-048/**
-049 * An HBase Key/Value. This is the 
fundamental HBase Type.
+022import static 
org.apache.hadoop.hbase.util.Bytes.len;
+023
+024import java.io.DataInput;
+025import java.io.DataOutput;
+026import java.io.IOException;
+027import java.io.OutputStream;
+028import java.nio.ByteBuffer;
+029import java.util.ArrayList;
+030import java.util.Arrays;
+031import java.util.HashMap;
+032import java.util.Iterator;
+033import java.util.List;
+034import java.util.Map;
+035import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
+036import 
org.apache.hadoop.hbase.util.Bytes;
+037import 
org.apache.hadoop.hbase.util.ClassSize;
+038import 
org.apache.hadoop.io.RawComparator;
+039import 
org.apache.yetus.audience.InterfaceAudience;
+040import org.slf4j.Logger;
+041import org.slf4j.LoggerFactory;
+042
+043import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+044
+045/**
+046 * An HBase Key/Value. This is the 
fundamental HBase Type.
+047 * p
+048 * HBase applications and users should 
use the Cell interface and avoid directly using KeyValue and
+049 * member functions not defined in 
Cell.
 050 * p
-051 * HBase applications and users should 
use the Cell interface and avoid directly using KeyValue and
-052 * member functions not defined in 
Cell.
-053 * p
-054 * If being used client-side, the primary 
methods to access individual fields are
-055 * {@link #getRowArray()}, {@link 
#getFamilyArray()}, {@link #getQualifierArray()},
-056 * {@link #getTimestamp()}, and {@link 
#getValueArray()}. These methods allocate new byte arrays
-057 * and return copies. Avoid their use 
server-side.
-058 * p
-059 * Instances of this class are immutable. 
They do not implement Comparable but Comparators are
-060 * provided. Comparators change with 
context, whether user table or a catalog table comparison. Its
-061 * critical you use the appropriate 
comparator. There are Comparators for normal HFiles, Meta's
-062 * Hfiles, and bloom filter keys.
-063 * p
-064 * KeyValue wraps a byte array and takes 
offsets and lengths into passed array at where to start
-065 * interpreting the content as KeyValue. 
The KeyValue format inside a byte array is:
-066 * <keylength> <valuelength> <key> <value>. Key is further
-067 * decomposed as: <rowlength> <row> <columnfamilylength>
-068 * <columnfamily> <columnqualifier>
-069 * <timestamp> <keytype>. The rowlength maximum is
-070 * Short.MAX_SIZE, column family length maximum is Byte.MAX_SIZE, and
-071 * column qualifier + key length must be < Integer.MAX_SIZE. The column does not
-072 * contain the family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER}.
-073 * KeyValue can optionally contain Tags. When it contains tags, they are added in the byte array after
-074 * the value part. The format for this part is: <tagslength><tagsbytes>.
-075 * tagslength maximum is Short.MAX_SIZE. The tagsbytes
-076 * contain one or more tags where each tag is of the form
-077 * <taglength><tagtype><tagbytes>. tagtype is one byte
-078 * and taglength maximum is Short.MAX_SIZE and it includes 1 byte type
-079 * length and actual tag bytes length.
-080 */
-081@InterfaceAudience.Private
-082public class KeyValue 
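The layout documented above can be decoded with plain offset arithmetic. Below is a minimal illustrative sketch, not HBase code: it assumes a classic KeyValue with no trailing tags, and the class and method names are invented for the example.

import java.nio.ByteBuffer;

// Walks one serialized KeyValue: <keylength> <valuelength> <key> <value>,
// where key = <rowlength> <row> <columnfamilylength> <columnfamily>
// <columnqualifier> <timestamp> <keytype>.
public class KeyValueLayoutSketch {
  public static void dump(byte[] buf, int offset) {
    ByteBuffer bb = ByteBuffer.wrap(buf, offset, buf.length - offset);
    int keyLength = bb.getInt();        // <keylength>
    int valueLength = bb.getInt();      // <valuelength>
    short rowLength = bb.getShort();    // <rowlength>, capped at a short
    byte[] row = new byte[rowLength];
    bb.get(row);                        // <row>
    byte familyLength = bb.get();       // <columnfamilylength>, capped at a byte
    byte[] family = new byte[familyLength];
    bb.get(family);                     // <columnfamily>
    // The qualifier fills whatever is left of the key once the fixed-width
    // pieces (2 + 1 length-header bytes, 8-byte timestamp, 1-byte type) are removed.
    int qualifierLength = keyLength - 2 - rowLength - 1 - familyLength - 8 - 1;
    byte[] qualifier = new byte[qualifierLength];
    bb.get(qualifier);                  // <columnqualifier>
    long timestamp = bb.getLong();      // <timestamp>
    byte keyType = bb.get();            // <keytype>, e.g. a Put or Delete marker
    byte[] value = new byte[valueLength];
    bb.get(value);                      // <value>
    System.out.printf("row=%s family=%s qualifier=%s ts=%d type=%d value=%s%n",
        new String(row), new String(family), new String(qualifier),
        timestamp, keyType, new String(value));
  }
}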

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
index 0c7ac41..0f682ab 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Delete.html
@@ -33,342 +33,331 @@
 025import java.util.NavigableMap;
 026import java.util.UUID;
 027import org.apache.hadoop.hbase.Cell;
-028import 
org.apache.hadoop.hbase.CellUtil;
-029import 
org.apache.hadoop.hbase.HConstants;
-030import 
org.apache.hadoop.hbase.KeyValue;
-031import 
org.apache.hadoop.hbase.security.access.Permission;
-032import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
-033import 
org.apache.hadoop.hbase.util.Bytes;
-034import 
org.apache.yetus.audience.InterfaceAudience;
-035
-036/**
-037 * Used to perform Delete operations on a single row.
-038 * <p>
-039 * To delete an entire row, instantiate a Delete object with the row
-040 * to delete.  To further define the scope of what to delete, perform
-041 * additional methods as outlined below.
-042 * <p>
-043 * To delete specific families, execute {@link #addFamily(byte[]) deleteFamily}
-044 * for each family to delete.
-045 * <p>
-046 * To delete multiple versions of specific columns, execute
-047 * {@link #addColumns(byte[], byte[]) deleteColumns}
-048 * for each column to delete.
-049 * <p>
-050 * To delete specific versions of specific columns, execute
-051 * {@link #addColumn(byte[], byte[], long) deleteColumn}
-052 * for each column version to delete.
-053 * <p>
-054 * Specifying timestamps, deleteFamily and deleteColumns will delete all
-055 * versions with a timestamp less than or equal to that passed.  If no
-056 * timestamp is specified, an entry is added with a timestamp of 'now'
-057 * where 'now' is the server's System.currentTimeMillis().
-058 * Specifying a timestamp to the deleteColumn method will
-059 * delete versions only with a timestamp equal to that specified.
-060 * If no timestamp is passed to deleteColumn, internally, it figures the
-061 * most recent cell's timestamp and adds a delete at that timestamp; i.e.
-062 * it deletes the most recently added cell.
-063 * <p>The timestamp passed to the constructor is used ONLY for delete of
-064 * rows.  For anything less -- a deleteColumn, deleteColumns or
-065 * deleteFamily -- you need to use the method overrides that take a
-066 * timestamp.  The constructor timestamp is not referenced.
-067 */
-068@InterfaceAudience.Public
-069public class Delete extends Mutation implements Comparable<Row> {
-070  /**
-071   * Create a Delete operation for the specified row.
-072   * <p>
-073   * If no further operations are done, this will delete everything
-074   * associated with the specified row (all versions of all columns in all
-075   * families), with timestamp from current point in time to the past.
-076   * Cells defining timestamp for a future point in time
-077   * (timestamp &gt; current time) will not be deleted.
-078   * @param row row key
-079   */
-080  public Delete(byte [] row) {
-081    this(row, HConstants.LATEST_TIMESTAMP);
-082  }
-083
-084  /**
-085   * Create a Delete operation for the specified row and timestamp.<p>
-086   *
-087   * If no further operations are done, this will delete all columns in all
-088   * families of the specified row with a timestamp less than or equal to the
-089   * specified timestamp.<p>
-090   *
-091   * This timestamp is ONLY used for a delete row operation.  If specifying
-092   * families or columns, you must specify each timestamp individually.
-093   * @param row row key
-094   * @param timestamp maximum version timestamp (only for delete row)
-095   */
-096  public Delete(byte [] row, long timestamp) {
-097    this(row, 0, row.length, timestamp);
-098  }
-099
-100  /**
-101   * Create a Delete operation for the specified row and timestamp.<p>
-102   *
-103   * If no further operations are done, this will delete all columns in all
-104   * families of the specified row with a timestamp less than or equal to the
-105   * specified timestamp.<p>
-106   *
-107   * This timestamp is ONLY used for a delete row operation.  If specifying
-108   * families or columns, you must specify each timestamp individually.
-109   * @param row We make a local copy of this passed in row.
-110   * @param rowOffset
-111   * @param rowLength
-112   */
-113  public Delete(final byte[] row, final int rowOffset, final int rowLength) {
-114    this(row, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
-115  }
-116
-117  /**
-118   * Create a Delete operation for the specified row and timestamp.<p>
-119   *
-120   * If no further operations are done, this will delete all columns in all
-121   * families of the
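A short sketch of the four deletion patterns the class comment describes, against the client API; the connection setup, the table name "demo", and the column names are illustrative only.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteExamples {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("demo"))) {
      // Delete an entire row: all versions of all columns in all families.
      table.delete(new Delete(Bytes.toBytes("row1")));

      // Narrow the scope on another row.
      Delete d = new Delete(Bytes.toBytes("row2"));
      d.addFamily(Bytes.toBytes("cf1"));                       // whole family
      d.addColumns(Bytes.toBytes("cf2"), Bytes.toBytes("q"));  // all versions of cf2:q
      // Delete only the version of cf2:q2 written at timestamp 42.
      d.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q2"), 42L);
      table.delete(d);
    }
  }
}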

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
index 6fecbc9..2accda0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.NamespaceFuture.html
@@ -34,4140 +34,4141 @@
 026import 
java.nio.charset.StandardCharsets;
 027import java.util.ArrayList;
 028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.EnumSet;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.LinkedList;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Set;
-037import java.util.concurrent.Callable;
-038import 
java.util.concurrent.ExecutionException;
-039import java.util.concurrent.Future;
-040import java.util.concurrent.TimeUnit;
-041import 
java.util.concurrent.TimeoutException;
-042import 
java.util.concurrent.atomic.AtomicInteger;
-043import 
java.util.concurrent.atomic.AtomicReference;
-044import java.util.regex.Pattern;
-045import java.util.stream.Collectors;
-046import java.util.stream.Stream;
-047import 
org.apache.hadoop.conf.Configuration;
-048import 
org.apache.hadoop.hbase.Abortable;
-049import 
org.apache.hadoop.hbase.CacheEvictionStats;
-050import 
org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import 
org.apache.hadoop.hbase.ClusterStatus;
-053import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-054import 
org.apache.hadoop.hbase.HBaseConfiguration;
-055import 
org.apache.hadoop.hbase.HConstants;
-056import 
org.apache.hadoop.hbase.HRegionInfo;
-057import 
org.apache.hadoop.hbase.HRegionLocation;
-058import 
org.apache.hadoop.hbase.HTableDescriptor;
-059import 
org.apache.hadoop.hbase.MasterNotRunningException;
-060import 
org.apache.hadoop.hbase.MetaTableAccessor;
-061import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-062import 
org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import 
org.apache.hadoop.hbase.NotServingRegionException;
-064import 
org.apache.hadoop.hbase.RegionLoad;
-065import 
org.apache.hadoop.hbase.RegionLocations;
-066import 
org.apache.hadoop.hbase.ServerName;
-067import 
org.apache.hadoop.hbase.TableExistsException;
-068import 
org.apache.hadoop.hbase.TableName;
-069import 
org.apache.hadoop.hbase.TableNotDisabledException;
-070import 
org.apache.hadoop.hbase.TableNotFoundException;
-071import 
org.apache.hadoop.hbase.UnknownRegionException;
-072import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-073import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-074import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-075import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-076import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-077import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-078import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-079import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-080import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-081import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-082import 
org.apache.hadoop.hbase.quotas.QuotaRetriever;
-083import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-084import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-085import 
org.apache.hadoop.hbase.replication.ReplicationException;
-086import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-087import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-088import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-090import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-091import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-092import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-093import 
org.apache.hadoop.hbase.util.Addressing;
-094import 
org.apache.hadoop.hbase.util.Bytes;
-095import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-096import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-097import 
org.apache.hadoop.hbase.util.Pair;
-098import 
org.apache.hadoop.ipc.RemoteException;
-099import 
org.apache.hadoop.util.StringUtils;
-100import 
org.apache.yetus.audience.InterfaceAudience;
-101import 
org.apache.yetus.audience.InterfaceStability;
-102import org.slf4j.Logger;
-103import org.slf4j.LoggerFactory;
-104
-105import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-106import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-107import 

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index d5a2465..d5a2af9 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -286,10 +286,10 @@
 Warnings
 Errors
 
-3458
+3470
 0
 0
-19378
+19312
 
 Files
 
@@ -377,7 +377,7 @@
-org/apache/hadoop/hbase/ClusterStatus.java  0  0  4
+org/apache/hadoop/hbase/ClusterStatus.java  0  0  1
 org/apache/hadoop/hbase/CompoundConfiguration.java  0
@@ -607,7 +607,7 @@
-org/apache/hadoop/hbase/MetaTableAccessor.java  0  0  118
+org/apache/hadoop/hbase/MetaTableAccessor.java  0  0  114
 org/apache/hadoop/hbase/MiniHBaseCluster.java  0
@@ -667,7 +667,7 @@
-org/apache/hadoop/hbase/RegionLoad.java  0  0  2
+org/apache/hadoop/hbase/RegionLoad.java  0  0  1
 org/apache/hadoop/hbase/RegionLocations.java  0
@@ -702,7 +702,7 @@
-org/apache/hadoop/hbase/ServerLoad.java  0  0  8
+org/apache/hadoop/hbase/ServerLoad.java  0  0  1
 org/apache/hadoop/hbase/ServerName.java  0
@@ -764,140 +764,140 @@
 0  1
+org/apache/hadoop/hbase/TestClientClusterStatus.java  0  0  1
 org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java  0  0  2
 org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java  0  0  1
 org/apache/hadoop/hbase/TestFullLogReconstruction.java  0  0  3
 org/apache/hadoop/hbase/TestGlobalMemStoreSize.java  0  0  5
 org/apache/hadoop/hbase/TestHBaseConfiguration.java  0  0  1
 org/apache/hadoop/hbase/TestHBaseOnOtherDfsCluster.java  0  0  2
 org/apache/hadoop/hbase/TestHBaseTestingUtility.java  0  0  6
 org/apache/hadoop/hbase/TestHColumnDescriptor.java  0  0  2
 org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java  0  0  1
 org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java  0  0  2
 org/apache/hadoop/hbase/TestHTableDescriptor.java  0  0  11
 org/apache/hadoop/hbase/TestIOFencing.java  0  0  3
 org/apache/hadoop/hbase/TestInfoServers.java  0  0  1
 org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java  0  0  2
 org/apache/hadoop/hbase/TestJMXListener.java  0  0  1
 org/apache/hadoop/hbase/TestKeyValue.java  0  0  3
 org/apache/hadoop/hbase/TestLocalHBaseCluster.java  0  0  1
 org/apache/hadoop/hbase/TestMetaTableAccessor.java  0  0  5
 org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java  0  0  8
 org/apache/hadoop/hbase/TestMetaTableLocator.java  0  0  35
 org/apache/hadoop/hbase/TestMovedRegionsCleaner.java  0  0  3
 org/apache/hadoop/hbase/TestMultiVersions.java  0  0  3
 org/apache/hadoop/hbase/TestNamespace.java  0  0  4
 org/apache/hadoop/hbase/TestNodeHealthCheckChore.java  0  0  1
 org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java  0  0  38
 org/apache/hadoop/hbase/TestPerformanceEvaluation.java  0  0  4
-org/apache/hadoop/hbase/TestRegionLoad.java  0  0  3
 org/apache/hadoop/hbase/TestRegionRebalancing.java  0
@@ -909,387 +909,377 @@
 0  2
-org/apache/hadoop/hbase/TestServerLoad.java  0  0  9
 org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java  0  0  16
 org/apache/hadoop/hbase/TestStochasticBalancerJmxMetrics.java  0  0  4
 org/apache/hadoop/hbase/TestTagRewriteCell.java  0  0  1
 org/apache/hadoop/hbase/TestTimeout.java  0  0  7
 org/apache/hadoop/hbase/TestZooKeeper.java  0  0  12
 org/apache/hadoop/hbase/TimestampTestBase.java  0  0  3
 org/apache/hadoop/hbase/UnknownRegionException.java  0  0  1
 org/apache/hadoop/hbase/Waiter.java  0  0  3
 org/apache/hadoop/hbase/ZKNamespaceManager.java  0  0  1
 org/apache/hadoop/hbase/ZNodeClearer.java  0  0  3
 org/apache/hadoop/hbase/backup/BackupClientFactory.java  0  0  3
 org/apache/hadoop/hbase/backup/BackupCopyJob.java  0  0  1
 org/apache/hadoop/hbase/backup/BackupHFileCleaner.java  0  0  3
 org/apache/hadoop/hbase/backup/BackupInfo.java  0  0  1
 org/apache/hadoop/hbase/backup/BackupMergeJob.java  0  0  1
 org/apache/hadoop/hbase/backup/BackupRestoreConstants.java  0  0  5
 org/apache/hadoop/hbase/backup/BackupRestoreFactory.java  0  0  1
 org/apache/hadoop/hbase/backup/BackupTableInfo.java  0  0  1
 org/apache/hadoop/hbase/backup/FailedArchiveException.java  0  0  1
 org/apache/hadoop/hbase/backup/HBackupFileSystem.java  0  0  1
 org/apache/hadoop/hbase/backup/HFileArchiver.java  0  0  19
 org/apache/hadoop/hbase/backup/LogUtils.java  0  0  1
 org/apache/hadoop/hbase/backup/RestoreDriver.java  0  0  2
 org/apache/hadoop/hbase/backup/RestoreJob.java  0  0  1
 org/apache/hadoop/hbase/backup/RestoreRequest.java  0  0  1
 org/apache/hadoop/hbase/backup/TestBackupBase.java  0  0  13
 org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java  0  0  5
 org/apache/hadoop/hbase/backup/TestBackupDelete.java  0  0  2
 org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java  0  0  1
 

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/HRegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/HRegionInfo.html 
b/devapidocs/org/apache/hadoop/hbase/HRegionInfo.html
index 8a9cf9c..50fdead 100644
--- a/devapidocs/org/apache/hadoop/hbase/HRegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/HRegionInfo.html
@@ -123,7 +123,7 @@ var activeTableTab = "activeTableTab";
 
 @Deprecated
 @InterfaceAudience.Public
-public class HRegionInfo
+public class HRegionInfo
 extends Object
 implements RegionInfo, Comparable<HRegionInfo>
 Information about a region. A region is a range of keys in the whole keyspace of a table, an
@@ -229,7 +229,7 @@ implements 
 
 
-private static org.apache.commons.logging.Log LOG
+private static org.slf4j.Logger LOG
 Deprecated.
 
@@ -917,7 +917,7 @@ implements 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.slf4j.Logger LOG
 Deprecated.
 
 
@@ -927,7 +927,7 @@ implements 
 
 ENCODED_REGION_NAME_REGEX
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ENCODED_REGION_NAME_REGEX
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String ENCODED_REGION_NAME_REGEX
 Deprecated.
 A non-capture group so that this can be embedded.
 
@@ -942,7 +942,7 @@ implements 
 
 MAX_REPLICA_ID
-private static finalint MAX_REPLICA_ID
+private static finalint MAX_REPLICA_ID
 Deprecated.
 
 See Also:
@@ -956,7 +956,7 @@ implements 
 
 endKey
-privatebyte[] endKey
+privatebyte[] endKey
 Deprecated.
 
 
@@ -966,7 +966,7 @@ implements 
 
 offLine
-privateboolean offLine
+privateboolean offLine
 Deprecated.
 
 
@@ -976,7 +976,7 @@ implements 
 
 regionId
-privatelong regionId
+privatelong regionId
 Deprecated.
 
 
@@ -986,7 +986,7 @@ implements 
 
 regionName
-private transientbyte[] regionName
+private transientbyte[] regionName
 Deprecated.
 
 
@@ -996,7 +996,7 @@ implements 
 
 split
-privateboolean split
+privateboolean split
 Deprecated.
 
 
@@ -1006,7 +1006,7 @@ implements 
 
 startKey
-privatebyte[] startKey
+privatebyte[] startKey
 Deprecated.
 
 
@@ -1016,7 +1016,7 @@ implements 
 
 hashCode
-privateint hashCode
+privateint hashCode
 Deprecated.
 
 
@@ -1026,7 +1026,7 @@ implements 
 
 NO_HASH
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NO_HASH
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String NO_HASH
 Deprecated.
 
 
@@ -1036,7 +1036,7 @@ implements 
 
 encodedName
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String encodedName
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String encodedName
 Deprecated.
 
 
@@ -1046,7 +1046,7 @@ implements 
 
 encodedNameAsBytes
-privatebyte[] encodedNameAsBytes
+privatebyte[] encodedNameAsBytes
 Deprecated.
 
 
@@ -1056,7 +1056,7 @@ implements 
 
 replicaId
-privateint replicaId
+privateint replicaId
 Deprecated.
 
 
@@ -1066,7 +1066,7 @@ implements 
 
 tableName
-privateTableName tableName
+privateTableName tableName
 Deprecated.
 
 
@@ -1076,7 +1076,7 @@ implements 
 
 DISPLAY_KEYS_KEY
-static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String DISPLAY_KEYS_KEY
+static finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String DISPLAY_KEYS_KEY
 Deprecated.
 
 See Also:
@@ -1090,7 +1090,7 @@ implements 
 
 HIDDEN_END_KEY
-public static finalbyte[] HIDDEN_END_KEY
+public static finalbyte[] HIDDEN_END_KEY
 Deprecated.
 
 
@@ -1100,7 +1100,7 @@ implements 
 
 HIDDEN_START_KEY
-public static finalbyte[] HIDDEN_START_KEY
+public static finalbyte[] HIDDEN_START_KEY
 Deprecated.
 
 
@@ -1110,7 +1110,7 @@ implements 
 
 FIRST_META_REGIONINFO
-public static finalHRegionInfo FIRST_META_REGIONINFO
+public static finalHRegionInfo FIRST_META_REGIONINFO
 Deprecated.
 HRegionInfo for first meta region
 
@@ -1129,7 +1129,7 @@ implements 
 
 HRegionInfo
-privateHRegionInfo(longregionId,
+privateHRegionInfo(longregionId,
   

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/io/crypto/Encryption.Context.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/io/crypto/Encryption.Context.html 
b/apidocs/src-html/org/apache/hadoop/hbase/io/crypto/Encryption.Context.html
index 331a8cb..3ce8f8a 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/io/crypto/Encryption.Context.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/io/crypto/Encryption.Context.html
@@ -35,558 +35,557 @@
 027import java.util.Arrays;
 028import java.util.Map;
 029import 
java.util.concurrent.ConcurrentHashMap;
-030
-031import javax.crypto.SecretKeyFactory;
-032import javax.crypto.spec.PBEKeySpec;
-033import javax.crypto.spec.SecretKeySpec;
-034
-035import org.apache.commons.io.IOUtils;
-036import org.apache.commons.logging.Log;
-037import 
org.apache.commons.logging.LogFactory;
-038import 
org.apache.hadoop.conf.Configuration;
-039import 
org.apache.hadoop.hbase.HBaseConfiguration;
-040import 
org.apache.hadoop.hbase.HConstants;
-041import 
org.apache.yetus.audience.InterfaceAudience;
-042import 
org.apache.hadoop.hbase.util.Bytes;
-043import 
org.apache.hadoop.hbase.util.Pair;
-044import 
org.apache.hadoop.util.ReflectionUtils;
-045
-046/**
-047 * A facade for encryption algorithms and 
related support.
-048 */
-049@InterfaceAudience.Public
-050public final class Encryption {
-051
-052  private static final Log LOG = 
LogFactory.getLog(Encryption.class);
-053
-054  /**
-055   * Crypto context
-056   */
-057  @InterfaceAudience.Public
-058  public static class Context extends 
org.apache.hadoop.hbase.io.crypto.Context {
-059
-060/** The null crypto context */
-061public static final Context NONE = 
new Context();
-062
-063private Context() {
-064  super();
-065}
-066
-067private Context(Configuration conf) 
{
-068  super(conf);
-069}
-070
-071@Override
-072public Context setCipher(Cipher 
cipher) {
-073  super.setCipher(cipher);
-074  return this;
-075}
-076
-077@Override
-078public Context setKey(Key key) {
-079  super.setKey(key);
-080  return this;
-081}
-082
-083public Context setKey(byte[] key) {
-084  super.setKey(new SecretKeySpec(key, 
getCipher().getName()));
-085  return this;
-086}
-087  }
-088
-089  public static Context newContext() {
-090return new Context();
-091  }
-092
-093  public static Context 
newContext(Configuration conf) {
-094return new Context(conf);
-095  }
-096
-097  // Prevent instantiation
-098  private Encryption() {
-099super();
-100  }
-101
-102  /**
-103   * Get a cipher given a name
-104   * @param name the cipher name
-105   * @return the cipher, or null if a suitable one could not be found
-107  public static Cipher 
getCipher(Configuration conf, String name) {
-108return 
getCipherProvider(conf).getCipher(name);
-109  }
-110
-111  /**
-112   * Get names of supported encryption 
algorithms
-113   *
-114   * @return Array of strings, each 
represents a supported encryption algorithm
-115   */
-116  public static String[] 
getSupportedCiphers() {
-117return 
getSupportedCiphers(HBaseConfiguration.create());
-118  }
-119
-120  /**
-121   * Get names of supported encryption 
algorithms
-122   *
-123   * @return Array of strings, each 
represents a supported encryption algorithm
-124   */
-125  public static String[] 
getSupportedCiphers(Configuration conf) {
-126return 
getCipherProvider(conf).getSupportedCiphers();
-127  }
-128
-129  /**
-130   * Return the MD5 digest of the 
concatenation of the supplied arguments.
-131   */
-132  public static byte[] hash128(String... 
args) {
-133byte[] result = new byte[16];
-134try {
-135  MessageDigest md = 
MessageDigest.getInstance("MD5");
-136  for (String arg: args) {
-137md.update(Bytes.toBytes(arg));
-138  }
-139  md.digest(result, 0, 
result.length);
-140  return result;
-141} catch (NoSuchAlgorithmException e) 
{
-142  throw new RuntimeException(e);
-143} catch (DigestException e) {
-144  throw new RuntimeException(e);
-145}
-146  }
-147
-148  /**
-149   * Return the MD5 digest of the 
concatenation of the supplied arguments.
-150   */
-151  public static byte[] hash128(byte[]... 
args) {
-152byte[] result = new byte[16];
-153try {
-154  MessageDigest md = 
MessageDigest.getInstance("MD5");
-155  for (byte[] arg: args) {
-156md.update(arg);
-157  }
-158  md.digest(result, 0, 
result.length);
-159  return result;
-160} catch (NoSuchAlgorithmException e) 
{
-161  throw new RuntimeException(e);
-162} catch (DigestException e) {
-163  throw new RuntimeException(e);
-164}
-165  }
-166
-167  /**
-168   * Return the SHA-256 digest of the 
concatenation of the supplied arguments.
-169   */
-170  public static byte[] 
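A small usage sketch assembled from the methods visible in the listing above (newContext, getCipher, setKey, getSupportedCiphers, hash128); the cipher name "AES" and the key material are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.crypto.Encryption;

public class EncryptionContextSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Derive 128 bits of key material from strings (MD5, per hash128 above).
    byte[] keyBytes = Encryption.hash128("tenant", "secret", "salt");
    // Build a crypto context; setKey(byte[]) wraps the bytes in a
    // SecretKeySpec named after the cipher, as shown in Context.setKey.
    Encryption.Context ctx = Encryption.newContext(conf)
        .setCipher(Encryption.getCipher(conf, "AES"))
        .setKey(keyBytes);
    System.out.println("cipher in context: " + ctx.getCipher().getName());
    System.out.println("supported ciphers: "
        + String.join(", ", Encryption.getSupportedCiphers(conf)));
  }
}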

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.ReadExampleCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.ReadExampleCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.ReadExampleCallable.html
index 0b8baa8..c77170b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.ReadExampleCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.ReadExampleCallable.html
@@ -30,308 +30,325 @@
 022import org.apache.commons.logging.Log;
 023import 
org.apache.commons.logging.LogFactory;
 024import 
org.apache.hadoop.conf.Configured;
-025import 
org.apache.hadoop.hbase.TableName;
-026import 
org.apache.hadoop.hbase.client.Connection;
-027import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-028import 
org.apache.hadoop.hbase.client.Put;
-029import 
org.apache.hadoop.hbase.client.RegionLocator;
-030import 
org.apache.hadoop.hbase.client.Result;
-031import 
org.apache.hadoop.hbase.client.ResultScanner;
-032import 
org.apache.hadoop.hbase.client.Scan;
-033import 
org.apache.hadoop.hbase.client.Table;
-034import 
org.apache.hadoop.hbase.filter.KeyOnlyFilter;
-035import 
org.apache.hadoop.hbase.util.Bytes;
-036import org.apache.hadoop.util.Tool;
-037import 
org.apache.hadoop.util.ToolRunner;
-038
-039import java.io.IOException;
-040import java.util.ArrayList;
-041import java.util.List;
-042import java.util.concurrent.Callable;
-043import 
java.util.concurrent.ExecutorService;
-044import java.util.concurrent.Executors;
-045import 
java.util.concurrent.ForkJoinPool;
-046import java.util.concurrent.Future;
-047import 
java.util.concurrent.ThreadFactory;
-048import 
java.util.concurrent.ThreadLocalRandom;
-049import java.util.concurrent.TimeUnit;
-050
-051
-052/**
-053 * Example on how to use HBase's {@link Connection} and {@link Table} in a
-054 * multi-threaded environment. Each table is a lightweight object
-055 * that is created and thrown away. Connections are heavyweight objects
-056 * that hold on to zookeeper connections, async processes, and other state.
-057 *
-058 * <pre>
-059 * Usage:
-060 * bin/hbase org.apache.hadoop.hbase.client.example.MultiThreadedClientExample testTableName 50
-061 * </pre>
-062 *
-063 * <p>
-064 * The table should already be created before running the command.
-065 * This example expects one column family named d.
-066 * </p>
-067 * <p>
-068 * This is meant to show different operations that are likely to be
-069 * done in a real-world application. These operations are:
-070 * </p>
-071 *
-072 * <ul>
-073 *   <li>
-074 * 30% of all operations performed are batch writes.
-075 * 30 puts are created and sent out at a time.
-076 * The response for all puts is waited on.
-077 *   </li>
-078 *   <li>
-079 * 20% of all operations are single writes.
-080 * A single put is sent out and the response is waited for.
-081 *   </li>
-082 *   <li>
-083 * 50% of all operations are scans.
-084 * These scans start at a random place and scan up to 100 rows.
-085 *   </li>
-086 * </ul>
-087 *
-088 */
-089public class MultiThreadedClientExample extends Configured implements Tool {
-090  private static final Log LOG = 
LogFactory.getLog(MultiThreadedClientExample.class);
-091  private static final int 
DEFAULT_NUM_OPERATIONS = 50;
-092
-093  /**
-094   * The name of the column family.
-095   *
-096   * d for default.
-097   */
-098  private static final byte[] FAMILY = 
Bytes.toBytes("d");
-099
-100  /**
-101   * For the example we're just using one 
qualifier.
-102   */
-103  private static final byte[] QUAL = 
Bytes.toBytes("test");
-104
-105  private final ExecutorService 
internalPool;
-106
-107  private final int threads;
-108
-109  public MultiThreadedClientExample() throws IOException {
-110    // Base number of threads.
-111    // This represents the number of threads your application has
-112    // that can be interacting with an hbase client.
-113    this.threads = Runtime.getRuntime().availableProcessors() * 4;
-114
-115// Daemon threads are great for 
things that get shut down.
-116ThreadFactory threadFactory = new 
ThreadFactoryBuilder()
-117
.setDaemon(true).setNameFormat("internal-pol-%d").build();
-118
-119
-120this.internalPool = 
Executors.newFixedThreadPool(threads, threadFactory);
-121  }
+025import 
org.apache.hadoop.hbase.CellBuilder;
+026import 
org.apache.hadoop.hbase.CellBuilderFactory;
+027import 
org.apache.hadoop.hbase.CellBuilderType;
+028import 
org.apache.hadoop.hbase.TableName;
+029import 
org.apache.hadoop.hbase.client.Connection;
+030import 
org.apache.hadoop.hbase.client.ConnectionFactory;
+031import 
org.apache.hadoop.hbase.client.Put;
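A condensed sketch of the same pool setup using only java.util.concurrent (the original relies on Guava's ThreadFactoryBuilder for the same effect); the class name and the pickOperation helper are invented for illustration.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;

public class ClientPoolSketch {
  // One fixed pool sized to the host, shared by all client work, as above.
  public static ExecutorService newClientPool() {
    int threads = Runtime.getRuntime().availableProcessors() * 4;
    AtomicInteger counter = new AtomicInteger();
    ThreadFactory daemonFactory = r -> {
      Thread t = new Thread(r, "internal-pool-" + counter.getAndIncrement());
      t.setDaemon(true);  // daemon threads let the JVM shut down cleanly
      return t;
    };
    return Executors.newFixedThreadPool(threads, daemonFactory);
  }

  // Picks an operation with the 30/20/50 mix described in the class comment.
  public static String pickOperation() {
    int roll = ThreadLocalRandom.current().nextInt(100);
    if (roll < 30) return "batch-write";   // 30%: 30 puts at a time
    if (roll < 50) return "single-write";  // 20%: one put, wait for response
    return "scan";                         // 50%: scan up to 100 rows
  }
}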

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
index 7c59e27..c904c56 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
@@ -119,4048 +119,4054 @@
 111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-155import 

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.CheckAndMutateBuilderImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.CheckAndMutateBuilderImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.CheckAndMutateBuilderImpl.html
new file mode 100644
index 000..9ed0853
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/rest/client/RemoteHTable.CheckAndMutateBuilderImpl.html
@@ -0,0 +1,502 @@
+
+org.apache.hadoop.hbase.rest.client
+Class 
RemoteHTable.CheckAndMutateBuilderImpl
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.rest.client.RemoteHTable.CheckAndMutateBuilderImpl
+
+
+
+
+
+
+
+All Implemented Interfaces:
+Table.CheckAndMutateBuilder
+
+
+Enclosing class:
+RemoteHTable
+
+
+
+private class RemoteHTable.CheckAndMutateBuilderImpl
+extends Object
+implements Table.CheckAndMutateBuilder
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private byte[]
+family
+
+
+private byte[]
+qualifier
+
+
+private byte[]
+row
+
+
+private byte[]
+value
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+CheckAndMutateBuilderImpl(byte[]row,
+ byte[]family)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+Table.CheckAndMutateBuilder
+ifEquals(byte[]value)
+Check for equality.
+
+
+
+Table.CheckAndMutateBuilder
+ifMatches(CompareOperatorcompareOp,
+ byte[]value)
+
+
+Table.CheckAndMutateBuilder
+ifNotExists()
+Check for lack of column.
+
+
+
+Table.CheckAndMutateBuilder
+qualifier(byte[]qualifier)
+
+
+boolean
+thenDelete(Deletedelete)
+
+
+boolean
+thenMutate(RowMutationsmutation)
+
+
+boolean
+thenPut(Putput)
+
+
+
+
+
+
[15/51] [partial] hbase-site git commit: Published site at .

2017-12-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
index d21d294..a6d1fc5 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableName.html
@@ -559,29 +559,37 @@ service.
 TableDescriptors.get(TableNametableName)
 
 
+BufferedMutator
+SharedConnection.getBufferedMutator(TableNametableName)
+
+
 static RegionInfo
 MetaTableAccessor.getClosestRegionInfo(Connectionconnection,
 TableNametableName,
 byte[]row)
 
-
+
 long
 ClusterStatus.getLastMajorCompactionTsForTable(TableNametable)
 
-
+
 static int
 MetaTableAccessor.getRegionCount(org.apache.hadoop.conf.Configurationc,
   TableNametableName)
 Count regions in hbase:meta for passed 
table.
 
 
-
+
 static int
 MetaTableAccessor.getRegionCount(Connectionconnection,
   TableNametableName)
 Count regions in hbase:meta for passed 
table.
 
 
+
+RegionLocator
+SharedConnection.getRegionLocator(TableNametableName)
+
 
 static Scan
 MetaTableAccessor.getScanForTableName(Connectionconnection,
@@ -590,13 +598,18 @@ service.
 
 
 
+TableBuilder
+SharedConnection.getTableBuilder(TableNametableName,
+   http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true;
 title="class or interface in 
java.util.concurrent">ExecutorServicepool)
+
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 MetaTableAccessor.getTableRegions(Connectionconnection,
TableNametableName)
 Gets all of the regions of the specified table.
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListRegionInfo
 MetaTableAccessor.getTableRegions(Connectionconnection,
TableNametableName,
@@ -604,14 +617,14 @@ service.
 Gets all of the regions of the specified table.
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
 MetaTableAccessor.getTableRegionsAndLocations(Connectionconnection,
TableNametableName)
 Do not use this method to get meta table regions, use 
methods in MetaTableLocator instead.
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairRegionInfo,ServerName
 MetaTableAccessor.getTableRegionsAndLocations(Connectionconnection,
TableNametableName,
@@ -619,42 +632,42 @@ service.
 Do not use this method to get meta table regions, use 
methods in MetaTableLocator instead.
 
 
-
+
 static byte[]
 MetaTableAccessor.getTableStartRowForMeta(TableNametableName,
MetaTableAccessor.QueryTypetype)
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">OptionalTableState
 AsyncMetaTableAccessor.getTableState(AsyncTable?metaTable,
  TableNametableName)
 
-
+
 static TableState
 MetaTableAccessor.getTableState(Connectionconn,
  TableNametableName)
 Fetch table state for given table from META table
 
 
-
+
 static byte[]
 MetaTableAccessor.getTableStopRowForMeta(TableNametableName,
   MetaTableAccessor.QueryTypetype)
 
-
+
 (package private) static boolean
 MetaTableAccessor.isInsideTable(RegionInfocurrent,
  TableNametableName)
 
-
+
 static boolean
 TableName.isMetaTableName(TableNametn)
 
-
+
 TableDescriptor
 TableDescriptors.remove(TableNametablename)
 
-
+
 static void
 MetaTableAccessor.scanMeta(Connectionconnection,
 MetaTableAccessor.Visitorvisitor,
@@ -665,7 +678,7 @@ service.
  given row.
 
 
-
+
 static void
 MetaTableAccessor.scanMeta(Connectionconnection,
 TableNametable,
@@ -673,25 +686,25 @@ service.
 intmaxRows,
 MetaTableAccessor.Visitorvisitor)
 
-
+
 static void
 MetaTableAccessor.scanMetaForTableRegions(Connectionconnection,
MetaTableAccessor.Visitorvisitor,
TableNametableName)
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true;
 title="class or 

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/dependency-convergence.html
--
diff --git a/hbase-build-configuration/dependency-convergence.html 
b/hbase-build-configuration/dependency-convergence.html
index 9acdb2b..a5f5cf4 100644
--- a/hbase-build-configuration/dependency-convergence.html
+++ b/hbase-build-configuration/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Reactor Dependency 
Convergence
 
@@ -488,22 +488,22 @@
 3.4.10
 
 
org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT
+- org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile
|  +- org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile
|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|  +- org.apache.zookeeper:zookeeper:jar:3.4.10:compile
|  +- org.apache.hadoop:hadoop-common:jar:2.7.4:compile
|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|  +- org.apache.hadoop:hadoop-auth:jar:2.7.4:compile
|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|  \- org.apache.hadoop:hadoop-client:jar:2.7.4:compile
|     \- org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.4:compile
|        \- org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile
|           \- org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile
|              \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+- org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT:compile
|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+- org.apache.hbase:hbase-mapreduce:test-jar:tests:3.0.0-SNAPSHOT:test
|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
+- org.apache.hbase:hbase-testing-util:jar:3.0.0-SNAPSHOT:test
|  +- org.apache.hbase:hbase-server:test-jar:tests:3.0.0-SNAPSHOT:test
|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
|  \- org.apache.hadoop:hadoop-minicluster:jar:2.7.4:test
|     +- org.apache.hadoop:hadoop-common:test-jar:tests:2.7.4:test
|     |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
|     \- org.apache.hadoop:hadoop-yarn-server-tests:test-jar:tests:2.7.4:test
|        \- org.apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.4:test
|           \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicate)
+- org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile
|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+- org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile
|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+- org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT:compile
|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+- org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile
|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
\- org.apache.hbase:hbase-rsgroup:jar:3.0.0-SNAPSHOT:compile
   \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)

org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT
+- org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile
|  +- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|  \- org.apache.hadoop:hadoop-auth:jar:2.7.4:compile
|     \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+- org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile
|  +- org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile
|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|  +- org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile
|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|  +- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
|  \- org.apache.hadoop:hadoop-client:jar:2.7.4:compile
|     \- org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.4:compile
|        \- org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile
|           \- org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile
|              \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - version managed from 3.4.6; omitted for duplicate)
+- org.apache.hbase:hbase-server:test-jar:tests:3.0.0-SNAPSHOT:test
   \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - version managed from 3.4.6; omitted for duplicat
 

[15/51] [partial] hbase-site git commit: Published site at .

2017-12-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
index 64ba75d..6cb35af 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -56,2149 +56,2149 @@
 048import 
java.util.function.ToLongFunction;
 049import java.util.stream.Collectors;
 050import java.util.stream.LongStream;
-051
-052import org.apache.commons.logging.Log;
-053import 
org.apache.commons.logging.LogFactory;
-054import 
org.apache.hadoop.conf.Configuration;
-055import org.apache.hadoop.fs.FileSystem;
-056import org.apache.hadoop.fs.Path;
-057import org.apache.hadoop.hbase.Cell;
-058import 
org.apache.hadoop.hbase.CellComparator;
-059import 
org.apache.hadoop.hbase.CellUtil;
-060import 
org.apache.hadoop.hbase.CompoundConfiguration;
-061import 
org.apache.hadoop.hbase.HConstants;
-062import 
org.apache.hadoop.hbase.MemoryCompactionPolicy;
-063import 
org.apache.hadoop.hbase.TableName;
-064import 
org.apache.hadoop.hbase.backup.FailedArchiveException;
-065import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-066import 
org.apache.hadoop.hbase.client.RegionInfo;
-067import 
org.apache.hadoop.hbase.client.Scan;
-068import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-069import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-070import 
org.apache.hadoop.hbase.io.HeapSize;
-071import 
org.apache.hadoop.hbase.io.compress.Compression;
-072import 
org.apache.hadoop.hbase.io.crypto.Encryption;
-073import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-074import 
org.apache.hadoop.hbase.io.hfile.HFile;
-075import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-076import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-077import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-078import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
-079import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-080import 
org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
-081import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-082import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-083import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-084import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-085import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
-086import 
org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
-087import 
org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
-088import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-089import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-090import 
org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-091import 
org.apache.hadoop.hbase.security.EncryptionUtil;
-092import 
org.apache.hadoop.hbase.security.User;
-093import 
org.apache.hadoop.hbase.util.Bytes;
-094import 
org.apache.hadoop.hbase.util.ChecksumType;
-095import 
org.apache.hadoop.hbase.util.ClassSize;
-096import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-097import 
org.apache.hadoop.hbase.util.Pair;
-098import 
org.apache.hadoop.hbase.util.ReflectionUtils;
-099import 
org.apache.hadoop.util.StringUtils;
-100import 
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-101import 
org.apache.yetus.audience.InterfaceAudience;
-102
-103import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-104import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-105import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableCollection;
-106import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
-107import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-108import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-111
-112/**
-113 * A Store holds a column family in a Region.  It's a memstore and a set of zero
-114 * or more StoreFiles, which stretch backwards over time.
-115 *
-116 * <p>There's no reason to consider append-logging at this level; all logging
-117 * and locking is handled at the HRegion level.  Store just provides
-118 * services to manage sets of StoreFiles.  One of the most important of those
-119 * services is compaction, where files are aggregated once they pass
-120 * a configurable threshold.
-121 *
-122 * <p>Locking and transactions are handled at a higher level.  This API should
-123 * not be
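A toy model, not HBase code, of the shape just described: one mutable in-memory buffer plus a list of immutable "files", with a compaction that fires once a configurable file-count threshold is crossed. All names here are invented; the config key in the comment is only a pointer to the real tuning knob.

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class ToyStore {
  private final TreeMap<String, String> memstore = new TreeMap<>();
  private final List<TreeMap<String, String>> storeFiles = new ArrayList<>();
  private final int compactionThreshold;  // cf. hbase.hstore.compactionThreshold

  public ToyStore(int compactionThreshold) {
    this.compactionThreshold = compactionThreshold;
  }

  public void put(String key, String value) {
    memstore.put(key, value);
  }

  public void flush() {
    // A flush freezes the memstore into a new immutable "file".
    storeFiles.add(new TreeMap<>(memstore));
    memstore.clear();
    if (storeFiles.size() >= compactionThreshold) {
      compact();
    }
  }

  private void compact() {
    // Compaction aggregates many files into one; iterating oldest-first
    // means the newest value for each key wins.
    TreeMap<String, String> merged = new TreeMap<>();
    for (TreeMap<String, String> f : storeFiles) {
      merged.putAll(f);
    }
    storeFiles.clear();
    storeFiles.add(merged);
  }
}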

[15/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
index de6cb11..dd54dd2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.html
@@ -25,563 +25,558 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
-022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
-023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
-026import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+020import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
+021import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
+024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
+026import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 028
-029
-030import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-031import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-032import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-033import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler.Sharable;
-034import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-035import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseCombiner;
-044
-045import java.io.IOException;
-046import java.nio.ByteBuffer;
-047import java.util.ArrayDeque;
-048import java.util.Collection;
-049import java.util.Collections;
-050import java.util.Deque;
-051import java.util.IdentityHashMap;
-052import java.util.List;
-053import java.util.Set;
-054import 
java.util.concurrent.CompletableFuture;
-055import java.util.concurrent.TimeUnit;
-056import java.util.function.Supplier;
-057
-058import 
org.apache.hadoop.conf.Configuration;
-059import 
org.apache.hadoop.crypto.Encryptor;
-060import org.apache.hadoop.fs.Path;
+029import java.io.IOException;
+030import java.io.InterruptedIOException;
+031import java.nio.ByteBuffer;
+032import java.util.Collection;
+033import java.util.Collections;
+034import java.util.Iterator;
+035import java.util.List;
+036import java.util.Set;
+037import 
java.util.concurrent.CompletableFuture;
+038import 
java.util.concurrent.ConcurrentHashMap;
+039import 
java.util.concurrent.ConcurrentLinkedDeque;
+040import 
java.util.concurrent.ExecutionException;
+041import java.util.concurrent.TimeUnit;
+042import java.util.function.Supplier;
+043
+044import 
org.apache.hadoop.conf.Configuration;
+045import 
org.apache.hadoop.crypto.Encryptor;
+046import org.apache.hadoop.fs.Path;
+047import 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose;
+048import 
org.apache.hadoop.hbase.util.CancelableProgressable;
+049import 
org.apache.hadoop.hbase.util.FSUtils;
+050import 
org.apache.hadoop.hdfs.DFSClient;
+051import 
org.apache.hadoop.hdfs.DistributedFileSystem;
+052import 
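[Editor's note: the hunk above only reorders imports, but the names involved (READER_IDLE, WRITER_IDLE, HEART_BEAT_SEQNO, DFS_CLIENT_SOCKET_TIMEOUT_KEY) outline how this output stream keeps its datanode connections alive. Below is a sketch of the usual Netty idle-state wiring, using the plain io.netty package rather than HBase's shaded copy; the handler name and the idle policies in the comments are assumptions, not this class's exact behavior.]

    import io.netty.channel.ChannelDuplexHandler;
    import io.netty.channel.ChannelHandlerContext;
    import io.netty.handler.timeout.IdleState;
    import io.netty.handler.timeout.IdleStateEvent;

    // Hypothetical handler reacting to the two idle states named above.
    class HeartbeatHandler extends ChannelDuplexHandler {
      @Override
      public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
        if (evt instanceof IdleStateEvent) {
          IdleState state = ((IdleStateEvent) evt).state();
          if (state == IdleState.READER_IDLE) {
            ctx.close();  // assumed policy: nothing read in time, give up on the link
          } else if (state == IdleState.WRITER_IDLE) {
            // assumed policy: keep the connection alive with a heartbeat packet
            // (HBase tags such packets with HEART_BEAT_SEQNO)
          }
        } else {
          ctx.fireUserEventTriggered(evt);
        }
      }
    }

    // Wiring, with assumed timeouts (HBase derives its read timeout from
    // DFS_CLIENT_SOCKET_TIMEOUT_KEY, also imported above):
    //   pipeline.addLast(new IdleStateHandler(30, 10, 0, TimeUnit.SECONDS));
    //   pipeline.addLast(new HeartbeatHandler());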

[15/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
@@ -3248,7 +3248,7 @@
 3240  new 
Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
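[Editor's note: both hunks above replace the private getMobRegionInfo helper (its deletion appears further down in this diff) with the RegionInfo.createMobRegionInfo factory. A side-by-side sketch of the two ways of naming the same synthetic MOB region, assuming the usual package locations for these classes:]

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionInfoBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class MobRegionInfoSketch {
      // The shape of the private helper deleted later in this diff:
      // a region with start key ".mob" and region id 0.
      static RegionInfo oldWay(TableName tableName) {
        return RegionInfoBuilder.newBuilder(tableName)
            .setStartKey(Bytes.toBytes(".mob")).setRegionId(0).build();
      }

      // The static factory the patched call sites use instead.
      static RegionInfo newWay(TableName tableName) {
        return RegionInfo.createMobRegionInfo(tableName);
      }
    }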
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new 
MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new 
MasterCallable<ReplicationPeerConfig>(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcController(), 
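[Editor's note: every replication-peer method above follows the same shape: wrap the master RPC in a MasterCallable and run it through executeCallable. From the client side, a minimal usage sketch; the cluster key value is made up, and setClusterKey is assumed from the 2.0-era mutable ReplicationPeerConfig API.]

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    class ReplicationPeerExample {
      static void managePeer(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
          // assumed setter; the cluster key is the peer's
          // "zkQuorum:zkClientPort:zkParent" address (value here is made up)
          peerConfig.setClusterKey("peer-zk1,peer-zk2:2181:/hbase");
          admin.addReplicationPeer("1", peerConfig, true);  // enabled immediately
          // the mirror-image calls shown in this diff:
          admin.disableReplicationPeer("1");
          admin.enableReplicationPeer("1");
          admin.removeReplicationPeer("1");
        }
      }
    }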

[15/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
index 29ea7b3..6ed75c9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308boolean result = isAvailable() 
 && !hasReferences();
-1309LOG.info("ASKED IF SPLITTABLE " + 
result + " " + getRegionInfo().getShortNameToLog(),
-1310  new Throwable("LOGGING: 
REMOVE"));
-1311// REMOVE BELOW
-1312LOG.info("DEBUG LIST ALL FILES");
-1313for (HStore store : 
this.stores.values()) {
-1314  LOG.info("store " + 
store.getColumnFamilyName());
-1315  for (HStoreFile sf : 
store.getStorefiles()) {
-1316
LOG.info(sf.toStringDetailed());
-1317  }
-1318}
-1319return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324if (!isAvailable()) {
-1325  LOG.debug("Region " + this
-1326  + " is not mergeable because 
it is closing or closed");
-1327  return false;
-1328}
-1329if (hasReferences()) {
-1330  LOG.debug("Region " + this
-1331  + " is not mergeable because 
it has references");
-1332  return false;
-1333}
-1334
-1335return true;
+1308return isAvailable() && 
!hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313if (!isAvailable()) {
+1314  LOG.debug("Region " + this
+1315  + " is not mergeable because 
it is closing or closed");
+1316  return false;
+1317}
+1318if (hasReferences()) {
+1319  LOG.debug("Region " + this
+1320  + " is not mergeable because 
it has references");
+1321  return false;
+1322}
+1323
+1324return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328synchronized(this.writestate) {
+1329  return 
this.writestate.writesEnabled;
+1330}
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl 
getMVCC() {
+1335return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339synchronized(this.writestate) {
-1340  return 
this.writestate.writesEnabled;
-1341}
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl 
getMVCC() {
-1346return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long 
getReadPoint(IsolationLevel isolationLevel) {
+1347if (isolationLevel != null && 
isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348  // This scan can read even 
uncommitted transactions
+1349  return Long.MAX_VALUE;
+1350}
+1351return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given 
IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long 
getReadPoint(IsolationLevel isolationLevel) {
-1358if (isolationLevel != null && 
isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359  // This scan can read even 
uncommitted transactions
-1360  return Long.MAX_VALUE;
-1361}
-1362return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean 
isLoadingCfsOnDemandDefault() {
-1366return 
this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the 
cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * <p>This method could take 
some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage 
files that the HRegion's component
-1377   * HStores make use of.  It's a list 
of all StoreFile objects. Returns empty
-1378   * vector if already closed and null 
if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException 
Thrown when replay of wal is required
-1382   * because a Snapshot was not properly 
persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Map<byte[], 
List<HStoreFile>> close() throws IOException {
-1386return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new 
Object();
-1390
-1391  /** Conf key for the periodic flush 
interval */
-1392  
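[Editor's note: condensed from the hunk above, the three predicates boil down to a few lines. Field names here (available, references, mvccReadPoint) are hypothetical stand-ins for the region state the real methods consult.]

    // A minimal sketch of the splittable/mergeable/read-point rules.
    class RegionStateSketch {
      boolean available;     // region open and not closing/closed
      boolean references;    // store files still referencing a split parent
      long mvccReadPoint;    // what MVCC says is safely readable

      boolean isSplittable() { return available && !references; }

      // identical condition; the real method just adds debug logging
      boolean isMergeable()  { return available && !references; }

      // READ_UNCOMMITTED scans ignore MVCC and see everything, hence MAX_VALUE.
      long getReadPoint(boolean readUncommitted) {
        return readUncommitted ? Long.MAX_VALUE : mvccReadPoint;
      }
    }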

[15/51] [partial] hbase-site git commit: Published site at .

2017-11-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.html
index 435b2b3..211a086 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/CellUtil.html
@@ -26,10 +26,10 @@
 018
 019package org.apache.hadoop.hbase;
 020
-021import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
-022import static 
org.apache.hadoop.hbase.KeyValue.COLUMN_FAMILY_DELIMITER;
+021import static 
org.apache.hadoop.hbase.KeyValue.COLUMN_FAMILY_DELIMITER;
+022import static 
org.apache.hadoop.hbase.KeyValue.COLUMN_FAMILY_DELIM_ARRAY;
 023import static 
org.apache.hadoop.hbase.KeyValue.getDelimiter;
-024import static 
org.apache.hadoop.hbase.KeyValue.COLUMN_FAMILY_DELIM_ARRAY;
+024import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
 025
 026import java.io.DataOutput;
 027import java.io.DataOutputStream;
@@ -41,102 +41,102 @@
 033import java.util.List;
 034import java.util.Map.Entry;
 035import java.util.NavigableMap;
-036
-037import 
org.apache.hadoop.hbase.KeyValue.Type;
-038import 
org.apache.yetus.audience.InterfaceAudience;
-039import 
org.apache.yetus.audience.InterfaceAudience.Private;
-040
-041import 
com.google.common.annotations.VisibleForTesting;
-042
-043import 
org.apache.hadoop.hbase.io.HeapSize;
-044import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
-045import 
org.apache.hadoop.hbase.util.ByteRange;
-046import 
org.apache.hadoop.hbase.util.Bytes;
-047
-048/**
-049 * Utility methods helpful for slinging 
{@link Cell} instances. Some methods below are for internal
-050 * use only and are marked 
InterfaceAudience.Private at the method level. Note that all such methods
-051 * have been marked deprecated in 
HBase-2.0 and will be removed in HBase-3.0
-052 */
-053@InterfaceAudience.Public
-054public final class CellUtil {
-055
-056  /**
-057   * Private constructor to keep this 
class from being instantiated.
-058   */
-059  private CellUtil() {
-060  }
-061
-062  /*** ByteRange 
***/
-063
-064  /**
-065   * @deprecated As of HBase-2.0. Will be 
removed in HBase-3.0.
-066   */
-067  @Deprecated
-068  public static ByteRange 
fillRowRange(Cell cell, ByteRange range) {
-069return 
PrivateCellUtil.fillRowRange(cell, range);
-070  }
-071
-072  /**
-073   * @deprecated As of HBase-2.0. Will be 
removed in HBase-3.0.
-074   */
-075  @Deprecated
-076  public static ByteRange 
fillFamilyRange(Cell cell, ByteRange range) {
-077return 
PrivateCellUtil.fillFamilyRange(cell, range);
-078  }
-079
-080  /**
-081   * @deprecated As of HBase-2.0. Will be 
removed in HBase-3.0.
-082   */
-083  @Deprecated
-084  public static ByteRange 
fillQualifierRange(Cell cell, ByteRange range) {
-085return 
PrivateCellUtil.fillQualifierRange(cell, range);
-086  }
-087
-088  /**
-089   * @deprecated As of HBase-2.0. Will be 
removed in HBase-3.0.
-090   */
-091  @Deprecated
-092  public static ByteRange 
fillValueRange(Cell cell, ByteRange range) {
-093return 
PrivateCellUtil.fillValueRange(cell, range);
-094  }
-095
-096  /**
-097   * @deprecated As of HBase-2.0. Will be 
removed in HBase-3.0.
-098   */
-099  @Deprecated
-100  public static ByteRange 
fillTagRange(Cell cell, ByteRange range) {
-101return 
PrivateCellUtil.fillTagRange(cell, range);
-102  }
-103
-104  /* get individual 
arrays for tests */
-105
-106  public static byte[] cloneRow(Cell 
cell) {
-107byte[] output = new 
byte[cell.getRowLength()];
-108copyRowTo(cell, output, 0);
-109return output;
-110  }
-111
-112  public static byte[] cloneFamily(Cell 
cell) {
-113byte[] output = new 
byte[cell.getFamilyLength()];
-114copyFamilyTo(cell, output, 0);
-115return output;
-116  }
-117
-118  public static byte[] 
cloneQualifier(Cell cell) {
-119byte[] output = new 
byte[cell.getQualifierLength()];
-120copyQualifierTo(cell, output, 0);
-121return output;
-122  }
-123
-124  public static byte[] cloneValue(Cell 
cell) {
-125byte[] output = new 
byte[cell.getValueLength()];
-126copyValueTo(cell, output, 0);
-127return output;
-128  }
-129
-130  /**
-131   * @deprecated As of HBase-2.0. Will be 
removed in HBase-3.0.
+036import java.util.Optional;
+037
+038import 
org.apache.hadoop.hbase.KeyValue.Type;
+039import 
org.apache.hadoop.hbase.io.HeapSize;
+040import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+041import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
+042import 
org.apache.hadoop.hbase.util.ByteRange;
+043import 
org.apache.hadoop.hbase.util.Bytes;
+044import 
org.apache.yetus.audience.InterfaceAudience;
+045import 
