[16/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index e763690..f66043c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -1301,610 +1301,613 @@
 1293      }
 1294      RegionStateNode regionNode = regionStates.getOrCreateRegionStateNode(regionInfo);
 1295      // Do not need to lock on regionNode, as we can make sure that before we finish loading
-1296      // meta, all the related procedures can not be executed. The only exception is formeta
+1296      // meta, all the related procedures can not be executed. The only exception is for meta
 1297      // region related operations, but here we do not load the informations for meta region.
 1298      regionNode.setState(localState);
 1299      regionNode.setLastHost(lastHost);
 1300      regionNode.setRegionLocation(regionLocation);
 1301      regionNode.setOpenSeqNum(openSeqNum);
 1302
-1303      if (localState == State.OPEN) {
-1304        assert regionLocation != null : "found null region location for " + regionNode;
-1305        regionStates.addRegionToServer(regionNode);
-1306      } else if (localState == State.OFFLINE || regionInfo.isOffline()) {
-1307        regionStates.addToOfflineRegions(regionNode);
-1308      }
-1309    }
-1310  });
-1311
-1312  // every assignment is blocked until meta is loaded.
-1313  wakeMetaLoadedEvent();
-1314}
-1315
-1316/**
-1317 * Used to check if the meta loading is done.
-1318 * <p/>
-1319 * if not we throw PleaseHoldException since we are rebuilding the RegionStates
-1320 * @param hri region to check if it is already rebuild
-1321 * @throws PleaseHoldException if meta has not been loaded yet
-1322 */
-1323private void checkMetaLoaded(RegionInfo hri) throws PleaseHoldException {
-1324  if (!isRunning()) {
-1325    throw new PleaseHoldException("AssignmentManager not running");
-1326  }
-1327  boolean meta = isMetaRegion(hri);
-1328  boolean metaLoaded = isMetaLoaded();
-1329  if (!meta && !metaLoaded) {
-1330    throw new PleaseHoldException(
-1331      "Master not fully online; hbase:meta=" + meta + ", metaLoaded=" + metaLoaded);
-1332  }
-1333}
-1334
-1335// ============================================================
-1336//  TODO: Metrics
-1337// ============================================================
-1338public int getNumRegionsOpened() {
-1339  // TODO: Used by TestRegionPlacement.java and assume monotonically increasing value
-1340  return 0;
-1341}
-1342
-1343public long submitServerCrash(ServerName serverName, boolean shouldSplitWal) {
-1344  boolean carryingMeta;
-1345  long pid;
-1346  ServerStateNode serverNode = regionStates.getServerNode(serverName);
-1347  if (serverNode == null) {
-1348    LOG.info("Skip to add SCP for {} since this server should be OFFLINE already", serverName);
-1349    return -1;
-1350  }
-1351  // we hold the write lock here for fencing on reportRegionStateTransition. Once we set the
-1352  // server state to CRASHED, we will no longer accept the reportRegionStateTransition call from
-1353  // this server. This is used to simplify the implementation for TRSP and SCP, where we can make
-1354  // sure that, the region list fetched by SCP will not be changed any more.
-1355  serverNode.writeLock().lock();
-1356  try {
-1357    ProcedureExecutor<MasterProcedureEnv> procExec = this.master.getMasterProcedureExecutor();
-1358    carryingMeta = isCarryingMeta(serverName);
-1359    if (!serverNode.isInState(ServerState.ONLINE)) {
-1360      LOG.info(
-1361        "Skip to add SCP for {} with meta= {}, " +
-1362        "since there should be a SCP is processing or already done for this server node",
-1363        serverName, carryingMeta);
-1364      return -1;
-1365    } else {
-1366      serverNode.setState(ServerState.CRASHED);
-1367      pid = procExec.submitProcedure(new ServerCrashProcedure(procExec.getEnvironment(),
-1368        serverName, shouldSplitWal, carryingMeta));
-1369      LOG.info(
-1370        "Added {} to dead servers which carryingMeta={}, submitted ServerCrashProcedure pid={}",
-1371        serverName, carryingMeta, pid);
-1372    }
-1373  } finally {
-1374    serverNode.writeLock().unlock();
-1375
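The lock-fencing idiom in submitServerCrash above is worth seeing in isolation: the state flips to CRASHED under the write lock, so any reportRegionStateTransition-style caller that later takes the read lock observes CRASHED and is rejected. A minimal self-contained sketch follows; the names mirror the diff, but this class is an illustration, not the HBase implementation.

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class FencingSketch {
  enum ServerState { ONLINE, CRASHED }

  static final class ServerStateNode {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private ServerState state = ServerState.ONLINE;

    // Caller side: fenced by the read lock; rejected once the server is CRASHED.
    boolean reportTransition() {
      lock.readLock().lock();
      try {
        return state == ServerState.ONLINE;
      } finally {
        lock.readLock().unlock();
      }
    }

    // SCP side: the state change is exclusive, like serverNode.writeLock() above.
    void markCrashed() {
      lock.writeLock().lock();
      try {
        state = ServerState.CRASHED;
      } finally {
        lock.writeLock().unlock();
      }
    }
  }

  public static void main(String[] args) {
    ServerStateNode node = new ServerStateNode();
    System.out.println(node.reportTransition()); // true: still ONLINE
    node.markCrashed();
    System.out.println(node.reportTransition()); // false: fenced out
  }
}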

[16/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.html
new file mode 100644
index 000..e849292
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/SplitWALRemoteProcedure.html
@@ -0,0 +1,783 @@
+SplitWALRemoteProcedure (Apache HBase 3.0.0-SNAPSHOT API)
+
+org.apache.hadoop.hbase.master.procedure
+Class SplitWALRemoteProcedure
+
+java.lang.Object
+  org.apache.hadoop.hbase.procedure2.Procedure<MasterProcedureEnv>
+    org.apache.hadoop.hbase.master.procedure.SplitWALRemoteProcedure
+
+All Implemented Interfaces:
+Comparable<Procedure<MasterProcedureEnv>>, ServerProcedureInterface, RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv, ServerName>
+
+@InterfaceAudience.Private
+public class SplitWALRemoteProcedure
+extends Procedure<MasterProcedureEnv>
+implements RemoteProcedureDispatcher.RemoteProcedure<MasterProcedureEnv, ServerName>, ServerProcedureInterface
+
+A remote procedure used to send a split WAL request to a region server. The
+task returns null if it succeeds, or a DoNotRetryIOException otherwise;
+SplitWALProcedure handles the DoNotRetryIOException case, and in all other
+cases the request is retried until it succeeds.
+
+Nested Class Summary
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+Procedure.LockState
+
+Nested classes/interfaces inherited from interface org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface
+ServerProcedureInterface.ServerOperationType
+
+Field Summary
+
+Modifier and Type                Field and Description
+private ServerName               crashedServer
+private boolean                  dispatched
+private ProcedureEvent<?>        event
+private static org.slf4j.Logger  LOG
+private boolean                  success
+private String                   walPath
+private ServerName               worker
+
+Fields inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+NO_PROC_ID, NO_TIMEOUT
+
+Constructor Summary
+
+SplitWALRemoteProcedure()
+SplitWALRemoteProcedure(ServerName worker, ServerName crashedServer, String wal)
+
+Method Summary
+
+All Methods / Instance Methods / Concrete Methods
+
+Modifier and Type    Method and Description
+protected boolean    abort(MasterProcedureEnv env)
+                     The abort() call is asynchronous and each procedure must decide how to deal
+                     with it, if they want to be abortable.
+private void         complete(MasterProcedureEnv env, Throwable error)
+protected void       deserializeStateData(ProcedureStateSerializer serializer)
+                     Called on store load to allow the user to decode the previously serialized
+                     state.
+protected Procedure<MasterProcedureEnv>[]
+                     execute(MasterProcedureEnv env)
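For orientation, a hedged sketch of how master-side code might submit this procedure through the constructor listed above. The ProcedureExecutor and all three argument values are placeholders a running master would supply; none of this wiring is taken from the page itself.

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.SplitWALRemoteProcedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

// Sketch only: asks `worker` to split one WAL that belonged to `crashedServer`.
// All inputs are illustrative; a real master supplies the executor and values.
final class SplitWalSubmitSketch {
  static long submitSplitWal(ProcedureExecutor<MasterProcedureEnv> procExec,
      ServerName worker, ServerName crashedServer, String walPath) {
    return procExec.submitProcedure(
        new SplitWALRemoteProcedure(worker, crashedServer, walPath));
  }
}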

[16/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder.html
index 2e150bc..0b315b8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.ServerRequestCallerBuilder.html
@@ -25,22 +25,22 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
-021import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
-022import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+020import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+021import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
+022import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
 023
-024import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-025
-026import java.util.List;
-027import java.util.concurrent.CompletableFuture;
-028import java.util.concurrent.TimeUnit;
-029
-030import org.apache.hadoop.hbase.HRegionLocation;
-031import org.apache.hadoop.hbase.ServerName;
-032import org.apache.hadoop.hbase.TableName;
-033import org.apache.yetus.audience.InterfaceAudience;
-034import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-035import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+024import java.util.List;
+025import java.util.concurrent.CompletableFuture;
+026import java.util.concurrent.TimeUnit;
+027import org.apache.hadoop.hbase.HRegionLocation;
+028import org.apache.hadoop.hbase.ServerName;
+029import org.apache.hadoop.hbase.TableName;
+030import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+031import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+032import org.apache.yetus.audience.InterfaceAudience;
+033
+034import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+035
 036import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 037import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
 038
@@ -83,432 +83,441 @@
 075
 076    private RegionLocateType locateType = RegionLocateType.CURRENT;
 077
-078    public SingleRequestCallerBuilder<T> table(TableName tableName) {
-079      this.tableName = tableName;
-080      return this;
-081    }
-082
-083    public SingleRequestCallerBuilder<T> row(byte[] row) {
-084      this.row = row;
-085      return this;
-086    }
-087
-088    public SingleRequestCallerBuilder<T> action(
-089        AsyncSingleRequestRpcRetryingCaller.Callable<T> callable) {
-090      this.callable = callable;
-091      return this;
-092    }
-093
-094    public SingleRequestCallerBuilder<T> operationTimeout(long operationTimeout, TimeUnit unit) {
-095      this.operationTimeoutNs = unit.toNanos(operationTimeout);
-096      return this;
-097    }
-098
-099    public SingleRequestCallerBuilder<T> rpcTimeout(long rpcTimeout, TimeUnit unit) {
-100      this.rpcTimeoutNs = unit.toNanos(rpcTimeout);
-101      return this;
-102    }
-103
-104    public SingleRequestCallerBuilder<T> locateType(RegionLocateType locateType) {
-105      this.locateType = locateType;
-106      return this;
-107    }
-108
-109    public SingleRequestCallerBuilder<T> pause(long pause, TimeUnit unit) {
-110      this.pauseNs = unit.toNanos(pause);
-111      return this;
-112    }
-113
-114    public SingleRequestCallerBuilder<T> maxAttempts(int maxAttempts) {
-115      this.maxAttempts = maxAttempts;
-116      return this;
-117    }
-118
-119    public SingleRequestCallerBuilder<T> startLogErrorsCnt(int startLogErrorsCnt) {
-120      this.startLogErrorsCnt = startLogErrorsCnt;
-121      return this;
-122    }
-123
-124    public AsyncSingleRequestRpcRetryingCaller<T> build() {
-125      return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn,
-126        checkNotNull(tableName, "tableName is null"), checkNotNull(row, "row is null"),
-127        checkNotNull(locateType, "locateType is null"), checkNotNull(callable, "action is null"),
-128        pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
+078    private int replicaId = RegionReplicaUtil.DEFAULT_REPLICA_ID;
+079
+080    public SingleRequestCallerBuilder<T> table(TableName tableName) {
+081      this.tableName = tableName;
+082      return this;
+083    }
+084
+085    public SingleRequestCallerBuilder<T> row(byte[]
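A hypothetical use of the fluent builder shown in this diff. Every setter below appears in the hunk above; the single() entry point, the terminal call(), and the lambda body are assumptions layered on top of it, so verify them against the AsyncRpcRetryingCallerFactory of your release.

// Note: these builder types are package-private, so a real caller would live
// in org.apache.hadoop.hbase.client; this is illustration, not public API.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

final class SingleCallSketch {
  static CompletableFuture<Result> get(AsyncRpcRetryingCallerFactory factory) {
    return factory.<Result> single()
        .table(TableName.valueOf("t1"))
        .row(Bytes.toBytes("row-0"))
        .locateType(RegionLocateType.CURRENT)
        .operationTimeout(30, TimeUnit.SECONDS)
        .rpcTimeout(10, TimeUnit.SECONDS)
        .pause(100, TimeUnit.MILLISECONDS)
        .maxAttempts(16)
        .startLogErrorsCnt(5)
        .action((controller, loc, stub) -> { /* issue the RPC here */ return null; })
        .build()
        .call();
  }
}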

[16/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.HBaseHandler.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.HBaseHandler.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.HBaseHandler.html
deleted file mode 100644
index e692633..000
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.HBaseHandler.html
+++ /dev/null
@@ -1,2103 +0,0 @@
-Source code
-
-001/*
-002 * Licensed to the Apache Software Foundation (ASF) under one
-003 * or more contributor license agreements.  See the NOTICE file
-004 * distributed with this work for additional information
-005 * regarding copyright ownership.  The ASF licenses this file
-006 * to you under the Apache License, Version 2.0 (the
-007 * "License"); you may not use this file except in compliance
-008 * with the License.  You may obtain a copy of the License at
-009 *
-010 *     http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or agreed to in writing, software
-013 * distributed under the License is distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-015 * See the License for the specific language governing permissions and
-016 * limitations under the License.
-017 */
-018
-019package org.apache.hadoop.hbase.thrift;
-020
-021import static org.apache.hadoop.hbase.util.Bytes.getBytes;
-022
-023import java.io.IOException;
-024import java.net.InetAddress;
-025import java.net.InetSocketAddress;
-026import java.net.UnknownHostException;
-027import java.nio.ByteBuffer;
-028import java.security.PrivilegedAction;
-029import java.util.ArrayList;
-030import java.util.Arrays;
-031import java.util.Collections;
-032import java.util.HashMap;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.TreeMap;
-036import java.util.concurrent.BlockingQueue;
-037import java.util.concurrent.ExecutorService;
-038import java.util.concurrent.LinkedBlockingQueue;
-039import java.util.concurrent.ThreadPoolExecutor;
-040import java.util.concurrent.TimeUnit;
-041
-042import javax.security.auth.callback.Callback;
-043import javax.security.auth.callback.UnsupportedCallbackException;
-044import javax.security.sasl.AuthorizeCallback;
-045import javax.security.sasl.SaslServer;
-046
-047import org.apache.commons.lang3.ArrayUtils;
-048import org.apache.hadoop.conf.Configuration;
-049import org.apache.hadoop.hbase.Cell.Type;
-050import org.apache.hadoop.hbase.CellBuilder;
-051import org.apache.hadoop.hbase.CellBuilderFactory;
-052import org.apache.hadoop.hbase.CellBuilderType;
-053import org.apache.hadoop.hbase.CellUtil;
-054import org.apache.hadoop.hbase.HBaseConfiguration;
-055import org.apache.hadoop.hbase.HColumnDescriptor;
-056import org.apache.hadoop.hbase.HConstants;
-057import org.apache.hadoop.hbase.HRegionLocation;
-058import org.apache.hadoop.hbase.HTableDescriptor;
-059import org.apache.hadoop.hbase.KeyValue;
-060import org.apache.hadoop.hbase.MetaTableAccessor;
-061import org.apache.hadoop.hbase.ServerName;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.TableNotFoundException;
-064import org.apache.hadoop.hbase.client.Admin;
-065import org.apache.hadoop.hbase.client.Append;
-066import org.apache.hadoop.hbase.client.Delete;
-067import org.apache.hadoop.hbase.client.Durability;
-068import org.apache.hadoop.hbase.client.Get;
-069import org.apache.hadoop.hbase.client.Increment;
-070import org.apache.hadoop.hbase.client.OperationWithAttributes;
-071import org.apache.hadoop.hbase.client.Put;
-072import org.apache.hadoop.hbase.client.RegionInfo;
-073import org.apache.hadoop.hbase.client.RegionLocator;
-074import org.apache.hadoop.hbase.client.Result;
-075import org.apache.hadoop.hbase.client.ResultScanner;
-076import org.apache.hadoop.hbase.client.Scan;
-077import org.apache.hadoop.hbase.client.Table;
-078import org.apache.hadoop.hbase.filter.Filter;
-079import org.apache.hadoop.hbase.filter.ParseFilter;
-080import org.apache.hadoop.hbase.filter.PrefixFilter;
-081import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-082import org.apache.hadoop.hbase.http.HttpServerUtil;
-083import org.apache.hadoop.hbase.log.HBaseMarkers;
-084import org.apache.hadoop.hbase.security.SaslUtil;
-085import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
-086import org.apache.hadoop.hbase.security.SecurityUtil;
-087import org.apache.hadoop.hbase.security.UserProvider;
-088import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
-089import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
-090import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
-091import

[16/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/class-use/FSDataInputStreamWrapper.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/FSDataInputStreamWrapper.html b/devapidocs/org/apache/hadoop/hbase/io/class-use/FSDataInputStreamWrapper.html
index bf2b088..6d2124c 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/class-use/FSDataInputStreamWrapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/FSDataInputStreamWrapper.html
@@ -384,6 +384,6 @@
 
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/class-use/FileLink.FileLinkInputStream.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/FileLink.FileLinkInputStream.html b/devapidocs/org/apache/hadoop/hbase/io/class-use/FileLink.FileLinkInputStream.html
index 2900ae0..9c6438f 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/class-use/FileLink.FileLinkInputStream.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/FileLink.FileLinkInputStream.html
@@ -120,6 +120,6 @@
 
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/class-use/FileLink.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/FileLink.html b/devapidocs/org/apache/hadoop/hbase/io/class-use/FileLink.html
index 586d9eb..bdcba77 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/class-use/FileLink.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/FileLink.html
@@ -238,6 +238,6 @@
 
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/class-use/HFileLink.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/HFileLink.html b/devapidocs/org/apache/hadoop/hbase/io/class-use/HFileLink.html
index e492bb5..9c63727 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/class-use/HFileLink.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/HFileLink.html
@@ -246,6 +246,6 @@
 
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/class-use/HalfStoreFileReader.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/HalfStoreFileReader.html b/devapidocs/org/apache/hadoop/hbase/io/class-use/HalfStoreFileReader.html
index 3bced9f..43f6cea 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/class-use/HalfStoreFileReader.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/HalfStoreFileReader.html
@@ -120,6 +120,6 @@
 
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html b/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
index 2048738..93f5505 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/class-use/HeapSize.html
@@ -735,6 +735,6 @@
 
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.Comparator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.Comparator.html

[16/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CopyTable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CopyTable.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CopyTable.html
index e2ca07a..333ff45 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CopyTable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CopyTable.html
@@ -29,7 +29,7 @@
 021import java.io.IOException;
 022import java.util.HashMap;
 023import java.util.Map;
-024import java.util.Random;
+024import java.util.UUID;
 025
 026import org.apache.hadoop.conf.Configured;
 027import org.apache.hadoop.fs.FileSystem;
@@ -37,363 +37,410 @@
 029import org.apache.hadoop.hbase.HBaseConfiguration;
 030import org.apache.hadoop.hbase.HConstants;
 031import org.apache.hadoop.hbase.TableName;
-032import org.apache.hadoop.hbase.util.FSUtils;
-033import org.apache.yetus.audience.InterfaceAudience;
-034import org.slf4j.Logger;
-035import org.slf4j.LoggerFactory;
-036import org.apache.hadoop.hbase.client.Admin;
-037import org.apache.hadoop.hbase.client.Connection;
-038import org.apache.hadoop.hbase.client.ConnectionFactory;
-039import org.apache.hadoop.hbase.client.Scan;
-040import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
-041import org.apache.hadoop.hbase.util.Bytes;
-042import org.apache.hadoop.mapreduce.Job;
-043import org.apache.hadoop.util.Tool;
-044import org.apache.hadoop.util.ToolRunner;
-045
-046/**
-047 * Tool used to copy a table to another one which can be on a different setup.
-048 * It is also configurable with a start and time as well as a specification
-049 * of the region server implementation if different from the local cluster.
-050 */
-051@InterfaceAudience.Public
-052public class CopyTable extends Configured implements Tool {
-053  private static final Logger LOG = LoggerFactory.getLogger(CopyTable.class);
-054
-055  final static String NAME = "copytable";
-056  long startTime = 0;
-057  long endTime = HConstants.LATEST_TIMESTAMP;
-058  int batch = Integer.MAX_VALUE;
-059  int cacheRow = -1;
-060  int versions = -1;
-061  String tableName = null;
-062  String startRow = null;
-063  String stopRow = null;
-064  String dstTableName = null;
-065  String peerAddress = null;
-066  String families = null;
-067  boolean allCells = false;
-068  static boolean shuffle = false;
-069
-070  boolean bulkload = false;
-071  Path bulkloadDir = null;
-072
-073  private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
+032import org.apache.hadoop.hbase.mapreduce.Import.CellImporter;
+033import org.apache.hadoop.hbase.mapreduce.Import.Importer;
+034import org.apache.hadoop.hbase.util.FSUtils;
+035import org.apache.yetus.audience.InterfaceAudience;
+036import org.slf4j.Logger;
+037import org.slf4j.LoggerFactory;
+038import org.apache.hadoop.hbase.client.Admin;
+039import org.apache.hadoop.hbase.client.Connection;
+040import org.apache.hadoop.hbase.client.ConnectionFactory;
+041import org.apache.hadoop.hbase.client.Scan;
+042import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
+043import org.apache.hadoop.hbase.util.Bytes;
+044import org.apache.hadoop.mapreduce.Job;
+045import org.apache.hadoop.util.Tool;
+046import org.apache.hadoop.util.ToolRunner;
+047
+048/**
+049 * Tool used to copy a table to another one which can be on a different setup.
+050 * It is also configurable with a start and time as well as a specification
+051 * of the region server implementation if different from the local cluster.
+052 */
+053@InterfaceAudience.Public
+054public class CopyTable extends Configured implements Tool {
+055  private static final Logger LOG = LoggerFactory.getLogger(CopyTable.class);
+056
+057  final static String NAME = "copytable";
+058  long startTime = 0;
+059  long endTime = HConstants.LATEST_TIMESTAMP;
+060  int batch = Integer.MAX_VALUE;
+061  int cacheRow = -1;
+062  int versions = -1;
+063  String tableName = null;
+064  String startRow = null;
+065  String stopRow = null;
+066  String dstTableName = null;
+067  String peerAddress = null;
+068  String families = null;
+069  boolean allCells = false;
+070  static boolean shuffle = false;
+071
+072  boolean bulkload = false;
+073  Path bulkloadDir = null;
 074
-075  /**
-076   * Sets up the actual job.
-077   *
-078   * @param args  The command line parameters.
-079   * @return The newly created job.
-080   * @throws IOException When setting up the job fails.
-081   */
-082  public Job createSubmittableJob(String[] args)
-083      throws IOException {
-084    if (!doCommandLine(args)) {
-085      return null;
-086    }
-087
-088    Job job = Job.getInstance(getConf(), getConf().get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
-089    job.setJarByClass(CopyTable.class);
-090    Scan scan = new Scan();
-091
-092
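Since CopyTable extends Configured and implements Tool (see the class declaration above), it can be driven programmatically the same way the copytable command drives it. A hedged sketch, with the table names and the --new.name flag as illustrative values to check against your version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.CopyTable;
import org.apache.hadoop.util.ToolRunner;

// Sketch: run CopyTable through ToolRunner. "--new.name" and the table names
// are placeholders, not taken from this page.
public class CopyTableDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int exit = ToolRunner.run(conf, new CopyTable(),
        new String[] { "--new.name=backupTable", "sourceTable" });
    System.exit(exit);
  }
}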

[16/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
index f32b223..6ac9299 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/CacheConfig.html
@@ -25,671 +25,358 @@
 017 */
 018package org.apache.hadoop.hbase.io.hfile;
 019
-020import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
-021import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
-022
-023import java.io.IOException;
-024
-025import org.apache.hadoop.conf.Configuration;
-026import org.apache.hadoop.hbase.HConstants;
-027import org.apache.yetus.audience.InterfaceAudience;
-028import org.slf4j.Logger;
-029import org.slf4j.LoggerFactory;
-030import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-031import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
-032import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
-033import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
-034import org.apache.hadoop.hbase.util.ReflectionUtils;
-035import org.apache.hadoop.util.StringUtils;
-036
-037import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-038
-039
-040/**
-041 * Stores all of the cache objects and configuration for a single HFile.
-042 */
-043@InterfaceAudience.Private
-044public class CacheConfig {
-045  private static final Logger LOG = LoggerFactory.getLogger(CacheConfig.class.getName());
-046
-047
-048  /**
-049   * Disabled cache configuration
-050   */
-051  public static final CacheConfig DISABLED = new CacheConfig();
-052
-053  /**
-054   * Configuration key to cache data blocks on read. Bloom blocks and index blocks are always be
-055   * cached if the block cache is enabled.
-056   */
-057  public static final String CACHE_DATA_ON_READ_KEY = "hbase.block.data.cacheonread";
-058
-059  /**
-060   * Configuration key to cache data blocks on write. There are separate
-061   * switches for bloom blocks and non-root index blocks.
-062   */
-063  public static final String CACHE_BLOCKS_ON_WRITE_KEY =
-064      "hbase.rs.cacheblocksonwrite";
+020import java.util.Optional;
+021
+022import org.apache.hadoop.conf.Configuration;
+023import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+024import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
+025import org.apache.yetus.audience.InterfaceAudience;
+026import org.slf4j.Logger;
+027import org.slf4j.LoggerFactory;
+028
+029import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+030
+031/**
+032 * Stores all of the cache objects and configuration for a single HFile.
+033 */
+034@InterfaceAudience.Private
+035public class CacheConfig {
+036  private static final Logger LOG = LoggerFactory.getLogger(CacheConfig.class.getName());
+037
+038  /**
+039   * Disabled cache configuration
+040   */
+041  public static final CacheConfig DISABLED = new CacheConfig();
+042
+043  /**
+044   * Configuration key to cache data blocks on read. Bloom blocks and index blocks are always be
+045   * cached if the block cache is enabled.
+046   */
+047  public static final String CACHE_DATA_ON_READ_KEY = "hbase.block.data.cacheonread";
+048
+049  /**
+050   * Configuration key to cache data blocks on write. There are separate
+051   * switches for bloom blocks and non-root index blocks.
+052   */
+053  public static final String CACHE_BLOCKS_ON_WRITE_KEY = "hbase.rs.cacheblocksonwrite";
+054
+055  /**
+056   * Configuration key to cache leaf and intermediate-level index blocks on
+057   * write.
+058   */
+059  public static final String CACHE_INDEX_BLOCKS_ON_WRITE_KEY = "hfile.block.index.cacheonwrite";
+060
+061  /**
+062   * Configuration key to cache compound bloom filter blocks on write.
+063   */
+064  public static final String CACHE_BLOOM_BLOCKS_ON_WRITE_KEY = "hfile.block.bloom.cacheonwrite";
 065
 066  /**
-067   * Configuration key to cache leaf and intermediate-level index blocks on
-068   * write.
-069   */
-070  public static final String CACHE_INDEX_BLOCKS_ON_WRITE_KEY =
-071      "hfile.block.index.cacheonwrite";
-072
-073  /**
-074   * Configuration key to cache compound bloom filter blocks on write.
-075   */
-076  public static final String CACHE_BLOOM_BLOCKS_ON_WRITE_KEY =
-077      "hfile.block.bloom.cacheonwrite";
-078
-079  /**
-080   * Configuration key to cache data blocks in compressed and/or encrypted format.
-081   */
-082  public static final String CACHE_DATA_BLOCKS_COMPRESSED_KEY =
-083      "hbase.block.data.cachecompressed";
-084
-085  /**
-086   * Configuration key to evict all blocks of a given file from the block cache
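To make the constants above concrete, a hedged sketch that flips the three cache-on-write switches. The key strings are verbatim from the diff; enabling all three at once is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: turn on cache-on-write for data, index, and bloom blocks using the
// keys defined in CacheConfig above. Defaults should be checked per release.
public class CacheOnWriteConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.rs.cacheblocksonwrite", true);    // CACHE_BLOCKS_ON_WRITE_KEY
    conf.setBoolean("hfile.block.index.cacheonwrite", true); // CACHE_INDEX_BLOCKS_ON_WRITE_KEY
    conf.setBoolean("hfile.block.bloom.cacheonwrite", true); // CACHE_BLOOM_BLOCKS_ON_WRITE_KEY
    return conf;
  }
}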

[16/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import org.apache.hadoop.hbase.wal.WALFactory;
 136import org.apache.hadoop.hbase.wal.WALSplitter;
-137import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import org.apache.hadoop.ipc.RemoteException;
-143import org.apache.hadoop.security.UserGroupInformation;
-144import org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import org.apache.hadoop.util.ToolRunner;
-147import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import org.apache.yetus.audience.InterfaceAudience;
-150import org.apache.yetus.audience.InterfaceStability;
-151import org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
-168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
-171 *
-172 * <p>
-173 * Region consistency checks verify that hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS (.regioninfo files) all are in
-175 * accordance.
-176 * <p>
-177 * Table integrity checks verify that all possible row keys resolve to exactly
-178 * one region of a table.  This means there are no individual degenerate
-179 * or backwards regions; no holes between regions; and that there are no
-180 * overlapping regions.
-181 * <p>
-182 * The general repair strategy works in two phases:
-183 * <ol>
-184 * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
-185 * <li> Repair Region Consistency with hbase:meta and assignments
-186 * </ol>
-187 * <p>
-188 * For table integrity repairs, the tables' region directories are scanned
-189 * for .regioninfo files.  Each table's integrity is then verified.  If there
-190 * are any orphan regions (regions with no .regioninfo files) or holes, new
-191 * regions are fabricated.  Backwards regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
-193 * a new region is created and all data is merged into the new region.
-194 * <p>
-195 * Table integrity repairs deal solely with HDFS and could potentially be done
-196 * offline -- the hbase region servers or master do not need to be running.
-197 * This phase can eventually be used to completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * <p>
-200 * Region consistency requires three conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir,  2) valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at the regionserver that was assigned to
-203 * with proper state in the master.
-204 * <p>
-205 * Region consistency repairs require hbase to be online so that hbck can
-206 * contact the HBase master and region servers.  The hbck#connect() method must
-207 * first be called successfully.  Much of the region consistency information
-208 * is transient and less risky to repair.
-209 * <p>
-210 * If hbck is run from the command line, there are a handful of arguments that
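Because the javadoc above doubles as a how-to for the tool, here is a hedged sketch of kicking off the checks from Java via the class's command-line entry point (the same path behind the hbase hbck shell command). The entry point and the -details flag are assumptions to verify against your HBase version; against hbase-2.x the tool reads state but performs no repairs.

// Sketch: run the hbck checks described in the javadoc. "-details" is an
// illustrative flag, not taken from this page; run with no args or "-h"
// to list the options your version actually supports.
public class HbckCheck {
  public static void main(String[] args) throws Exception {
    org.apache.hadoop.hbase.util.HBaseFsck.main(new String[] { "-details" });
  }
}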

[16/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 4075394..b390abe 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
-static class HRegion.WriteState
+static class HRegion.WriteState
 extends java.lang.Object
 
@@ -239,7 +239,7 @@ extends java.lang.Object
 
 flushing
-volatile boolean flushing
+volatile boolean flushing
 
@@ -248,7 +248,7 @@ extends java.lang.Object
 
 flushRequested
-volatile boolean flushRequested
+volatile boolean flushRequested
 
@@ -257,7 +257,7 @@ extends java.lang.Object
 
 compacting
-java.util.concurrent.atomic.AtomicInteger compacting
+java.util.concurrent.atomic.AtomicInteger compacting
 
@@ -266,7 +266,7 @@ extends java.lang.Object
 
 writesEnabled
-volatile boolean writesEnabled
+volatile boolean writesEnabled
 
@@ -275,7 +275,7 @@ extends java.lang.Object
 
 readOnly
-volatile boolean readOnly
+volatile boolean readOnly
 
@@ -284,7 +284,7 @@ extends java.lang.Object
 
 readsEnabled
-volatile boolean readsEnabled
+volatile boolean readsEnabled
 
@@ -293,7 +293,7 @@ extends java.lang.Object
 
 HEAP_SIZE
-static final long HEAP_SIZE
+static final long HEAP_SIZE
 
@@ -310,7 +310,7 @@ extends java.lang.Object
 
 WriteState
-WriteState()
+WriteState()
 
@@ -327,7 +327,7 @@ extends java.lang.Object
 
 setReadOnly
-void setReadOnly(boolean onOff)
+void setReadOnly(boolean onOff)
 Set flags that make this region read-only.
 
 Parameters:
@@ -341,7 +341,7 @@ extends java.lang.Object
 
 isReadOnly
-boolean isReadOnly()
+boolean isReadOnly()
 
@@ -350,7 +350,7 @@ extends java.lang.Object
 
 isFlushRequested
-boolean isFlushRequested()
+boolean isFlushRequested()
 
@@ -359,7 +359,7 @@ extends java.lang.Object
 
 setReadsEnabled
-void setReadsEnabled(boolean readsEnabled)
+void setReadsEnabled(boolean readsEnabled)
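The WriteState members listed above reduce to a small pattern: volatile flags plus an AtomicInteger for concurrent compactions. A standalone sketch of that shape, not the HRegion inner class itself:

import java.util.concurrent.atomic.AtomicInteger;

// Sketch mirroring the fields and two accessors shown in the page above.
// The setReadOnly body is an assumption about how the flags relate.
class WriteStateSketch {
  volatile boolean flushing;
  volatile boolean flushRequested;
  final AtomicInteger compacting = new AtomicInteger(0);
  volatile boolean writesEnabled = true;
  volatile boolean readOnly;
  volatile boolean readsEnabled = true;

  synchronized void setReadOnly(boolean onOff) {
    this.writesEnabled = !onOff; // read-only implies writes disabled
    this.readOnly = onOff;
  }

  boolean isReadOnly() {
    return this.readOnly;
  }

  boolean isFlushRequested() {
    return this.flushRequested;
  }
}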
 
 
 



[16/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TableAuthManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TableAuthManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TableAuthManager.html
deleted file mode 100644
index 2e68b22..000
--- a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/TableAuthManager.html
+++ /dev/null
@@ -1,859 +0,0 @@
-Source code
-
-001/**
-002 * Licensed to the Apache Software Foundation (ASF) under one
-003 * or more contributor license agreements.  See the NOTICE file
-004 * distributed with this work for additional information
-005 * regarding copyright ownership.  The ASF licenses this file
-006 * to you under the Apache License, Version 2.0 (the
-007 * "License"); you may not use this file except in compliance
-008 * with the License.  You may obtain a copy of the License at
-009 *
-010 *     http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or agreed to in writing, software
-013 * distributed under the License is distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-015 * See the License for the specific language governing permissions and
-016 * limitations under the License.
-017 */
-018
-019package org.apache.hadoop.hbase.security.access;
-020
-021import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-022
-023import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-024import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
-025import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
-026import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-027
-028import java.io.Closeable;
-029import java.io.IOException;
-030import java.util.HashMap;
-031import java.util.List;
-032import java.util.Map;
-033import java.util.concurrent.ConcurrentSkipListMap;
-034import java.util.concurrent.atomic.AtomicLong;
-035
-036import org.apache.hadoop.conf.Configuration;
-037import org.apache.hadoop.hbase.AuthUtil;
-038import org.apache.hadoop.hbase.Cell;
-039import org.apache.hadoop.hbase.TableName;
-040import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-041import org.apache.yetus.audience.InterfaceAudience;
-042import org.apache.hadoop.hbase.exceptions.DeserializationException;
-043import org.apache.hadoop.hbase.log.HBaseMarkers;
-044import org.apache.hadoop.hbase.security.Superusers;
-045import org.apache.hadoop.hbase.security.User;
-046import org.apache.hadoop.hbase.security.UserProvider;
-047import org.apache.hadoop.hbase.util.Bytes;
-048import org.apache.zookeeper.KeeperException;
-049import org.slf4j.Logger;
-050import org.slf4j.LoggerFactory;
-051
-052/**
-053 * Performs authorization checks for a given user's assigned permissions
-054 */
-055@InterfaceAudience.Private
-056public class TableAuthManager implements Closeable {
-057  private static class PermissionCache<T extends Permission> {
-058    /** Cache of user permissions */
-059    private ListMultimap<String,T> userCache = ArrayListMultimap.create();
-060    /** Cache of group permissions */
-061    private ListMultimap<String,T> groupCache = ArrayListMultimap.create();
-062
-063    public List<T> getUser(String user) {
-064      return userCache.get(user);
-065    }
-066
-067    public void putUser(String user, T perm) {
-068      userCache.put(user, perm);
-069    }
-070
-071    public List<T> replaceUser(String user, Iterable<? extends T> perms) {
-072      return userCache.replaceValues(user, perms);
-073    }
-074
-075    public List<T> getGroup(String group) {
-076      return groupCache.get(group);
-077    }
-078
-079    public void putGroup(String group, T perm) {
-080      groupCache.put(group, perm);
-081    }
-082
-083    public List<T> replaceGroup(String group, Iterable<? extends T> perms) {
-084      return groupCache.replaceValues(group, perms);
-085    }
-086
-087    /**
-088     * Returns a combined map of user and group permissions, with group names
-089     * distinguished according to {@link AuthUtil#isGroupPrincipal(String)}.
-090     */
-091    public ListMultimap<String,T> getAllPermissions() {
-092      ListMultimap<String,T> tmp = ArrayListMultimap.create();
-093      tmp.putAll(userCache);
-094      for (String group : groupCache.keySet()) {
-095        tmp.putAll(AuthUtil.toGroupEntry(group), groupCache.get(group));
-096      }
-097      return tmp;
-098    }
-099  }
-100
-101  private static final Logger LOG = LoggerFactory.getLogger(TableAuthManager.class);
-102
-103  /** Cache of global permissions */
-104  private volatile PermissionCache<Permission> globalCache;
-105
-106  private
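A hedged sketch of the PermissionCache idea from the deleted class, using plain Guava instead of HBase's shaded thirdparty copy and String in place of Permission. The "@" group-entry prefix is an assumption standing in for AuthUtil.toGroupEntry:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;

public class PermissionCacheSketch {
  private final ListMultimap<String, String> userCache = ArrayListMultimap.create();
  private final ListMultimap<String, String> groupCache = ArrayListMultimap.create();

  public void putUser(String user, String perm) { userCache.put(user, perm); }
  public void putGroup(String group, String perm) { groupCache.put(group, perm); }

  // Combined view: group keys get a marker prefix so they stay distinguishable
  // from user keys, mirroring what the deleted getAllPermissions() does.
  public ListMultimap<String, String> getAllPermissions() {
    ListMultimap<String, String> tmp = ArrayListMultimap.create();
    tmp.putAll(userCache);
    for (String group : groupCache.keySet()) {
      tmp.putAll("@" + group, groupCache.get(group)); // "@" prefix is assumed
    }
    return tmp;
  }

  public static void main(String[] args) {
    PermissionCacheSketch cache = new PermissionCacheSketch();
    cache.putUser("alice", "READ");
    cache.putGroup("admins", "ADMIN");
    System.out.println(cache.getAllPermissions()); // {alice=[READ], @admins=[ADMIN]}
  }
}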

[16/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 489343a..d22ba8c 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -268,6 +268,8 @@
 
 AbortServer() - Constructor for class org.apache.hadoop.hbase.ipc.TestRpcHandlerException.AbortServer
 
+abortTimeoutTaskScheduled - Static variable in class org.apache.hadoop.hbase.regionserver.TestRegionServerAbortTimeout
+
 AbstractHBaseToolTest - Class in org.apache.hadoop.hbase.util
 
 AbstractHBaseToolTest() - Constructor for class org.apache.hadoop.hbase.util.AbstractHBaseToolTest
@@ -446,6 +448,12 @@
 
 acquireLock(Void) - Method in class org.apache.hadoop.hbase.master.procedure.MasterProcedureSchedulerPerformanceEvaluation.TableProcedure
 
+acquireLock(TestSchedulerQueueDeadLock.TestEnv) - Method in class org.apache.hadoop.hbase.master.procedure.TestSchedulerQueueDeadLock.TableExclusiveProcedure
+
+acquireLock(TestSchedulerQueueDeadLock.TestEnv) - Method in class org.apache.hadoop.hbase.master.procedure.TestSchedulerQueueDeadLock.TableShardParentProcedure
+
+acquireLock(TestSchedulerQueueDeadLock.TestEnv) - Method in class org.apache.hadoop.hbase.master.procedure.TestSchedulerQueueDeadLock.TableSharedProcedure
+
 acquireLock(Void) - Method in class org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure
 
 acquireLock(TestProcedureSuspended.TestProcEnv) - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureSuspended.TestLockProcedure
@@ -3443,6 +3451,8 @@
 
 CF - Static variable in class org.apache.hadoop.hbase.regionserver.TestOpenSeqNumUnexpectedIncrease
 
+CF - Static variable in class org.apache.hadoop.hbase.regionserver.TestRegionServerAbortTimeout
+
 CF - Static variable in class org.apache.hadoop.hbase.regionserver.TestRegionServerCrashDisableWAL
 
 cf - Static variable in class org.apache.hadoop.hbase.regionserver.TestRegionServerMetrics
@@ -5303,6 +5313,8 @@
 
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.master.procedure.TestSafemodeBringsDownMaster
 
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.master.procedure.TestSchedulerQueueDeadLock
+
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.master.procedure.TestServerCrashProcedure
 
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.master.procedure.TestServerCrashProcedureWithReplicas
@@ -5515,6 +5527,8 @@
 
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.procedure2.TestChildProcedures
 
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.procedure2.TestLockAndQueue
+
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureBypass
 
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureCleanup
@@ -5865,6 +5879,8 @@
 
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.regionserver.TestRegionServerAbort
 
+CLASS_RULE - Static variable in class org.apache.hadoop.hbase.regionserver.TestRegionServerAbortTimeout
+
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.regionserver.TestRegionServerAccounting
 
 CLASS_RULE - Static variable in class org.apache.hadoop.hbase.regionserver.TestRegionServerCrashDisableWAL
@@ -9504,6 +9520,8 @@
 
 cq - Variable in class org.apache.hadoop.hbase.regionserver.TestHRegionReplayEvents
 
+CQ - Static variable in class org.apache.hadoop.hbase.regionserver.TestRegionServerAbortTimeout
+
 CQ - Static variable in class org.apache.hadoop.hbase.regionserver.TestRegionServerCrashDisableWAL
 
 CQ - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestRaceWhenCreatingReplicationSource
@@ -13624,6 +13642,12 @@
 
 execute(int) - Method in interface org.apache.hadoop.hbase.master.procedure.MasterProcedureTestingUtility.StepHook
 
+execute(TestSchedulerQueueDeadLock.TestEnv) - Method in class org.apache.hadoop.hbase.master.procedure.TestSchedulerQueueDeadLock.TableExclusiveProcedure
+
+execute(TestSchedulerQueueDeadLock.TestEnv) - Method in class org.apache.hadoop.hbase.master.procedure.TestSchedulerQueueDeadLock.TableShardParentProcedure
+
+execute(TestSchedulerQueueDeadLock.TestEnv) - Method in class org.apache.hadoop.hbase.master.procedure.TestSchedulerQueueDeadLock.TableSharedProcedure
+
 execute() - Method in class org.apache.hadoop.hbase.procedure.SimpleRSProcedureManager.SimpleSubprocedure
 
 execute(Object) - Method in class org.apache.hadoop.hbase.procedure.TestProcedureDescriber.TestProcedure
@@ -18480,6 +18504,8 @@
 
 getRegionObserver() - Method in class org.apache.hadoop.hbase.regionserver.TestRegionServerAbort.StopBlockingRegionObserver
 
+getRegionObserver() - Method in class org.apache.hadoop.hbase.regionserver.TestRegionServerAbortTimeout.SleepWhenCloseCoprocessor
+
 getRegionObserver()
 

[16/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
index c7d99b2..9d1542c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.StoreFileCostFunction.html
@@ -382,1357 +382,1365 @@
 374    for (int i = 0; i < this.curFunctionCosts.length; i++) {
 375      curFunctionCosts[i] = tempFunctionCosts[i];
 376    }
-377    LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-378        + functionCost());
+377    double initCost = currentCost;
+378    double newCost = currentCost;
 379
-380    double initCost = currentCost;
-381    double newCost = currentCost;
-382
-383    long computedMaxSteps;
-384    if (runMaxSteps) {
-385      computedMaxSteps = Math.max(this.maxSteps,
-386          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-387    } else {
-388      computedMaxSteps = Math.min(this.maxSteps,
-389          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-390    }
-391    // Perform a stochastic walk to see if we can get a good fit.
-392    long step;
-393
-394    for (step = 0; step < computedMaxSteps; step++) {
-395      Cluster.Action action = nextAction(cluster);
-396
-397      if (action.type == Type.NULL) {
-398        continue;
-399      }
-400
-401      cluster.doAction(action);
-402      updateCostsWithAction(cluster, action);
-403
-404      newCost = computeCost(cluster, currentCost);
-405
-406      // Should this be kept?
-407      if (newCost < currentCost) {
-408        currentCost = newCost;
-409
-410        // save for JMX
-411        curOverallCost = currentCost;
-412        for (int i = 0; i < this.curFunctionCosts.length; i++) {
-413          curFunctionCosts[i] = tempFunctionCosts[i];
-414        }
-415      } else {
-416        // Put things back the way they were before.
-417        // TODO: undo by remembering old values
-418        Action undoAction = action.undoAction();
-419        cluster.doAction(undoAction);
-420        updateCostsWithAction(cluster, undoAction);
-421      }
-422
-423      if (EnvironmentEdgeManager.currentTime() - startTime >
-424          maxRunningTime) {
-425        break;
-426      }
-427    }
-428    long endTime = EnvironmentEdgeManager.currentTime();
-429
-430    metricsBalancer.balanceCluster(endTime - startTime);
-431
-432    // update costs metrics
-433    updateStochasticCosts(tableName, curOverallCost, curFunctionCosts);
-434    if (initCost > currentCost) {
-435      plans = createRegionPlans(cluster);
-436      LOG.info("Finished computing new load balance plan. Computation took {}" +
-437        " to try {} different iterations.  Found a solution that moves " +
-438        "{} regions; Going from a computed cost of {}" +
-439        " to a new cost of {}", java.time.Duration.ofMillis(endTime - startTime),
-440        step, plans.size(), initCost, currentCost);
-441      return plans;
-442    }
-443    LOG.info("Could not find a better load balance plan.  Tried {} different configurations in " +
-444      "{}, and did not find anything with a computed cost less than {}", step,
-445      java.time.Duration.ofMillis(endTime - startTime), initCost);
-446    return null;
-447  }
-448
-449  /**
-450   * update costs to JMX
-451   */
-452  private void updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) {
-453    if (tableName == null) return;
-454
-455    // check if the metricsBalancer is MetricsStochasticBalancer before casting
-456    if (metricsBalancer instanceof MetricsStochasticBalancer) {
-457      MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer;
-458      // overall cost
-459      balancer.updateStochasticCost(tableName.getNameAsString(),
-460        "Overall", "Overall cost", overall);
-461
-462      // each cost function
-463      for (int i = 0; i < costFunctions.length; i++) {
-464        CostFunction costFunction = costFunctions[i];
-465        String costFunctionName = costFunction.getClass().getSimpleName();
-466        Double costPercent = (overall == 0) ? 0 : (subCosts[i] / overall);
-467        // TODO: cost function may need a specific description
-468        balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-469          "The percent of " + costFunctionName, costPercent);
-470      }
-471    }
-472  }
-473
-474
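The loop above is a plain stochastic descent: propose a random action, keep it if the computed cost drops, otherwise undo it. A compact, runnable sketch of that accept/undo skeleton, with a toy cost model standing in for the balancer's cost functions:

import java.util.Random;

public class StochasticWalkSketch {
  public static void main(String[] args) {
    Random rnd = new Random(42);
    double[] state = { 5.0, 3.0, 8.0 };      // toy "cluster" state
    double current = cost(state);
    for (int step = 0; step < 10_000; step++) {
      int i = rnd.nextInt(state.length);
      double delta = rnd.nextGaussian();     // the proposed "action"
      state[i] += delta;
      double next = cost(state);
      if (next < current) {
        current = next;                      // keep the improvement
      } else {
        state[i] -= delta;                   // undo, as the balancer does
      }
    }
    System.out.println("final cost: " + current);
  }

  // Toy cost: squared distance from a perfectly balanced state.
  static double cost(double[] s) {
    double mean = (s[0] + s[1] + s[2]) / 3, c = 0;
    for (double v : s) c += (v - mean) * (v - mean);
    return c;
  }
}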

[16/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
index 0c894de..8729895 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
@@ -179,4145 +179,4146 @@
 171 * avoiding port contention if another local HBase instance is already running).
 172 * <p>To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
 173 * setting it to true.
-174 */
-175@InterfaceAudience.Public
-176@SuppressWarnings("deprecation")
-177public class HBaseTestingUtility extends 
HBaseZKTestingUtility {
-178
-179  /**
-180   * System property key to get test 
directory value. Name is as it is because mini dfs has
-181   * hard-codings to put test data here. 
It should NOT be used directly in HBase, as it's a property
-182   * used in mini dfs.
-183   * @deprecated can be used only with 
mini dfs
-184   */
-185  @Deprecated
-186  private static final String 
TEST_DIRECTORY_KEY = "test.build.data";
-187
-188  public static final String 
REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
-189  /**
-190   * The default number of regions per 
regionserver when creating a pre-split
-191   * table.
-192   */
-193  public static final int 
DEFAULT_REGIONS_PER_SERVER = 3;
-194
+174 * Trigger pre commit.
+175 */
+176@InterfaceAudience.Public
+177@SuppressWarnings("deprecation")
+178public class HBaseTestingUtility extends 
HBaseZKTestingUtility {
+179
+180  /**
+181   * System property key to get test 
directory value. Name is as it is because mini dfs has
+182   * hard-codings to put test data here. 
It should NOT be used directly in HBase, as it's a property
+183   * used in mini dfs.
+184   * @deprecated can be used only with 
mini dfs
+185   */
+186  @Deprecated
+187  private static final String 
TEST_DIRECTORY_KEY = "test.build.data";
+188
+189  public static final String 
REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
+190  /**
+191   * The default number of regions per 
regionserver when creating a pre-split
+192   * table.
+193   */
+194  public static final int 
DEFAULT_REGIONS_PER_SERVER = 3;
 195
-196  public static final String 
PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
-197  public static final boolean 
PRESPLIT_TEST_TABLE = true;
-198
-199  private MiniDFSCluster dfsCluster = 
null;
-200
-201  private volatile HBaseCluster 
hbaseCluster = null;
-202  private MiniMRCluster mrCluster = 
null;
-203
-204  /** If there is a mini cluster running 
for this testing utility instance. */
-205  private volatile boolean 
miniClusterRunning;
-206
-207  private String hadoopLogDir;
-208
-209  /** Directory on test filesystem where 
we put the data for this instance of
-210* HBaseTestingUtility*/
-211  private Path dataTestDirOnTestFS = 
null;
-212
-213  /**
-214   * Shared cluster connection.
-215   */
-216  private volatile Connection 
connection;
-217
-218  /** Filesystem URI used for map-reduce 
mini-cluster setup */
-219  private static String FS_URI;
-220
-221  /** This is for unit tests 
parameterized with a single boolean. */
-222  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
-223
-224  /**
-225   * Checks to see if a specific port is 
available.
-226   *
-227   * @param port the port number to check for availability
-228   * @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
-229   */
-230  public static boolean available(int 
port) {
-231ServerSocket ss = null;
-232DatagramSocket ds = null;
-233try {
-234  ss = new ServerSocket(port);
-235  ss.setReuseAddress(true);
-236  ds = new DatagramSocket(port);
-237  ds.setReuseAddress(true);
-238  return true;
-239} catch (IOException e) {
-240  // Do nothing
-241} finally {
-242  if (ds != null) {
-243ds.close();
-244  }
-245
-246  if (ss != null) {
-247try {
-248  ss.close();
-249} catch (IOException e) {
-250  /* should not be thrown */
-251}
-252  }
-253}
-254
-255return false;
-256  }
-257
-258  /**
-259   * Create all combinations of Bloom 
filters and compression algorithms for
-260   * testing.
-261   */
-262  private static List<Object[]> bloomAndCompressionCombinations() {
-263    List<Object[]> configurations = new ArrayList<>();
-264    for (Compression.Algorithm comprAlgo :
-265         HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
-266      for (BloomType bloomType : BloomType.values()) {
-266  for (BloomType bloomType : 
BloomType.values()) {
-267configurations.add(new 
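
The available(int) check above is commonly paired with a random-port probe when bringing up mini clusters. A hedged sketch of that usage pattern (the retry loop and port range are illustrative assumptions, not the real PortAllocator):

import java.io.IOException;
import java.net.DatagramSocket;
import java.net.ServerSocket;
import java.util.Random;

final class FreePortSketch {
  // Same test as available(int) above: the port counts as free only if
  // both a TCP ServerSocket and a UDP DatagramSocket can bind to it.
  static boolean available(int port) {
    try (ServerSocket ss = new ServerSocket(port);
         DatagramSocket ds = new DatagramSocket(port)) {
      ss.setReuseAddress(true);
      ds.setReuseAddress(true);
      return true;
    } catch (IOException e) {
      return false; // something is already bound to this port
    }
  }

  static int randomFreePort() {
    Random rng = new Random();
    int candidate;
    do {
      candidate = 10000 + rng.nextInt(50000); // arbitrary high range (assumption)
    } while (!available(candidate));
    return candidate;
  }
}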

[16/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.html
index 29b9add..65a5532 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.html
@@ -180,7 +180,7 @@ extends Procedure
-abort, acquireLock, addStackIndex, afterReplay, beforeReplay, bypass, compareTo, completionCleanup, deserializeStateData, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, execute, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isBypass, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, isYieldAfterExecutionStep, lockedWhenLoading, needPersistence, releaseLock, removeStackIndex, resetPersistence, restoreLock, rollback, serializeStateData, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, skipPersistence, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, toStringState, tryRunnable, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, waitInitialized, wasExecuted
+abort, acquireLock, addStackIndex, afterReplay, beforeReplay, bypass, compareTo, completionCleanup, deserializeStateData, doAcquireLock, doExecute, doReleaseLock, doRollback, elapsedTime, execute, getChildrenLatch, getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, getRootProcedureId, getRootProcId, getStackIndexes, getState, getSubmittedTime, getTimeout, getTimeoutTimestamp, hasChildren, hasException, hasLock, hasOwner, hasParent, hasTimeout, haveSameParent, holdLock, incChildrenLatch, isBypass, isFailed, isFinished, isInitializing, isRunnable, isSuccess, isWaiting, isYieldAfterExecutionStep, lockedWhenLoading, needPersistence, releaseLock, removeStackIndex, resetPersistence, restoreLock, rollback, serializeStateData, setAbortFailure, setChildrenLatch, setFailure, setFailure, setLastUpdate, setNonceKey, setOwner, setOwner, setParentProcId, setProcId, setResult, setRootProcId, setStackIndexes, setState, setSubmittedTime, setTimeout, setTimeoutFailure, shouldWaitClientAck, skipPersistence, toString, toStringClass, toStringClassDetails, toStringDetails, toStringSimpleSB, toStringState, tryRunnable, updateMetricsOnFinish, updateMetricsOnSubmit, updateTimestamp, waitInitialized, wasExecuted
 
 
 



[16/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/package-tree.html
index 45a27ec..656c0e2 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -292,6 +292,47 @@
 org.apache.hadoop.hbase.client.TestGetProcedureResult.DummyProcedure 
(implements 
org.apache.hadoop.hbase.master.procedure.TableProcedureInterface)
 
 
+org.apache.hadoop.hbase.client.RestoreSnapshotFromClientTestBase
+
+org.apache.hadoop.hbase.client.RestoreSnapshotFromClientAfterSplittingRegionsTestBase
+
+org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientAfterSplittingRegions
+org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientAfterSplittingRegions
+
+
+org.apache.hadoop.hbase.client.RestoreSnapshotFromClientAfterTruncateTestBase
+
+org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientAfterTruncate
+org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientAfterTruncate
+
+
+org.apache.hadoop.hbase.client.RestoreSnapshotFromClientCloneTestBase
+
+org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientClone
+org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientClone
+
+
+org.apache.hadoop.hbase.client.RestoreSnapshotFromClientGetCompactionStateTestBase
+
+org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientGetCompactionState
+org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientGetCompactionState
+
+
+org.apache.hadoop.hbase.client.RestoreSnapshotFromClientSchemaChangeTestBase
+
+org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientSchemaChange
+org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientSchemaChange
+
+
+org.apache.hadoop.hbase.client.RestoreSnapshotFromClientSimpleTestBase
+
+org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClientSimple
+org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientSimple
+
+
+org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientWithRegionReplicas
+
+
 org.apache.hadoop.hbase.ipc.RpcControllerFactory
 
 org.apache.hadoop.hbase.client.TestRpcControllerFactory.StaticRpcControllerFactory
@@ -503,12 +544,6 @@
 org.apache.hadoop.hbase.client.TestReplicaWithCluster.RegionServerHostingPrimayMetaRegionSlowOrStopCopro
 (implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionObserver)
 org.apache.hadoop.hbase.client.TestReplicaWithCluster.RegionServerStoppedCopro 
(implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionObserver)
 org.apache.hadoop.hbase.client.TestReplicaWithCluster.SlowMeCopro (implements 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionObserver)
-org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClient
-
-org.apache.hadoop.hbase.client.TestMobRestoreSnapshotFromClient
-org.apache.hadoop.hbase.client.TestRestoreSnapshotFromClientWithRegionReplicas
-
-
 org.apache.hadoop.hbase.client.TestResultFromCoprocessor
 org.apache.hadoop.hbase.client.TestResultFromCoprocessor.MyObserver 
(implements org.apache.hadoop.hbase.coprocessor.RegionCoprocessor, 
org.apache.hadoop.hbase.coprocessor.RegionObserver)
 org.apache.hadoop.hbase.client.TestResultSizeEstimation

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/package-use.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-use.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/package-use.html
index 2b370f1..822ca81 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/package-use.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-use.html
@@ -159,75 +159,93 @@
 
 
 
+RestoreSnapshotFromClientAfterSplittingRegionsTestBase
+
+
+RestoreSnapshotFromClientAfterTruncateTestBase
+
+
+RestoreSnapshotFromClientCloneTestBase
+
+
+RestoreSnapshotFromClientGetCompactionStateTestBase
+
+
+RestoreSnapshotFromClientSchemaChangeTestBase
+
+
+RestoreSnapshotFromClientSimpleTestBase
+
+
+RestoreSnapshotFromClientTestBase
+Base class for testing restore snapshot
+
+
+
 TestAsyncAdminBase
 Class to test AsyncAdmin.
 
 
-
+
 TestAsyncProcess.MyAsyncProcess
 
-
+
 TestAsyncProcess.MyAsyncProcessWithReplicas
 
-
+
 TestAsyncProcess.MyConnectionImpl
 Returns our async process.
 
 
-
+
 TestAsyncProcess.ResponseGenerator
 
-
+
 TestAsyncProcess.RR
 After reading TheDailyWtf, I always wanted to create a 
MyBoolean enum like this!
 
 
-
+
 TestAsyncTableGetMultiThreaded
 Will split the table, and move region randomly when 
testing.
 
 
-
+
 TestAsyncTableScanMetrics.ScanWithMetrics
 
-

[16/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
index e1b183b..b456cd2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.PushType.html
@@ -53,1338 +53,1354 @@
 045import 
org.apache.hadoop.hbase.log.HBaseMarkers;
 046import 
org.apache.hadoop.hbase.procedure2.Procedure;
 047import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-048import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStoreBase;
-049import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
-050import 
org.apache.hadoop.hbase.procedure2.util.ByteSlot;
-051import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-052import 
org.apache.hadoop.hbase.util.CommonFSUtils;
-053import 
org.apache.hadoop.hbase.util.Threads;
-054import 
org.apache.hadoop.ipc.RemoteException;
-055import 
org.apache.yetus.audience.InterfaceAudience;
-056import org.slf4j.Logger;
-057import org.slf4j.LoggerFactory;
-058
-059import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-060import 
org.apache.hbase.thirdparty.org.apache.commons.collections4.queue.CircularFifoQueue;
-061
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureWALHeader;
-063
-064/**
-065 * WAL implementation of the ProcedureStore.
-066 * <p/>
-067 * When starting, the upper layer will first call {@link #start(int)}, then {@link #recoverLease()},
-068 * then {@link #load(ProcedureLoader)}.
-069 * <p/>
-070 * In {@link #recoverLease()}, we will get the lease by closing all the existing wal files (by
-071 * calling recoverFileLease), and creating a new wal writer. And we will also get the list of all
-072 * the old wal files.
-073 * <p/>
-074 * FIXME: notice that the current recover lease implementation is problematic, it can not deal with
-075 * the races if there are two masters that both want to acquire the lease...
-076 * <p/>
-077 * In the {@link #load(ProcedureLoader)} method, we will load all the active procedures. See the
-078 * comments of this method for more details.
-079 * <p/>
-080 * The actual logging works a bit like our FileSystem based WAL implementation on the RS side. There
-081 * is a {@link #slots} field, which is more like a ring buffer, and in the insert, update and delete
-082 * methods we will put things into the {@link #slots} and wait. And there is a background sync
-083 * thread (see the {@link #syncLoop()} method) which gets data from the {@link #slots} and writes it
-084 * to the FileSystem, and notifies the caller that we have finished.
-085 * <p/>
-086 * TODO: try using disruptor to increase performance and simplify the logic?
-087 * <p/>
-088 * The {@link #storeTracker} keeps track of the modified procedures in the newest wal file, which is
-089 * also the one being written currently. And the deleted bits in it are for all the procedures, not
-090 * only the ones in the newest wal file. And when rolling a log, we will first store it in the
-091 * trailer of the current wal file, and then reset its modified bits, so that it can start to track
-092 * the modified procedures for the new wal file.
-093 * <p/>
-094 * The {@link #holdingCleanupTracker} is used to test whether we are safe to delete the oldest wal
-095 * file. When there is log rolling and there is more than 1 wal file, we will make use of it. It
-096 * will first be initialized to the oldest file's tracker (which is stored in the trailer), using the
-097 * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge it
-098 * with the tracker of every newer wal file, using
-099 * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we find out
-100 * that all the modified procedures for the oldest wal file are modified or deleted in newer wal
-101 * files, then we can delete it.
-102 * @see ProcedureWALPrettyPrinter for printing content of a single WAL.
-103 * @see #main(String[]) to parse a directory of MasterWALProcs.
-104 */
-105@InterfaceAudience.Private
-106public class WALProcedureStore extends ProcedureStoreBase {
-107  private static final Logger LOG = LoggerFactory.getLogger(WALProcedureStore.class);
-108  public static final String LOG_PREFIX = "pv2-";
-109  /** Used to construct the name of the log directory for master procedures */
-110  public static final String MASTER_PROCEDURE_LOGDIR = "MasterProcWALs";
-111
-112
-113  public interface LeaseRecovery {
-114void recoverFileLease(FileSystem fs, 
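
A rough sketch of the slots-plus-sync-thread pattern described in the javadoc above. Everything here is a simplified assumption for illustration: the real WALProcedureStore uses ByteSlot buffers, condition variables and log rolling that this sketch omits.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;

final class SlotSyncSketch {
  static final class Slot {
    final byte[] payload;
    final CompletableFuture<Void> done = new CompletableFuture<>();
    Slot(byte[] payload) { this.payload = payload; }
  }

  private final BlockingQueue<Slot> slots = new ArrayBlockingQueue<>(128);

  // Writers (insert/update/delete) enqueue a slot and wait on the future.
  CompletableFuture<Void> append(byte[] payload) throws InterruptedException {
    Slot slot = new Slot(payload);
    slots.put(slot);
    return slot.done;
  }

  // Background sync loop: drain whatever queued up, write the whole batch
  // with a single sync, then notify every waiting writer.
  void syncLoop() throws InterruptedException {
    while (!Thread.currentThread().isInterrupted()) {
      List<Slot> batch = new ArrayList<>();
      batch.add(slots.take());   // block until at least one entry arrives
      slots.drainTo(batch);      // grab anything else that piled up
      writeAndSync(batch);       // stand-in for one FileSystem write + hsync
      for (Slot s : batch) {
        s.done.complete(null);
      }
    }
  }

  private void writeAndSync(List<Slot> batch) {
    // no-op in this sketch; the real store writes to HDFS here
  }
}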

[16/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.StateSerializer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.StateSerializer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.StateSerializer.html
index 0d97a1c..297bc43 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.StateSerializer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureUtil.StateSerializer.html
@@ -29,331 +29,335 @@
 021import java.io.InputStream;
 022import java.lang.reflect.Constructor;
 023import java.lang.reflect.Modifier;
-024import 
org.apache.hadoop.hbase.HConstants;
-025import 
org.apache.yetus.audience.InterfaceAudience;
-026import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-027import 
org.apache.hbase.thirdparty.com.google.protobuf.Any;
-028import 
org.apache.hbase.thirdparty.com.google.protobuf.Internal;
-029import 
org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-030import 
org.apache.hbase.thirdparty.com.google.protobuf.Message;
-031import 
org.apache.hbase.thirdparty.com.google.protobuf.Parser;
-032import 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
-033import 
org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
-034import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-035import 
org.apache.hadoop.hbase.util.NonceKey;
-036
-037/**
-038 * Helper to convert to/from ProcedureProtos
-039 */
-040@InterfaceAudience.Private
-041public final class ProcedureUtil {
-042  private ProcedureUtil() { }
-043
-044  // ==========================================================================
-045  //  Reflection helpers to create/validate a Procedure object
-046  // ==========================================================================
-047  private static Procedure<?> newProcedure(String className) throws BadProcedureException {
-048    try {
-049      Class<?> clazz = Class.forName(className);
-050      if (!Modifier.isPublic(clazz.getModifiers())) {
-051        throw new Exception("the " + clazz + " class is not public");
-052      }
-053
-054      @SuppressWarnings("rawtypes")
-055      Constructor<? extends Procedure> ctor = clazz.asSubclass(Procedure.class).getConstructor();
-056      assert ctor != null : "no constructor found";
-057      if (!Modifier.isPublic(ctor.getModifiers())) {
-058        throw new Exception("the " + clazz + " constructor is not public");
-059      }
-060      return ctor.newInstance();
-061    } catch (Exception e) {
-062      throw new BadProcedureException(
-063        "The procedure class " + className + " must be accessible and have an empty constructor",
-064        e);
-065    }
-066  }
-067
-068  static void validateClass(Procedure<?> proc) throws BadProcedureException {
-069    try {
-070      Class<?> clazz = proc.getClass();
-071      if (!Modifier.isPublic(clazz.getModifiers())) {
-072        throw new Exception("the " + clazz + " class is not public");
-073      }
-074
-075      Constructor<?> ctor = clazz.getConstructor();
-076      assert ctor != null;
-077      if (!Modifier.isPublic(ctor.getModifiers())) {
-078        throw new Exception("the " + clazz + " constructor is not public");
-079      }
-080    } catch (Exception e) {
-081      throw new BadProcedureException("The procedure class " + proc.getClass().getName() +
-082        " must be accessible and have an empty constructor", e);
-083    }
-084  }
-085
-086  // ==========================================================================
-087  //  convert to and from Procedure object
-088  // ==========================================================================
-089
-090  /**
-091   * A serializer for our Procedures. Instead of the previous serializer, it
-092   * uses the stateMessage list to store the internal state of the Procedures.
-093   */
-094  private static class StateSerializer implements ProcedureStateSerializer {
-095    private final ProcedureProtos.Procedure.Builder builder;
-096    private int deserializeIndex;
-097
-098    public StateSerializer(ProcedureProtos.Procedure.Builder builder) {
-099      this.builder = builder;
-100    }
-101
-102    @Override
-103    public void serialize(Message message) throws IOException {
-104      Any packedMessage = Any.pack(message);
-105      builder.addStateMessage(packedMessage);
-106    }
-107
-108    @Override
-109    public <M extends Message> M deserialize(Class<M> clazz)
-110        throws IOException {
-111      if (deserializeIndex >= builder.getStateMessageCount()) {
-112        throw new IOException("Invalid state message index: " + deserializeIndex);
-113      }
-114
-115      try {
-116        Any packedMessage =
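
The StateSerializer above relies on protobuf's Any to pack arbitrary state messages into one repeated field and unpack them in order. A self-contained round-trip demonstration, using the stock com.google.protobuf runtime (an assumption for illustration; HBase itself uses shaded thirdparty classes):

import com.google.protobuf.Any;
import com.google.protobuf.Int64Value;
import com.google.protobuf.InvalidProtocolBufferException;
import java.util.ArrayList;
import java.util.List;

final class AnyRoundTripSketch {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    List<Any> stateMessages = new ArrayList<>();
    // serialize: pack each state message into an Any and append it
    stateMessages.add(Any.pack(Int64Value.of(42)));
    // deserialize: read back in insertion order, checking the index first
    int deserializeIndex = 0;
    if (deserializeIndex >= stateMessages.size()) {
      throw new InvalidProtocolBufferException("Invalid state message index");
    }
    Int64Value value = stateMessages.get(deserializeIndex).unpack(Int64Value.class);
    System.out.println(value.getValue()); // prints 42
  }
}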

[16/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
index 976894f..721035e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.html
@@ -3020,926 +3020,927 @@
 3012}
 3013  }
 3014
-3015  void checkServiceStarted() throws 
ServerNotRunningYetException {
-3016if (!serviceStarted) {
-3017  throw new 
ServerNotRunningYetException("Server is not running yet");
-3018}
-3019  }
-3020
-3021  public static class 
MasterStoppedException extends DoNotRetryIOException {
-3022MasterStoppedException() {
-3023  super();
-3024}
-3025  }
-3026
-3027  void checkInitialized() throws 
PleaseHoldException, ServerNotRunningYetException,
-3028  MasterNotRunningException, 
MasterStoppedException {
-3029checkServiceStarted();
-3030if (!isInitialized()) {
-3031  throw new 
PleaseHoldException("Master is initializing");
-3032}
-3033if (isStopped()) {
-3034  throw new 
MasterStoppedException();
-3035}
-3036  }
-3037
-3038  /**
-3039   * Report whether this master is 
currently the active master or not.
-3040   * If not active master, we are parked 
on ZK waiting to become active.
-3041   *
-3042   * This method is used for testing.
-3043   *
-3044   * @return true if active master, 
false if not.
-3045   */
-3046  @Override
-3047  public boolean isActiveMaster() {
-3048return activeMaster;
-3049  }
-3050
-3051  /**
-3052   * Report whether this master has 
completed with its initialization and is
-3053   * ready.  If ready, the master is 
also the active master.  A standby master
-3054   * is never ready.
-3055   *
-3056   * This method is used for testing.
-3057   *
-3058   * @return true if master is ready to 
go, false if not.
-3059   */
-3060  @Override
-3061  public boolean isInitialized() {
-3062return initialized.isReady();
-3063  }
-3064
-3065  /**
-3066   * Report whether this master is in 
maintenance mode.
-3067   *
-3068   * @return true if master is in 
maintenanceMode
-3069   */
-3070  @Override
-3071  public boolean isInMaintenanceMode() 
throws IOException {
-3072if (!isInitialized()) {
-3073  throw new 
PleaseHoldException("Master is initializing");
-3074}
-3075return 
maintenanceModeTracker.isInMaintenanceMode();
-3076  }
-3077
-3078  @VisibleForTesting
-3079  public void setInitialized(boolean 
isInitialized) {
-3080
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-3081  }
-3082
-3083  @Override
-3084  public ProcedureEvent<?> getInitializedEvent() {
-3085return initialized;
-3086  }
-3087
-3088  /**
-3089   * Compute the average load across all 
region servers.
-3090   * Currently, this uses a very naive 
computation - just uses the number of
-3091   * regions being served, ignoring 
stats about number of requests.
-3092   * @return the average load
-3093   */
-3094  public double getAverageLoad() {
-3095if (this.assignmentManager == null) 
{
-3096  return 0;
-3097}
-3098
-3099RegionStates regionStates = 
this.assignmentManager.getRegionStates();
-3100if (regionStates == null) {
-3101  return 0;
-3102}
-3103return 
regionStates.getAverageLoad();
-3104  }
-3105
-3106  /*
-3107   * @return the count of region split 
plans executed
-3108   */
-3109  public long getSplitPlanCount() {
-3110return splitPlanCount;
-3111  }
-3112
-3113  /*
-3114   * @return the count of region merge 
plans executed
-3115   */
-3116  public long getMergePlanCount() {
-3117return mergePlanCount;
-3118  }
-3119
-3120  @Override
-3121  public boolean registerService(Service 
instance) {
-3122/*
-3123 * No stacking of instances is 
allowed for a single service name
-3124 */
-3125Descriptors.ServiceDescriptor 
serviceDesc = instance.getDescriptorForType();
-3126String serviceName = 
CoprocessorRpcUtils.getServiceName(serviceDesc);
-3127if 
(coprocessorServiceHandlers.containsKey(serviceName)) {
-3128  LOG.error("Coprocessor service 
"+serviceName+
-3129  " already registered, 
rejecting request from "+instance
-3130  );
-3131  return false;
-3132}
-3133
-3134
coprocessorServiceHandlers.put(serviceName, instance);
-3135if (LOG.isDebugEnabled()) {
-3136  LOG.debug("Registered master 
coprocessor service: service="+serviceName);
-3137}
-3138return true;
-3139  }
-3140
-3141  /**
-3142   * Utility for constructing an 
instance of the passed HMaster class.
-3143   * @param masterClass
-3144   * @return HMaster instance.
-3145   */
-3146  public static HMaster constructMaster(Class<? extends HMaster> masterClass,
-3147  final Configuration conf)  {
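
To make getAverageLoad()'s "very naive computation" above concrete: it is simply regions divided by servers, with no weighting by request volume. A tiny illustrative sketch (names are stand-ins, not the real RegionStates API):

final class AverageLoadSketch {
  // e.g. 300 regions across 10 region servers -> 30.0, however hot the regions are
  static double averageLoad(int totalRegions, int liveRegionServers) {
    return liveRegionServers == 0 ? 0 : (double) totalRegions / liveRegionServers;
  }
}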

[16/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
index 8cc5add..34858d6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemStoreFlusher.html
@@ -2188,1428 +2188,1428 @@
 2180  }
 2181
 2182  @Override
-2183  public void postOpenDeployTasks(final 
PostOpenDeployContext context)
-2184  throws KeeperException, 
IOException {
-2185HRegion r = context.getRegion();
-2186long masterSystemTime = 
context.getMasterSystemTime();
-2187rpcServices.checkOpen();
-2188LOG.info("Post open deploy tasks for 
" + r.getRegionInfo().getRegionNameAsString());
-2189// Do checks to see if we need to 
compact (references or too many files)
-2190for (HStore s : r.stores.values()) 
{
-2191  if (s.hasReferences() || 
s.needsCompaction()) {
-2192
this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
-2193  }
-2194}
-2195long openSeqNum = 
r.getOpenSeqNum();
-2196if (openSeqNum == 
HConstants.NO_SEQNUM) {
-2197  // If we opened a region, we 
should have read some sequence number from it.
-2198  LOG.error("No sequence number 
found when opening " +
-2199
r.getRegionInfo().getRegionNameAsString());
-2200  openSeqNum = 0;
-2201}
-2202
-2203// Notify master
-2204    if (!reportRegionStateTransition(new RegionStateTransitionContext(
-2205        TransitionCode.OPENED, openSeqNum, masterSystemTime, r.getRegionInfo()))) {
-2206  throw new IOException("Failed to 
report opened region to master: "
-2207+ 
r.getRegionInfo().getRegionNameAsString());
-2208}
-2209
-2210triggerFlushInPrimaryRegion(r);
-2211
-2212LOG.debug("Finished post open deploy 
task for " + r.getRegionInfo().getRegionNameAsString());
-2213  }
-2214
-2215  @Override
-2216  public boolean 
reportRegionStateTransition(final RegionStateTransitionContext context) {
-2217TransitionCode code = 
context.getCode();
-2218long openSeqNum = 
context.getOpenSeqNum();
-2219long masterSystemTime = 
context.getMasterSystemTime();
-2220RegionInfo[] hris = 
context.getHris();
-2221
-2222    if (TEST_SKIP_REPORTING_TRANSITION) {
-2223      // This is for testing only in case there is no master
-2224      // to handle the region transition report at all.
-2225      if (code == TransitionCode.OPENED) {
-2226        Preconditions.checkArgument(hris != null && hris.length == 1);
-2227        if (hris[0].isMetaRegion()) {
-2228  try {
-2229
MetaTableLocator.setMetaLocation(getZooKeeper(), serverName,
-2230
hris[0].getReplicaId(),State.OPEN);
-2231  } catch (KeeperException e) 
{
-2232LOG.info("Failed to update 
meta location", e);
-2233return false;
-2234  }
-2235} else {
-2236  try {
-2237
MetaTableAccessor.updateRegionLocation(clusterConnection,
-2238  hris[0], serverName, 
openSeqNum, masterSystemTime);
-2239  } catch (IOException e) {
-2240LOG.info("Failed to update 
meta", e);
-2241return false;
-2242  }
-2243}
-2244  }
-2245  return true;
-2246}
-2247
-2248
ReportRegionStateTransitionRequest.Builder builder =
-2249  
ReportRegionStateTransitionRequest.newBuilder();
-2250
builder.setServer(ProtobufUtil.toServerName(serverName));
-2251RegionStateTransition.Builder 
transition = builder.addTransitionBuilder();
-2252
transition.setTransitionCode(code);
-2253    if (code == TransitionCode.OPENED && openSeqNum >= 0) {
-2254  
transition.setOpenSeqNum(openSeqNum);
-2255}
-2256for (RegionInfo hri: hris) {
-2257  
transition.addRegionInfo(ProtobufUtil.toRegionInfo(hri));
-2258}
-2259ReportRegionStateTransitionRequest 
request = builder.build();
-2260int tries = 0;
-2261long pauseTime = 
INIT_PAUSE_TIME_MS;
-2262    // Keep looping till we get an error. We want to send reports even though server is going down.
-2263    // Only go down if clusterConnection is null. It is set to null almost as the last thing the
-2264    // HRegionServer does as it goes down.
-2265    while (this.clusterConnection != null && !this.clusterConnection.isClosed()) {
-2266  
RegionServerStatusService.BlockingInterface rss = rssStub;
-2267  try {
-2268if (rss == null) {
-2269  
createRegionServerStatusStub();
-2270  continue;
-2271}
-2272
ReportRegionStateTransitionResponse 

[16/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
index b56bd67..bba0c5e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/MasterRpcServices.html
@@ -37,2318 +37,2428 @@
 029import java.util.Map;
 030import java.util.Map.Entry;
 031import java.util.Set;
-032import java.util.stream.Collectors;
-033import 
org.apache.hadoop.conf.Configuration;
-034import 
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-035import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-036import 
org.apache.hadoop.hbase.HConstants;
-037import 
org.apache.hadoop.hbase.MetaTableAccessor;
-038import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-039import org.apache.hadoop.hbase.Server;
-040import 
org.apache.hadoop.hbase.ServerMetrics;
-041import 
org.apache.hadoop.hbase.ServerMetricsBuilder;
-042import 
org.apache.hadoop.hbase.ServerName;
-043import 
org.apache.hadoop.hbase.TableName;
-044import 
org.apache.hadoop.hbase.UnknownRegionException;
-045import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-046import 
org.apache.hadoop.hbase.client.Connection;
-047import 
org.apache.hadoop.hbase.client.MasterSwitchType;
-048import 
org.apache.hadoop.hbase.client.RegionInfo;
-049import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-050import 
org.apache.hadoop.hbase.client.TableDescriptor;
-051import 
org.apache.hadoop.hbase.client.TableState;
-052import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-053import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-054import 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
-055import 
org.apache.hadoop.hbase.errorhandling.ForeignException;
-056import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-057import 
org.apache.hadoop.hbase.io.hfile.HFile;
-058import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-059import 
org.apache.hadoop.hbase.ipc.PriorityFunction;
-060import 
org.apache.hadoop.hbase.ipc.QosPriority;
-061import 
org.apache.hadoop.hbase.ipc.RpcServer;
-062import 
org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
-063import 
org.apache.hadoop.hbase.ipc.RpcServerFactory;
-064import 
org.apache.hadoop.hbase.ipc.RpcServerInterface;
-065import 
org.apache.hadoop.hbase.ipc.ServerRpcController;
-066import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-067import 
org.apache.hadoop.hbase.master.locking.LockProcedure;
-068import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-069import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
-070import 
org.apache.hadoop.hbase.mob.MobUtils;
-071import 
org.apache.hadoop.hbase.procedure.MasterProcedureManager;
-072import 
org.apache.hadoop.hbase.procedure2.LockType;
-073import 
org.apache.hadoop.hbase.procedure2.LockedResource;
-074import 
org.apache.hadoop.hbase.procedure2.Procedure;
-075import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-076import 
org.apache.hadoop.hbase.procedure2.ProcedureUtil;
-077import 
org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
-078import 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
-079import 
org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;
-080import 
org.apache.hadoop.hbase.quotas.MasterQuotaManager;
-081import 
org.apache.hadoop.hbase.quotas.QuotaObserverChore;
-082import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-083import 
org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;
-084import 
org.apache.hadoop.hbase.regionserver.RSRpcServices;
-085import 
org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;
-086import 
org.apache.hadoop.hbase.replication.ReplicationException;
-087import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-088import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-089import 
org.apache.hadoop.hbase.security.User;
-090import 
org.apache.hadoop.hbase.security.access.AccessChecker;
-091import 
org.apache.hadoop.hbase.security.access.AccessController;
-092import 
org.apache.hadoop.hbase.security.access.Permission;
-093import 
org.apache.hadoop.hbase.security.visibility.VisibilityController;
-094import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-095import 
org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-096import 
org.apache.hadoop.hbase.util.Bytes;
-097import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-098import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-099import 
org.apache.hadoop.hbase.util.Pair;
-100import 

[16/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
index 9a4c209..9925f48 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
@@ -29,780 +29,781 @@
 021import java.util.ArrayList;
 022import java.util.Arrays;
 023import java.util.Collection;
-024import java.util.List;
-025import java.util.stream.Stream;
-026import 
org.apache.hadoop.conf.Configuration;
-027import org.apache.hadoop.fs.FileSystem;
-028import org.apache.hadoop.fs.Path;
-029import 
org.apache.hadoop.hbase.HConstants;
-030import 
org.apache.hadoop.hbase.MetaMutationAnnotation;
-031import 
org.apache.hadoop.hbase.ServerName;
-032import 
org.apache.hadoop.hbase.TableName;
-033import 
org.apache.hadoop.hbase.UnknownRegionException;
-034import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-035import 
org.apache.hadoop.hbase.client.DoNotRetryRegionException;
-036import 
org.apache.hadoop.hbase.client.MasterSwitchType;
-037import 
org.apache.hadoop.hbase.client.Mutation;
-038import 
org.apache.hadoop.hbase.client.RegionInfo;
-039import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-040import 
org.apache.hadoop.hbase.client.TableDescriptor;
-041import 
org.apache.hadoop.hbase.exceptions.MergeRegionException;
-042import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-043import 
org.apache.hadoop.hbase.master.CatalogJanitor;
-044import 
org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-045import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-046import 
org.apache.hadoop.hbase.master.RegionState;
-047import 
org.apache.hadoop.hbase.master.RegionState.State;
-048import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-049import 
org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure;
-050import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-051import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-052import 
org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-053import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-054import 
org.apache.hadoop.hbase.quotas.QuotaExceededException;
-055import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-056import 
org.apache.hadoop.hbase.regionserver.HStoreFile;
-057import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-058import 
org.apache.hadoop.hbase.util.Bytes;
-059import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-060import 
org.apache.hadoop.hbase.util.FSUtils;
-061import 
org.apache.hadoop.hbase.wal.WALSplitter;
-062import 
org.apache.yetus.audience.InterfaceAudience;
-063import org.slf4j.Logger;
-064import org.slf4j.LoggerFactory;
-065
-066import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-067
-068import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-069import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-070import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-071import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
-072
-073/**
-074 * The procedure to merge regions in a table.
-075 * <p/>
-076 * This procedure takes an exclusive table lock since it is working over multiple regions.
-077 * <p/>
-078 * It holds the lock for the life of the procedure.
-079 * <p/>
-080 * Throws an exception on construction if it determines the context is hostile to the merge
-081 * (cluster going down, master shutting down, or table disabled).
-082 */
-083@InterfaceAudience.Private
-084public class MergeTableRegionsProcedure
-085    extends AbstractStateMachineTableProcedure<MergeTableRegionsState> {
-086  private static final Logger LOG = 
LoggerFactory.getLogger(MergeTableRegionsProcedure.class);
-087  private Boolean traceEnabled;
-088  private ServerName regionLocation;
-089  private RegionInfo[] regionsToMerge;
-090  private RegionInfo mergedRegion;
-091  private boolean forcible;
-092
-093  public MergeTableRegionsProcedure() {
-094// Required by the Procedure 
framework to create the procedure on replay
-095  }
-096
-097  public MergeTableRegionsProcedure(final 
MasterProcedureEnv env,
-098  final RegionInfo regionToMergeA, 
final RegionInfo regionToMergeB) throws IOException {
-099this(env, regionToMergeA, 
regionToMergeB, false);
-100  }
-101
-102  public MergeTableRegionsProcedure(final 
MasterProcedureEnv env,
-103  final RegionInfo regionToMergeA, 
final RegionInfo regionToMergeB,
-104  

[16/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
index 51bf304..db6ac15 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/RpcServer.html
@@ -199,614 +199,635 @@
 191  protected static final int 
DEFAULT_WARN_RESPONSE_TIME = 1; // milliseconds
 192  protected static final int 
DEFAULT_WARN_RESPONSE_SIZE = 100 * 1024 * 1024;
 193
-194  protected static final ObjectMapper 
MAPPER = new ObjectMapper();
-195
-196  protected final int maxRequestSize;
-197  protected final int warnResponseTime;
-198  protected final int warnResponseSize;
+194  protected static final int 
DEFAULT_TRACE_LOG_MAX_LENGTH = 1000;
+195  protected static final String 
TRACE_LOG_MAX_LENGTH = "hbase.ipc.trace.log.max.length";
+196  protected static final String 
KEY_WORD_TRUNCATED = " TRUNCATED";
+197
+198  protected static final ObjectMapper 
MAPPER = new ObjectMapper();
 199
-200  protected final int 
minClientRequestTimeout;
-201
-202  protected final Server server;
-203  protected final List<BlockingServiceAndInterface> services;
-204
-205  protected final RpcScheduler 
scheduler;
-206
-207  protected UserProvider userProvider;
+200  protected final int maxRequestSize;
+201  protected final int warnResponseTime;
+202  protected final int warnResponseSize;
+203
+204  protected final int 
minClientRequestTimeout;
+205
+206  protected final Server server;
+207  protected final 
ListBlockingServiceAndInterface services;
 208
-209  protected final ByteBufferPool reservoir;
-210  // The requests and response will use buffers from ByteBufferPool, when the size of the
-211  // request/response is at least this size.
-212  // We make this to be 1/6th of the pool buffer size.
-213  protected final int minSizeForReservoirUse;
-214
-215  protected volatile boolean 
allowFallbackToSimpleAuth;
-216
-217  /**
-218   * Used to get details for scan with a scanner_id<br/>
-219   * TODO try to figure out a better way and remove reference from regionserver package later.
-220   */
-221  private RSRpcServices rsRpcServices;
-222
-223  @FunctionalInterface
-224  protected static interface CallCleanup 
{
-225void run();
-226  }
-227
-228  /**
-229   * Datastructure for passing a {@link 
BlockingService} and its associated class of
-230   * protobuf service interface.  For 
example, a server that fielded what is defined
-231   * in the client protobuf service would 
pass in an implementation of the client blocking service
-232   * and then its 
ClientService.BlockingInterface.class.  Used checking connection setup.
-233   */
-234  public static class BlockingServiceAndInterface {
-235    private final BlockingService service;
-236    private final Class<?> serviceInterface;
-237    public BlockingServiceAndInterface(final BlockingService service,
-238        final Class<?> serviceInterface) {
-239      this.service = service;
-240      this.serviceInterface = serviceInterface;
-241    }
-242    public Class<?> getServiceInterface() {
-243      return this.serviceInterface;
-244    }
-245    public BlockingService getBlockingService() {
-246      return this.service;
-247    }
-248  }
-249
-250  /**
-251   * Constructs a server listening on the named port and address.
-252   * @param server hosting instance of {@link Server}. We will do authentications if an
-253   * instance else pass null for no authentication check.
-254   * @param name Used keying this rpc servers' metrics and for naming the Listener thread.
-255   * @param services A list of services.
-256   * @param bindAddress Where to listen
-257   * @param conf
-258   * @param scheduler
-259   * @param reservoirEnabled Enable ByteBufferPool or not.
-260   */
-261  public RpcServer(final Server server, final String name,
-262      final List<BlockingServiceAndInterface> services,
-263      final InetSocketAddress bindAddress, Configuration conf,
-264      RpcScheduler scheduler, boolean reservoirEnabled) throws IOException {
-265    if (reservoirEnabled) {
-266      int poolBufSize = conf.getInt(ByteBufferPool.BUFFER_SIZE_KEY,
-267          ByteBufferPool.DEFAULT_BUFFER_SIZE);
-268      // The max number of buffers to be pooled in the ByteBufferPool. The default value has been
-269      // selected based on the #handlers configured. When it is a read request, 2 MB is the max size
-270      // at which we will send back one RPC response. Means max we need 2 MB for creating the
-271      // response cell block. (Well, it might be much less than this because in the 2 MB size calc we
-272      // include the heap size overhead of each cell also.) Considering 2 MB, we will need
-273      // (2 * 1024 * 1024) / poolBufSize
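
Worked numbers for the sizing comments above, assuming ByteBufferPool's default buffer size is 64 KB (an assumption; check DEFAULT_BUFFER_SIZE in your version): payloads smaller than one sixth of a buffer bypass the reservoir, and a maximal 2 MB response consumes 2 MB / 64 KB = 32 pooled buffers.

final class ReservoirMathSketch {
  public static void main(String[] args) {
    int poolBufSize = 64 * 1024;                   // assumed default buffer size
    int minSizeForReservoirUse = poolBufSize / 6;  // ~10.9 KB; smaller payloads skip the pool
    int buffersPerMaxResponse = (2 * 1024 * 1024) / poolBufSize; // 32 buffers per 2 MB response
    System.out.println(minSizeForReservoirUse + " bytes min, "
        + buffersPerMaxResponse + " buffers max per response");
  }
}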

[16/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testapidocs/src-html/org/apache/hadoop/hbase/StartMiniClusterOption.html
--
diff --git 
a/testapidocs/src-html/org/apache/hadoop/hbase/StartMiniClusterOption.html 
b/testapidocs/src-html/org/apache/hadoop/hbase/StartMiniClusterOption.html
new file mode 100644
index 000..46c1c1d
--- /dev/null
+++ b/testapidocs/src-html/org/apache/hadoop/hbase/StartMiniClusterOption.html
@@ -0,0 +1,326 @@
+001/**
+002 *
+003 * Licensed to the Apache Software 
Foundation (ASF) under one
+004 * or more contributor license 
agreements.  See the NOTICE file
+005 * distributed with this work for 
additional information
+006 * regarding copyright ownership.  The 
ASF licenses this file
+007 * to you under the Apache License, 
Version 2.0 (the
+008 * "License"); you may not use this file 
except in compliance
+009 * with the License.  You may obtain a 
copy of the License at
+010 *
+011 * 
http://www.apache.org/licenses/LICENSE-2.0
+012 *
+013 * Unless required by applicable law or 
agreed to in writing, software
+014 * distributed under the License is 
distributed on an "AS IS" BASIS,
+015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+016 * See the License for the specific 
language governing permissions and
+017 * limitations under the License.
+018 */
+019package org.apache.hadoop.hbase;
+020
+021import java.util.Arrays;
+022import java.util.List;
+023
+024import 
org.apache.commons.lang3.StringUtils;
+025import 
org.apache.hadoop.hbase.master.HMaster;
+026import 
org.apache.yetus.audience.InterfaceAudience;
+027
+028/**
+029 * Options for starting up a mini cluster 
(including an hbase, dfs and zookeeper clusters) in test.
+030 * The options include HDFS options to 
build mini dfs cluster, Zookeeper options to build mini zk
+031 * cluster, and mostly HBase options to 
build mini hbase cluster.
+032 *
+033 * To create an object, use a {@link 
Builder}.
+034 * Example usage:
+035 * <pre>
+036 *    StartMiniClusterOption option = StartMiniClusterOption.builder()
+037 *        .numMasters(3).rsClass(MyRegionServer.class).createWALDir(true).build();
+038 * </pre>
+039 *
+040 * Default values can be found in {@link Builder}.
+041 */
+042@InterfaceAudience.Public
+043public final class StartMiniClusterOption 
{
+044  /**
+045   * Number of masters to start up.  We'll start this many hbase masters.  If numMasters > 1, you
+046   * can find the active/primary master with {@link MiniHBaseCluster#getMaster()}.
+047   */
+048  private final int numMasters;
+049  /**
+050   * The class to use as HMaster, or null 
for default.
+051   */
+052  private final Class<? extends HMaster> masterClass;
+053
+054  /**
+055   * Number of region servers to start 
up.
+056   * If this value is > 1, then make sure config "hbase.regionserver.info.port" is -1
+057   * (i.e. no ui per regionserver) otherwise you get bind errors.
+058   */
+059  private final int numRegionServers;
+060  /**
+061   * Ports that RegionServer should use. 
Pass ports if you want to test cluster restart where for
+062   * sure the regionservers come up on 
same address+port (but just with different startcode); by
+063   * default mini hbase clusters choose 
new arbitrary ports on each cluster start.
+064   */
+065  private final List<Integer> rsPorts;
+066  /**
+067   * The class to use as HRegionServer, 
or null for default.
+068   */
+069  private Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass;
+070
+071  /**
+072   * Number of datanodes. Used to create mini DFS cluster. Surpassed by {@link #dataNodeHosts} size.
+073   */
+074  private final int numDataNodes;
+075  /**
+076   * The hostnames of DataNodes to run 
on. This is useful if you want to run datanode on distinct
+077   * hosts for things like HDFS block 
location verification. If you start MiniDFSCluster without
+078   * host names, all instances of the 
datanodes will have the same host name.
+079   */
+080  private final String[] dataNodeHosts;
+081
+082  /**
+083   * Number of Zookeeper servers.
+084   */
+085  private final int numZkServers;
+086
+087  /**
+088   * Whether to create a new root or data 
directory path.  If true, the newly created data directory
+089   * will be configured as HBase rootdir. 
 This will overwrite existing root directory config.
+090   */
+091  private final boolean createRootDir;
+092
+093  /**
+094   * Whether to create a new WAL 
directory.  If true, the newly created directory will be configured
+095   * as HBase wal.dir which is separate 
from HBase rootdir.
+096   */
+097  private final boolean createWALDir;
+098
+099  /**
+100   * Private constructor. Use {@link 
Builder#build()}.
+101   */
+102  private StartMiniClusterOption(int numMasters, Class<? extends HMaster> masterClass,
+103      int numRegionServers, List<Integer> rsPorts,

[16/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/filter/FuzzyRowFilter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/FuzzyRowFilter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/FuzzyRowFilter.html
index fa319cb..3ca642c 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/FuzzyRowFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/FuzzyRowFilter.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":42,"i4":10,"i5":9,"i6":9,"i7":9,"i8":10,"i9":9,"i10":10,"i11":10,"i12":9,"i13":9,"i14":9,"i15":9,"i16":10,"i17":10,"i18":9};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":42,"i5":10,"i6":9,"i7":9,"i8":9,"i9":10,"i10":10,"i11":9,"i12":10,"i13":10,"i14":9,"i15":9,"i16":9,"i17":9,"i18":10,"i19":10,"i20":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class FuzzyRowFilter
+public class FuzzyRowFilter
 extends FilterBase
 This is an optimized version of a standard FuzzyRowFilter. Filters data based on fuzzy row key.
 Performs fast-forwards during scanning. It takes pairs (row key, fuzzy info) to match row keys.
@@ -265,38 +265,42 @@ extends 
 
 boolean
+equals(Object obj)
+
+
+boolean
 filterAllRemaining()
 Filters that never filter all remaining can inherit this 
implementation that
  never stops the filter early.
 
 
-
+
 Filter.ReturnCode
 filterCell(Cellc)
 A way to filter based on the column family, column 
qualifier and/or the column value.
 
 
-
+
 Filter.ReturnCode
 filterKeyValue(Cellc)
 Deprecated.
 
 
-
+
 Cell
 getNextCellHint(CellcurrentCell)
 Filters that are not sure which key must be next seeked to, 
can inherit
  this implementation that, by default, returns a null Cell.
 
 
-
+
 (package private) static byte[]
 getNextForFuzzyRule(booleanreverse,
byte[]row,
byte[]fuzzyKeyBytes,
byte[]fuzzyKeyMeta)
 
-
+
 (package private) static byte[]
 getNextForFuzzyRule(booleanreverse,
byte[]row,
@@ -305,39 +309,43 @@ extends 
 
-
+
 (package private) static byte[]
 getNextForFuzzyRule(byte[]row,
byte[]fuzzyKeyBytes,
byte[]fuzzyKeyMeta)
 
-
+
+int
+hashCode()
+
+
 private boolean
 isPreprocessedMask(byte[]mask)
 
-
+
 static FuzzyRowFilter
 parseFrom(byte[]pbBytes)
 
-
+
 private byte[]
 preprocessMask(byte[]mask)
 We need to preprocess mask array, as since we treat 2's as 
unfixed positions and -1 (0xff) as
  fixed positions
 
 
-
+
 private void
 preprocessSearchKey(Pairbyte[],byte[]p)
 
-
+
 (package private) static FuzzyRowFilter.SatisfiesCode
 satisfies(booleanreverse,
  byte[]row,
  byte[]fuzzyKeyBytes,
  byte[]fuzzyKeyMeta)
 
-
+
 (package private) static FuzzyRowFilter.SatisfiesCode
 satisfies(booleanreverse,
  byte[]row,
@@ -346,13 +354,13 @@ extends 
 
-
+
 (package private) static FuzzyRowFilter.SatisfiesCode
 satisfies(byte[]row,
  byte[]fuzzyKeyBytes,
  byte[]fuzzyKeyMeta)
 
-
+
 (package private) static FuzzyRowFilter.SatisfiesCode
 satisfiesNoUnsafe(booleanreverse,
  byte[]row,
@@ -361,19 +369,19 @@ extends 
 
-
+
 byte[]
 toByteArray()
 Return length 0 byte array for Filters that don't require 
special serialization
 
 
-
+
 String
 toString()
 Return filter's info for debugging and logging 
purpose.
 
 
-
+
 private static byte[]
 trimTrailingZeroes(byte[]result,
   byte[]fuzzyKeyMeta,
@@ -404,7 +412,7 @@ extends 
 
 Methods inherited from class java.lang.Object
-clone, equals, finalize, getClass, hashCode,

[16/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index db8431b..a8cb7c4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -885,7766 +885,7797 @@
 877   * @return What the next sequence 
(edit) id should be.
 878   * @throws IOException e
 879   */
-880  private long initialize(final 
CancelableProgressable reporter) throws IOException {
-881
-882//Refuse to open the region if there 
is no column family in the table
-883if 
(htableDescriptor.getColumnFamilyCount() == 0) {
-884  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
-885  " should have at least one 
column family.");
-886}
-887
-888MonitoredTask status = 
TaskMonitor.get().createStatus("Initializing region " + this);
-889long nextSeqId = -1;
-890try {
-891  nextSeqId = 
initializeRegionInternals(reporter, status);
-892  return nextSeqId;
-893} finally {
-894  // nextSeqid will be -1 if the 
initialization fails.
-895  // At least it will be 0 
otherwise.
-896  if (nextSeqId == -1) {
-897status.abort("Exception during 
region " + getRegionInfo().getRegionNameAsString() +
-898  " initialization.");
-899  }
-900}
-901  }
-902
-903  private long 
initializeRegionInternals(final CancelableProgressable reporter,
-904  final MonitoredTask status) throws 
IOException {
-905if (coprocessorHost != null) {
-906  status.setStatus("Running 
coprocessor pre-open hook");
-907  coprocessorHost.preOpen();
-908}
-909
-910// Write HRI to a file in case we 
need to recover hbase:meta
-911// Only the primary replica should 
write .regioninfo
-912if 
(this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-913  status.setStatus("Writing region 
info on filesystem");
-914  fs.checkRegionInfoOnFilesystem();
-915}
-916
-917// Initialize all the HStores
-918status.setStatus("Initializing all 
the Stores");
-919long maxSeqId = 
initializeStores(reporter, status);
-920this.mvcc.advanceTo(maxSeqId);
-921if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-922  CollectionHStore stores = 
this.stores.values();
-923  try {
-924// update the stores that we are 
replaying
-925LOG.debug("replaying wal for " + 
this.getRegionInfo().getEncodedName());
-926
stores.forEach(HStore::startReplayingFromWAL);
-927// Recover any edits if 
available.
-928maxSeqId = Math.max(maxSeqId,
-929  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-930// Make sure mvcc is up to max.
-931this.mvcc.advanceTo(maxSeqId);
-932  } finally {
-933LOG.debug("stopping wal replay 
for " + this.getRegionInfo().getEncodedName());
-934// update the stores that we are 
done replaying
-935
stores.forEach(HStore::stopReplayingFromWAL);
-936  }
-937}
-938this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-939
-940
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-941this.writestate.flushRequested = 
false;
-942this.writestate.compacting.set(0);
-943
-944if (this.writestate.writesEnabled) 
{
-945  LOG.debug("Cleaning up temporary 
data for " + this.getRegionInfo().getEncodedName());
-946  // Remove temporary data left over 
from old regions
-947  status.setStatus("Cleaning up 
temporary data from old regions");
-948  fs.cleanupTempDir();
-949}
-950
-951if (this.writestate.writesEnabled) 
{
-952  status.setStatus("Cleaning up 
detritus from prior splits");
-953  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-954  // these directories here on open.  
We may be opening a region that was
-955  // being split but we crashed in 
the middle of it all.
-956  LOG.debug("Cleaning up detritus for 
" + this.getRegionInfo().getEncodedName());
-957  fs.cleanupAnySplitDetritus();
-958  fs.cleanupMergesDir();
-959}
+880  @VisibleForTesting
+881  long initialize(final 
CancelableProgressable reporter) throws IOException {
+882
+883//Refuse to open the region if there 
is no column family in the table
+884if 
(htableDescriptor.getColumnFamilyCount() == 0) {
+885  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
+886  " should have at least one 
column family.");
+887}
+888
+889MonitoredTask status = 
TaskMonitor.get().createStatus("Initializing region " + 

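The visible change in this hunk widens initialize() from private to package-private with @VisibleForTesting so tests can drive region initialization directly. The body also shows a sentinel-in-finally error-reporting idiom; a minimal plain-Java sketch of that idiom (names illustrative, not the HBase types):

import java.io.IOException;

public class SentinelFinallyExample {
  // The sequence id stays -1 unless initialization completes; the finally
  // block uses that sentinel to notice failure without catching the exception.
  static long initialize() throws IOException {
    long nextSeqId = -1;
    try {
      nextSeqId = doInitialize();   // may throw; sentinel stays -1 on failure
      return nextSeqId;
    } finally {
      if (nextSeqId == -1) {
        System.err.println("initialization failed; aborting status task");
      }
    }
  }

  private static long doInitialize() throws IOException {
    return 0L; // stand-in for initializeRegionInternals(reporter, status)
  }
}
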
[16/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
index 1f94064..d6fed65 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
@@ -128,7 +128,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public abstract class AbstractStateMachineTableProcedureTState
+public abstract class AbstractStateMachineTableProcedureTState
 extends StateMachineProcedureMasterProcedureEnv,TState
 implements TableProcedureInterface
 Base class for all the Table procedures that want to use a 
StateMachineProcedure.
@@ -361,7 +361,7 @@ implements 
 
 syncLatch
-private finalProcedurePrepareLatch syncLatch
+private finalProcedurePrepareLatch syncLatch
 
 
 
@@ -370,7 +370,7 @@ implements 
 
 user
-privateUser user
+privateUser user
 
 
 
@@ -387,7 +387,7 @@ implements 
 
 AbstractStateMachineTableProcedure
-protectedAbstractStateMachineTableProcedure()
+protectedAbstractStateMachineTableProcedure()
 
 
 
@@ -396,7 +396,7 @@ implements 
 
 AbstractStateMachineTableProcedure
-protectedAbstractStateMachineTableProcedure(MasterProcedureEnvenv)
+protectedAbstractStateMachineTableProcedure(MasterProcedureEnvenv)
 
 
 
@@ -405,7 +405,7 @@ implements 
 
 AbstractStateMachineTableProcedure
-protectedAbstractStateMachineTableProcedure(MasterProcedureEnvenv,
+protectedAbstractStateMachineTableProcedure(MasterProcedureEnvenv,
  ProcedurePrepareLatchlatch)
 
 Parameters:
@@ -427,7 +427,7 @@ implements 
 
 getTableName
-public abstractTableNamegetTableName()
+public abstractTableNamegetTableName()
 
 Specified by:
 getTableNamein
 interfaceTableProcedureInterface
@@ -442,7 +442,7 @@ implements 
 
 getTableOperationType
-public abstractTableProcedureInterface.TableOperationTypegetTableOperationType()
+public abstractTableProcedureInterface.TableOperationTypegetTableOperationType()
 Description copied from 
interface:TableProcedureInterface
 Given an operation type we can take decisions about what to 
do with pending operations.
  e.g. if we get a delete and we have some table operation pending (e.g. add 
column)
@@ -461,7 +461,7 @@ implements 
 
 toStringClassDetails
-publicvoidtoStringClassDetails(https://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true;
 title="class or interface in java.lang">StringBuildersb)
+publicvoidtoStringClassDetails(https://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true;
 title="class or interface in java.lang">StringBuildersb)
 Description copied from 
class:Procedure
 Extend the toString() information with the procedure details
  e.g. className and parameters
@@ -479,7 +479,7 @@ implements 
 
 waitInitialized
-protectedbooleanwaitInitialized(MasterProcedureEnvenv)
+protectedbooleanwaitInitialized(MasterProcedureEnvenv)
 Description copied from 
class:Procedure
 The Procedure.doAcquireLock(Object,
 ProcedureStore) will be split into two steps, first, it will
  call us to determine whether we need to wait for initialization, second, it 
will call
@@ -504,7 +504,7 @@ implements 
 
 acquireLock
-protectedProcedure.LockStateacquireLock(MasterProcedureEnvenv)
+protectedProcedure.LockStateacquireLock(MasterProcedureEnvenv)
 Description copied from 
class:Procedure
 The user should override this method if they need a lock on 
an Entity. A lock can be anything,
  and it is up to the implementor. The Procedure Framework will call this 
method just before it
@@ -541,7 +541,7 @@ implements 
 
 releaseLock
-protectedvoidreleaseLock(MasterProcedureEnvenv)
+protectedvoidreleaseLock(MasterProcedureEnvenv)
 Description copied from 
class:Procedure
 The user should override this method, and release lock if 
necessary.
 
@@ -556,7 +556,7 @@ implements 
 
 getUser
-protectedUsergetUser()
+protectedUsergetUser()
 
 
 
@@ -565,7 +565,7 @@ implements 
 
 setUser
-protectedvoidsetUser(Useruser)
+protectedvoidsetUser(Useruser)
 
 
 
@@ -574,7 +574,7 @@ implements 
 
 releaseSyncLatch
-protectedvoidreleaseSyncLatch()
+protectedvoidreleaseSyncLatch()
 
 
 
@@ -583,7 +583,7 @@ implements 
 
 checkTableModifiable
-protectedvoidcheckTableModifiable(MasterProcedureEnvenv)
+protectedvoidcheckTableModifiable(MasterProcedureEnvenv)
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Check whether a table is modifiable - exists and either 
offline or online with config set
 
@@ 

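The class page above describes lock acquisition split into waitInitialized and acquireLock. As a rough stand-alone picture of that two-step contract (plain Java with a Semaphore as a stand-in; the LOCK_* names follow the Procedure.LockState values shown, everything else is illustrative):

import java.util.concurrent.Semaphore;

public class ProcedureLockExample {
  enum LockState { LOCK_ACQUIRED, LOCK_EVENT_WAIT }

  private volatile boolean masterInitialized = false;
  private final Semaphore tableLock = new Semaphore(1);

  // Step 1 of the split doAcquireLock(): returning true means "not ready,
  // keep waiting" until the master has finished initialization.
  boolean waitInitialized() {
    return !masterInitialized;
  }

  // Step 2: try the per-table lock; a procedure that cannot get it is
  // suspended until the current holder calls releaseLock().
  LockState acquireLock() {
    return tableLock.tryAcquire() ? LockState.LOCK_ACQUIRED : LockState.LOCK_EVENT_WAIT;
  }

  void releaseLock() {
    tableLock.release();
  }
}
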
[16/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.BoundedLogWriterCreationOutputSink.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.BoundedLogWriterCreationOutputSink.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.BoundedLogWriterCreationOutputSink.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.BoundedLogWriterCreationOutputSink.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.BoundedLogWriterCreationOutputSink.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName name of the 
directory used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir)  
fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for 
that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Mapbyte[], Long 
maxSeqIdInStores =
-1536  
regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayListCell keptCells = 
new ArrayList(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || 
maxSeqId.longValue()  logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  ListEntry entries = 
buffer.entryBuffer;
-1570  

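The filterCellByStore() comment above reasons that removing stale cells from the list in place would be an O(n^2) operation, so the code builds a list of kept cells and swaps it in. A minimal sketch of that keep-list idiom (illustrative types, not the WAL classes):

import java.util.ArrayList;
import java.util.List;

public class KeepListExample {
  // Each List.remove() on an ArrayList shifts elements (O(n)), so a removal
  // pass degrades to O(n^2). Building the survivor list is one O(n) pass.
  static List<Long> keepLive(List<Long> cellSeqIds, long maxFlushedSeqId) {
    List<Long> kept = new ArrayList<>(cellSeqIds.size()); // assume most are kept
    for (Long seqId : cellSeqIds) {
      if (seqId > maxFlushedSeqId) {
        kept.add(seqId);                                  // still live, keep it
      }
    }
    return kept; // caller replaces the old list, mirroring setCells(keptCells)
  }
}
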
[16/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
index bd3c59e..21e240a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
@@ -33,62 +33,62 @@
 025import java.io.FileNotFoundException;
 026import java.io.FileOutputStream;
 027import java.io.IOException;
-028import java.io.ObjectInputStream;
-029import java.io.ObjectOutputStream;
-030import java.io.Serializable;
-031import java.nio.ByteBuffer;
-032import java.util.ArrayList;
-033import java.util.Comparator;
-034import java.util.HashSet;
-035import java.util.Iterator;
-036import java.util.List;
-037import java.util.Map;
-038import java.util.NavigableSet;
-039import java.util.PriorityQueue;
-040import java.util.Set;
-041import 
java.util.concurrent.ArrayBlockingQueue;
-042import 
java.util.concurrent.BlockingQueue;
-043import 
java.util.concurrent.ConcurrentHashMap;
-044import 
java.util.concurrent.ConcurrentMap;
-045import 
java.util.concurrent.ConcurrentSkipListSet;
-046import java.util.concurrent.Executors;
-047import 
java.util.concurrent.ScheduledExecutorService;
-048import java.util.concurrent.TimeUnit;
-049import 
java.util.concurrent.atomic.AtomicInteger;
-050import 
java.util.concurrent.atomic.AtomicLong;
-051import 
java.util.concurrent.atomic.LongAdder;
-052import java.util.concurrent.locks.Lock;
-053import 
java.util.concurrent.locks.ReentrantLock;
-054import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-055import 
org.apache.hadoop.conf.Configuration;
-056import 
org.apache.hadoop.hbase.HBaseConfiguration;
-057import 
org.apache.hadoop.hbase.io.HeapSize;
-058import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-063import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-064import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-066import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-068import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-069import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-070import 
org.apache.hadoop.hbase.nio.ByteBuff;
-071import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-072import 
org.apache.hadoop.hbase.util.HasThread;
-073import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-075import 
org.apache.hadoop.hbase.util.UnsafeAvailChecker;
-076import 
org.apache.hadoop.util.StringUtils;
-077import 
org.apache.yetus.audience.InterfaceAudience;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080
-081import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-082import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-083import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+028import java.io.Serializable;
+029import java.nio.ByteBuffer;
+030import java.util.ArrayList;
+031import java.util.Comparator;
+032import java.util.HashSet;
+033import java.util.Iterator;
+034import java.util.List;
+035import java.util.Map;
+036import java.util.NavigableSet;
+037import java.util.PriorityQueue;
+038import java.util.Set;
+039import 
java.util.concurrent.ArrayBlockingQueue;
+040import 
java.util.concurrent.BlockingQueue;
+041import 
java.util.concurrent.ConcurrentHashMap;
+042import 
java.util.concurrent.ConcurrentMap;
+043import 
java.util.concurrent.ConcurrentSkipListSet;
+044import java.util.concurrent.Executors;
+045import 
java.util.concurrent.ScheduledExecutorService;
+046import java.util.concurrent.TimeUnit;
+047import 
java.util.concurrent.atomic.AtomicInteger;
+048import 
java.util.concurrent.atomic.AtomicLong;
+049import 
java.util.concurrent.atomic.LongAdder;
+050import java.util.concurrent.locks.Lock;
+051import 
java.util.concurrent.locks.ReentrantLock;
+052import 
java.util.concurrent.locks.ReentrantReadWriteLock;
+053import 
org.apache.hadoop.conf.Configuration;
+054import 
org.apache.hadoop.hbase.HBaseConfiguration;
+055import 
org.apache.hadoop.hbase.io.HeapSize;
+056import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
+057import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+058import 

[16/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.html
index a7a90ea..09169b7 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.html
@@ -403,7 +403,7 @@ extends Procedure
-acquireLock,
 addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doExecute,
 doReleaseLock,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState, getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout, haveSameParent,
 holdLock,
 incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 releaseLock,
 removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLas
 tUpdate, setNonceKey,
 setOwner,
 setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 toString,
 toStringClass,
 toStringC
 lassDetails, toStringDetails,
 toStringSimpleSB,
 tryRunnable,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
+acquireLock,
 addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doAcquireLock,
 doExecute, doReleaseLock,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes, getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent, hasTimeout,
 haveSameParent,
 holdLock,
 incChildrenLatch,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 lockedWhenLoading,
 releaseLock,
 removeStackIndex,
 restoreLock,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner,
 setParentProcId,
 setProcId,
 setResult, 
setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout,
 setTimeoutFailure,
 shouldWaitClientAck,
 toString,
 toStringClass,
 toStringClassDetails,
 toStringDetails,
 toStringSimpleSB,
 tryRunnable,
 updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 waitInitialized,
 wasExecuted
 
 
 
@@ -893,8 +893,8 @@ extends toStringState(https://docs.oracle.com/javase/8/docs/api/java/lang/StringBuilder.html?is-external=true;
 title="class or interface in java.lang">StringBuilderbuilder)
 Description copied from 
class:Procedure
-Called from Procedure.toString()
 when interpolating Procedure 
State.
- Allows decorating generic Procedure State with Procedure particulars.
+Called from Procedure.toString()
 when interpolating Procedure 
State. Allows decorating
+ generic Procedure State with Procedure particulars.
 
 Overrides:
 toStringStatein
 classProcedureTEnvironment

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.html
index 36f7951..b2c67ae 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.html
@@ -93,7 +93,7 @@ var activeTableTab = "activeTableTab";
 
 
 org.apache.hadoop.hbase.procedure2
-Class 
TimeoutExecutorThread
+Class 
TimeoutExecutorThreadTEnvironment
 
 
 
@@ -106,7 +106,7 @@ var activeTableTab = "activeTableTab";
 org.apache.hadoop.hbase.procedure2.StoppableThread
 
 
-org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread
+org.apache.hadoop.hbase.procedure2.TimeoutExecutorThreadTEnvironment
 
 
 
@@ -124,7 +124,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-class TimeoutExecutorThread
+class TimeoutExecutorThreadTEnvironment
 extends StoppableThread
 Runs tasks on a period, such as the check for stuck workers.
 
@@ -165,7 +165,7 @@ extends Field and Description
 
 
-private ProcedureExecutor?
+private ProcedureExecutorTEnvironment
 executor
 
 
@@ -198,7 +198,7 @@ extends Constructor and Description
 
 
-TimeoutExecutorThread(ProcedureExecutor?executor,

[16/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.ReadRequestCostFunction.html
@@ -540,1205 +540,1204 @@
 532  
sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) - {
 533DequeBalancerRegionLoad 
rLoads = oldLoads.get(Bytes.toString(regionName));
 534if (rLoads == null) {
-535  // There was nothing there
-536  rLoads = new 
ArrayDeque();
-537} else if (rLoads.size() = 
numRegionLoadsToRemember) {
-538  rLoads.remove();
-539}
-540rLoads.add(new 
BalancerRegionLoad(rm));
-541
loads.put(Bytes.toString(regionName), rLoads);
-542  });
-543});
-544
-545for(CostFromRegionLoadFunction cost : 
regionLoadFunctions) {
-546  cost.setLoads(loads);
-547}
-548  }
-549
-550  protected void initCosts(Cluster 
cluster) {
-551for (CostFunction c:costFunctions) 
{
-552  c.init(cluster);
-553}
-554  }
-555
-556  protected void 
updateCostsWithAction(Cluster cluster, Action action) {
-557for (CostFunction c : costFunctions) 
{
-558  c.postAction(action);
-559}
-560  }
-561
-562  /**
-563   * Get the names of the cost 
functions
-564   */
-565  public String[] getCostFunctionNames() 
{
-566if (costFunctions == null) return 
null;
-567String[] ret = new 
String[costFunctions.length];
-568for (int i = 0; i  
costFunctions.length; i++) {
-569  CostFunction c = 
costFunctions[i];
-570  ret[i] = 
c.getClass().getSimpleName();
-571}
-572
-573return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It 
will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be 
combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the 
cluster
-581   * @param previousCost the previous 
cost. This is used as an early out.
-582   * @return a double of a cost 
associated with the proposed cluster state.  This cost is an
-583   * aggregate of all individual 
cost functions.
-584   */
-585  protected double computeCost(Cluster 
cluster, double previousCost) {
-586double total = 0;
-587
-588for (int i = 0; i  
costFunctions.length; i++) {
-589  CostFunction c = 
costFunctions[i];
-590  this.tempFunctionCosts[i] = 0.0;
-591
-592  if (c.getMultiplier() = 0) {
-593continue;
-594  }
-595
-596  Float multiplier = 
c.getMultiplier();
-597  Double cost = c.cost();
-598
-599  this.tempFunctionCosts[i] = 
multiplier*cost;
-600  total += 
this.tempFunctionCosts[i];
-601
-602  if (total  previousCost) {
-603break;
-604  }
-605}
-606
-607return total;
-608  }
-609
-610  /** Generates a candidate action to be 
applied to the cluster for cost function search */
-611  abstract static class 
CandidateGenerator {
-612abstract Cluster.Action 
generate(Cluster cluster);
-613
-614/**
-615 * From a list of regions pick a 
random one. Null can be returned, which
-616 * {@link 
StochasticLoadBalancer#balanceCluster(Map)} recognizes as a signal to try a region
-617 * move
-618 * rather than a swap.
-618 *
-619 * @param clusterThe state of 
the cluster
-620 * @param server index of the 
server
-621 * @param chanceOfNoSwap Chance that 
this will decide to try a move rather
-622 *   than a 
swap.
-623 * @return a random {@link 
RegionInfo} or null if an asymmetrical move is
-624 * suggested.
-625 */
-626protected int 
pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627  // Check to see if this is just a 
move.
-628  if 
(cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat()  
chanceOfNoSwap) {
-629// signal a move only.
-630return -1;
-631  }
-632  int rand = 
RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633  return 
cluster.regionsPerServer[server][rand];
-634
-635}
-636protected int 
pickRandomServer(Cluster cluster) {
-637  if (cluster.numServers  1) {
-638return -1;
-639  }
-640
-641  return 
RANDOM.nextInt(cluster.numServers);
-642}
-643
-644protected int pickRandomRack(Cluster 
cluster) {
-645  if (cluster.numRacks  1) {
-646return -1;
-647  

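computeCost(), shown above, sums multiplier * cost across all cost functions, skips disabled ones, and uses previousCost as an early out. A compact stand-alone rendering of that loop (plain arrays as illustrative stand-ins for the CostFunction objects):

public class WeightedCostExample {
  static double computeCost(double[] multipliers, double[] costs, double previousCost) {
    double total = 0;
    for (int i = 0; i < multipliers.length; i++) {
      if (multipliers[i] <= 0) {
        continue;                  // disabled cost function, contributes nothing
      }
      total += multipliers[i] * costs[i];
      if (total > previousCost) {
        break;                     // early out: already worse than the current state
      }
    }
    return total;
  }
}
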
[16/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/Size.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/Size.html 
b/apidocs/org/apache/hadoop/hbase/Size.html
index 848a61c..cae789f 100644
--- a/apidocs/org/apache/hadoop/hbase/Size.html
+++ b/apidocs/org/apache/hadoop/hbase/Size.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10};
-var tabs = {65535:["t0","所有方法"],2:["t2","实例方法"],8:["t4","具体方法"]};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+PrevClass
+NextClass
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 
 
org.apache.hadoop.hbase
-

ç±» Size

+

Class Size


[16/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/ServerName.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/ServerName.html 
b/apidocs/org/apache/hadoop/hbase/ServerName.html
index 82a2c34..40b8d8e 100644
--- a/apidocs/org/apache/hadoop/hbase/ServerName.html
+++ b/apidocs/org/apache/hadoop/hbase/ServerName.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":10,"i1":10,"i2":10,"i3":42,"i4":10,"i5":10,"i6":10,"i7":10,"i8":41,"i9":41,"i10":41,"i11":10,"i12":10,"i13":10,"i14":9,"i15":9,"i16":41,"i17":41,"i18":9,"i19":41,"i20":9,"i21":10,"i22":10,"i23":9,"i24":9,"i25":9};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
+var tabs = 
{65535:["t0","所有方法"],1:["t1","静态方法"],2:["t2","实例方法"],8:["t4","具体方法"],32:["t6","已过时的方法"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-PrevClass
-NextClass
+上一个类
+下一个类
 
 
-Frames
-NoFrames
+框架
+无框架
 
 
-AllClasses
+所有类
 
 
 

[16/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

2018-07-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.EncryptHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.EncryptHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.EncryptHandler.html
index 05e032c..40ef9f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.EncryptHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.EncryptHandler.html
@@ -25,767 +25,805 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+020import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+021import static 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
 022
-023import 
org.apache.hbase.thirdparty.com.google.common.base.Charsets;
-024import 
org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-025import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
-026import 
org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-027import 
com.google.protobuf.CodedOutputStream;
-028
-029import 
org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
-030import 
org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream;
-031import 
org.apache.hbase.thirdparty.io.netty.buffer.CompositeByteBuf;
-032import 
org.apache.hbase.thirdparty.io.netty.buffer.Unpooled;
-033import 
org.apache.hbase.thirdparty.io.netty.channel.Channel;
-034import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler;
-035import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
-036import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandlerAdapter;
-037import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline;
-038import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise;
-039import 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler;
-040import 
org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder;
-041import 
org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToByteEncoder;
-042import 
org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufDecoder;
-043import 
org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-044import 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent;
-045import 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
-046import 
org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
-047
-048import java.io.IOException;
-049import java.lang.reflect.Field;
-050import 
java.lang.reflect.InvocationTargetException;
-051import java.lang.reflect.Method;
-052import java.net.InetAddress;
-053import java.net.InetSocketAddress;
-054import java.nio.ByteBuffer;
-055import 
java.security.GeneralSecurityException;
-056import java.util.Arrays;
-057import java.util.Collections;
-058import java.util.List;
-059import java.util.Map;
-060import java.util.Set;
-061import java.util.concurrent.TimeUnit;
-062import 
java.util.concurrent.atomic.AtomicBoolean;
-063
-064import 
javax.security.auth.callback.Callback;
-065import 
javax.security.auth.callback.CallbackHandler;
-066import 
javax.security.auth.callback.NameCallback;
-067import 
javax.security.auth.callback.PasswordCallback;
-068import 
javax.security.auth.callback.UnsupportedCallbackException;
-069import 
javax.security.sasl.RealmCallback;
-070import 
javax.security.sasl.RealmChoiceCallback;
-071import javax.security.sasl.Sasl;
-072import javax.security.sasl.SaslClient;
-073import 
javax.security.sasl.SaslException;
-074
-075import 
org.apache.commons.codec.binary.Base64;
-076import 
org.apache.commons.lang3.StringUtils;
-077import 
org.apache.hadoop.conf.Configuration;
-078import 
org.apache.hadoop.crypto.CipherOption;
-079import 
org.apache.hadoop.crypto.CipherSuite;
-080import 
org.apache.hadoop.crypto.CryptoCodec;
-081import 
org.apache.hadoop.crypto.Decryptor;
-082import 
org.apache.hadoop.crypto.Encryptor;
-083import 
org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
-084import 
org.apache.hadoop.fs.FileEncryptionInfo;
-085import 
org.apache.yetus.audience.InterfaceAudience;
-086import org.slf4j.Logger;
-087import org.slf4j.LoggerFactory;
-088
-089import com.google.protobuf.ByteString;
-090import 
org.apache.hadoop.hdfs.DFSClient;
-091import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-092import 
org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-093import 

[16/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

2018-06-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
index c10cfbf..a3e2f4a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html
@@ -3371,7 +3371,7 @@
 3363private V result = null;
 3364
 3365private final HBaseAdmin admin;
-3366private final Long procId;
+3366protected final Long procId;
 3367
 3368public ProcedureFuture(final 
HBaseAdmin admin, final Long procId) {
 3369  this.admin = admin;
@@ -3653,653 +3653,651 @@
 3645 * @return a description of the 
operation
 3646 */
 3647protected String getDescription() 
{
-3648  return "Operation: " + 
getOperationType() + ", "
-3649  + "Table Name: " + 
tableName.getNameWithNamespaceInclAsString();
-3650
-3651}
-3652
-3653protected abstract class 
TableWaitForStateCallable implements WaitForStateCallable {
-3654  @Override
-3655  public void 
throwInterruptedException() throws InterruptedIOException {
-3656throw new 
InterruptedIOException("Interrupted while waiting for operation: "
-3657+ getOperationType() + " on 
table: " + tableName.getNameWithNamespaceInclAsString());
-3658  }
-3659
-3660  @Override
-3661  public void 
throwTimeoutException(long elapsedTime) throws TimeoutException {
-3662throw new TimeoutException("The 
operation: " + getOperationType() + " on table: " +
-3663tableName.getNameAsString() 
+ " has not completed after " + elapsedTime + "ms");
-3664  }
-3665}
-3666
-3667@Override
-3668protected V 
postOperationResult(final V result, final long deadlineTs)
-3669throws IOException, 
TimeoutException {
-3670  LOG.info(getDescription() + " 
completed");
-3671  return 
super.postOperationResult(result, deadlineTs);
-3672}
-3673
-3674@Override
-3675protected V 
postOperationFailure(final IOException exception, final long deadlineTs)
-3676throws IOException, 
TimeoutException {
-3677  LOG.info(getDescription() + " 
failed with " + exception.getMessage());
-3678  return 
super.postOperationFailure(exception, deadlineTs);
-3679}
-3680
-3681protected void 
waitForTableEnabled(final long deadlineTs)
-3682throws IOException, 
TimeoutException {
-3683  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3684@Override
-3685public boolean checkState(int 
tries) throws IOException {
-3686  try {
-3687if 
(getAdmin().isTableAvailable(tableName)) {
-3688  return true;
-3689}
-3690  } catch 
(TableNotFoundException tnfe) {
-3691LOG.debug("Table " + 
tableName.getNameWithNamespaceInclAsString()
-3692+ " was not enabled, 
sleeping. tries=" + tries);
-3693  }
-3694  return false;
-3695}
-3696  });
-3697}
-3698
-3699protected void 
waitForTableDisabled(final long deadlineTs)
-3700throws IOException, 
TimeoutException {
-3701  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3702@Override
-3703public boolean checkState(int 
tries) throws IOException {
-3704  return 
getAdmin().isTableDisabled(tableName);
-3705}
-3706  });
-3707}
-3708
-3709protected void 
waitTableNotFound(final long deadlineTs)
-3710throws IOException, 
TimeoutException {
-3711  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3712@Override
-3713public boolean checkState(int 
tries) throws IOException {
-3714  return 
!getAdmin().tableExists(tableName);
-3715}
-3716  });
-3717}
-3718
-3719protected void 
waitForSchemaUpdate(final long deadlineTs)
-3720throws IOException, 
TimeoutException {
-3721  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3722@Override
-3723public boolean checkState(int 
tries) throws IOException {
-3724  return 
getAdmin().getAlterStatus(tableName).getFirst() == 0;
-3725}
-3726  });
-3727}
-3728
-3729protected void 
waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
-3730throws IOException, 
TimeoutException {
-3731  final TableDescriptor desc = 
getTableDescriptor();
-3732  final AtomicInteger actualRegCount 
= new AtomicInteger(0);
-3733  final MetaTableAccessor.Visitor 
visitor = new MetaTableAccessor.Visitor() {
-3734@Override
-3735public boolean visit(Result 
rowResult) throws 

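The TableWaitForStateCallable machinery above boils down to polling a state check until it succeeds or a deadline passes. A reduced sketch of that pattern (the sleep interval and names are illustrative assumptions):

import java.io.IOException;
import java.util.concurrent.TimeoutException;

public class WaitForStateExample {
  interface WaitForStateCallable {
    boolean checkState(int tries) throws IOException;
  }

  static void waitForState(long deadlineTs, WaitForStateCallable callable)
      throws IOException, TimeoutException, InterruptedException {
    int tries = 0;
    while (System.currentTimeMillis() < deadlineTs) {
      if (callable.checkState(tries++)) {
        return;                     // reached the desired table state
      }
      Thread.sleep(100);            // back off before the next probe
    }
    throw new TimeoutException("state not reached before deadline");
  }
}
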
[16/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

2018-06-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
index 796e6a6..39ffb45 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
@@ -183,14 +183,14 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
-org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
+org.apache.hadoop.hbase.filter.FilterList.Operator
 org.apache.hadoop.hbase.filter.Filter.ReturnCode
 org.apache.hadoop.hbase.filter.FuzzyRowFilter.SatisfiesCode
-org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
-org.apache.hadoop.hbase.filter.FilterList.Operator
 org.apache.hadoop.hbase.filter.FuzzyRowFilter.Order
+org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
 org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp
+org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
+org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
index a66e859..deec4b4 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html
@@ -133,7 +133,7 @@ var activeTableTab = "activeTableTab";
 
 
 @ChannelHandler.Sharable
-private final class FanOutOneBlockAsyncDFSOutput.AckHandler
+private final class FanOutOneBlockAsyncDFSOutput.AckHandler
 extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandlerorg.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto
 
 
@@ -279,7 +279,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 timeoutMs
-private finalint timeoutMs
+private finalint timeoutMs
 
 
 
@@ -296,7 +296,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 AckHandler
-publicAckHandler(inttimeoutMs)
+publicAckHandler(inttimeoutMs)
 
 
 
@@ -313,7 +313,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 channelRead0
-protectedvoidchannelRead0(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx,
+protectedvoidchannelRead0(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx,
 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProtoack)
  throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
@@ -330,7 +330,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 channelInactive
-publicvoidchannelInactive(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx)
+publicvoidchannelInactive(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx)
  throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
 Specified by:
@@ -348,7 +348,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 exceptionCaught
-publicvoidexceptionCaught(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx,
+publicvoidexceptionCaught(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContextctx,
 https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwablecause)
  throws https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">Exception
 
@@ -369,7 +369,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 userEventTriggered

[16/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

2018-06-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index 1546b5d..31c6fd0 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HRegionServer.CompactionChecker
+private static class HRegionServer.CompactionChecker
 extends ScheduledChore
 
 
@@ -233,7 +233,7 @@ extends 
 
 instance
-private finalHRegionServer instance
+private finalHRegionServer instance
 
 
 
@@ -242,7 +242,7 @@ extends 
 
 majorCompactPriority
-private finalint majorCompactPriority
+private finalint majorCompactPriority
 
 
 
@@ -251,7 +251,7 @@ extends 
 
 DEFAULT_PRIORITY
-private static finalint DEFAULT_PRIORITY
+private static finalint DEFAULT_PRIORITY
 
 See Also:
 Constant
 Field Values
@@ -264,7 +264,7 @@ extends 
 
 iteration
-privatelong iteration
+privatelong iteration
 
 
 
@@ -281,7 +281,7 @@ extends 
 
 CompactionChecker
-CompactionChecker(HRegionServerh,
+CompactionChecker(HRegionServerh,
   intsleepTime,
   Stoppablestopper)
 
@@ -300,7 +300,7 @@ extends 
 
 chore
-protectedvoidchore()
+protectedvoidchore()
 Description copied from 
class:ScheduledChore
 The task to execute on each scheduled execution of the 
Chore
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index 6eda1d5..e10e070 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class HRegionServer.MovedRegionInfo
+private static class HRegionServer.MovedRegionInfo
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -218,7 +218,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 serverName
-private finalServerName serverName
+private finalServerName serverName
 
 
 
@@ -227,7 +227,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 seqNum
-private finallong seqNum
+private finallong seqNum
 
 
 
@@ -236,7 +236,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 ts
-private finallong ts
+private finallong ts
 
 
 
@@ -253,7 +253,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 MovedRegionInfo
-publicMovedRegionInfo(ServerNameserverName,
+publicMovedRegionInfo(ServerNameserverName,
longcloseSeqNum)
 
 
@@ -271,7 +271,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getServerName
-publicServerNamegetServerName()
+publicServerNamegetServerName()
 
 
 
@@ -280,7 +280,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getSeqNum
-publiclonggetSeqNum()
+publiclonggetSeqNum()
 
 
 
@@ -289,7 +289,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 getMoveTime
-publiclonggetMoveTime()
+publiclonggetMoveTime()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index b2b3568..295b9b5 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected static final class HRegionServer.MovedRegionsCleaner
+protected static final class HRegionServer.MovedRegionsCleaner
 extends ScheduledChore
 implements Stoppable
 Creates a Chore thread to clean the moved region 
cache.
@@ -242,7 +242,7 @@ implements 
 
 regionServer
-privateHRegionServer regionServer
+privateHRegionServer regionServer
 
 
 
@@ -251,7 +251,7 @@ implements 
 
 

[16/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

2018-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
index b6e7636..592c2cc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TableFuture.TableWaitForStateCallable.html
@@ -356,3901 +356,3924 @@
 348  public FutureVoid 
modifyTableAsync(TableDescriptor td) throws IOException {
 349ModifyTableResponse response = 
executeCallable(
 350  new 
MasterCallableModifyTableResponse(getConnection(), 
getRpcControllerFactory()) {
-351@Override
-352protected ModifyTableResponse 
rpcCall() throws Exception {
-353  
setPriority(td.getTableName());
-354  ModifyTableRequest request = 
RequestConverter.buildModifyTableRequest(
-355td.getTableName(), td, 
ng.getNonceGroup(), ng.newNonce());
-356  return 
master.modifyTable(getRpcController(), request);
-357}
-358  });
-359return new ModifyTableFuture(this, 
td.getTableName(), response);
-360  }
-361
-362  @Override
-363  public ListTableDescriptor 
listTableDescriptorsByNamespace(byte[] name) throws IOException {
-364return executeCallable(new 
MasterCallableListTableDescriptor(getConnection(),
-365getRpcControllerFactory()) {
-366  @Override
-367  protected 
ListTableDescriptor rpcCall() throws Exception {
-368return 
master.listTableDescriptorsByNamespace(getRpcController(),
-369
ListTableDescriptorsByNamespaceRequest.newBuilder()
-370  
.setNamespaceName(Bytes.toString(name)).build())
-371.getTableSchemaList()
-372.stream()
-373
.map(ProtobufUtil::toTableDescriptor)
-374
.collect(Collectors.toList());
-375  }
-376});
-377  }
-378
-379  @Override
-380  public ListTableDescriptor 
listTableDescriptors(ListTableName tableNames) throws IOException {
-381return executeCallable(new 
MasterCallableListTableDescriptor(getConnection(),
-382getRpcControllerFactory()) {
-383  @Override
-384  protected 
ListTableDescriptor rpcCall() throws Exception {
-385GetTableDescriptorsRequest req 
=
-386
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-387  return 
ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-388  req));
-389  }
-390});
-391  }
-392
-393  @Override
-394  public ListRegionInfo 
getRegions(final ServerName sn) throws IOException {
-395AdminService.BlockingInterface admin 
= this.connection.getAdmin(sn);
-396// TODO: There is no timeout on this 
controller. Set one!
-397HBaseRpcController controller = 
rpcControllerFactory.newController();
-398return 
ProtobufUtil.getOnlineRegions(controller, admin);
-399  }
-400
-401  @Override
-402  public ListRegionInfo 
getRegions(TableName tableName) throws IOException {
-403if 
(TableName.isMetaTableName(tableName)) {
-404  return 
Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-405} else {
-406  return 
MetaTableAccessor.getTableRegions(connection, tableName, true);
-407}
-408  }
-409
-410  private static class 
AbortProcedureFuture extends ProcedureFutureBoolean {
-411private boolean isAbortInProgress;
-412
-413public AbortProcedureFuture(
-414final HBaseAdmin admin,
-415final Long procId,
-416final Boolean abortProcResponse) 
{
-417  super(admin, procId);
-418  this.isAbortInProgress = 
abortProcResponse;
-419}
-420
-421@Override
-422public Boolean get(long timeout, 
TimeUnit unit)
-423throws InterruptedException, 
ExecutionException, TimeoutException {
-424  if (!this.isAbortInProgress) {
-425return false;
-426  }
-427  super.get(timeout, unit);
-428  return true;
-429}
-430  }
-431
-432  /** @return Connection used by this 
object. */
-433  @Override
-434  public Connection getConnection() {
-435return connection;
-436  }
-437
-438  @Override
-439  public boolean tableExists(final 
TableName tableName) throws IOException {
-440return executeCallable(new 
RpcRetryingCallableBoolean() {
-441  @Override
-442  protected Boolean rpcCall(int 
callTimeout) throws Exception {
-443return 
MetaTableAccessor.tableExists(connection, tableName);
-444  }
-445});
-446  }
-447
-448  @Override
-449  public HTableDescriptor[] listTables() 
throws IOException {
-450return listTables((Pattern)null, 
false);
-451  }
-452
-453 

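modifyTableAsync() above follows a template-method shape: executeCallable() owns connection/controller setup and retries, while each anonymous MasterCallable supplies only rpcCall(). Stripped to its skeleton (generic stand-ins under stated assumptions, not the real HBase classes):

public class RpcCallableExample {
  abstract static class MasterCallable<V> {
    V call() throws Exception {
      // the real class would prepare the RPC controller and retry policy here
      return rpcCall();
    }
    protected abstract V rpcCall() throws Exception;
  }

  static <V> V executeCallable(MasterCallable<V> callable) throws Exception {
    return callable.call();
  }

  public static void main(String[] args) throws Exception {
    String response = executeCallable(new MasterCallable<String>() {
      @Override
      protected String rpcCall() {
        return "modify-table-response"; // stand-in for master.modifyTable(...)
      }
    });
    System.out.println(response);
  }
}
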
[16/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

2018-06-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
index fea2b5a..c7a6cc4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.html
@@ -1354,816 +1354,824 @@
 1346   */
 1347  public static void 
putsToMetaTable(final Connection connection, final List<Put> ps)
 1348  throws IOException {
-1349try (Table t = 
getMetaHTable(connection)) {
-1350  debugLogMutations(ps);
-1351  t.put(ps);
-1352}
-1353  }
-1354
-1355  /**
-1356   * Delete the passed 
<code>d</code> from the <code>hbase:meta</code> 
table.
-1357   * @param connection connection we're 
using
-1358   * @param d Delete to add to 
hbase:meta
-1359   */
-1360  private static void 
deleteFromMetaTable(final Connection connection, final Delete d)
-1361  throws IOException {
-1362List<Delete> dels = new 
ArrayList<>(1);
-1363dels.add(d);
-1364deleteFromMetaTable(connection, 
dels);
-1365  }
-1366
-1367  /**
-1368   * Delete the passed 
<code>deletes</code> from the <code>hbase:meta</code> 
table.
-1369   * @param connection connection we're 
using
-1370   * @param deletes Deletes to add to 
hbase:meta  This list should support #remove.
-1371   */
-1372  private static void 
deleteFromMetaTable(final Connection connection, final List<Delete> 
deletes)
-1373  throws IOException {
-1374try (Table t = 
getMetaHTable(connection)) {
-1375  debugLogMutations(deletes);
-1376  t.delete(deletes);
-1377}
-1378  }
-1379
-1380  /**
-1381   * Deletes some replica columns 
corresponding to replicas for the passed rows
-1382   * @param metaRows rows in 
hbase:meta
-1383   * @param replicaIndexToDeleteFrom the 
replica ID we would start deleting from
-1384   * @param numReplicasToRemove how many 
replicas to remove
-1385   * @param connection connection we're 
using to access meta table
-1386   */
-1387  public static void 
removeRegionReplicasFromMeta(Set<byte[]> metaRows,
-1388int replicaIndexToDeleteFrom, int 
numReplicasToRemove, Connection connection)
-1389  throws IOException {
-1390int absoluteIndex = 
replicaIndexToDeleteFrom + numReplicasToRemove;
-1391for (byte[] row : metaRows) {
-1392  long now = 
EnvironmentEdgeManager.currentTime();
-1393  Delete deleteReplicaLocations = 
new Delete(row);
-1394  for (int i = 
replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
-1395
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1396  getServerColumn(i), now);
-1397
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1398  getSeqNumColumn(i), now);
-1399
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1400  getStartCodeColumn(i), now);
-1401  }
-1402  deleteFromMetaTable(connection, 
deleteReplicaLocations);
-1403}
-1404  }
-1405
-1406  /**
-1407   * Execute the passed 
<code>mutations</code> against <code>hbase:meta</code> 
table.
-1408   * @param connection connection we're 
using
-1409   * @param mutations Puts and Deletes 
to execute on hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void 
mutateMetaTable(final Connection connection,
-1413 
final List<Mutation> mutations)
-1414throws IOException {
-1415Table t = 
getMetaHTable(connection);
-1416try {
-1417  debugLogMutations(mutations);
-1418  t.batch(mutations, null);
-1419} catch (InterruptedException e) {
-1420  InterruptedIOException ie = new 
InterruptedIOException(e.getMessage());
-1421  ie.initCause(e);
-1422  throw ie;
-1423} finally {
-1424  t.close();
-1425}
-1426  }
-1427
-1428  private static void 
addRegionStateToPut(Put put, RegionState.State state) throws IOException {
-1429
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-1430.setRow(put.getRow())
-1431
.setFamily(HConstants.CATALOG_FAMILY)
-1432
.setQualifier(getRegionStateColumn())
-1433
.setTimestamp(put.getTimestamp())
-1434.setType(Cell.Type.Put)
-1435
.setValue(Bytes.toBytes(state.name()))
-1436.build());
-1437  }
-1438
-1439  /**
-1440   * Adds daughter region infos to 
hbase:meta row for the specified region. Note that this does not
-1441   * add its daughter's as different 
rows, but adds information about the daughters in the same row
-1442   * as the parent. Use
-1443   * {@link #splitRegion(Connection, 
RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)}
-1444   * if you want to do that.
-1445   * @param connection connection we're 
using
-1446   * @param regionInfo RegionInfo of 
parent region
-1447   * @param splitA first split daughter 
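
The removeRegionReplicasFromMeta hunk above builds one Delete per meta row, targeting the server, seqnum and startcode columns of each replica index, and funnels it through the shared deleteFromMetaTable helper. A rough sketch of the same column-targeted delete against the standard HBase client API (a sketch that assumes an already-open Connection; error handling elided):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Table;

    class ReplicaColumnCleanup {
      // Deletes all cells of the given qualifiers at or before "now" for one meta row.
      static void deleteReplicaColumns(Connection conn, byte[] row,
          byte[] family, byte[][] qualifiers, long now) throws IOException {
        Delete d = new Delete(row);
        for (byte[] q : qualifiers) {
          d.addColumns(family, q, now); // all versions up to "now"
        }
        // try-with-resources mirrors the getMetaHTable/close pattern above
        try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
          meta.delete(d);
        }
      }
    }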

[16/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

2018-06-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrContext.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrContext.html
index fdc5a8a..62e604e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.WALHdrContext.html
@@ -320,141 +320,143 @@
 312this.cellDecoder = 
codec.getDecoder(this.inputStream);
 313if (this.hasCompression) {
 314  this.byteStringUncompressor = 
codec.getByteStringUncompressor();
-315}
-316  }
-317
-318  @Override
-319  protected boolean hasCompression() {
-320return this.hasCompression;
-321  }
-322
-323  @Override
-324  protected boolean hasTagCompression() 
{
-325return this.hasTagCompression;
-326  }
-327
-328  @Override
-329  protected boolean readNext(Entry entry) 
throws IOException {
-330while (true) {
-331  // OriginalPosition might be < 0 
on local fs; if so, it is useless to us.
-332  long originalPosition = 
this.inputStream.getPos();
-333  if (trailerPresent && 
originalPosition > 0 && originalPosition == this.walEditsStopOffset) 
{
-334if (LOG.isTraceEnabled()) {
-335  LOG.trace("Reached end of 
expected edits area at offset " + originalPosition);
-336}
-337return false;
-338  }
-339  WALKey.Builder builder = 
WALKey.newBuilder();
-340  long size = 0;
-341  try {
-342long available = -1;
-343try {
-344  int firstByte = 
this.inputStream.read();
-345  if (firstByte == -1) {
-346throw new EOFException("First 
byte is negative at offset " + originalPosition);
-347  }
-348  size = 
CodedInputStream.readRawVarint32(firstByte, this.inputStream);
-349  // available may be < 0 on 
local fs for instance.  If so, can't depend on it.
-350  available = 
this.inputStream.available();
-351  if (available > 0 && 
available < size) {
-352throw new 
EOFException("Available stream not enough for edit, " +
-353"inputStream.available()= 
" + this.inputStream.available() + ", " +
-354"entry size= " + size + " 
at offset = " + this.inputStream.getPos());
-355  }
-356  ProtobufUtil.mergeFrom(builder, 
ByteStreams.limit(this.inputStream, size),
-357(int)size);
-358} catch 
(InvalidProtocolBufferException ipbe) {
-359  throw (EOFException) new 
EOFException("Invalid PB, EOF? Ignoring; originalPosition=" +
-360originalPosition + ", 
currentPosition=" + this.inputStream.getPos() +
-361", messageSize=" + size + ", 
currentAvailable=" + available).initCause(ipbe);
-362}
-363if (!builder.isInitialized()) {
-364  // TODO: not clear if we should 
try to recover from corrupt PB that looks semi-legit.
-365  //   If we can get the KV 
count, we could, theoretically, try to get next record.
-366  throw new EOFException("Partial 
PB while reading WAL, " +
-367  "probably an unexpected 
EOF, ignoring. current offset=" + this.inputStream.getPos());
-368}
-369WALKey walKey = 
builder.build();
-370
entry.getKey().readFieldsFromPb(walKey, this.byteStringUncompressor);
-371if (!walKey.hasFollowingKvCount() 
|| 0 == walKey.getFollowingKvCount()) {
-372  if (LOG.isTraceEnabled()) {
-373LOG.trace("WALKey has no KVs 
that follow it; trying the next one. current offset=" +
-374
this.inputStream.getPos());
-375  }
-376  continue;
-377}
-378int expectedCells = 
walKey.getFollowingKvCount();
-379long posBefore = 
this.inputStream.getPos();
-380try {
-381  int actualCells = 
entry.getEdit().readFromCells(cellDecoder, expectedCells);
-382  if (expectedCells != 
actualCells) {
-383throw new EOFException("Only 
read " + actualCells); // other info added in catch
-384  }
-385} catch (Exception ex) {
-386  String posAfterStr = 
"unknown";
-387  try {
-388posAfterStr = 
this.inputStream.getPos() + "";
-389  } catch (Throwable t) {
-390if (LOG.isTraceEnabled()) {
-391  LOG.trace("Error getting 
pos for error message - ignoring", t);
-392}
-393  }
-394  String message = " while 
reading " + expectedCells + " WAL KVs; started reading at "
-395  + posBefore + " and read up 
to " + posAfterStr;
-396  IOException realEofEx = 
extractHiddenEof(ex);
-397  throw (EOFException) new 
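
readNext above reads a raw varint length prefix, sanity-checks it against inputStream.available(), then parses exactly that many bytes as the WALKey protobuf and converts parse failures into EOFException. A self-contained sketch of the same length-prefixed framing, with a hand-rolled varint decoder standing in for protobuf's CodedInputStream (hypothetical helper, not the HBase reader itself):

    import java.io.ByteArrayInputStream;
    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    class LengthPrefixedReader {
      // Decodes an unsigned LEB128 varint, the framing protobuf writers emit.
      static int readRawVarint32(InputStream in) throws IOException {
        int result = 0;
        for (int shift = 0; shift < 32; shift += 7) {
          int b = in.read();
          if (b == -1) {
            throw new EOFException("EOF while reading varint length");
          }
          result |= (b & 0x7f) << shift;
          if ((b & 0x80) == 0) {
            return result;
          }
        }
        throw new IOException("Malformed varint");
      }

      // Reads one length-prefixed record; throws EOFException on truncation.
      static byte[] readRecord(InputStream in) throws IOException {
        int size = readRawVarint32(in);
        byte[] payload = new byte[size];
        int off = 0;
        while (off < size) {
          int n = in.read(payload, off, size - off);
          if (n == -1) {
            throw new EOFException("Partial record: expected " + size + " bytes, got " + off);
          }
          off += n;
        }
        return payload;
      }

      public static void main(String[] args) throws IOException {
        // 0x03 length prefix followed by three payload bytes
        InputStream in = new ByteArrayInputStream(new byte[] {3, 10, 20, 30});
        System.out.println(readRecord(in).length); // prints 3
      }
    }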

[16/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.RegionStateStampComparator.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements Comparable<ServerStateNode> {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
Set<RegionStateNode> regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339public ProcedureEvent<?> 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements Comparable<ServerStateNode> {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
Set<RegionStateNode> regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346for (int i = 0; i < 
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361public Set<RegionStateNode> 
getRegions() {
-362  return regions;
+361public ProcedureEvent<?> 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369public ArrayList<RegionInfo> 
getRegionInfoList() {
-370  ArrayList<RegionInfo> hris = 
new ArrayList<RegionInfo>(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372for (int i = 0; i < 
expected.length; ++i) {
+373  expectedState |= (state == 
expected[i]);
+374  
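
The refactor above replaces the one-line ServerState enum with documented constants and gives ServerStateNode helpers such as isOffline() and the varargs isInState(). A compact sketch of that enum-plus-varargs-check idiom (names hypothetical):

    // Lifecycle states a server node can occupy.
    enum ServerState {
      ONLINE,    // initial state, available for assignments
      SPLITTING, // expired/crashed, WAL splitting in progress
      OFFLINE    // WAL splitting done
    }

    class ServerNode {
      private volatile ServerState state = ServerState.ONLINE;

      boolean isOffline() {
        return state == ServerState.OFFLINE;
      }

      // True if the current state matches any of the expected ones.
      boolean isInState(ServerState... expected) {
        if (expected == null) {
          return false;
        }
        for (ServerState s : expected) {
          if (state == s) {
            return true;
          }
        }
        return false;
      }
    }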

[16/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

2018-05-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 3da432b..d30fa8f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -928,7690 +928,7698 @@
 920  Collection<HStore> stores = 
this.stores.values();
 921  try {
 922// update the stores that we are 
replaying
-923
stores.forEach(HStore::startReplayingFromWAL);
-924// Recover any edits if 
available.
-925maxSeqId = Math.max(maxSeqId,
-926  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-927// Make sure mvcc is up to max.
-928this.mvcc.advanceTo(maxSeqId);
-929  } finally {
-930// update the stores that we are 
done replaying
-931
stores.forEach(HStore::stopReplayingFromWAL);
-932  }
-933}
-934this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-935
-936
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-937this.writestate.flushRequested = 
false;
-938this.writestate.compacting.set(0);
-939
-940if (this.writestate.writesEnabled) 
{
-941  // Remove temporary data left over 
from old regions
-942  status.setStatus("Cleaning up 
temporary data from old regions");
-943  fs.cleanupTempDir();
-944}
-945
-946if (this.writestate.writesEnabled) 
{
-947  status.setStatus("Cleaning up 
detritus from prior splits");
-948  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-949  // these directories here on open.  
We may be opening a region that was
-950  // being split but we crashed in 
the middle of it all.
-951  fs.cleanupAnySplitDetritus();
-952  fs.cleanupMergesDir();
-953}
-954
-955// Initialize split policy
-956this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-957
-958// Initialize flush policy
-959this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-960
-961long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-962for (HStore store: stores.values()) 
{
-963  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-964}
-965
-966// Use maximum of log sequenceid or 
that which was found in stores
-967// (particularly if no recovered 
edits, seqid will be -1).
-968long maxSeqIdFromFile =
-969  
WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
-970long nextSeqId = Math.max(maxSeqId, 
maxSeqIdFromFile) + 1;
-971if (writestate.writesEnabled) {
-972  
WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), 
nextSeqId - 1);
-973}
-974
-975LOG.info("Opened {}; next 
sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
-976
-977// A region can be reopened if failed 
a split; reset flags
-978this.closing.set(false);
-979this.closed.set(false);
-980
-981if (coprocessorHost != null) {
-982  status.setStatus("Running 
coprocessor post-open hooks");
-983  coprocessorHost.postOpen();
-984}
+923LOG.debug("replaying wal for " + 
this.getRegionInfo().getEncodedName());
+924
stores.forEach(HStore::startReplayingFromWAL);
+925// Recover any edits if 
available.
+926maxSeqId = Math.max(maxSeqId,
+927  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+928// Make sure mvcc is up to max.
+929this.mvcc.advanceTo(maxSeqId);
+930  } finally {
+931LOG.debug("stopping wal replay 
for " + this.getRegionInfo().getEncodedName());
+932// update the stores that we are 
done replaying
+933
stores.forEach(HStore::stopReplayingFromWAL);
+934  }
+935}
+936this.lastReplayedOpenRegionSeqId = 
maxSeqId;
+937
+938
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+939this.writestate.flushRequested = 
false;
+940this.writestate.compacting.set(0);
+941
+942if (this.writestate.writesEnabled) 
{
+943  LOG.debug("Cleaning up temporary 
data for " + this.getRegionInfo().getEncodedName());
+944  // Remove temporary data left over 
from old regions
+945  status.setStatus("Cleaning up 
temporary data from old regions");
+946  fs.cleanupTempDir();
+947}
+948
+949if (this.writestate.writesEnabled) 
{
+950  status.setStatus("Cleaning up 
detritus from prior splits");
+951  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
+952  // these directories here on open.  
We may be opening a region that 
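
The change above brackets recovered-edits replay with startReplayingFromWAL/stopReplayingFromWAL on every store and adds debug logging on both sides; the finally block guarantees the stores leave replay mode even when replay throws. A minimal sketch of that bracketing pattern, assuming a hypothetical Store interface:

    import java.util.List;

    interface Store {
      void startReplayingFromWAL();
      void stopReplayingFromWAL();
    }

    class ReplayBracket {
      // Runs the replay action with every store toggled into replay mode,
      // restoring normal mode in finally even when replay fails.
      static void replayAll(List<Store> stores, String regionName, Runnable replay) {
        System.out.println("replaying wal for " + regionName);
        stores.forEach(Store::startReplayingFromWAL);
        try {
          replay.run(); // recover edits, advance mvcc, etc.
        } finally {
          System.out.println("stopping wal replay for " + regionName);
          stores.forEach(Store::stopReplayingFromWAL);
        }
      }
    }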

[16/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

2018-05-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
index 85373ba..d253aa8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.html
@@ -41,208 +41,208 @@
 033import 
org.apache.hadoop.hbase.CoprocessorEnvironment;
 034import 
org.apache.hadoop.hbase.HBaseIOException;
 035import 
org.apache.hadoop.hbase.HConstants;
-036import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-037import 
org.apache.hadoop.hbase.ServerName;
-038import 
org.apache.hadoop.hbase.TableName;
-039import 
org.apache.hadoop.hbase.client.RegionInfo;
-040import 
org.apache.hadoop.hbase.client.SnapshotDescription;
-041import 
org.apache.hadoop.hbase.client.TableDescriptor;
-042import 
org.apache.hadoop.hbase.constraint.ConstraintException;
-043import 
org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
-044import 
org.apache.hadoop.hbase.coprocessor.HasMasterServices;
-045import 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
-046import 
org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
-047import 
org.apache.hadoop.hbase.coprocessor.MasterObserver;
-048import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-049import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-050import 
org.apache.hadoop.hbase.ipc.RpcServer;
-051import 
org.apache.hadoop.hbase.master.MasterServices;
-052import 
org.apache.hadoop.hbase.net.Address;
-053import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-054import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-055import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
-056import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
-057import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
-058import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
-059import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
-060import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
-061import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
-062import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
-063import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
-064import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
-065import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
-066import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
-067import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
-068import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest;
-069import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse;
-070import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
-071import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
-072import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
-073import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse;
-074import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
-075import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
-076import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
-077import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
-078import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
-079import 
org.apache.hadoop.hbase.security.User;
-080import 
org.apache.hadoop.hbase.security.UserProvider;
-081import 
org.apache.hadoop.hbase.security.access.AccessChecker;
-082import 
org.apache.hadoop.hbase.security.access.Permission.Action;
-083import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-084import 
org.apache.yetus.audience.InterfaceAudience;
-085import org.slf4j.Logger;
-086import org.slf4j.LoggerFactory;
-087
-088import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+036import 
org.apache.hadoop.hbase.MasterNotRunningException;
+037import 
org.apache.hadoop.hbase.NamespaceDescriptor;
+038import 
org.apache.hadoop.hbase.PleaseHoldException;
+039import 
org.apache.hadoop.hbase.ServerName;
+040import 

[16/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.html 
b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.html
index 3fd4061..188d432 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.html
@@ -785,7 +785,7 @@ extends Object
 
 
 runIncrementalPELoad
-private void runIncrementalPELoad(org.apache.hadoop.conf.Configuration conf,
+private void runIncrementalPELoad(org.apache.hadoop.conf.Configuration conf,
   List<org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.TableInfo> tableInfo,
   org.apache.hadoop.fs.Path outDir,
   boolean putSortReducer)
@@ -806,7 +806,7 @@ extends Object
 
 
 testSerializeDeserializeFamilyCompressionMap
-public void testSerializeDeserializeFamilyCompressionMap()
+public void testSerializeDeserializeFamilyCompressionMap()
   throws IOException
 Test for 
HFileOutputFormat2#configureCompression(Configuration, 
HTableDescriptor) and
 HFileOutputFormat2.createFamilyCompressionMap(Configuration).
@@ -824,7 +824,7 @@ extends Object
 
 
 setupMockColumnFamiliesForCompression
-private void setupMockColumnFamiliesForCompression(org.apache.hadoop.hbase.client.Table table,
+private void setupMockColumnFamiliesForCompression(org.apache.hadoop.hbase.client.Table table,
 Map<String,org.apache.hadoop.hbase.io.compress.Compression.Algorithm> familyToCompression)
 throws IOException
 
@@ -839,7 +839,7 @@ extends Object
 
 
 getMockColumnFamiliesForCompression
-private Map<String,org.apache.hadoop.hbase.io.compress.Compression.Algorithm> getMockColumnFamiliesForCompression(int numCfs)
+private Map<String,org.apache.hadoop.hbase.io.compress.Compression.Algorithm> getMockColumnFamiliesForCompression(int numCfs)
 
 Returns:
 a map from column family names to compression algorithms for
@@ -853,7 +853,7 @@ extends Object
 
 
 testSerializeDeserializeFamilyBloomTypeMap
-public void testSerializeDeserializeFamilyBloomTypeMap()
+public void testSerializeDeserializeFamilyBloomTypeMap()
 throws IOException
 Test for 
HFileOutputFormat2#configureBloomType(HTableDescriptor, 
Configuration) and
 HFileOutputFormat2.createFamilyBloomTypeMap(Configuration).
@@ -871,7 +871,7 @@ extends Object
 
 
 setupMockColumnFamiliesForBloomType
-private void setupMockColumnFamiliesForBloomType(org.apache.hadoop.hbase.client.Table table,
+private void setupMockColumnFamiliesForBloomType(org.apache.hadoop.hbase.client.Table table,
 Map<String,org.apache.hadoop.hbase.regionserver.BloomType> familyToDataBlockEncoding)
   throws 
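
The tests named above round-trip per-family settings (compression algorithms, bloom types) through a single Configuration string. A plausible self-contained sketch of such an encode/decode scheme, using URL-escaped family=value pairs joined with '&' (the exact HFileOutputFormat2 wire format is an assumption here):

    import java.io.UnsupportedEncodingException;
    import java.net.URLDecoder;
    import java.net.URLEncoder;
    import java.util.LinkedHashMap;
    import java.util.Map;

    class FamilyConfCodec {
      // Encodes {family -> value} as "f1=v1&f2=v2" with URL-escaped parts.
      static String serialize(Map<String, String> familyToValue)
          throws UnsupportedEncodingException {
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, String> e : familyToValue.entrySet()) {
          if (sb.length() > 0) {
            sb.append('&');
          }
          sb.append(URLEncoder.encode(e.getKey(), "UTF-8"))
            .append('=')
            .append(URLEncoder.encode(e.getValue(), "UTF-8"));
        }
        return sb.toString();
      }

      static Map<String, String> deserialize(String encoded)
          throws UnsupportedEncodingException {
        Map<String, String> out = new LinkedHashMap<>();
        if (encoded == null || encoded.isEmpty()) {
          return out;
        }
        for (String pair : encoded.split("&")) {
          int eq = pair.indexOf('=');
          out.put(URLDecoder.decode(pair.substring(0, eq), "UTF-8"),
                  URLDecoder.decode(pair.substring(eq + 1), "UTF-8"));
        }
        return out;
      }
    }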

[16/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

2018-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 4a879bb..7d27402 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -300,7 +300,7 @@
 292  private Map<String, 
com.google.protobuf.Service> coprocessorServiceHandlers = 
Maps.newHashMap();
 293
 294  // Track data size in all memstores
-295  private final MemStoreSizing 
memStoreSize = new MemStoreSizing();
+295  private final MemStoreSizing 
memStoreSizing = new ThreadSafeMemStoreSizing();
 296  private final RegionServicesForStores 
regionServicesForStores = new RegionServicesForStores(this);
 297
 298  // Debug possible data loss due to WAL 
off
@@ -1218,7389 +1218,7399 @@
 1210   * Increase the size of mem store in 
this region and the size of global mem
 1211   * store
 1212   */
-1213  public void 
incMemStoreSize(MemStoreSize memStoreSize) {
-1214if (this.rsAccounting != null) {
-1215  
rsAccounting.incGlobalMemStoreSize(memStoreSize);
-1216}
-1217long dataSize;
-1218synchronized (this.memStoreSize) {
-1219  
this.memStoreSize.incMemStoreSize(memStoreSize);
-1220  dataSize = 
this.memStoreSize.getDataSize();
-1221}
-1222
checkNegativeMemStoreDataSize(dataSize, memStoreSize.getDataSize());
-1223  }
-1224
-1225  public void 
decrMemStoreSize(MemStoreSize memStoreSize) {
-1226if (this.rsAccounting != null) {
-1227  
rsAccounting.decGlobalMemStoreSize(memStoreSize);
-1228}
-1229long size;
-1230synchronized (this.memStoreSize) {
-1231  
this.memStoreSize.decMemStoreSize(memStoreSize);
-1232  size = 
this.memStoreSize.getDataSize();
+1213  void incMemStoreSize(MemStoreSize mss) 
{
+1214incMemStoreSize(mss.getDataSize(), 
mss.getHeapSize(), mss.getOffHeapSize());
+1215  }
+1216
+1217  void incMemStoreSize(long 
dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1218if (this.rsAccounting != null) {
+1219  
rsAccounting.incGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, 
offHeapSizeDelta);
+1220}
+1221long dataSize =
+1222
this.memStoreSizing.incMemStoreSize(dataSizeDelta, heapSizeDelta, 
offHeapSizeDelta);
+1223
checkNegativeMemStoreDataSize(dataSize, dataSizeDelta);
+1224  }
+1225
+1226  void decrMemStoreSize(MemStoreSize 
mss) {
+1227decrMemStoreSize(mss.getDataSize(), 
mss.getHeapSize(), mss.getOffHeapSize());
+1228  }
+1229
+1230  void decrMemStoreSize(long 
dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1231if (this.rsAccounting != null) {
+1232  
rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, 
offHeapSizeDelta);
 1233}
-1234checkNegativeMemStoreDataSize(size, 
-memStoreSize.getDataSize());
-1235  }
-1236
-1237  private void 
checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) {
-1238// This is extremely bad if we make 
memStoreSize negative. Log as much info on the offending
-1239// caller as possible. (memStoreSize 
might be a negative value already -- freeing memory)
-1240if (memStoreDataSize < 0) {
-1241  LOG.error("Asked to modify this 
region's (" + this.toString()
-1242  + ") memStoreSize to a 
negative value which is incorrect. Current memStoreSize="
-1243  + (memStoreDataSize - delta) + 
", delta=" + delta, new Exception());
-1244}
-1245  }
-1246
-1247  @Override
-1248  public RegionInfo getRegionInfo() {
-1249return this.fs.getRegionInfo();
-1250  }
-1251
-1252  /**
-1253   * @return Instance of {@link 
RegionServerServices} used by this HRegion.
-1254   * Can be null.
-1255   */
-1256  RegionServerServices 
getRegionServerServices() {
-1257return this.rsServices;
-1258  }
-1259
-1260  @Override
-1261  public long getReadRequestsCount() {
-1262return readRequestsCount.sum();
-1263  }
-1264
-1265  @Override
-1266  public long 
getFilteredReadRequestsCount() {
-1267return 
filteredReadRequestsCount.sum();
-1268  }
-1269
-1270  @Override
-1271  public long getWriteRequestsCount() 
{
-1272return writeRequestsCount.sum();
-1273  }
-1274
-1275  @Override
-1276  public long getMemStoreDataSize() {
-1277return memStoreSize.getDataSize();
-1278  }
-1279
-1280  @Override
-1281  public long getMemStoreHeapSize() {
-1282return memStoreSize.getHeapSize();
-1283  }
-1284
-1285  @Override
-1286  public long getMemStoreOffHeapSize() 
{
-1287return 
memStoreSize.getOffHeapSize();
-1288  }
-1289
-1290  /** @return store services for this 
region, to access services required by store level needs */
-1291  public 
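
The hunk above swaps a synchronized block around a shared MemStoreSizing for a ThreadSafeMemStoreSizing whose increment returns the new data size directly, so the negative-size check no longer needs a lock. A sketch of that accumulate-and-return idiom with AtomicLong (hypothetical class, not the HBase one):

    import java.util.concurrent.atomic.AtomicLong;

    class ThreadSafeSizing {
      private final AtomicLong dataSize = new AtomicLong();
      private final AtomicLong heapSize = new AtomicLong();
      private final AtomicLong offHeapSize = new AtomicLong();

      // Applies the deltas and returns the resulting data size, so the caller
      // can validate it without holding any lock.
      long incMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta) {
        heapSize.addAndGet(heapDelta);
        offHeapSize.addAndGet(offHeapDelta);
        return dataSize.addAndGet(dataDelta);
      }

      long decMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta) {
        return incMemStoreSize(-dataDelta, -heapDelta, -offHeapDelta);
      }
    }

    // Caller-side check mirroring checkNegativeMemStoreDataSize:
    //   long newSize = sizing.incMemStoreSize(d, h, o);
    //   if (newSize < 0) { /* log the offending caller with a stack trace */ }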

[16/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

2018-05-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
index 2510283..418c60c 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
@@ -77,77 +77,77 @@
 069import 
org.apache.hadoop.hbase.client.RowMutations;
 070import 
org.apache.hadoop.hbase.client.Scan;
 071import 
org.apache.hadoop.hbase.client.Table;
-072import 
org.apache.hadoop.hbase.filter.BinaryComparator;
-073import 
org.apache.hadoop.hbase.filter.Filter;
-074import 
org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import 
org.apache.hadoop.hbase.filter.FilterList;
-076import 
org.apache.hadoop.hbase.filter.PageFilter;
-077import 
org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import 
org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import 
org.apache.hadoop.hbase.io.compress.Compression;
-080import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import 
org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import 
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import 
org.apache.hadoop.hbase.regionserver.BloomType;
-084import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import 
org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import 
org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import 
org.apache.hadoop.hbase.trace.TraceUtil;
-088import 
org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import 
org.apache.hadoop.hbase.util.Bytes;
-090import 
org.apache.hadoop.hbase.util.Hash;
-091import 
org.apache.hadoop.hbase.util.MurmurHash;
-092import 
org.apache.hadoop.hbase.util.Pair;
-093import 
org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import 
org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import 
org.apache.hadoop.mapreduce.Mapper;
-098import 
org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import 
org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import 
org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import 
org.apache.hadoop.util.ToolRunner;
-103import 
org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import 
org.apache.htrace.core.TraceScope;
-106import 
org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import 
org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase 
performance and scalability.  Runs a HBase
-114 * client that steps through one of a set 
of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random 
writes test, etc.). Pass on the
-116 * command-line which test to run and how 
many clients are participating in
-117 * this experiment. Run {@code 
PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs 
the evaluation programs described in
-120 * Section 7, <i>Performance 
Evaluation</i>, of the <a
-121 * 
href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a 
mapreduce job where each mapper runs a single test
-125 * client. Can also run as a 
non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each 
client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation 
extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = 
"randomSeekScan";
-132  static final String RANDOM_READ = 
"randomRead";
-133  private static final Logger LOG = 
LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-135  static {
-136
MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = 
"TestTable";
-140  public static final byte[] FAMILY_NAME 
= Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO 
= Bytes.toBytes("" + 0);
-142  public static final byte [] 
QUALIFIER_NAME = COLUMN_ZERO;
+072import 
org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import 
org.apache.hadoop.hbase.filter.BinaryComparator;
+074import 
org.apache.hadoop.hbase.filter.Filter;
+075import 
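
PerformanceEvaluation's static initializer, visible above, configures its Jackson ObjectMapper to sort properties alphabetically so that serialized test options come out in a stable order. A small sketch of that setup, assuming jackson-databind is on the classpath:

    import com.fasterxml.jackson.databind.MapperFeature;
    import com.fasterxml.jackson.databind.ObjectMapper;

    class StableJson {
      private static final ObjectMapper MAPPER = new ObjectMapper();
      static {
        // Deterministic property order makes serialized options diff-friendly.
        MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
      }

      public static class Opts {
        public int rows = 1024;
        public String table = "TestTable";
        public boolean nomapred = false;
      }

      public static void main(String[] args) throws Exception {
        // Fields come out alphabetically: nomapred, rows, table.
        System.out.println(MAPPER.writeValueAsString(new Opts()));
      }
    }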

[16/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

2018-05-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
index e1bc325..63e7421 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemRegion.html
@@ -66,5125 +66,5224 @@
 058import 
java.util.concurrent.TimeoutException;
 059import 
java.util.concurrent.atomic.AtomicBoolean;
 060import 
java.util.concurrent.atomic.AtomicInteger;
-061import org.apache.commons.io.IOUtils;
-062import 
org.apache.commons.lang3.RandomStringUtils;
-063import 
org.apache.commons.lang3.StringUtils;
-064import 
org.apache.hadoop.conf.Configuration;
-065import 
org.apache.hadoop.conf.Configured;
-066import 
org.apache.hadoop.fs.FSDataOutputStream;
-067import org.apache.hadoop.fs.FileStatus;
-068import org.apache.hadoop.fs.FileSystem;
-069import org.apache.hadoop.fs.Path;
-070import 
org.apache.hadoop.fs.permission.FsAction;
-071import 
org.apache.hadoop.fs.permission.FsPermission;
-072import 
org.apache.hadoop.hbase.Abortable;
-073import org.apache.hadoop.hbase.Cell;
-074import 
org.apache.hadoop.hbase.CellUtil;
-075import 
org.apache.hadoop.hbase.ClusterMetrics;
-076import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-077import 
org.apache.hadoop.hbase.HBaseConfiguration;
-078import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-079import 
org.apache.hadoop.hbase.HConstants;
-080import 
org.apache.hadoop.hbase.HRegionInfo;
-081import 
org.apache.hadoop.hbase.HRegionLocation;
-082import 
org.apache.hadoop.hbase.KeyValue;
-083import 
org.apache.hadoop.hbase.MasterNotRunningException;
-084import 
org.apache.hadoop.hbase.MetaTableAccessor;
-085import 
org.apache.hadoop.hbase.RegionLocations;
-086import 
org.apache.hadoop.hbase.ServerName;
-087import 
org.apache.hadoop.hbase.TableName;
-088import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-089import 
org.apache.hadoop.hbase.client.Admin;
-090import 
org.apache.hadoop.hbase.client.ClusterConnection;
-091import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-092import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-093import 
org.apache.hadoop.hbase.client.Connection;
-094import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-095import 
org.apache.hadoop.hbase.client.Delete;
-096import 
org.apache.hadoop.hbase.client.Get;
-097import 
org.apache.hadoop.hbase.client.Put;
-098import 
org.apache.hadoop.hbase.client.RegionInfo;
-099import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-100import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-101import 
org.apache.hadoop.hbase.client.Result;
-102import 
org.apache.hadoop.hbase.client.RowMutations;
-103import 
org.apache.hadoop.hbase.client.Table;
-104import 
org.apache.hadoop.hbase.client.TableDescriptor;
-105import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-106import 
org.apache.hadoop.hbase.client.TableState;
-107import 
org.apache.hadoop.hbase.io.FileLink;
-108import 
org.apache.hadoop.hbase.io.HFileLink;
-109import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-110import 
org.apache.hadoop.hbase.io.hfile.HFile;
-111import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-112import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-113import 
org.apache.hadoop.hbase.master.RegionState;
-114import 
org.apache.hadoop.hbase.regionserver.HRegion;
-115import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-116import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-117import 
org.apache.hadoop.hbase.replication.ReplicationException;
-118import 
org.apache.hadoop.hbase.security.AccessDeniedException;
-119import 
org.apache.hadoop.hbase.security.UserProvider;
-120import 
org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-121import 
org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
-122import 
org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
-123import 
org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
-124import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
-125import 
org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-126import org.apache.hadoop.hbase.wal.WAL;
-127import 
org.apache.hadoop.hbase.wal.WALFactory;
-128import 
org.apache.hadoop.hbase.wal.WALSplitter;
-129import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-130import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-131import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-132import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-133import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-134import 
org.apache.hadoop.ipc.RemoteException;
-135import 

[16/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
index 8302e28..c370eb9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.FileLockCallable.html
@@ -2113,3031 +2113,3033 @@
 2105
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106tableName + " unable to 
delete dangling table state " + tableState);
 2107  }
-2108} else {
-2109  
errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110  tableName + " has dangling 
table state " + tableState);
-2111}
-2112  }
-2113}
-2114// check that all tables have 
states
-2115for (TableName tableName : 
tablesInfo.keySet()) {
-2116  if (isTableIncluded(tableName) && 
!tableStates.containsKey(tableName)) {
-2117if (fixMeta) {
-2118  
MetaTableAccessor.updateTableState(connection, tableName, 
TableState.State.ENABLED);
-2119  TableState newState = 
MetaTableAccessor.getTableState(connection, tableName);
-2120  if (newState == null) {
-2121
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122"Unable to change state 
for table " + tableName + " in meta ");
-2123  }
-2124} else {
-2125  
errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126  tableName + " has no state 
in meta ");
-2127}
-2128  }
-2129}
-2130  }
-2131
-2132  private void preCheckPermission() 
throws IOException, AccessDeniedException {
-2133if 
(shouldIgnorePreCheckPermission()) {
-2134  return;
-2135}
-2136
-2137Path hbaseDir = 
FSUtils.getRootDir(getConf());
-2138FileSystem fs = 
hbaseDir.getFileSystem(getConf());
-2139UserProvider userProvider = 
UserProvider.instantiate(getConf());
-2140UserGroupInformation ugi = 
userProvider.getCurrent().getUGI();
-2141FileStatus[] files = 
fs.listStatus(hbaseDir);
-2142for (FileStatus file : files) {
-2143  try {
-2144FSUtils.checkAccess(ugi, file, 
FsAction.WRITE);
-2145  } catch (AccessDeniedException 
ace) {
-2146LOG.warn("Got 
AccessDeniedException when preCheckPermission ", ace);
-2147
errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + 
ugi.getUserName()
-2148  + " does not have write perms 
to " + file.getPath()
-2149  + ". Please rerun hbck as hdfs 
user " + file.getOwner());
-2150throw ace;
-2151  }
-2152}
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo 
hi) throws IOException {
-2159
deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] 
metaKey) throws IOException {
-2166Delete d = new Delete(metaKey);
-2167meta.delete(d);
-2168LOG.info("Deleted " + 
Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info 
in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo 
hi) throws IOException {
-2175RowMutations mutations = new 
RowMutations(hi.metaEntry.getRegionName());
-2176Delete d = new 
Delete(hi.metaEntry.getRegionName());
-2177
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179mutations.add(d);
-2180
-2181RegionInfo hri = 
RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182.setOffline(false)
-2183.setSplit(false)
-2184.build();
-2185Put p = 
MetaTableAccessor.makePutFromRegionInfo(hri, 
EnvironmentEdgeManager.currentTime());
-2186mutations.add(p);
-2187
-2188meta.mutateRow(mutations);
-2189LOG.info("Reset split parent " + 
hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This backwards-compatibility 
wrapper for permanently offlining a region
-2194   * that should not be alive.  If the 
region server does not support the
-2195   * "offline" method, it will use the 
closest unassign method instead.  This
-2196   * will basically work until one 
attempts to disable or delete the affected
-2197   * table.  The problem has to do with 
in-memory only master state, so
-2198   * restarting the HMaster or failing 
over to another should fix this.
-2199   */
-2200  private void offline(byte[] 
regionName) throws IOException {
-2201String regionString = 
Bytes.toStringBinary(regionName);
-2202if (!rsSupportsOffline) {
-2203  
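
resetSplitParent above combines a Delete of the SPLITA/SPLITB qualifiers with a Put of the rewritten RegionInfo into one RowMutations, so the meta row is updated atomically via Table.mutateRow. A hedged sketch of that combine-then-mutate pattern against the standard client API (qualifier and value arguments are the caller's):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.client.Table;

    class AtomicRowUpdate {
      // Clears two qualifiers and rewrites another cell in one atomic row op.
      static void clearAndRewrite(Table table, byte[] row, byte[] family,
          byte[] qualA, byte[] qualB, byte[] infoQual, byte[] newValue)
          throws IOException {
        Delete d = new Delete(row);
        d.addColumn(family, qualA);
        d.addColumn(family, qualB);

        Put p = new Put(row);
        p.addColumn(family, infoQual, newValue);

        RowMutations mutations = new RowMutations(row);
        mutations.add(d);
        mutations.add(p);
        table.mutateRow(mutations); // applied atomically on the region server
      }
    }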

[16/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

2018-03-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
index d7aa8b1..98a45a0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
@@ -680,1330 +680,1333 @@
 672}
 673List<HRegionLocation> locations 
= new ArrayList<>();
 674for (RegionInfo regionInfo : regions) 
{
-675  RegionLocations list = 
locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
-676  if (list != null) {
-677for (HRegionLocation loc : 
list.getRegionLocations()) {
-678  if (loc != null) {
-679locations.add(loc);
-680  }
-681}
-682  }
-683}
-684return locations;
-685  }
-686
-687  @Override
-688  public HRegionLocation 
locateRegion(final TableName tableName, final byte[] row)
-689  throws IOException {
-690RegionLocations locations = 
locateRegion(tableName, row, true, true);
-691return locations == null ? null : 
locations.getRegionLocation();
-692  }
-693
-694  @Override
-695  public HRegionLocation 
relocateRegion(final TableName tableName, final byte[] row)
-696  throws IOException {
-697RegionLocations locations =
-698  relocateRegion(tableName, row, 
RegionReplicaUtil.DEFAULT_REPLICA_ID);
-699return locations == null ? null
-700  : 
locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
-701  }
-702
-703  @Override
-704  public RegionLocations 
relocateRegion(final TableName tableName,
-705  final byte [] row, int replicaId) 
throws IOException{
-706// Since this is an explicit request 
not to use any caching, finding
-707// disabled tables should not be 
desirable.  This will ensure that an exception is thrown when
-708// the first time a disabled table is 
interacted with.
-709if 
(!tableName.equals(TableName.META_TABLE_NAME) && 
isTableDisabled(tableName)) {
-710  throw new 
TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
-711}
-712
-713return locateRegion(tableName, row, 
false, true, replicaId);
-714  }
+675  if 
(!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
+676continue;
+677  }
+678  RegionLocations list = 
locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
+679  if (list != null) {
+680for (HRegionLocation loc : 
list.getRegionLocations()) {
+681  if (loc != null) {
+682locations.add(loc);
+683  }
+684}
+685  }
+686}
+687return locations;
+688  }
+689
+690  @Override
+691  public HRegionLocation 
locateRegion(final TableName tableName, final byte[] row)
+692  throws IOException {
+693RegionLocations locations = 
locateRegion(tableName, row, true, true);
+694return locations == null ? null : 
locations.getRegionLocation();
+695  }
+696
+697  @Override
+698  public HRegionLocation 
relocateRegion(final TableName tableName, final byte[] row)
+699  throws IOException {
+700RegionLocations locations =
+701  relocateRegion(tableName, row, 
RegionReplicaUtil.DEFAULT_REPLICA_ID);
+702return locations == null ? null
+703  : 
locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
+704  }
+705
+706  @Override
+707  public RegionLocations 
relocateRegion(final TableName tableName,
+708  final byte [] row, int replicaId) 
throws IOException{
+709// Since this is an explicit request 
not to use any caching, finding
+710// disabled tables should not be 
desirable.  This will ensure that an exception is thrown when
+711// the first time a disabled table is 
interacted with.
+712if 
(!tableName.equals(TableName.META_TABLE_NAME) && 
isTableDisabled(tableName)) {
+713  throw new 
TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
+714}
 715
-716  @Override
-717  public RegionLocations 
locateRegion(final TableName tableName, final byte[] row, boolean useCache,
-718  boolean retry) throws IOException 
{
-719return locateRegion(tableName, row, 
useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-720  }
-721
-722  @Override
-723  public RegionLocations 
locateRegion(final TableName tableName, final byte[] row, boolean useCache,
-724  boolean retry, int replicaId) 
throws IOException {
-725checkClosed();
-726if (tableName == null || 
tableName.getName().length == 0) {
-727  throw new 
IllegalArgumentException("table name cannot be null or zero length");
-728}
-729if 
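
The fix above makes locateRegions skip non-default replica RegionInfos before calling locateRegion, because each returned RegionLocations already carries the locations of every replica for that range. A simplified sketch of the filter-then-flatten loop (interfaces hypothetical):

    import java.util.ArrayList;
    import java.util.List;

    class ReplicaAwareLocator {
      interface Region { boolean isDefaultReplica(); byte[] getStartKey(); }
      interface Location { }
      interface Locator { Location[] locate(byte[] startKey); }

      // Locates each default replica once; the returned array already holds
      // the locations of all replicas for that range.
      static List<Location> locateAll(List<Region> regions, Locator locator) {
        List<Location> out = new ArrayList<>();
        for (Region r : regions) {
          if (!r.isDefaultReplica()) {
            continue; // avoid duplicate lookups for replica regions
          }
          Location[] locs = locator.locate(r.getStartKey());
          if (locs != null) {
            for (Location loc : locs) {
              if (loc != null) {
                out.add(loc);
              }
            }
          }
        }
        return out;
      }
    }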

[16/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

2018-03-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
index 2d8d219..6e574eb 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/HRegionLocation.html
@@ -162,7 +162,7 @@ service.
 
 
 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r,
+MetaTableAccessor.getRegionLocation(Result r,
 RegionInfo regionInfo,
 int replicaId)
 Returns the HRegionLocation parsed from the given meta row 
Result
@@ -171,7 +171,7 @@ service.
 
 
 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r,
+AsyncMetaTableAccessor.getRegionLocation(Result r,
 RegionInfo regionInfo,
 int replicaId)
 Returns the HRegionLocation parsed from the given meta row 
Result
@@ -309,26 +309,26 @@ service.
 
 
 private HRegionLocation
-AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.loc
-
-
-private HRegionLocation
 AsyncScanSingleRegionRpcRetryingCaller.loc
 
-
+
 HRegionLocation
 AsyncBatchRpcRetryingCaller.RegionRequest.loc
 
-
-protected HRegionLocation
-RegionAdminServiceCallable.location
-
 
+private HRegionLocation
+AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.loc
+
+
 protected HRegionLocation
 RegionServerCallable.location
 Some subclasses want to set their own location.
 
 
+
+protected HRegionLocation
+RegionAdminServiceCallable.location
+
 
 
 
@@ -409,18 +409,18 @@ service.
 
 
 HRegionLocation
-ConnectionImplementation.getRegionLocation(TableName tableName,
- byte[] row,
- boolean reload)
-
-
-HRegionLocation
 ClusterConnection.getRegionLocation(TableName tableName,
  byte[] row,
  boolean reload)
 Find region location hosting passed row
 
 
+
+HRegionLocation
+ConnectionImplementation.getRegionLocation(TableName tableName,
+ byte[] row,
+ boolean reload)
+
 
 private HRegionLocation
 AsyncRequestFutureImpl.getReplicaLocationOrFail(Actionaction)
@@ -434,20 +434,15 @@ service.
 
 
 HRegionLocation
-ConnectionImplementation.locateRegion(byte[]regionName)
-
-
-HRegionLocation
 ClusterConnection.locateRegion(byte[]regionName)
 Gets the location of the region of regionName.
 
 
-
+
 HRegionLocation
-ConnectionImplementation.locateRegion(TableNametableName,
-byte[]row)
+ConnectionImplementation.locateRegion(byte[]regionName)
 
-
+
 HRegionLocation
 ClusterConnection.locateRegion(TableNametableName,
 byte[]row)
@@ -455,6 +450,11 @@ service.
  lives in.
 
 
+
+HRegionLocation
+ConnectionImplementation.locateRegion(TableNametableName,
+byte[]row)
+
 
 private HRegionLocation
 AsyncNonMetaRegionLocator.locateRowBeforeInCache(AsyncNonMetaRegionLocator.TableCachetableCache,
@@ -469,17 +469,17 @@ service.
 
 
 HRegionLocation
-ConnectionImplementation.relocateRegion(TableNametableName,
-  byte[]row)
-
-
-HRegionLocation
 ClusterConnection.relocateRegion(TableNametableName,
   byte[]row)
 Find the location of the region of tableName that 
row
  lives in, ignoring any value that might be in the cache.
 
 
+
+HRegionLocation
+ConnectionImplementation.relocateRegion(TableNametableName,
+  byte[]row)
+
 
 
 
@@ -523,29 +523,29 @@ service.
 AsyncMetaRegionLocator.getRegionLocation(booleanreload)
 
 
-default https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureHRegionLocation
-AsyncTableRegionLocator.getRegionLocation(byte[]row)
-Finds the region on which the given row is being 
served.
-
-
-
 (package private) https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureHRegionLocation
 RawAsyncHBaseAdmin.getRegionLocation(byte[]regionNameOrEncodedRegionName)
 Get the region location for the passed region name.
 
 
-
-https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureHRegionLocation
-AsyncTableRegionLocator.getRegionLocation(byte[]row,
- booleanreload)
+
+default https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFutureHRegionLocation
+AsyncTableRegionLocator.getRegionLocation(byte[]row)
 Finds the region on which the given row is being 
served.
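
Taken together, the rows above pin down the reload contract: getRegionLocation(tableName, row, reload) is a cached lookup unless reload is set. A method-level sketch of that contract, assuming a ClusterConnection instance is available (illustration only, not HBase's implementation):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ClusterConnection;

    // Sketch: reload == true forces a fresh meta lookup (relocateRegion),
    // reload == false may serve a cached location (locateRegion).
    static HRegionLocation resolve(ClusterConnection conn, TableName table,
        byte[] row, boolean reload) throws IOException {
      return reload ? conn.relocateRegion(table, row)
                    : conn.locateRegion(table, row);
    }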
 
 
-
+
 

[16/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

2018-03-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
index 00622a7..4d6f99a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
@@ -126,15 +126,15 @@
 
 
 private RpcRetryingCallerFactory
-ConnectionImplementation.rpcCallerFactory
+RegionCoprocessorRpcChannel.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-HTable.rpcCallerFactory
+ConnectionImplementation.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-RegionCoprocessorRpcChannel.rpcCallerFactory
+HTable.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
@@ -155,21 +155,21 @@
 
 
 RpcRetryingCallerFactory
-ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configurationconf)
-Returns a new RpcRetryingCallerFactory from the given 
Configuration.
-
+ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configurationconf)
 
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configurationconf)
+ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configurationconf)
+Returns a new RpcRetryingCallerFactory from the given 
Configuration.
+
 
 
 RpcRetryingCallerFactory
-ClusterConnection.getRpcRetryingCallerFactory()
+ConnectionImplementation.getRpcRetryingCallerFactory()
 
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getRpcRetryingCallerFactory()
+ClusterConnection.getRpcRetryingCallerFactory()
 
 
 static RpcRetryingCallerFactory
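
The truncated static row above is presumably the instantiate(...) factory entry point. A hedged usage sketch (the instantiate(Configuration) overload and the Result type argument are assumptions for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.RpcRetryingCaller;
    import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;

    // Sketch: build a factory from Configuration, then hand out typed callers.
    Configuration conf = HBaseConfiguration.create();
    RpcRetryingCallerFactory factory = RpcRetryingCallerFactory.instantiate(conf);
    RpcRetryingCaller<Result> caller = factory.<Result> newCaller();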

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
index d833faa..f5a73bc 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
@@ -283,14 +283,6 @@ service.
 
 
 private Scan
-AsyncScanSingleRegionRpcRetryingCaller.scan
-
-
-protected Scan
-ScannerCallable.scan
-
-
-private Scan
 ScannerCallableWithReplicas.scan
 
 
@@ -307,6 +299,14 @@ service.
 
 
 private Scan
+AsyncScanSingleRegionRpcRetryingCaller.scan
+
+
+protected Scan
+ScannerCallable.scan
+
+
+private Scan
 TableSnapshotScanner.scan
 
 
@@ -339,11 +339,11 @@ service.
 
 
 protected Scan
-ScannerCallable.getScan()
+ClientScanner.getScan()
 
 
 protected Scan
-ClientScanner.getScan()
+ScannerCallable.getScan()
 
 
 Scan
@@ -638,29 +638,29 @@ service.
 
 
 ResultScanner
-RawAsyncTableImpl.getScanner(Scanscan)
-
-
-ResultScanner
-HTable.getScanner(Scanscan)
-The underlying HTable must 
not be closed.
+AsyncTable.getScanner(Scanscan)
+Returns a scanner on the current table as specified by the 
Scan 
object.
 
 
-
+
 ResultScanner
 Table.getScanner(Scanscan)
 Returns a scanner on the current table as specified by the 
Scan
  object.
 
 
-
+
 ResultScanner
 AsyncTableImpl.getScanner(Scanscan)
 
+
+ResultScanner
+RawAsyncTableImpl.getScanner(Scanscan)
+
 
 ResultScanner
-AsyncTable.getScanner(Scanscan)
-Returns a scanner on the current table as specified by the 
Scan 
object.
+HTable.getScanner(Scanscan)
+The underlying HTable must 
not be closed.
 
 
 
@@ -703,7 +703,9 @@ service.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
-RawAsyncTableImpl.scanAll(Scan scan)
+AsyncTable.scanAll(Scan scan)
+Return all the results that match the given scan 
object.
+
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
@@ -711,9 +713,7 @@ service.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
-AsyncTable.scanAll(Scan scan)
-Return all the results that match the given scan 
object.
-
+RawAsyncTableImpl.scanAll(Scan scan)
 
 
 private Scan
@@ 
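
Per the rows above, AsyncTable.scanAll(Scan) returns CompletableFuture<List<Result>>, i.e. the whole scan buffered in memory. A small usage sketch (asyncTable is assumed to come from an AsyncConnection; process() is a hypothetical consumer):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: suitable for small, bounded scans only, since every Result is
    // held in the returned List at once.
    Scan scan = new Scan().withStartRow(Bytes.toBytes("row-a")).setLimit(100);
    asyncTable.scanAll(scan)
        .thenAccept(results -> results.forEach(r -> process(r))); // process() is hypothetical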

[16/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

2018-03-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
index 6209920..3616545 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/ResultScanner.html
@@ -208,9 +208,9 @@ service.
 
 
 
-default ResultScanner
-AsyncTable.getScanner(byte[]family)
-Gets a scanner on the current table for the given 
family.
+ResultScanner
+HTable.getScanner(byte[]family)
+The underlying HTable must 
not be closed.
 
 
 
@@ -220,16 +220,16 @@ service.
 
 
 
-ResultScanner
-HTable.getScanner(byte[]family)
-The underlying HTable must 
not be closed.
+default ResultScanner
+AsyncTable.getScanner(byte[]family)
+Gets a scanner on the current table for the given 
family.
 
 
 
-default ResultScanner
-AsyncTable.getScanner(byte[]family,
+ResultScanner
+HTable.getScanner(byte[]family,
   byte[]qualifier)
-Gets a scanner on the current table for the given family 
and qualifier.
+The underlying HTable must 
not be closed.
 
 
 
@@ -240,37 +240,37 @@ service.
 
 
 
-ResultScanner
-HTable.getScanner(byte[]family,
+default ResultScanner
+AsyncTable.getScanner(byte[]family,
   byte[]qualifier)
-The underlying HTable must 
not be closed.
+Gets a scanner on the current table for the given family 
and qualifier.
 
 
 
 ResultScanner
-AsyncTable.getScanner(Scanscan)
-Returns a scanner on the current table as specified by the 
Scan 
object.
-
+RawAsyncTableImpl.getScanner(Scanscan)
 
 
 ResultScanner
-Table.getScanner(Scanscan)
-Returns a scanner on the current table as specified by the 
Scan
- object.
+HTable.getScanner(Scanscan)
+The underlying HTable must 
not be closed.
 
 
 
 ResultScanner
-AsyncTableImpl.getScanner(Scanscan)
+Table.getScanner(Scanscan)
+Returns a scanner on the current table as specified by the 
Scan
+ object.
+
 
 
 ResultScanner
-RawAsyncTableImpl.getScanner(Scanscan)
+AsyncTableImpl.getScanner(Scanscan)
 
 
 ResultScanner
-HTable.getScanner(Scanscan)
-The underlying HTable must 
not be closed.
+AsyncTable.getScanner(Scanscan)
+Returns a scanner on the current table as specified by the 
Scan 
object.
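
A usage sketch for the getScanner overloads listed above (table is assumed to come from Connection.getTable; the column family and qualifier names are illustrative):

    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: ResultScanner is Closeable, so try-with-resources releases the
    // server-side scanner even if iteration fails part way through.
    try (ResultScanner scanner =
        table.getScanner(Bytes.toBytes("cf"), Bytes.toBytes("q"))) {
      for (Result result : scanner) {
        // consume each row here
      }
    }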
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
index 8fa3f76..62cfd60 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetriesExhaustedWithDetailsException.html
@@ -106,11 +106,11 @@
 
 
 RetriesExhaustedWithDetailsException
-AsyncRequestFuture.getErrors()
+AsyncRequestFutureImpl.getErrors()
 
 
 RetriesExhaustedWithDetailsException
-AsyncRequestFutureImpl.getErrors()
+AsyncRequestFuture.getErrors()
 
 
 (package private) RetriesExhaustedWithDetailsException

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
index 5b32e1b..eec52bf 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RetryingCallable.html
@@ -234,28 +234,36 @@
 
 
 
+T
+RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable<T> callable,
+  int callTimeout)
+
+
T
RpcRetryingCaller.callWithoutRetries(RetryingCallable<T> callable,
  int callTimeout)
 Call the server once only.
 
 
-
+
 T
-RpcRetryingCallerImpl.callWithoutRetries(RetryingCallable<T> callable,
-  int callTimeout)
+RpcRetryingCallerImpl.callWithRetries(RetryingCallable<T> callable,
+   int callTimeout)
 
-
+
 T
 RpcRetryingCaller.callWithRetries(RetryingCallable<T> callable,
 int callTimeout)
 Retries if invocation fails.
 
 
+
+RetryingCallerInterceptorContext
+NoOpRetryingInterceptorContext.prepare(RetryingCallable<?> callable)
+
 
-T
-RpcRetryingCallerImpl.callWithRetries(RetryingCallable<T> callable,
-   int callTimeout)
+FastFailInterceptorContext
+FastFailInterceptorContext.prepare(RetryingCallable<?> callable)
 
 
 abstract RetryingCallerInterceptorContext
@@ -267,11 +275,13 @@
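
The split above is: callWithoutRetries() is a single prepare/call pass, while callWithRetries() loops and reports every failure back to the callable, so it can refresh cached state (for example a region location) via prepare(reload). A simplified sketch of that loop, not the real RpcRetryingCallerImpl (maxAttempts and pause are assumed parameters):

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import org.apache.hadoop.hbase.client.RetryingCallable;

    // Simplified sketch of the retry contract shown in the table above.
    static <T> T callWithRetriesSketch(RetryingCallable<T> callable,
        int callTimeout, int maxAttempts, long pause) throws IOException {
      for (int tries = 0;; tries++) {
        try {
          callable.prepare(tries != 0);        // reload metadata on every retry
          return callable.call(callTimeout);
        } catch (Exception e) {
          boolean retrying = tries + 1 < maxAttempts;
          callable.throwable(e, retrying);     // let the callable react to failure
          if (!retrying) {
            throw e instanceof IOException ? (IOException) e : new IOException(e);
          }
          try {
            Thread.sleep(callable.sleep(pause, tries)); // backoff grows with tries
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            throw (IOException) new InterruptedIOException().initCause(ie);
          }
        }
      }
    }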
 
 
 

[16/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

2018-03-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
index 9ee12ef..4c42811 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.FlushQueueEntry.html
@@ -51,889 +51,893 @@
 043import 
org.apache.hadoop.hbase.HConstants;
 044import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
 045import 
org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
-046import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-047import 
org.apache.hadoop.hbase.trace.TraceUtil;
-048import 
org.apache.hadoop.hbase.util.Bytes;
-049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-050import 
org.apache.hadoop.hbase.util.HasThread;
-051import 
org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-052import 
org.apache.hadoop.hbase.util.Threads;
-053import 
org.apache.hadoop.ipc.RemoteException;
-054import 
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-055import 
org.apache.htrace.core.TraceScope;
-056import 
org.apache.yetus.audience.InterfaceAudience;
-057import org.slf4j.Logger;
-058import org.slf4j.LoggerFactory;
-059
-060/**
-061 * Thread that flushes cache on request
-062 *
-063 * NOTE: This class extends Thread rather 
than Chore because the sleep time
-064 * can be interrupted when there is 
something to do, rather than the Chore
-065 * sleep time which is invariant.
-066 *
-067 * @see FlushRequester
-068 */
-069@InterfaceAudience.Private
-070class MemStoreFlusher implements 
FlushRequester {
-071  private static final Logger LOG = 
LoggerFactory.getLogger(MemStoreFlusher.class);
-072
-073  private Configuration conf;
-074  // These two data members go together.  
Any entry in the one must have
-075  // a corresponding entry in the 
other.
-076  private final 
BlockingQueue<FlushQueueEntry> flushQueue = new DelayQueue<>();
-077  private final Map<Region, 
FlushRegionEntry> regionsInQueue = new HashMap<>();
-078  private AtomicBoolean wakeupPending = 
new AtomicBoolean();
-079
-080  private final long 
threadWakeFrequency;
-081  private final HRegionServer server;
-082  private final ReentrantReadWriteLock 
lock = new ReentrantReadWriteLock();
-083  private final Object blockSignal = new 
Object();
-084
-085  private long blockingWaitTime;
-086  private final LongAdder 
updatesBlockedMsHighWater = new LongAdder();
-087
-088  private final FlushHandler[] 
flushHandlers;
-089  private 
List<FlushRequestListener> flushRequestListeners = new 
ArrayList<>(1);
-090
-091  private FlushType flushType;
-092
-093  /**
-094   * Singleton instance inserted into 
flush queue used for signaling.
-095   */
-096  private static final FlushQueueEntry 
WAKEUPFLUSH_INSTANCE = new FlushQueueEntry() {
-097@Override
-098public long getDelay(TimeUnit unit) 
{
-099  return 0;
-100}
-101
-102@Override
-103public int compareTo(Delayed o) {
-104  return -1;
-105}
-106
-107@Override
-108public boolean equals(Object obj) {
-109  return obj == this;
-110}
-111
-112@Override
-113public int hashCode() {
-114  return 42;
-115}
-116  };
+046import 
org.apache.hadoop.hbase.trace.TraceUtil;
+047import 
org.apache.hadoop.hbase.util.Bytes;
+048import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+049import 
org.apache.hadoop.hbase.util.HasThread;
+050import 
org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+051import 
org.apache.hadoop.hbase.util.Threads;
+052import 
org.apache.hadoop.ipc.RemoteException;
+053import 
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
+054import 
org.apache.htrace.core.TraceScope;
+055import 
org.apache.yetus.audience.InterfaceAudience;
+056import org.slf4j.Logger;
+057import org.slf4j.LoggerFactory;
+058
+059/**
+060 * Thread that flushes cache on request
+061 *
+062 * NOTE: This class extends Thread rather 
than Chore because the sleep time
+063 * can be interrupted when there is 
something to do, rather than the Chore
+064 * sleep time which is invariant.
+065 *
+066 * @see FlushRequester
+067 */
+068@InterfaceAudience.Private
+069class MemStoreFlusher implements 
FlushRequester {
+070  private static final Logger LOG = 
LoggerFactory.getLogger(MemStoreFlusher.class);
+071
+072  private Configuration conf;
+073  // These two data members go together.  
Any entry in the one must have
+074  // a corresponding entry in the 
other.
+075  private final 
BlockingQueue<FlushQueueEntry> flushQueue = new DelayQueue<>();
+076  private final Map<Region, 
FlushRegionEntry> regionsInQueue = new HashMap<>();
+077  
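
WAKEUPFLUSH_INSTANCE above is a wakeup sentinel: DelayQueue.take() normally waits out each entry's delay, but an entry whose getDelay() is always zero is immediately ready, so offering it wakes a blocked consumer at once. A standalone sketch of the idiom (illustration only):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.DelayQueue;
    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    public class WakeupSentinelSketch {
      // A sentinel whose delay is always zero is immediately "ready".
      static final Delayed WAKEUP = new Delayed() {
        @Override public long getDelay(TimeUnit unit) { return 0; } // always ready
        @Override public int compareTo(Delayed o) { return -1; }    // sorts first
      };

      public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Delayed> queue = new DelayQueue<>();
        queue.add(WAKEUP);           // producer side: request an immediate wakeup
        Delayed head = queue.take(); // consumer side: returns without waiting
        System.out.println(head == WAKEUP); // true
      }
    }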

[16/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
index 7df78ff..d523437 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.html
@@ -41,774 +41,784 @@
 033import 
org.apache.hadoop.hbase.TableName;
 034import 
org.apache.hadoop.hbase.UnknownRegionException;
 035import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-036import 
org.apache.hadoop.hbase.client.MasterSwitchType;
-037import 
org.apache.hadoop.hbase.client.Mutation;
-038import 
org.apache.hadoop.hbase.client.RegionInfo;
-039import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-040import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-041import 
org.apache.hadoop.hbase.client.TableDescriptor;
-042import 
org.apache.hadoop.hbase.exceptions.MergeRegionException;
-043import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-044import 
org.apache.hadoop.hbase.master.CatalogJanitor;
-045import 
org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-046import 
org.apache.hadoop.hbase.master.MasterFileSystem;
-047import 
org.apache.hadoop.hbase.master.RegionState;
-048import 
org.apache.hadoop.hbase.master.RegionState.State;
-049import 
org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
-050import 
org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure;
-051import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-052import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
-053import 
org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
-054import 
org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
-055import 
org.apache.hadoop.hbase.quotas.QuotaExceededException;
-056import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-057import 
org.apache.hadoop.hbase.regionserver.HStoreFile;
-058import 
org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-059import 
org.apache.hadoop.hbase.util.Bytes;
-060import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-061import 
org.apache.hadoop.hbase.util.FSUtils;
-062import 
org.apache.hadoop.hbase.wal.WALSplitter;
-063import 
org.apache.yetus.audience.InterfaceAudience;
-064import org.slf4j.Logger;
-065import org.slf4j.LoggerFactory;
-066import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-067import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-068import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-069import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-070import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
-071
-072/**
-073 * The procedure to merge regions in a 
table.
-074 * This procedure takes an exclusive 
table lock since it is working over multiple regions.
-075 * It holds the lock for the life of the 
procedure.
-076 * <p>Throws an exception on 
construction if it determines the context is hostile to merge (cluster going
-077 * down, master is shutting down, or 
table is disabled).</p>
-078 */
-079@InterfaceAudience.Private
-080public class MergeTableRegionsProcedure
-081extends 
AbstractStateMachineTableProcedure<MergeTableRegionsState> {
-082  private static final Logger LOG = 
LoggerFactory.getLogger(MergeTableRegionsProcedure.class);
-083  private Boolean traceEnabled;
-084  private volatile boolean lock = 
false;
-085  private ServerName regionLocation;
-086  private RegionInfo[] regionsToMerge;
-087  private RegionInfo mergedRegion;
-088  private boolean forcible;
-089
-090  public MergeTableRegionsProcedure() {
-091// Required by the Procedure 
framework to create the procedure on replay
-092  }
-093
-094  public MergeTableRegionsProcedure(final 
MasterProcedureEnv env,
-095  final RegionInfo regionToMergeA, 
final RegionInfo regionToMergeB) throws IOException {
-096this(env, regionToMergeA, 
regionToMergeB, false);
-097  }
-098
-099  public MergeTableRegionsProcedure(final 
MasterProcedureEnv env,
-100  final RegionInfo regionToMergeA, 
final RegionInfo regionToMergeB,
-101  final boolean forcible) throws 
IOException {
-102this(env, new RegionInfo[] 
{regionToMergeA, regionToMergeB}, forcible);
-103  }
-104
-105  public MergeTableRegionsProcedure(final 
MasterProcedureEnv env,
-106  final RegionInfo[] regionsToMerge, 
final boolean forcible)
-107  throws IOException {
-108super(env);
-109
-110// Check daughter regions and make 
sure that we have valid daughter regions
-111// before 
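
The javadoc above promises fail-fast construction. A hedged illustration of what such precondition checks look like (the real checks live in MergeTableRegionsProcedure itself; RegionInfo.areAdjacent is assumed available):

    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.exceptions.MergeRegionException;

    // Sketch only: validate that a merge makes sense before the procedure
    // is ever scheduled, as the class javadoc above describes.
    static void checkRegionsToMerge(RegionInfo a, RegionInfo b, boolean forcible)
        throws MergeRegionException {
      if (!a.getTable().equals(b.getTable())) {
        throw new MergeRegionException("Regions belong to different tables");
      }
      if (!forcible && !RegionInfo.areAdjacent(a, b)) {
        throw new MergeRegionException(
            "Regions are not adjacent; pass forcible=true to merge anyway");
      }
    }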

[16/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

2018-03-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
index b99f924..2bb6cea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.BulkLoadObserverOperation.html
@@ -37,1779 +37,1734 @@
 029import java.util.UUID;
 030import 
java.util.concurrent.ConcurrentHashMap;
 031import 
java.util.concurrent.ConcurrentMap;
-032import java.util.regex.Matcher;
-033
-034import 
org.apache.commons.collections4.map.AbstractReferenceMap;
-035import 
org.apache.commons.collections4.map.ReferenceMap;
-036import 
org.apache.hadoop.conf.Configuration;
-037import org.apache.hadoop.fs.FileSystem;
-038import org.apache.hadoop.fs.Path;
-039import org.apache.hadoop.hbase.Cell;
-040import 
org.apache.hadoop.hbase.CompareOperator;
-041import 
org.apache.hadoop.hbase.Coprocessor;
-042import 
org.apache.hadoop.hbase.HBaseConfiguration;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.RawCellBuilder;
-045import 
org.apache.hadoop.hbase.RawCellBuilderFactory;
-046import 
org.apache.hadoop.hbase.ServerName;
-047import 
org.apache.hadoop.hbase.SharedConnection;
-048import 
org.apache.hadoop.hbase.client.Append;
-049import 
org.apache.hadoop.hbase.client.Connection;
-050import 
org.apache.hadoop.hbase.client.Delete;
-051import 
org.apache.hadoop.hbase.client.Durability;
-052import 
org.apache.hadoop.hbase.client.Get;
-053import 
org.apache.hadoop.hbase.client.Increment;
-054import 
org.apache.hadoop.hbase.client.Mutation;
-055import 
org.apache.hadoop.hbase.client.Put;
-056import 
org.apache.hadoop.hbase.client.RegionInfo;
-057import 
org.apache.hadoop.hbase.client.Result;
-058import 
org.apache.hadoop.hbase.client.Scan;
-059import 
org.apache.hadoop.hbase.client.TableDescriptor;
-060import 
org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
-061import 
org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
-062import 
org.apache.hadoop.hbase.coprocessor.CoprocessorException;
-063import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-064import 
org.apache.hadoop.hbase.coprocessor.CoprocessorService;
-065import 
org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity;
-066import 
org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
-067import 
org.apache.hadoop.hbase.coprocessor.EndpointObserver;
-068import 
org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
-069import 
org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
-070import 
org.apache.hadoop.hbase.coprocessor.ObserverContext;
-071import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
-072import 
org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-073import 
org.apache.hadoop.hbase.coprocessor.RegionObserver;
-074import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-075import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-076import 
org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-077import 
org.apache.hadoop.hbase.io.Reference;
-078import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-079import 
org.apache.hadoop.hbase.metrics.MetricRegistry;
-080import 
org.apache.hadoop.hbase.regionserver.Region.Operation;
-081import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-082import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-083import 
org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
-084import 
org.apache.hadoop.hbase.security.User;
-085import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-086import 
org.apache.hadoop.hbase.util.Bytes;
-087import 
org.apache.hadoop.hbase.util.CoprocessorClassLoader;
-088import 
org.apache.hadoop.hbase.util.Pair;
-089import 
org.apache.hadoop.hbase.wal.WALEdit;
-090import 
org.apache.hadoop.hbase.wal.WALKey;
-091import 
org.apache.yetus.audience.InterfaceAudience;
-092import org.slf4j.Logger;
-093import org.slf4j.LoggerFactory;
-094
-095/**
-096 * Implements the coprocessor environment 
and runtime support for coprocessors
-097 * loaded within a {@link Region}.
-098 */
-099@InterfaceAudience.Private
-100public class RegionCoprocessorHost
-101extends 
CoprocessorHost<RegionCoprocessor, RegionCoprocessorEnvironment> {
-102
-103  private static final Logger LOG = 
LoggerFactory.getLogger(RegionCoprocessorHost.class);
-104  // The shared data map
-105  private static final 
ReferenceMap<String, ConcurrentMap<String, Object>> SHARED_DATA_MAP 
=
-106  new 
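
SHARED_DATA_MAP above (its initializer is truncated here) is a commons-collections4 ReferenceMap with hard keys and weak values, so a table's shared state can be garbage collected once no coprocessor still references it. A sketch of that construction, assuming commons-collections4 on the classpath:

    import java.util.Collections;
    import java.util.Map;
    import java.util.concurrent.ConcurrentMap;
    import org.apache.commons.collections4.map.AbstractReferenceMap;
    import org.apache.commons.collections4.map.ReferenceMap;

    // Sketch: hard keys, weak values; an entry disappears once no region
    // coprocessor still holds its shared-state map.
    Map<String, ConcurrentMap<String, Object>> sharedData =
        Collections.synchronizedMap(new ReferenceMap<>(
            AbstractReferenceMap.ReferenceStrength.HARD,
            AbstractReferenceMap.ReferenceStrength.WEAK));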

[16/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

2018-03-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
index a190a33..82da4d7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatchOperation.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class HRegion.MutationBatchOperation
+static class HRegion.MutationBatchOperation
extends HRegion.BatchOperation<Mutation>
Batch of mutation operations. Base class is shared with HRegion.ReplayBatchOperation
as most
 of the logic is the same.
@@ -342,7 +342,7 @@ extends 
 
 nonceGroup
-privatelong nonceGroup
+privatelong nonceGroup
 
 
 
@@ -351,7 +351,7 @@ extends 
 
 nonce
-privatelong nonce
+privatelong nonce
 
 
 
@@ -368,7 +368,7 @@ extends 
 
 MutationBatchOperation
-publicMutationBatchOperation(HRegionregion,
+publicMutationBatchOperation(HRegionregion,
   Mutation[]operations,
   booleanatomic,
   longnonceGroup,
@@ -389,7 +389,7 @@ extends 
 
 getMutation
-publicMutationgetMutation(intindex)
+publicMutationgetMutation(intindex)
 
 Specified by:
getMutation in
 class HRegion.BatchOperation<Mutation>
@@ -402,7 +402,7 @@ extends 
 
 getNonceGroup
-publiclonggetNonceGroup(intindex)
+publiclonggetNonceGroup(intindex)
 
 Specified by:
getNonceGroup in
 class HRegion.BatchOperation<Mutation>
@@ -415,7 +415,7 @@ extends 
 
 getNonce
-publiclonggetNonce(intindex)
+publiclonggetNonce(intindex)
 
 Specified by:
getNonce in
 class HRegion.BatchOperation<Mutation>
@@ -428,7 +428,7 @@ extends 
 
 getMutationsForCoprocs
-publicMutation[]getMutationsForCoprocs()
+publicMutation[]getMutationsForCoprocs()
 Description copied from 
class:HRegion.BatchOperation
 This method is potentially expensive and useful mostly for 
non-replay CP path.
 
@@ -443,7 +443,7 @@ extends 
 
 isInReplay
-publicbooleanisInReplay()
+publicbooleanisInReplay()
 
 Specified by:
isInReplay in
 class HRegion.BatchOperation<Mutation>
@@ -456,7 +456,7 @@ extends 
 
 getOrigLogSeqNum
-publiclonggetOrigLogSeqNum()
+publiclonggetOrigLogSeqNum()
 
 Specified by:
getOrigLogSeqNum in
 class HRegion.BatchOperation<Mutation>
@@ -469,7 +469,7 @@ extends 
 
 startRegionOperation
-publicvoidstartRegionOperation()
+publicvoidstartRegionOperation()
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:
@@ -485,7 +485,7 @@ extends 
 
 closeRegionOperation
-publicvoidcloseRegionOperation()
+publicvoidcloseRegionOperation()
   throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:
@@ -501,7 +501,7 @@ extends 
 
 checkAndPreparePut
-publicvoidcheckAndPreparePut(Putp)
+publicvoidcheckAndPreparePut(Putp)
 throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
class:HRegion.BatchOperation
 Implement any Put request specific check and prepare logic 
here. Please refer to
@@ -520,7 +520,7 @@ extends 
 
 checkAndPrepare
-publicvoidcheckAndPrepare()
+publicvoidcheckAndPrepare()
  throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 Description copied from 
class:HRegion.BatchOperation
 Validates each mutation and prepares a batch for write. If 
necessary (non-replay case), runs
@@ -542,7 +542,7 @@ extends 
 
 prepareMiniBatchOperations
-publicvoidprepareMiniBatchOperations(MiniBatchOperationInProgressMutationminiBatchOp,
-public void prepareMiniBatchOperations(MiniBatchOperationInProgress<Mutation> miniBatchOp,
+public void prepareMiniBatchOperations(MiniBatchOperationInProgress<Mutation> miniBatchOp,
long timestamp,
https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<Region.RowLock> acquiredRowLocks)
 title="class or interface in java.io">IOException
@@ -563,7 +563,7 @@ extends 
 
 buildWALEdits
-publichttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListPairNonceKey,WALEditbuildWALEdits(MiniBatchOperationInProgressMutation
 ;miniBatchOp)
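
A hedged outline of the lifecycle these methods imply, with the WAL step in between (pseudocode-level sketch; the real driver lives in HRegion, and op, miniBatch, timestamp and acquiredRowLocks are assumed to exist):

    // Sketch only: "op" stands for an HRegion.BatchOperation<Mutation>.
    op.checkAndPrepare();        // validate mutations, run pre-batch coprocessors
    op.startRegionOperation();   // acquire region-level resources
    try {
      op.prepareMiniBatchOperations(miniBatch, timestamp, acquiredRowLocks);
      List<Pair<NonceKey, WALEdit>> walEdits = op.buildWALEdits(miniBatch);
      // ... append the edits to the WAL, apply to the memstore, sync ...
    } finally {
      op.closeRegionOperation(); // release resources even when the batch fails
    }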

[16/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

2018-03-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
index 65d4b29..a6c6bcc 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
@@ -113,17 +113,17 @@
 
 
 
+private Batch.Callback<CResult>
+AsyncRequestFutureImpl.callback
+
+
private Batch.Callback<T>
AsyncProcessTask.callback

-
+
private Batch.Callback<T>
AsyncProcessTask.Builder.callback
 
-
-private Batch.Callback<CResult>
-AsyncRequestFutureImpl.callback
-
 
 
 
@@ -148,50 +148,42 @@
 
 
<R> void
-HTable.batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<? extends Row> actions,
- https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[] results,
- Batch.Callback<R> callback)
-
-
-<R> void
 Table.batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<? extends Row> actions,
  https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[] results,
  Batch.Callback<R> callback)
 Same as Table.batch(List,
 Object[]), but with a callback.
 
 
+
+<R> void
+HTable.batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List<? extends Row> actions,
+ https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[] results,
+ Batch.Callback<R> callback)
+
 
<R extends 
com.google.protobuf.Message> void
-HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
+Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
com.google.protobuf.Messagerequest,
byte[]startKey,
byte[]endKey,
RresponsePrototype,
-   Batch.CallbackRcallback)
+   Batch.CallbackRcallback)
+Creates an instance of the given Service 
subclass for each table
+ region spanning the range from the startKey row to 
endKey row (inclusive); all
+ the invocations to the same region server will be batched into one call.
+
 
 
<R extends 
com.google.protobuf.Message> void
-Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
+HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
com.google.protobuf.Messagerequest,
byte[]startKey,
byte[]endKey,
RresponsePrototype,
-   Batch.CallbackRcallback)
-Creates an instance of the given Service 
subclass for each table
- region spanning the range from the startKey row to 
endKey row (inclusive), all
- the invocations to the same region server will be batched into one call.
-
+   Batch.CallbackRcallback)
 
 
<T extends 
com.google.protobuf.Service, R> void
-HTable.coprocessorService(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">ClassTservice,
-  byte[]startKey,
-  byte[]endKey,
-  Batch.CallT,Rcallable,
-  Batch.CallbackRcallback)
-
-
-T extends 
com.google.protobuf.Service,Rvoid
 Table.coprocessorService(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">ClassTservice,
   byte[]startKey,
   byte[]endKey,
@@ -203,6 +195,14 @@
  with each Service instance.
 
 
+
+<T extends 
com.google.protobuf.Service, R> void
+HTable.coprocessorService(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">ClassTservice,
+  byte[]startKey,
+  byte[]endKey,
+  Batch.CallT,Rcallable,
+  Batch.CallbackRcallback)
+
 
static <R> void
 HTable.doBatchWithCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions,


[16/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

2018-03-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
index 4d6f99a..00622a7 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
@@ -126,15 +126,15 @@
 
 
 private RpcRetryingCallerFactory
-RegionCoprocessorRpcChannel.rpcCallerFactory
+ConnectionImplementation.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-ConnectionImplementation.rpcCallerFactory
+HTable.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-HTable.rpcCallerFactory
+RegionCoprocessorRpcChannel.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
@@ -155,21 +155,21 @@
 
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configurationconf)
-
-
-RpcRetryingCallerFactory
 ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configurationconf)
 Returns a new RpcRetryingCallerFactory from the given 
Configuration.
 
 
+
+RpcRetryingCallerFactory
+ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configurationconf)
+
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getRpcRetryingCallerFactory()
+ClusterConnection.getRpcRetryingCallerFactory()
 
 
 RpcRetryingCallerFactory
-ClusterConnection.getRpcRetryingCallerFactory()
+ConnectionImplementation.getRpcRetryingCallerFactory()
 
 
 static RpcRetryingCallerFactory

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
index f5a73bc..d833faa 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
@@ -283,27 +283,27 @@ service.
 
 
 private Scan
-ScannerCallableWithReplicas.scan
+AsyncScanSingleRegionRpcRetryingCaller.scan
 
 
 protected Scan
-ClientScanner.scan
+ScannerCallable.scan
 
 
 private Scan
-AsyncClientScanner.scan
+ScannerCallableWithReplicas.scan
 
 
-private Scan
-AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.scan
+protected Scan
+ClientScanner.scan
 
 
 private Scan
-AsyncScanSingleRegionRpcRetryingCaller.scan
+AsyncClientScanner.scan
 
 
-protected Scan
-ScannerCallable.scan
+private Scan
+AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.scan
 
 
 private Scan
@@ -339,11 +339,11 @@ service.
 
 
 protected Scan
-ClientScanner.getScan()
+ScannerCallable.getScan()
 
 
 protected Scan
-ScannerCallable.getScan()
+ClientScanner.getScan()
 
 
 Scan
@@ -638,29 +638,29 @@ service.
 
 
 ResultScanner
-AsyncTable.getScanner(Scanscan)
-Returns a scanner on the current table as specified by the 
Scan 
object.
-
+RawAsyncTableImpl.getScanner(Scanscan)
 
 
 ResultScanner
-Table.getScanner(Scanscan)
-Returns a scanner on the current table as specified by the 
Scan
- object.
+HTable.getScanner(Scanscan)
+The underlying HTable must 
not be closed.
 
 
 
 ResultScanner
-AsyncTableImpl.getScanner(Scanscan)
+Table.getScanner(Scanscan)
+Returns a scanner on the current table as specified by the 
Scan
+ object.
+
 
 
 ResultScanner
-RawAsyncTableImpl.getScanner(Scanscan)
+AsyncTableImpl.getScanner(Scanscan)
 
 
 ResultScanner
-HTable.getScanner(Scanscan)
-The underlying HTable must 
not be closed.
+AsyncTable.getScanner(Scanscan)
+Returns a scanner on the current table as specified by the 
Scan 
object.
 
 
 
@@ -703,9 +703,7 @@ service.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
-AsyncTable.scanAll(Scanscan)
-Return all the results that match the given scan 
object.
-
+RawAsyncTableImpl.scanAll(Scanscan)
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
@@ -713,7 +711,9 @@ service.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 

[16/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

2018-03-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
index df5fa53..8fffb89 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.CollectingVisitor.html
@@ -42,1927 +42,2060 @@
 034import java.util.TreeMap;
 035import java.util.regex.Matcher;
 036import java.util.regex.Pattern;
-037import 
org.apache.hadoop.conf.Configuration;
-038import 
org.apache.hadoop.hbase.Cell.Type;
-039import 
org.apache.hadoop.hbase.client.Connection;
-040import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-041import 
org.apache.hadoop.hbase.client.Consistency;
-042import 
org.apache.hadoop.hbase.client.Delete;
-043import 
org.apache.hadoop.hbase.client.Get;
-044import 
org.apache.hadoop.hbase.client.Mutation;
-045import 
org.apache.hadoop.hbase.client.Put;
-046import 
org.apache.hadoop.hbase.client.RegionInfo;
-047import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-048import 
org.apache.hadoop.hbase.client.RegionLocator;
-049import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-050import 
org.apache.hadoop.hbase.client.RegionServerCallable;
-051import 
org.apache.hadoop.hbase.client.Result;
-052import 
org.apache.hadoop.hbase.client.ResultScanner;
-053import 
org.apache.hadoop.hbase.client.Scan;
-054import 
org.apache.hadoop.hbase.client.Table;
-055import 
org.apache.hadoop.hbase.client.TableState;
-056import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-057import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-058import 
org.apache.hadoop.hbase.master.RegionState;
-059import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-060import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-061import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-062import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-063import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
-064import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
-065import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
-066import 
org.apache.hadoop.hbase.util.Bytes;
-067import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-068import 
org.apache.hadoop.hbase.util.ExceptionUtil;
-069import 
org.apache.hadoop.hbase.util.Pair;
-070import 
org.apache.hadoop.hbase.util.PairOfSameType;
-071import 
org.apache.yetus.audience.InterfaceAudience;
-072import org.slf4j.Logger;
-073import org.slf4j.LoggerFactory;
-074
-075import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-076
-077/**
-078 * <p>
-079 * Read/write operations on region and 
assignment information stored in <code>hbase:meta</code>.
-080 * </p>
+037import java.util.stream.Collectors;
+038import java.util.stream.Stream;
+039import 
org.apache.hadoop.conf.Configuration;
+040import 
org.apache.hadoop.hbase.Cell.Type;
+041import 
org.apache.hadoop.hbase.client.Connection;
+042import 
org.apache.hadoop.hbase.client.ConnectionFactory;
+043import 
org.apache.hadoop.hbase.client.Consistency;
+044import 
org.apache.hadoop.hbase.client.Delete;
+045import 
org.apache.hadoop.hbase.client.Get;
+046import 
org.apache.hadoop.hbase.client.Mutation;
+047import 
org.apache.hadoop.hbase.client.Put;
+048import 
org.apache.hadoop.hbase.client.RegionInfo;
+049import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
+050import 
org.apache.hadoop.hbase.client.RegionLocator;
+051import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
+052import 
org.apache.hadoop.hbase.client.RegionServerCallable;
+053import 
org.apache.hadoop.hbase.client.Result;
+054import 
org.apache.hadoop.hbase.client.ResultScanner;
+055import 
org.apache.hadoop.hbase.client.Scan;
+056import 
org.apache.hadoop.hbase.client.Table;
+057import 
org.apache.hadoop.hbase.client.TableState;
+058import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
+059import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+060import 
org.apache.hadoop.hbase.master.RegionState;
+061import 
org.apache.hadoop.hbase.master.RegionState.State;
+062import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+063import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+064import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
+065import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+066import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+067import 
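
A sketch of a typical read-side MetaTableAccessor call, the same one ConnectionImplementation.isTableAvailable uses elsewhere in this site (connection and tableName are assumed to exist):

    import java.util.List;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.util.Pair;

    // Sketch: list a table's regions and their current servers from hbase:meta.
    List<Pair<RegionInfo, ServerName>> locations =
        MetaTableAccessor.getTableRegionsAndLocations(connection, tableName, true);
    for (Pair<RegionInfo, ServerName> pair : locations) {
      System.out.println(pair.getFirst().getEncodedName() + " -> " + pair.getSecond());
    }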

[16/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

2018-03-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/DoNotRetryRegionException.html
--
diff --git 
a/apidocs/org/apache/hadoop/hbase/client/DoNotRetryRegionException.html 
b/apidocs/org/apache/hadoop/hbase/client/DoNotRetryRegionException.html
index 8fd0886..343e4bb 100644
--- a/apidocs/org/apache/hadoop/hbase/client/DoNotRetryRegionException.html
+++ b/apidocs/org/apache/hadoop/hbase/client/DoNotRetryRegionException.html
@@ -91,16 +91,16 @@
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
 
 
-http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">java.lang.Throwable
+https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">java.lang.Throwable
 
 
-http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">java.lang.Exception
+https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true;
 title="class or interface in java.lang">java.lang.Exception
 
 
-http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">java.io.IOException
+https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">java.io.IOException
 
 
 org.apache.hadoop.hbase.HBaseIOException
@@ -128,7 +128,7 @@
 
 
 All Implemented Interfaces:
-http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable
+https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable
 
 
 Direct Known Subclasses:
@@ -165,7 +165,7 @@ extends DoNotRetryRegionException()
 
 
-DoNotRetryRegionException(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Strings)
+DoNotRetryRegionException(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Strings)
 
 
 
@@ -180,15 +180,15 @@ extends 
 
 
-Methods inherited from classjava.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwable
-http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#addSuppressed-java.lang.Throwable-;
 title="class or interface in java.lang">addSuppressed, http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#fillInStackTrace--;
 title="class or interface in java.lang">fillInStackTrace, http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getCause--;
 title="class or interface in java.lang">getCause, http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getLocalizedMessage--;
 title="class or interface in java.lang">getLocalizedMessage, http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getMessage--;
 title="class or interface in java.lang">getMessage, http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getStackTrace--;
 title="class or inter
 face in java.lang">getStackTrace, http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#getSuppressed--;
 title="class or interface in java.lang">getSuppressed, http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#initCause-java.lang.Throwable-;
 title="class or interface in java.lang">initCause, http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace--;
 title="class or interface in java.lang">printStackTrace, http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintStream-;
 title="class or interface in java.lang">printStackTrace, http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#printStackTrace-java.io.PrintWriter-;
 title="class or interface in java.lang">printStackTrace, http://docs.oracle.com/javase/8/docs/api/java/lan
 
g/Throwable.html?is-external=true#setStackTrace-java.lang.StackTraceElement:A-" 
title="class or interface in java.lang">setStackTrace, http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true#toString--;
 title="class or interface in java.lang">toString
+Methods inherited from classjava.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 

[16/51] [partial] hbase-site git commit: Published site at .

2018-02-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
index 93f650f..d7aa8b1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
@@ -546,1472 +546,1464 @@
 538return this.conf;
 539  }
 540
-541  /**
-542   * @return true if the master is 
running, throws an exception otherwise
-543   * @throws 
org.apache.hadoop.hbase.MasterNotRunningException - if the master is not 
running
-544   * @deprecated this has been deprecated 
without a replacement
-545   */
-546  @Deprecated
-547  @Override
-548  public boolean isMasterRunning()
-549  throws MasterNotRunningException, 
ZooKeeperConnectionException {
-550// When getting the master 
connection, we check it's running,
-551// so if there is no exception, it 
means we've been able to get a
-552// connection on a running master
-553MasterKeepAliveConnection m = 
getKeepAliveMasterService();
-554m.close();
-555return true;
-556  }
-557
-558  @Override
-559  public HRegionLocation 
getRegionLocation(final TableName tableName,
-560  final byte [] row, boolean 
reload)
-561  throws IOException {
-562return reload? 
relocateRegion(tableName, row): locateRegion(tableName, row);
-563  }
-564
-565
-566  @Override
-567  public boolean isTableEnabled(TableName 
tableName) throws IOException {
-568return 
getTableState(tableName).inStates(TableState.State.ENABLED);
-569  }
-570
-571  @Override
-572  public boolean 
isTableDisabled(TableName tableName) throws IOException {
-573return 
getTableState(tableName).inStates(TableState.State.DISABLED);
-574  }
-575
-576  @Override
-577  public boolean isTableAvailable(final 
TableName tableName, @Nullable final byte[][] splitKeys)
-578  throws IOException {
-579if (this.closed) {
-580  throw new IOException(toString() + 
" closed");
-581}
-582try {
-583  if (!isTableEnabled(tableName)) {
-584LOG.debug("Table " + tableName + 
" not enabled");
-585return false;
-586  }
-587  List<Pair<RegionInfo, 
ServerName>> locations =
-588
MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true);
-589
-590  int notDeployed = 0;
-591  int regionCount = 0;
-592  for (Pair<RegionInfo, 
ServerName> pair : locations) {
-593RegionInfo info = 
pair.getFirst();
-594if (pair.getSecond() == null) {
-595  if (LOG.isDebugEnabled()) {
-596LOG.debug("Table " + 
tableName + " has not deployed region " + pair.getFirst()
-597.getEncodedName());
-598  }
-599  notDeployed++;
-600} else if (splitKeys != null
-601 && 
!Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
-602  for (byte[] splitKey : 
splitKeys) {
-603// Just check if the splitkey 
is available
-604if 
(Bytes.equals(info.getStartKey(), splitKey)) {
-605  regionCount++;
-606  break;
-607}
-608  }
-609} else {
-610  // Always empty start row 
should be counted
-611  regionCount++;
-612}
-613  }
-614if (notDeployed > 0) {
-615if (LOG.isDebugEnabled()) {
-616  LOG.debug("Table " + tableName 
+ " has " + notDeployed + " regions");
-617}
-618return false;
-619  } else if (splitKeys != null 
 && regionCount != splitKeys.length + 1) {
-620if (LOG.isDebugEnabled()) {
-621  LOG.debug("Table " + tableName 
+ " expected to have " + (splitKeys.length + 1)
-622  + " regions, but only " + 
regionCount + " available");
-623}
-624return false;
-625  } else {
-626if (LOG.isDebugEnabled()) {
-627  LOG.debug("Table " + tableName 
+ " should be available");
-628}
-629return true;
-630  }
-631} catch (TableNotFoundException tnfe) 
{
-632  LOG.warn("Table " + tableName + " 
not enabled, it does not exist");
-633  return false;
-634}
-635  }
-636
-637  @Override
-638  public HRegionLocation 
locateRegion(final byte[] regionName) throws IOException {
-639RegionLocations locations = 
locateRegion(RegionInfo.getTable(regionName),
-640  RegionInfo.getStartKey(regionName), 
false, true);
-641return locations == null ? null : 
locations.getRegionLocation();
+541  private void checkClosed() throws 
DoNotRetryIOException {
+542if (this.closed) {
+543  throw new 
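
The tail of this hunk is truncated; a reconstruction of the helper it introduces, with the exception message inferred from the old inline guard shown above (so treat the exact wording as an assumption):

    import org.apache.hadoop.hbase.DoNotRetryIOException;

    // Sketch: the repeated "if (closed) throw" guard is hoisted into one
    // helper that fails fast without triggering client retries.
    private void checkClosed() throws DoNotRetryIOException {
      if (this.closed) {
        throw new DoNotRetryIOException(toString() + " closed");
      }
    }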

[16/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.Visitor.html
@@ -73,229 +73,229 @@
 065import 
java.util.concurrent.TimeoutException;
 066import 
java.util.concurrent.atomic.AtomicBoolean;
 067import 
java.util.concurrent.atomic.AtomicInteger;
-068import 
java.util.concurrent.atomic.AtomicLong;
-069import 
java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import 
java.util.concurrent.locks.ReadWriteLock;
-072import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import 
org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import 
org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import 
org.apache.hadoop.hbase.CellBuilderType;
-081import 
org.apache.hadoop.hbase.CellComparator;
-082import 
org.apache.hadoop.hbase.CellComparatorImpl;
-083import 
org.apache.hadoop.hbase.CellScanner;
-084import 
org.apache.hadoop.hbase.CellUtil;
-085import 
org.apache.hadoop.hbase.CompareOperator;
-086import 
org.apache.hadoop.hbase.CompoundConfiguration;
-087import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-088import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-089import 
org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import 
org.apache.hadoop.hbase.HConstants;
-091import 
org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import 
org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import 
org.apache.hadoop.hbase.HRegionInfo;
-094import 
org.apache.hadoop.hbase.KeyValue;
-095import 
org.apache.hadoop.hbase.KeyValueUtil;
-096import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-097import 
org.apache.hadoop.hbase.NotServingRegionException;
-098import 
org.apache.hadoop.hbase.PrivateCellUtil;
-099import 
org.apache.hadoop.hbase.RegionTooBusyException;
-100import 
org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import 
org.apache.hadoop.hbase.UnknownScannerException;
-104import 
org.apache.hadoop.hbase.client.Append;
-105import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import 
org.apache.hadoop.hbase.client.CompactionState;
-107import 
org.apache.hadoop.hbase.client.Delete;
-108import 
org.apache.hadoop.hbase.client.Durability;
-109import 
org.apache.hadoop.hbase.client.Get;
-110import 
org.apache.hadoop.hbase.client.Increment;
-111import 
org.apache.hadoop.hbase.client.IsolationLevel;
-112import 
org.apache.hadoop.hbase.client.Mutation;
-113import 
org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import 
org.apache.hadoop.hbase.client.Put;
-115import 
org.apache.hadoop.hbase.client.RegionInfo;
-116import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import 
org.apache.hadoop.hbase.client.Result;
-118import 
org.apache.hadoop.hbase.client.RowMutations;
-119import 
org.apache.hadoop.hbase.client.Scan;
-120import 
org.apache.hadoop.hbase.client.TableDescriptor;
-121import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import 
org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import 
org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import 
org.apache.hadoop.hbase.filter.FilterWrapper;
-131import 
org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import 
org.apache.hadoop.hbase.io.HFileLink;
-133import 
org.apache.hadoop.hbase.io.HeapSize;
-134import 
org.apache.hadoop.hbase.io.TimeRange;
-135import 
org.apache.hadoop.hbase.io.hfile.HFile;
-136import 
org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import 
org.apache.hadoop.hbase.ipc.RpcCall;
-139import 
org.apache.hadoop.hbase.ipc.RpcServer;
-140import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import 
org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import 

[16/51] [partial] hbase-site git commit: Published site at .

2018-02-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index bd13b53..802b925 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -900,7600 +900,7598 @@
 892if 
(this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
 893  status.setStatus("Writing region 
info on filesystem");
 894  fs.checkRegionInfoOnFilesystem();
-895} else {
-896  if (LOG.isDebugEnabled()) {
-897LOG.debug("Skipping creation of 
.regioninfo file for " + this.getRegionInfo());
-898  }
-899}
-900
-901// Initialize all the HStores
-902status.setStatus("Initializing all 
the Stores");
-903long maxSeqId = 
initializeStores(reporter, status);
-904this.mvcc.advanceTo(maxSeqId);
-905if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-906  Collection<HStore> stores = this.stores.values();
-907  try {
-908// update the stores that we are 
replaying
-909
stores.forEach(HStore::startReplayingFromWAL);
-910// Recover any edits if 
available.
-911maxSeqId = Math.max(maxSeqId,
-912  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-913// Make sure mvcc is up to max.
-914this.mvcc.advanceTo(maxSeqId);
-915  } finally {
-916// update the stores that we are 
done replaying
-917
stores.forEach(HStore::stopReplayingFromWAL);
-918  }
-919}
-920this.lastReplayedOpenRegionSeqId = 
maxSeqId;
+895}
+896
+897// Initialize all the HStores
+898status.setStatus("Initializing all 
the Stores");
+899long maxSeqId = 
initializeStores(reporter, status);
+900this.mvcc.advanceTo(maxSeqId);
+901if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+902  Collection<HStore> stores = this.stores.values();
+903  try {
+904// update the stores that we are 
replaying
+905
stores.forEach(HStore::startReplayingFromWAL);
+906// Recover any edits if 
available.
+907maxSeqId = Math.max(maxSeqId,
+908  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+909// Make sure mvcc is up to max.
+910this.mvcc.advanceTo(maxSeqId);
+911  } finally {
+912// update the stores that we are 
done replaying
+913
stores.forEach(HStore::stopReplayingFromWAL);
+914  }
+915}
+916this.lastReplayedOpenRegionSeqId = 
maxSeqId;
+917
+918
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+919this.writestate.flushRequested = 
false;
+920this.writestate.compacting.set(0);
 921
-922
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-923this.writestate.flushRequested = 
false;
-924this.writestate.compacting.set(0);
-925
-926if (this.writestate.writesEnabled) 
{
-927  // Remove temporary data left over 
from old regions
-928  status.setStatus("Cleaning up 
temporary data from old regions");
-929  fs.cleanupTempDir();
-930}
-931
-932if (this.writestate.writesEnabled) 
{
-933  status.setStatus("Cleaning up 
detritus from prior splits");
-934  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-935  // these directories here on open.  
We may be opening a region that was
-936  // being split but we crashed in 
the middle of it all.
-937  fs.cleanupAnySplitDetritus();
-938  fs.cleanupMergesDir();
-939}
-940
-941// Initialize split policy
-942this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-943
-944// Initialize flush policy
-945this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-946
-947long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-948for (HStore store: stores.values()) 
{
-949  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-950}
-951
-952// Use maximum of log sequenceid or 
that which was found in stores
-953// (particularly if no recovered 
edits, seqid will be -1).
-954long nextSeqid = maxSeqId;
-955if (this.writestate.writesEnabled) 
{
-956  nextSeqid = 
WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
-957  this.fs.getRegionDir(), 
nextSeqid, 1);
-958} else {
-959  nextSeqid++;
-960}
-961
-962LOG.info("Onlined " + 
this.getRegionInfo().getShortNameToLog() +
-963  "; next sequenceid=" + 
nextSeqid);
+922if (this.writestate.writesEnabled) 
{
+923  // Remove 
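The hunk above is the region-open replay sequence: each store is switched into WAL-replay mode, recovered edits are applied, the MVCC read point is advanced past them, and replay mode is always exited in a finally block. Consolidating the wrapped diff lines into plain code (all names are taken from the diff; the enclosing HRegion state is assumed):

  long maxSeqId = initializeStores(reporter, status);
  this.mvcc.advanceTo(maxSeqId);
  if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
    Collection<HStore> stores = this.stores.values();
    try {
      // mark the stores as replaying before applying recovered edits
      stores.forEach(HStore::startReplayingFromWAL);
      maxSeqId = Math.max(maxSeqId,
        replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
      // make the replayed edits visible to readers
      this.mvcc.advanceTo(maxSeqId);
    } finally {
      // always leave replay mode, even if replay failed
      stores.forEach(HStore::stopReplayingFromWAL);
    }
  }
  this.lastReplayedOpenRegionSeqId = maxSeqId;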

[16/51] [partial] hbase-site git commit: Published site at .

2018-02-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
index fe5ef34..7161108 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
@@ -166,27 +166,27 @@
 
 
 DataBlockEncoder.EncodedSeeker
-RowIndexCodecV1.createSeeker(CellComparator comparator,
+CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator,
 HFileBlockDecodingContext decodingCtx)
 
 
 DataBlockEncoder.EncodedSeeker
-CopyKeyDataBlockEncoder.createSeeker(CellComparator comparator,
+PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator,
 HFileBlockDecodingContext decodingCtx)
 
 
 DataBlockEncoder.EncodedSeeker
-DiffKeyDeltaEncoder.createSeeker(CellComparator comparator,
+FastDiffDeltaEncoder.createSeeker(CellComparator comparator,
 HFileBlockDecodingContext decodingCtx)
 
 
 DataBlockEncoder.EncodedSeeker
-FastDiffDeltaEncoder.createSeeker(CellComparator comparator,
+DiffKeyDeltaEncoder.createSeeker(CellComparator comparator,
 HFileBlockDecodingContext decodingCtx)
 
 
 DataBlockEncoder.EncodedSeeker
-PrefixKeyDeltaEncoder.createSeeker(CellComparator comparator,
+RowIndexCodecV1.createSeeker(CellComparator comparator,
 HFileBlockDecodingContext decodingCtx)
 
 
@@ -198,13 +198,13 @@
 
 
 ByteBuffer
-RowIndexCodecV1.decodeKeyValues(DataInputStream source,
-   HFileBlockDecodingContext decodingCtx)
+BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source,
+   HFileBlockDecodingContext blkDecodingCtx)
 
 
 ByteBuffer
-BufferedDataBlockEncoder.decodeKeyValues(DataInputStream source,
-   HFileBlockDecodingContext blkDecodingCtx)
+RowIndexCodecV1.decodeKeyValues(DataInputStream source,
+   HFileBlockDecodingContext decodingCtx)
 
 
 
@@ -279,18 +279,18 @@
 
 
 HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
-
-
-HFileBlockDecodingContext
 NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta)
 
-
+
 HFileBlockDecodingContext
 HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
 create an encoder specific decoding context for reading.
 
 
+
+HFileBlockDecodingContext
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext)
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
index 66443b9..79b047f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
@@ -116,36 +116,36 @@
 HFileBlockDefaultDecodingContext decodingCtx)
 
 
-protected ByteBuffer
-CopyKeyDataBlockEncoder.internalDecodeKeyValues(DataInputStream source,
+protected abstract ByteBuffer
+BufferedDataBlockEncoder.internalDecodeKeyValues(DataInputStream source,

[16/51] [partial] hbase-site git commit: Published site at .

2018-02-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
index bf8d672..61695fd 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
@@ -113,17 +113,17 @@
 
 
 
-private Batch.Callback<CResult>
-AsyncRequestFutureImpl.callback
-
-
 private Batch.Callback<T>
 AsyncProcessTask.callback
 
-
+
 private Batch.Callback<T>
 AsyncProcessTask.Builder.callback
 
+
+private Batch.Callback<CResult>
+AsyncRequestFutureImpl.callback
+
 
 
 
@@ -148,42 +148,50 @@
 
 
 <R> void
-Table.batchCallback(List<? extends Row> actions,
+HTable.batchCallback(List<? extends Row> actions,
  Object[] results,
- Batch.Callback<R> callback)
-Same as Table.batch(List, Object[]), but with a callback.
-
+ Batch.Callback<R> callback)
 
 
 <R> void
-HTable.batchCallback(List<? extends Row> actions,
+Table.batchCallback(List<? extends Row> actions,
  Object[] results,
- Batch.Callback<R> callback)
+ Batch.Callback<R> callback)
+Same as Table.batch(List, Object[]), but with a callback.
+
 
 
 <R extends com.google.protobuf.Message> void
-Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
+HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
    com.google.protobuf.Message request,
    byte[] startKey,
    byte[] endKey,
    R responsePrototype,
-   Batch.Callback<R> callback)
-Creates an instance of the given Service subclass for each table
- region spanning the range from the startKey row to endKey row (inclusive), all
- the invocations to the same region server will be batched into one call.
-
+   Batch.Callback<R> callback)
 
 
 <R extends com.google.protobuf.Message> void
-HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
+Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
    com.google.protobuf.Message request,
    byte[] startKey,
    byte[] endKey,
    R responsePrototype,
-   Batch.Callback<R> callback)
+   Batch.Callback<R> callback)
+Creates an instance of the given Service subclass for each table
+ region spanning the range from the startKey row to endKey row (inclusive), all
+ the invocations to the same region server will be batched into one call.
+
 
 
 <T extends com.google.protobuf.Service,R> void
+HTable.coprocessorService(Class<T> service,
+  byte[] startKey,
+  byte[] endKey,
+  Batch.Call<T,R> callable,
+  Batch.Callback<R> callback)
+
+
+<T extends com.google.protobuf.Service,R> void
 Table.coprocessorService(Class<T> service,
   byte[] startKey,
   byte[] endKey,
@@ -195,14 +203,6 @@
  with each Service instance.
 
 
-
-<T extends com.google.protobuf.Service,R> void
-HTable.coprocessorService(Class<T> service,
-  byte[] startKey,
-  byte[] endKey,
-  Batch.Call<T,R> callable,
-  Batch.Callback<R> callback)
-
 
 static <R> void
 HTable.doBatchWithCallback(List<? extends Row> actions,
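The batchCallback listings above differ only in where the javadoc lands; the call shape is the same in Table and HTable. A hedged usage sketch (row, family, and qualifier values are illustrative):

  List<Put> actions = new ArrayList<>();
  actions.add(new Put(Bytes.toBytes("row1"))
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
  Object[] results = new Object[actions.size()];
  // the callback fires once per action as responses arrive from region servers
  table.batchCallback(actions, results,
      (region, row, result) -> System.out.println("done: " + Bytes.toStringBinary(row)));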


[16/51] [partial] hbase-site git commit: Published site at .

2018-02-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
index 65795ae..463f4fa 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
@@ -488,15 +488,15 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 static Filter
-ColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+SingleColumnValueExcludeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-ColumnCountGetFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ValueFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-RowFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+FamilyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
@@ -506,63 +506,63 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 static Filter
-FirstKeyOnlyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-TimestampsFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+PageFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-ValueFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+RowFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-KeyOnlyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnRangeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-FamilyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnCountGetFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-QualifierFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+MultipleColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-ColumnRangeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnPaginationFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
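Each entry above has the same shape: the HBase filter language parser tokenizes a filter expression into an ArrayList<byte[]> and hands it to the matching class. A hedged sketch of invoking one factory directly (the page-size token is illustrative):

  ArrayList<byte[]> filterArguments = new ArrayList<>();
  filterArguments.add(Bytes.toBytes("10"));   // the token the parser would pass for PageFilter('10')
  Filter f = PageFilter.createFilterFromArguments(filterArguments);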

[16/51] [partial] hbase-site git commit: Published site at .

2018-02-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
index fb9bdb3..4584cda 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotDescription.html
@@ -137,7 +137,9 @@
 
 
 CompletableFuture<List<SnapshotDescription>>
-AsyncHBaseAdmin.listSnapshots()
+AsyncAdmin.listSnapshots()
+List completed snapshots.
+
 
 
 List<SnapshotDescription>
@@ -146,22 +148,22 @@
 
 
 
-List<SnapshotDescription>
-HBaseAdmin.listSnapshots()
+CompletableFuture<List<SnapshotDescription>>
+RawAsyncHBaseAdmin.listSnapshots()
 
 
-CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots()
-List completed snapshots.
-
+List<SnapshotDescription>
+HBaseAdmin.listSnapshots()
 
 
 CompletableFuture<List<SnapshotDescription>>
-RawAsyncHBaseAdmin.listSnapshots()
+AsyncHBaseAdmin.listSnapshots()
 
 
 CompletableFuture<List<SnapshotDescription>>
-AsyncHBaseAdmin.listSnapshots(Pattern pattern)
+AsyncAdmin.listSnapshots(Pattern pattern)
+List all the completed snapshots matching the given pattern.
+
 
 
 List<SnapshotDescription>
@@ -170,18 +172,16 @@
 
 
 
-List<SnapshotDescription>
-HBaseAdmin.listSnapshots(Pattern pattern)
+CompletableFuture<List<SnapshotDescription>>
+RawAsyncHBaseAdmin.listSnapshots(Pattern pattern)
 
 
-CompletableFuture<List<SnapshotDescription>>
-AsyncAdmin.listSnapshots(Pattern pattern)
-List all the completed snapshots matching the given pattern.
-
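A hedged sketch of the asynchronous variant listed above (connection setup is elided; the pattern value is illustrative):

  admin.listSnapshots(Pattern.compile("backup-.*"))   // admin is an AsyncAdmin
      .thenAccept(snapshots ->
          snapshots.forEach(s -> System.out.println(s.getName())));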

[16/51] [partial] hbase-site git commit: Published site at .

2018-02-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
index 65795ae..463f4fa 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
@@ -488,15 +488,15 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 static Filter
-ColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+SingleColumnValueExcludeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-ColumnCountGetFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ValueFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-RowFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+FamilyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
@@ -506,63 +506,63 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 static Filter
-FirstKeyOnlyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-TimestampsFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+PageFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-ValueFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+RowFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-KeyOnlyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnRangeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-FamilyFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnCountGetFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-QualifierFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+MultipleColumnPrefixFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter
-ColumnRangeFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
+ColumnPaginationFilter.createFilterFromArguments(ArrayList<byte[]> filterArguments)
 
 
 static Filter

[16/51] [partial] hbase-site git commit: Published site at .

2018-02-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html 
b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
index 49f85aa..6e37f0b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/class-use/ImmutableBytesWritable.html
@@ -162,11 +162,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ImmutableBytesWritable
-TableRecordReader.createKey()
+TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
 
 
 ImmutableBytesWritable
-TableSnapshotInputFormat.TableSnapshotRecordReader.createKey()
+TableRecordReader.createKey()
 
 
 ImmutableBytesWritable
@@ -183,11 +183,9 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
-TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
    org.apache.hadoop.mapred.JobConf job,
-   org.apache.hadoop.mapred.Reporter reporter)
-Builds a TableRecordReader.
-
+   org.apache.hadoop.mapred.Reporter reporter)
 
 
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
@@ -197,9 +195,11 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 org.apache.hadoop.mapred.RecordReader<ImmutableBytesWritable,Result>
-TableSnapshotInputFormat.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
+TableInputFormatBase.getRecordReader(org.apache.hadoop.mapred.InputSplit split,
    org.apache.hadoop.mapred.JobConf job,
-   org.apache.hadoop.mapred.Reporter reporter)
+   org.apache.hadoop.mapred.Reporter reporter)
+Builds a TableRecordReader.
+
 
 
 
@@ -218,12 +218,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-IdentityTableMap.map(ImmutableBytesWritable key,
-   Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+   Result values,
    org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-   org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+   org.apache.hadoop.mapred.Reporter reporter)
 
 
 void
@@ -236,19 +234,21 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritablerow,
-   Resultvalues,
+IdentityTableMap.map(ImmutableBytesWritablekey,
+   Resultvalue,
org.apache.hadoop.mapred.OutputCollectorImmutableBytesWritable,Resultoutput,
-   org.apache.hadoop.mapred.Reporterreporter)
+   org.apache.hadoop.mapred.Reporterreporter)
+Pass the key, value to reduce
+
 
 
 boolean
-TableRecordReader.next(ImmutableBytesWritable key,
+TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
 Result value)
 
 
 boolean
-TableSnapshotInputFormat.TableSnapshotRecordReader.next(ImmutableBytesWritable key,
+TableRecordReader.next(ImmutableBytesWritable key,
 Result value)
 
 
@@ -281,12 +281,10 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-IdentityTableMap.map(ImmutableBytesWritable key,
-   Result value,
+RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
+   Result values,
    org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-   org.apache.hadoop.mapred.Reporter reporter)
-Pass the key, value to reduce
-
+   org.apache.hadoop.mapred.Reporter reporter)
 
 
 void
@@ -299,10 +297,12 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 void
-RowCounter.RowCounterMapper.map(ImmutableBytesWritable row,
-   Result values,
+IdentityTableMap.map(ImmutableBytesWritable key,
+   Result value,
    org.apache.hadoop.mapred.OutputCollector<ImmutableBytesWritable,Result> output,
-   org.apache.hadoop.mapred.Reporter reporter)
+   org.apache.hadoop.mapred.Reporter reporter)
+Pass the key, value to reduce
+
 
 
 void
@@ -349,7 +349,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private ImmutableBytesWritable
-MultithreadedTableMapper.SubMapRecordReader.key
+TableRecordReaderImpl.key
 
 
 private ImmutableBytesWritable
@@ -357,7 +357,7 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 private ImmutableBytesWritable
-TableRecordReaderImpl.key
+MultithreadedTableMapper.SubMapRecordReader.key
 
 
 (package private) ImmutableBytesWritable
@@ -427,33 +427,33 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 ImmutableBytesWritable
-MultithreadedTableMapper.SubMapRecordReader.getCurrentKey()
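The map(...) rows above all follow the old org.apache.hadoop.mapred contract that IdentityTableMap and RowCounter.RowCounterMapper implement. A minimal mapper in that style (the class name and pass-through behavior are illustrative):

  public class PassThroughMap extends MapReduceBase
      implements TableMap<ImmutableBytesWritable, Result> {
    @Override
    public void map(ImmutableBytesWritable key, Result value,
        OutputCollector<ImmutableBytesWritable, Result> output,
        Reporter reporter) throws IOException {
      output.collect(key, value);   // pass the key, value straight through
    }
  }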

[16/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
index 77fb9b5..c4e8c8b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverGetter.html
@@ -165,520 +165,519 @@
 157E env = 
checkAndLoadInstance(implClass, priority, conf);
 158if (env != null) {
 159  
this.coprocEnvironments.add(env);
-160  LOG.info(
-161  "System coprocessor " + 
className + " was loaded " + "successfully with priority (" + priority + 
").");
-162  ++priority;
-163}
-164  } catch (Throwable t) {
-165// We always abort if system 
coprocessors cannot be loaded
-166abortServer(className, t);
-167  }
-168}
-169  }
-170
-171  /**
-172   * Load a coprocessor implementation 
into the host
-173   * @param path path to implementation 
jar
-174   * @param className the main class 
name
-175   * @param priority chaining priority
-176   * @param conf configuration for 
coprocessor
-177   * @throws java.io.IOException 
Exception
-178   */
-179  public E load(Path path, String 
className, int priority,
-180  Configuration conf) throws 
IOException {
-181String[] includedClassPrefixes = 
null;
-182if 
(conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null){
-183  String prefixes = 
conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY);
-184  includedClassPrefixes = 
prefixes.split(";");
-185}
-186return load(path, className, 
priority, conf, includedClassPrefixes);
-187  }
-188
-189  /**
-190   * Load a coprocessor implementation 
into the host
-191   * @param path path to implementation 
jar
-192   * @param className the main class 
name
-193   * @param priority chaining priority
-194   * @param conf configuration for 
coprocessor
-195   * @param includedClassPrefixes class 
name prefixes to include
-196   * @throws java.io.IOException 
Exception
-197   */
-198  public E load(Path path, String 
className, int priority,
-199  Configuration conf, String[] 
includedClassPrefixes) throws IOException {
-200Class<?> implClass;
-201LOG.debug("Loading coprocessor class 
" + className + " with path " +
-202path + " and priority " + 
priority);
-203
-204ClassLoader cl = null;
-205if (path == null) {
-206  try {
-207implClass = 
getClass().getClassLoader().loadClass(className);
-208  } catch (ClassNotFoundException e) 
{
-209throw new IOException("No jar 
path specified for " + className);
-210  }
-211} else {
-212  cl = 
CoprocessorClassLoader.getClassLoader(
-213path, 
getClass().getClassLoader(), pathPrefix, conf);
-214  try {
-215implClass = 
((CoprocessorClassLoader)cl).loadClass(className, includedClassPrefixes);
-216  } catch (ClassNotFoundException e) 
{
-217throw new IOException("Cannot 
load external coprocessor class " + className, e);
-218  }
-219}
-220
-221//load custom code for coprocessor
-222Thread currentThread = 
Thread.currentThread();
-223ClassLoader hostClassLoader = 
currentThread.getContextClassLoader();
-224try{
-225  // switch temporarily to the thread 
classloader for custom CP
-226  
currentThread.setContextClassLoader(cl);
-227  E cpInstance = 
checkAndLoadInstance(implClass, priority, conf);
-228  return cpInstance;
-229} finally {
-230  // restore the fresh (host) 
classloader
-231  
currentThread.setContextClassLoader(hostClassLoader);
-232}
-233  }
-234
-235  @VisibleForTesting
-236  public void load(Class<? extends C> implClass, int priority, Configuration conf)
-237  throws IOException {
-238E env = 
checkAndLoadInstance(implClass, priority, conf);
-239coprocEnvironments.add(env);
-240  }
-241
-242  /**
-243   * @param implClass Implementation 
class
-244   * @param priority priority
-245   * @param conf configuration
-246   * @throws java.io.IOException 
Exception
-247   */
-248  public E checkAndLoadInstance(Class<?> implClass, int priority, Configuration conf)
-249  throws IOException {
-250// create the instance
-251C impl;
-252try {
-253  impl = 
checkAndGetInstance(implClass);
-254  if (impl == null) {
-255LOG.error("Cannot load 
coprocessor " + implClass.getSimpleName());
-256return null;
-257  }
-258} catch 
(InstantiationException|IllegalAccessException e) {
-259  throw new IOException(e);
-260}
-261// create the environment
-262E env = createEnvironment(impl, 
priority, 

[16/51] [partial] hbase-site git commit: Published site at .

2018-02-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
index b7c24d7..eecd2f9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
@@ -44,792 +44,792 @@
 036import java.util.List;
 037import java.util.Map;
 038import java.util.NavigableSet;
-039import java.util.Objects;
-040import java.util.PriorityQueue;
-041import java.util.Set;
-042import 
java.util.concurrent.ArrayBlockingQueue;
-043import 
java.util.concurrent.BlockingQueue;
-044import 
java.util.concurrent.ConcurrentHashMap;
-045import 
java.util.concurrent.ConcurrentMap;
-046import 
java.util.concurrent.ConcurrentSkipListSet;
-047import java.util.concurrent.Executors;
-048import 
java.util.concurrent.ScheduledExecutorService;
-049import java.util.concurrent.TimeUnit;
-050import 
java.util.concurrent.atomic.AtomicInteger;
-051import 
java.util.concurrent.atomic.AtomicLong;
-052import 
java.util.concurrent.atomic.LongAdder;
-053import java.util.concurrent.locks.Lock;
-054import 
java.util.concurrent.locks.ReentrantLock;
-055import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-056import 
org.apache.hadoop.conf.Configuration;
-057import 
org.apache.hadoop.hbase.HBaseConfiguration;
-058import 
org.apache.hadoop.hbase.io.HeapSize;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-063import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-064import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-066import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-068import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-069import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-071import 
org.apache.hadoop.hbase.nio.ByteBuff;
-072import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import 
org.apache.hadoop.hbase.util.HasThread;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-075import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-076import 
org.apache.hadoop.util.StringUtils;
-077import 
org.apache.yetus.audience.InterfaceAudience;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080
-081import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-082import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-083import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-084
-085/**
-086 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
-087 * BucketCache#ramCache and 
BucketCache#backingMap in order to
-088 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
-089 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
-090 * store/read the block data.
-091 *
-092 * <p>Eviction is via a similar algorithm as used in
-093 * {@link 
org.apache.hadoop.hbase.io.hfile.LruBlockCache}
-094 *
-095 * <p>BucketCache can be used as mainly a block cache (see
-096 * {@link 
org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
-097 * LruBlockCache to decrease CMS GC and 
heap fragmentation.
-098 *
-099 * <p>It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
-100 * blocks) to enlarge cache space via
-101 * {@link 
org.apache.hadoop.hbase.io.hfile.LruBlockCache#setVictimCache}
-102 */
-103@InterfaceAudience.Private
-104public class BucketCache implements 
BlockCache, HeapSize {
-105  private static final Logger LOG = 
LoggerFactory.getLogger(BucketCache.class);
-106
-107  /** Priority buckets config */
-108  static final String 
SINGLE_FACTOR_CONFIG_NAME = "hbase.bucketcache.single.factor";
-109  static final String 
MULTI_FACTOR_CONFIG_NAME = "hbase.bucketcache.multi.factor";
-110  static final String 
MEMORY_FACTOR_CONFIG_NAME = "hbase.bucketcache.memory.factor";
-111  static final String 
EXTRA_FREE_FACTOR_CONFIG_NAME = "hbase.bucketcache.extrafreefactor";
-112  static final String 
ACCEPT_FACTOR_CONFIG_NAME = "hbase.bucketcache.acceptfactor";
-113  static final String 
MIN_FACTOR_CONFIG_NAME = "hbase.bucketcache.minfactor";
-114
-115  /** Priority buckets */
-116  @VisibleForTesting
-117  static final float 
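The *_FACTOR_CONFIG_NAME keys above are read as plain floats from the configuration. A hedged sketch of reading them (the default values shown are illustrative, not the shipped defaults):

  Configuration conf = HBaseConfiguration.create();
  float single = conf.getFloat("hbase.bucketcache.single.factor", 0.25f);
  float multi  = conf.getFloat("hbase.bucketcache.multi.factor", 0.50f);
  float memory = conf.getFloat("hbase.bucketcache.memory.factor", 0.25f);
  // the three priority-bucket factors are expected to sum to 1.0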

[16/51] [partial] hbase-site git commit: Published site at .

2018-01-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
index 31826e9..4990591 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestAsyncProcess
+public class TestAsyncProcess
 extends Object
 
 
@@ -205,121 +205,121 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Field and Description
 
 
+static HBaseClassTestRule
+CLASS_RULE
+
+
 private static 
org.apache.hadoop.conf.Configuration
 CONF
 
-
+
 private static 
org.apache.hadoop.hbase.client.ConnectionConfiguration
 CONNECTION_CONFIG
 
-
+
 private static byte[]
 DUMMY_BYTES_1
 
-
+
 private static byte[]
 DUMMY_BYTES_2
 
-
+
 private static byte[]
 DUMMY_BYTES_3
 
-
+
 private static 
org.apache.hadoop.hbase.TableName
 DUMMY_TABLE
 
-
+
 private static byte[]
 FAILS
 
-
+
 private static Exception
 failure
 
-
+
 private static 
org.apache.hadoop.hbase.HRegionInfo
 hri1
 
-
+
 private static 
org.apache.hadoop.hbase.client.RegionInfo
 hri1r1
 
-
+
 private static 
org.apache.hadoop.hbase.client.RegionInfo
 hri1r2
 
-
+
 private static 
org.apache.hadoop.hbase.HRegionInfo
 hri2
 
-
+
 private static 
org.apache.hadoop.hbase.client.RegionInfo
 hri2r1
 
-
+
 private static 
org.apache.hadoop.hbase.HRegionInfo
 hri3
 
-
+
 private static 
org.apache.hadoop.hbase.RegionLocations
 hrls1
 
-
+
 private static 
org.apache.hadoop.hbase.RegionLocations
 hrls2
 
-
+
 private static 
org.apache.hadoop.hbase.RegionLocations
 hrls3
 
-
+
 private static 
org.apache.hadoop.hbase.HRegionLocation
 loc1
 
-
+
 private static 
org.apache.hadoop.hbase.HRegionLocation
 loc2
 
-
+
 private static 
org.apache.hadoop.hbase.HRegionLocation
 loc3
 
-
+
 private static org.slf4j.Logger
 LOG
 
-
+
 private static int
 NB_RETRIES
 
-
+
 private static int
 OPERATION_TIMEOUT
 
-
+
 private static int
 RPC_TIMEOUT
 
-
+
 private static 
org.apache.hadoop.hbase.ServerName
 sn
 
-
+
 private static 
org.apache.hadoop.hbase.ServerName
 sn2
 
-
+
 private static 
org.apache.hadoop.hbase.ServerName
 sn3
 
-
+
 private static String
 success
 
-
-org.junit.rules.TestRule
-timeout
-
 
 
 
@@ -620,13 +620,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Field Detail
-
+
 
 
 
 
-timeout
-public final org.junit.rules.TestRule timeout
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
 
 
 
@@ -635,7 +635,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -644,7 +644,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DUMMY_TABLE
-private static final org.apache.hadoop.hbase.TableName DUMMY_TABLE
+private static final org.apache.hadoop.hbase.TableName DUMMY_TABLE
 
 
 
@@ -653,7 +653,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DUMMY_BYTES_1
-private static final byte[] DUMMY_BYTES_1
+private static final byte[] DUMMY_BYTES_1
 
 
 
@@ -662,7 +662,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DUMMY_BYTES_2
-private static final byte[] DUMMY_BYTES_2
+private static final byte[] DUMMY_BYTES_2
 
 
 
@@ -671,7 +671,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DUMMY_BYTES_3
-private static final byte[] DUMMY_BYTES_3
+private static final byte[] DUMMY_BYTES_3
 
 
 
@@ -680,7 +680,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 FAILS
-private static final byte[] FAILS
+private static final byte[] FAILS
 
 
 
@@ -689,7 +689,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CONF
-private static final org.apache.hadoop.conf.Configuration CONF
+private static final org.apache.hadoop.conf.Configuration CONF
 
 
 
@@ -698,7 +698,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CONNECTION_CONFIG
-private static final org.apache.hadoop.hbase.client.ConnectionConfiguration CONNECTION_CONFIG
+private static final org.apache.hadoop.hbase.client.ConnectionConfiguration CONNECTION_CONFIG
 
 
 
@@ -707,7 +707,7 @@ extends 
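The diff swaps a per-instance JUnit timeout rule for the static CLASS_RULE field shown above. The usual shape of that field in HBase tests (only the field's type and name come from the diff; the forClass factory and annotation placement follow current HBase test convention):

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestAsyncProcess.class);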

[16/51] [partial] hbase-site git commit: Published site at .

2018-01-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.html
index 7509dcf..ec2aa41 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManifest.html
@@ -64,152 +64,152 @@
 056 */
 057@InterfaceAudience.Private
 058public class BackupManifest {
-059
-060  private static final Logger LOG = 
LoggerFactory.getLogger(BackupManifest.class);
-061
-062  // manifest file name
-063  public static final String 
MANIFEST_FILE_NAME = ".backup.manifest";
-064
-065  /**
-066   * Backup image, the dependency graph 
is made up by series of backup images BackupImage contains
-067   * all the relevant information to 
restore the backup and is used during restore operation
-068   */
-069
-070  public static class BackupImage implements Comparable<BackupImage> {
+059  private static final Logger LOG = 
LoggerFactory.getLogger(BackupManifest.class);
+060
+061  // manifest file name
+062  public static final String 
MANIFEST_FILE_NAME = ".backup.manifest";
+063
+064  /**
+065   * Backup image, the dependency graph 
is made up by series of backup images BackupImage contains
+066   * all the relevant information to 
restore the backup and is used during restore operation
+067   */
+068  public static class BackupImage implements Comparable<BackupImage> {
+069static class Builder {
+070  BackupImage image;
 071
-072static class Builder {
-073  BackupImage image;
-074
-075  Builder() {
-076image = new BackupImage();
-077  }
-078
-079  Builder withBackupId(String 
backupId) {
-080image.setBackupId(backupId);
-081return this;
-082  }
-083
-084  Builder withType(BackupType type) 
{
-085image.setType(type);
-086return this;
-087  }
-088
-089  Builder withRootDir(String rootDir) 
{
-090image.setRootDir(rootDir);
-091return this;
-092  }
-093
-094  Builder 
withTableList(ListTableName tableList) {
-095image.setTableList(tableList);
-096return this;
-097  }
-098
-099  Builder withStartTime(long 
startTime) {
-100image.setStartTs(startTime);
-101return this;
-102  }
-103
-104  Builder withCompleteTime(long 
completeTime) {
-105
image.setCompleteTs(completeTime);
-106return this;
-107  }
-108
-109  BackupImage build() {
-110return image;
-111  }
-112
-113}
-114
-115private String backupId;
-116private BackupType type;
-117private String rootDir;
-118private List<TableName> tableList;
-119private long startTs;
-120private long completeTs;
-121private ArrayList<BackupImage> ancestors;
-122private HashMap<TableName, HashMap<String, Long>> incrTimeRanges;
-123
-124static Builder newBuilder() {
-125  return new Builder();
-126}
-127
-128public BackupImage() {
-129  super();
-130}
-131
-132private BackupImage(String backupId, 
BackupType type, String rootDir,
-133List<TableName> tableList, long startTs, long completeTs) {
-134  this.backupId = backupId;
-135  this.type = type;
-136  this.rootDir = rootDir;
-137  this.tableList = tableList;
-138  this.startTs = startTs;
-139  this.completeTs = completeTs;
-140}
-141
-142static BackupImage 
fromProto(BackupProtos.BackupImage im) {
-143  String backupId = 
im.getBackupId();
-144  String rootDir = 
im.getBackupRootDir();
-145  long startTs = im.getStartTs();
-146  long completeTs = 
im.getCompleteTs();
-147  List<HBaseProtos.TableName> tableListList = im.getTableListList();
-148  List<TableName> tableList = new ArrayList<TableName>();
-149  for (HBaseProtos.TableName tn : 
tableListList) {
-150
tableList.add(ProtobufUtil.toTableName(tn));
-151  }
-152
-153  List<BackupProtos.BackupImage> ancestorList = im.getAncestorsList();
-154
-155  BackupType type =
-156  im.getBackupType() == 
BackupProtos.BackupType.FULL ? BackupType.FULL
-157  : BackupType.INCREMENTAL;
-158
-159  BackupImage image = new 
BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
-160  for (BackupProtos.BackupImage img : 
ancestorList) {
-161
image.addAncestor(fromProto(img));
-162  }
-163  
image.setIncrTimeRanges(loadIncrementalTimestampMap(im));
-164  return image;
-165}
-166
-167BackupProtos.BackupImage toProto() 
{
-168  BackupProtos.BackupImage.Builder 
builder = BackupProtos.BackupImage.newBuilder();
-169  builder.setBackupId(backupId);
-170  
builder.setCompleteTs(completeTs);
-171  

[16/51] [partial] hbase-site git commit: Published site at .

2018-01-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/hbase-build-configuration/issue-tracking.html
--
diff --git a/hbase-build-configuration/issue-tracking.html 
b/hbase-build-configuration/issue-tracking.html
index 71b73b4..a3863c7 100644
--- a/hbase-build-configuration/issue-tracking.html
+++ b/hbase-build-configuration/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Issue Management
 
@@ -123,7 +123,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-25
+  Last Published: 
2018-01-26
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/hbase-build-configuration/license.html
--
diff --git a/hbase-build-configuration/license.html 
b/hbase-build-configuration/license.html
index c8ab132..3edcf8d 100644
--- a/hbase-build-configuration/license.html
+++ b/hbase-build-configuration/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Licenses
 
@@ -326,7 +326,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-25
+  Last Published: 
2018-01-26
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/hbase-build-configuration/mail-lists.html
--
diff --git a/hbase-build-configuration/mail-lists.html 
b/hbase-build-configuration/mail-lists.html
index b8cf7e8..2a2cfd1 100644
--- a/hbase-build-configuration/mail-lists.html
+++ b/hbase-build-configuration/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Mailing 
Lists
 
@@ -176,7 +176,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-25
+  Last Published: 
2018-01-26
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/hbase-build-configuration/plugin-management.html
--
diff --git a/hbase-build-configuration/plugin-management.html 
b/hbase-build-configuration/plugin-management.html
index f80b968..c34f34a 100644
--- a/hbase-build-configuration/plugin-management.html
+++ b/hbase-build-configuration/plugin-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Plugin 
Management
 
@@ -271,7 +271,7 @@
 <a href="https://www.apache.org/">The Apache Software Foundation</a>.
 All rights reserved.
 
-  Last Published: 
2018-01-25
+  Last Published: 
2018-01-26
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/hbase-build-configuration/plugins.html
--
diff --git a/hbase-build-configuration/plugins.html 
b/hbase-build-configuration/plugins.html
index 26ba53e..70b8afe 100644
--- a/hbase-build-configuration/plugins.html
+++ b/hbase-build-configuration/plugins.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Plugins
 
@@ -214,7 +214,7 @@
 <a href="https://www.apache.org/">The Apache Software Foundation</a>.
 All rights reserved.
 
-  Last Published: 
2018-01-25
+  Last Published: 
2018-01-26
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/hbase-build-configuration/project-info.html
--
diff --git a/hbase-build-configuration/project-info.html 
b/hbase-build-configuration/project-info.html
index 9f841e8..36a29ec 100644
--- a/hbase-build-configuration/project-info.html
+++ b/hbase-build-configuration/project-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project 
Information
 
@@ -167,7 +167,7 @@
 <a href="https://www.apache.org/">The Apache Software Foundation</a>.
 All rights reserved.
 
-  Last Published: 
2018-01-25
+  Last Published: 
2018-01-26
 
 
 


[16/51] [partial] hbase-site git commit: Published site at .

2018-01-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
index de0f43c..85b7b98 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/RegionLocationFinder.html
@@ -394,7 +394,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 cache
-private org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache<RegionInfo,HDFSBlocksDistribution> cache
+private org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache<RegionInfo,HDFSBlocksDistribution> cache
 
 
 
@@ -411,7 +411,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 RegionLocationFinder
-RegionLocationFinder()
+RegionLocationFinder()
 
 
 
@@ -428,7 +428,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createCache
-private org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache<RegionInfo,HDFSBlocksDistribution> createCache()
+private org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache<RegionInfo,HDFSBlocksDistribution> createCache()
 Create a cache for region to list of servers
 
 Returns:
@@ -442,7 +442,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getConf
-public org.apache.hadoop.conf.Configuration getConf()
+public org.apache.hadoop.conf.Configuration getConf()
 
 
 
@@ -451,7 +451,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setConf
-public void setConf(org.apache.hadoop.conf.Configuration conf)
+public void setConf(org.apache.hadoop.conf.Configuration conf)
 
 
 
@@ -460,7 +460,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setServices
-public void setServices(MasterServices services)
+public void setServices(MasterServices services)
 
 
 
@@ -469,7 +469,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 setClusterMetrics
-public void setClusterMetrics(ClusterMetrics status)
+public void setClusterMetrics(ClusterMetrics status)
 
 
 
@@ -478,7 +478,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 scheduleFullRefresh
-private boolean scheduleFullRefresh()
+private boolean scheduleFullRefresh()
 Refresh all the region locations.
 
 Returns:
@@ -492,7 +492,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTopBlockLocations
-protected List<ServerName> getTopBlockLocations(RegionInfo region)
+protected List<ServerName> getTopBlockLocations(RegionInfo region)
 
 
 
@@ -501,7 +501,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTopBlockLocations
-protected List<ServerName> getTopBlockLocations(RegionInfo region,
+protected List<ServerName> getTopBlockLocations(RegionInfo region,
                                                 String currentHost)
 Returns an ordered list of hosts which have better locality 
for this region
  than the current host.
@@ -513,7 +513,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 internalGetTopBlockLocation
-protected HDFSBlocksDistribution internalGetTopBlockLocation(RegionInfo region)
+protected HDFSBlocksDistribution internalGetTopBlockLocation(RegionInfo region)
 Returns an ordered list of hosts that are hosting the 
blocks for this
  region. The weight of each host is the sum of the block lengths of all
  files on that host, so the first host in the list is the server which holds
@@ -532,7 +532,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getTableDescriptor
-protected TableDescriptor getTableDescriptor(TableName tableName)
+protected TableDescriptor getTableDescriptor(TableName tableName)
                           throws IOException
 return TableDescriptor for a given tableName
 
@@ -551,7 +551,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 mapHostNameToServerName
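
A rough sketch of what the locality ordering in getTopBlockLocations/internalGetTopBlockLocation above boils down to: sum block bytes per host, then order hosts by that weight, heaviest first. A plain Map stands in for HDFSBlocksDistribution here, and the host names are hypothetical:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TopBlockLocations {

  // Hosts ordered by total block bytes held for the region, best locality first.
  static List<String> topHostsByWeight(Map<String, Long> hostToBlockBytes) {
    List<String> hosts = new ArrayList<>(hostToBlockBytes.keySet());
    hosts.sort((a, b) -> Long.compare(hostToBlockBytes.get(b), hostToBlockBytes.get(a)));
    return hosts;
  }

  public static void main(String[] args) {
    Map<String, Long> weights = new HashMap<>();
    weights.put("rs1.example.com", 300L);
    weights.put("rs2.example.com", 900L);
    weights.put("rs3.example.com", 100L);
    // rs2 holds the most block bytes for this region, so it sorts first.
    System.out.println(topHostsByWeight(weights));
  }
}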

[16/51] [partial] hbase-site git commit: Published site at .

2018-01-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.html
 
b/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.html
index 1fce78c..61f4e73 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestAssignmentManager
+public class TestAssignmentManager
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -459,7 +459,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -468,7 +468,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
 
 
 
@@ -477,7 +477,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 timeout
-public final org.junit.rules.TestRule timeout
+public final org.junit.rules.TestRule timeout
 
 
 
@@ -486,7 +486,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 exception
-public final org.junit.rules.ExpectedException exception
+public final org.junit.rules.ExpectedException exception
 
 
 
@@ -495,7 +495,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 PROC_NTHREADS
-private static final int PROC_NTHREADS
+private static final int PROC_NTHREADS
 
 See Also:
 Constant
 Field Values
@@ -508,7 +508,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NREGIONS
-private static final int NREGIONS
+private static final int NREGIONS
 
 See Also:
 Constant
 Field Values
@@ -521,7 +521,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NSERVERS
-private static final int NSERVERS
+private static final int NSERVERS
 
 
 
@@ -530,7 +530,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 UTIL
-private HBaseTestingUtility UTIL
+private HBaseTestingUtility UTIL
 
 
 
@@ -539,7 +539,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 rsDispatcher
-private TestAssignmentManager.MockRSProcedureDispatcher rsDispatcher
+private TestAssignmentManager.MockRSProcedureDispatcher rsDispatcher
 
 
 
@@ -548,7 +548,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 master
-private MockMasterServices master
+private MockMasterServices master
 
 
 
@@ -557,7 +557,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 am
-private org.apache.hadoop.hbase.master.assignment.AssignmentManager am
+private org.apache.hadoop.hbase.master.assignment.AssignmentManager am
 
 
 
@@ -566,7 +566,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionsToRegionServers
-private NavigableMap<org.apache.hadoop.hbase.ServerName,SortedSet<byte[]>> regionsToRegionServers
+private NavigableMap<org.apache.hadoop.hbase.ServerName,SortedSet<byte[]>> regionsToRegionServers
 
 
 
@@ -575,7 +575,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 executor
-private ScheduledExecutorService executor
+private ScheduledExecutorService executor
 
 
 
@@ -584,7 +584,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 assignProcMetrics
-private org.apache.hadoop.hbase.procedure2.ProcedureMetrics assignProcMetrics
+private org.apache.hadoop.hbase.procedure2.ProcedureMetrics assignProcMetrics
 
 
 
@@ -593,7 +593,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 unassignProcMetrics
-private org.apache.hadoop.hbase.procedure2.ProcedureMetrics unassignProcMetrics

[16/51] [partial] hbase-site git commit: Published site at .

2018-01-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.html
deleted file mode 100644
index 69a07a3..000
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestHCM.html
+++ /dev/null
@@ -1,1124 +0,0 @@
-TestHCM (Apache HBase 3.0.0-SNAPSHOT Test API)
-org.apache.hadoop.hbase.client
-Class TestHCM
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.client.TestHCM
-
-
-
-
-
-
-
-
-public class TestHCM
-extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
-This class is for testing HBaseConnectionManager 
features
-
-
-
-
-
-
-
-
-
-
-
-Nested Class Summary
-
-Nested Classes
-
-Modifier and Type
-Class and Description
-
-
-static class
-TestHCM.BlockingFilter
-
-
-static class
-TestHCM.SleepAndFailFirstTime
-This coprocessor sleeps for 20 seconds.
-
-
-
-static class
-TestHCM.SleepCoprocessor
-
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-private static http://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true;
 title="class or interface in java.util">Random
-_randy
-
-
-private static byte[]
-FAM_NAM
-
-
-private static org.slf4j.Logger
-LOG
-
-
-org.junit.rules.TestName
-name
-
-
-private static byte[]
-ROW
-
-
-private static byte[]
-ROW_X
-
-
-private static int
-RPC_RETRY
-
-
-protected static http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">AtomicBoolean
-syncBlockingFilter
-
-
-private static 
org.apache.hadoop.hbase.TableName
-TABLE_NAME
-
-
-private static 
org.apache.hadoop.hbase.TableName
-TABLE_NAME1
-
-
-private static 
org.apache.hadoop.hbase.TableName
-TABLE_NAME2
-
-
-private static 
org.apache.hadoop.hbase.TableName
-TABLE_NAME3
-
-
-private static HBaseTestingUtility
-TEST_UTIL
-
-
-org.junit.rules.TestRule
-timeout
-
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-TestHCM()
-
-
-
-
-
-
-
-
-
-Method Summary
-
-
-Modifier and Type
-Method and Description
-
-
-private static void
-assertEqualsWithJitter(longexpected,
-  longactual)
-
-
-private static void
-assertEqualsWithJitter(longexpected,
-  longactual,
-  longjitterBase)
-
-
-private int
-setNumTries(org.apache.hadoop.hbase.client.ConnectionImplementationhci,
-   intnewVal)
-
-
-static void
-setUpBeforeClass()
-
-
-static void
-tearDownAfterClass()
-
-
-void
-testAdminFactory()
-Naive test to check that Connection#getAdmin returns a 
properly constructed HBaseAdmin object
-
-
-
-void
-testCacheSeqNums()
-Test that stale cache updates don't override newer cached 
values.
-
-
-
-void
-testCallableSleep()
-
-
-void
-testClosing()
-
-
-void
-testClusterConnection()
-
-
-void
-testClusterStatus()
-
-
-void
-testConnection()
-This test checks that one can connect to the cluster with only the
-  ZooKeeper quorum set (see the connection sketch at the end of this excerpt).
-
-
-
-private void
-testConnectionClose(booleanallowsInterrupt)
-
-
-void
-testConnectionCloseAllowsInterrupt()
-Test that we can handle connection close: it will trigger a 
retry, but the 

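As referenced from testConnection() above, here is a minimal sketch of building a client Connection from nothing but ZooKeeper settings. The quorum host and port values are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ZkOnlyConnection {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Only the ZooKeeper quorum is supplied; everything else is discovered.
    conf.set("hbase.zookeeper.quorum", "zk1.example.com");
    conf.set("hbase.zookeeper.property.clientPort", "2181");
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      System.out.println("connected: " + !conn.isClosed());
    }
  }
}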
[16/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 90f1ea4..a11a540 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -573,7 +573,7 @@
 
 addEdits(WAL, RegionInfo, HTableDescriptor, int, MultiVersionConcurrencyControl, NavigableMap<byte[], Integer>) - Method in class org.apache.hadoop.hbase.regionserver.wal.AbstractTestFSWAL

-addEdits(WAL, HRegionInfo, HTableDescriptor, int, NavigableMap<byte[], Integer>) - Method in class org.apache.hadoop.hbase.wal.TestFSHLogProvider
+addEdits(WAL, RegionInfo, TableDescriptor, int, NavigableMap<byte[], Integer>) - Method in class org.apache.hadoop.hbase.wal.TestFSHLogProvider
 
 addedQualifier
 - Variable in class org.apache.hadoop.hbase.coprocessor.SampleRegionWALCoprocessor
 
@@ -810,7 +810,7 @@
 
 The AccessControlLists.addUserPermission may throw 
exception before closing the table.
 
-addWALEdits(TableName, HRegionInfo, byte[], byte[], int, EnvironmentEdge, WAL, NavigableMap<byte[], Integer>, MultiVersionConcurrencyControl) - Method in class org.apache.hadoop.hbase.coprocessor.TestWALObserver
+addWALEdits(TableName, RegionInfo, byte[], byte[], int, EnvironmentEdge, WAL, NavigableMap<byte[], Integer>, MultiVersionConcurrencyControl) - Method in class org.apache.hadoop.hbase.coprocessor.TestWALObserver
 
 addWALEdits(TableName,
 HRegionInfo, byte[], byte[], int, EnvironmentEdge, WAL, HTableDescriptor, 
MultiVersionConcurrencyControl, NavigableMapbyte[], Integer) 
- Method in class org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay
 
@@ -1194,6 +1194,10 @@
 
 AlwaysDelete()
 - Constructor for class org.apache.hadoop.hbase.master.cleaner.TestCleanerChore.AlwaysDelete
 
+AlwaysIncludeAndSeekNextRowFilter()
 - Constructor for class org.apache.hadoop.hbase.regionserver.querymatcher.TestUserScanQueryMatcher.AlwaysIncludeAndSeekNextRowFilter
+
+AlwaysIncludeFilter()
 - Constructor for class org.apache.hadoop.hbase.regionserver.querymatcher.TestUserScanQueryMatcher.AlwaysIncludeFilter
+
 AlwaysNextColFilter()
 - Constructor for class org.apache.hadoop.hbase.filter.TestFilterList.AlwaysNextColFilter
 
 am
 - Variable in class org.apache.hadoop.hbase.master.assignment.TestAssignmentManager
@@ -1256,7 +1260,7 @@
 
 APPEND_VALUE
 - Static variable in class org.apache.hadoop.hbase.coprocessor.TestPassCustomCellViaRegionObserver
 
-appendCompactionEvent(WALProvider.Writer,
 HRegionInfo, String[], String) - Static method in class 
org.apache.hadoop.hbase.wal.TestWALSplit
+appendCompactionEvent(WALProvider.Writer,
 RegionInfo, String[], String) - Static method in class 
org.apache.hadoop.hbase.wal.TestWALSplit
 
 appendCoprocessor(Configuration,
 String, String) - Static method in class 
org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil
 
@@ -6519,14 +6523,16 @@
 
 createBasic1FamilyHTD(TableName)
 - Method in class org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay
 
-createBasic3FamilyHRegionInfo(String)
 - Method in class org.apache.hadoop.hbase.coprocessor.TestWALObserver
-
 createBasic3FamilyHRegionInfo(TableName)
 - Method in class org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay
 
 createBasic3FamilyHTD(String)
 - Method in class org.apache.hadoop.hbase.coprocessor.TestWALObserver
 
 createBasic3FamilyHTD(TableName)
 - Method in class org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay
 
+createBasicHRegionInfo(String)
 - Method in class org.apache.hadoop.hbase.coprocessor.TestWALObserver
+
+Creates an HRI around an HTD that has 
tableName.
+
 createBatchScan()
 - Static method in class org.apache.hadoop.hbase.client.AbstractTestAsyncTableScan
 
 createBatchSmallResultSizeScan()
 - Static method in class org.apache.hadoop.hbase.client.AbstractTestAsyncTableScan
@@ -6790,7 +6796,9 @@
 Create an HFile with the given number of rows between a 
given
  start key and end key @ family:qualifier.
 
-createHRegion(byte[], String, WAL, Durability) - Method in class org.apache.hadoop.hbase.regionserver.wal.TestDurability
+createHRegion(WALFactory, Durability) - Method in class org.apache.hadoop.hbase.regionserver.wal.TestDurability
+
+createHRegion(TableDescriptor, RegionInfo, String, WAL, Durability) - Method in class org.apache.hadoop.hbase.regionserver.wal.TestDurability
 
 createHTableDescriptor(TableName,
 byte[]...) - Static method in class 
org.apache.hadoop.hbase.master.procedure.TestCloneSnapshotProcedure
 
@@ -10584,7 +10592,7 @@
 
 FailingDummyReplicator(List<WAL.Entry>, int) - Constructor for class org.apache.hadoop.hbase.replication.TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator
 
-FailingHRegionFileSystem(Configuration,
 FileSystem, Path, HRegionInfo) - Constructor for class 

[16/51] [partial] hbase-site git commit: Published site at .

2018-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
index 53d61d8..f3faf34 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
@@ -182,14 +182,14 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
+org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
+org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp
 org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
-org.apache.hadoop.hbase.filter.FuzzyRowFilter.SatisfiesCode
 org.apache.hadoop.hbase.filter.Filter.ReturnCode
-org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
-org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
 org.apache.hadoop.hbase.filter.FuzzyRowFilter.Order
+org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
+org.apache.hadoop.hbase.filter.FuzzyRowFilter.SatisfiesCode
 org.apache.hadoop.hbase.filter.FilterList.Operator
-org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/http/HttpServer.QuotingInputFilter.RequestQuoter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/http/HttpServer.QuotingInputFilter.RequestQuoter.html
 
b/devapidocs/org/apache/hadoop/hbase/http/HttpServer.QuotingInputFilter.RequestQuoter.html
index 9c6914a..511b0f5 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/http/HttpServer.QuotingInputFilter.RequestQuoter.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/http/HttpServer.QuotingInputFilter.RequestQuoter.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class HttpServer.QuotingInputFilter.RequestQuoter
+public static class HttpServer.QuotingInputFilter.RequestQuoter
 extends javax.servlet.http.HttpServletRequestWrapper
 
 
@@ -273,7 +273,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 rawRequest
-private final javax.servlet.http.HttpServletRequest rawRequest
+private final javax.servlet.http.HttpServletRequest rawRequest
 
 
 
@@ -290,7 +290,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 RequestQuoter
-public RequestQuoter(javax.servlet.http.HttpServletRequest rawRequest)
+public RequestQuoter(javax.servlet.http.HttpServletRequest rawRequest)
 
 
 
@@ -307,7 +307,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 getParameterNames
-public Enumeration<String> getParameterNames()
+public Enumeration<String> getParameterNames()
 Return the set of parameter names, quoting each name.
 
 Specified by:
@@ -323,7 +323,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 getParameter
-public String getParameter(String name)
+public String getParameter(String name)
 Unquote the name and quote the value.
 
 Specified by:
@@ -339,7 +339,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 getParameterValues
-public String[] getParameterValues(String name)
+public String[] getParameterValues(String name)
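
The RequestQuoter methods above follow the standard HttpServletRequestWrapper decoration pattern: intercept parameter access and escape values on the way out. A minimal sketch under the assumption that plain HTML escaping is the goal; the quote() helper and class name are illustrative, not the HBase implementation (which also unquotes incoming names):

import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.stream.Collectors;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;

public class QuotingRequest extends HttpServletRequestWrapper {

  public QuotingRequest(HttpServletRequest rawRequest) {
    super(rawRequest);
  }

  // Escape the characters that matter when a parameter is echoed into HTML.
  private static String quote(String s) {
    return s == null ? null
        : s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;");
  }

  @Override
  public Enumeration<String> getParameterNames() {
    List<String> quoted = Collections.list(super.getParameterNames()).stream()
        .map(QuotingRequest::quote).collect(Collectors.toList());
    return Collections.enumeration(quoted);
  }

  @Override
  public String getParameter(String name) {
    return quote(super.getParameter(name));
  }

  @Override
  public String[] getParameterValues(String name) {
    String[] values = super.getParameterValues(name);
    if (values == null) {
      return null;
    }
    String[] quoted = new String[values.length];
    for (int i = 0; i < values.length; i++) {
      quoted[i] = quote(values[i]);
    }
    return quoted;
  }
}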

[16/51] [partial] hbase-site git commit: Published site at .

2018-01-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
index 3f627a4..ca3f255 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/MasterRpcServices.BalanceSwitchMode.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static enum MasterRpcServices.BalanceSwitchMode
+static enum MasterRpcServices.BalanceSwitchMode
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumMasterRpcServices.BalanceSwitchMode
 
 
@@ -210,7 +210,7 @@ the order they are declared.
 
 
 SYNC
-public static final MasterRpcServices.BalanceSwitchMode SYNC
+public static final MasterRpcServices.BalanceSwitchMode SYNC
 
 
 
@@ -219,7 +219,7 @@ the order they are declared.
 
 
 ASYNC
-public static final MasterRpcServices.BalanceSwitchMode ASYNC
+public static final MasterRpcServices.BalanceSwitchMode ASYNC
 
 
 
@@ -236,7 +236,7 @@ the order they are declared.
 
 
 values
-public static MasterRpcServices.BalanceSwitchMode[] values()
+public static MasterRpcServices.BalanceSwitchMode[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -256,7 +256,7 @@ for (MasterRpcServices.BalanceSwitchMode c : 
MasterRpcServices.BalanceSwitchMode
 
 
 valueOf
-public static MasterRpcServices.BalanceSwitchMode valueOf(String name)
+public static MasterRpcServices.BalanceSwitchMode valueOf(String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 



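The values() and valueOf(String) contracts documented above are the compiler-generated enum methods, so they behave the same for any Java enum. A tiny standalone demonstration with a stand-in enum of the same constants (SYNC/ASYNC; not the HBase class itself):

public class EnumDemo {
  enum BalanceSwitchMode { SYNC, ASYNC }

  public static void main(String[] args) {
    // Iterate the constants in declaration order, as the javadoc describes.
    for (BalanceSwitchMode c : BalanceSwitchMode.values()) {
      System.out.println(c);
    }
    // valueOf matches the declared identifier exactly, else it throws
    // IllegalArgumentException (extraneous whitespace is not permitted).
    BalanceSwitchMode m = BalanceSwitchMode.valueOf("ASYNC");
    System.out.println(m == BalanceSwitchMode.ASYNC); // true
  }
}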
[16/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 

[16/51] [partial] hbase-site git commit: Published site at .

2018-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/http/lib/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/lib/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/http/lib/package-tree.html
index ab5187b..c69defe 100644
--- a/devapidocs/org/apache/hadoop/hbase/http/lib/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/http/lib/package-tree.html
@@ -139,6 +139,6 @@
 
 
 
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/http/lib/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/lib/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/http/lib/package-use.html
index 5ceaefa..dc898bf 100644
--- a/devapidocs/org/apache/hadoop/hbase/http/lib/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/http/lib/package-use.html
@@ -157,6 +157,6 @@
 
 
 
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/http/log/LogLevel.Servlet.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/log/LogLevel.Servlet.html 
b/devapidocs/org/apache/hadoop/hbase/http/log/LogLevel.Servlet.html
index 2f93e3f..401f113 100644
--- a/devapidocs/org/apache/hadoop/hbase/http/log/LogLevel.Servlet.html
+++ b/devapidocs/org/apache/hadoop/hbase/http/log/LogLevel.Servlet.html
@@ -418,6 +418,6 @@ extends javax.servlet.http.HttpServlet
 
 
 
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/http/log/LogLevel.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/log/LogLevel.html 
b/devapidocs/org/apache/hadoop/hbase/http/log/LogLevel.html
index e572fe2..4b61634 100644
--- a/devapidocs/org/apache/hadoop/hbase/http/log/LogLevel.html
+++ b/devapidocs/org/apache/hadoop/hbase/http/log/LogLevel.html
@@ -376,6 +376,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/http/log/class-use/LogLevel.Servlet.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/http/log/class-use/LogLevel.Servlet.html 
b/devapidocs/org/apache/hadoop/hbase/http/log/class-use/LogLevel.Servlet.html
index edc1655..40dad2e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/http/log/class-use/LogLevel.Servlet.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/http/log/class-use/LogLevel.Servlet.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/http/log/class-use/LogLevel.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/http/log/class-use/LogLevel.html 
b/devapidocs/org/apache/hadoop/hbase/http/log/class-use/LogLevel.html
index ab53856..f07a83c 100644
--- a/devapidocs/org/apache/hadoop/hbase/http/log/class-use/LogLevel.html
+++ b/devapidocs/org/apache/hadoop/hbase/http/log/class-use/LogLevel.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2017 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
+Copyright © 2007–2018 <a href="https://www.apache.org/">The Apache Software Foundation</a>. All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/http/log/package-summary.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/http/log/package-summary.html 
b/devapidocs/org/apache/hadoop/hbase/http/log/package-summary.html
index 85c31ed..e75c120 100644
--- 

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
new file mode 100644
index 000..b024669
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
@@ -0,0 +1,375 @@
+TestHRegion.HRegionForTesting (Apache HBase 3.0.0-SNAPSHOT Test API)
+org.apache.hadoop.hbase.regionserver
+Class 
TestHRegion.HRegionForTesting
+
+
+
+http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.regionserver.HRegion
+
+
+org.apache.hadoop.hbase.regionserver.TestHRegion.HRegionForTesting
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+org.apache.hadoop.hbase.conf.ConfigurationObserver, 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver, 
org.apache.hadoop.hbase.io.HeapSize, 
org.apache.hadoop.hbase.regionserver.Region
+
+
+Enclosing class:
+TestHRegion
+
+
+
+public static class TestHRegion.HRegionForTesting
+extends org.apache.hadoop.hbase.regionserver.HRegion
+The same as HRegion class, the only difference is that 
instantiateHStore will
+ create a different HStore - HStoreForTesting. [HBASE-8518]
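This is the classic protected-factory override for tests: the production class exposes a factory method, and the test subclass swaps in an instrumented implementation. A minimal standalone sketch of the pattern, with all class names hypothetical:

public class FactoryOverrideDemo {

  static class Store { /* production store */ }

  static class Region {
    // Production factory method; subclasses may substitute the implementation.
    protected Store instantiateStore() {
      return new Store();
    }
  }

  static class StoreForTesting extends Store {
    boolean failNextFlush; // example test hook
  }

  static class RegionForTesting extends Region {
    @Override
    protected Store instantiateStore() {
      return new StoreForTesting(); // everything else behaves like production
    }
  }

  public static void main(String[] args) {
    System.out.println(
        new RegionForTesting().instantiateStore().getClass().getSimpleName());
  }
}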
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
classorg.apache.hadoop.hbase.regionserver.HRegion
+org.apache.hadoop.hbase.regionserver.HRegion.BulkLoadListener, 
org.apache.hadoop.hbase.regionserver.HRegion.FlushResult, 
org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl, 
org.apache.hadoop.hbase.regionserver.HRegion.MutationBatchOperation, 
org.apache.hadoop.hbase.regionserver.HRegion.ObservedExceptionsInBatch, 
org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult, 
org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl, 
org.apache.hadoop.hbase.regionserver.HRegion.ReplayBatchOperation, 
org.apache.hadoop.hbase.regionserver.HRegion.RowLockContext, 
org.apache.hadoop.hbase.regionserver.HRegion.RowLockImpl, 
org.apache.hadoop.hbase.regionserver.HRegion.WriteState
+
+
+
+
+
+Nested classes/interfaces inherited from 
interfaceorg.apache.hadoop.hbase.regionserver.Region
+org.apache.hadoop.hbase.regionserver.Region.Operation, 
org.apache.hadoop.hbase.regionserver.Region.RowLock
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from 
classorg.apache.hadoop.hbase.regionserver.HRegion
+busyWaitDuration, checkAndMutateChecksFailed, 
checkAndMutateChecksPassed, closed, closing, compactionNumBytesCompacted, 
compactionNumFilesCompacted, compactionsFailed, compactionsFinished, 
compactionsQueued, conf, dataInMemoryWithoutWAL, DEEP_OVERHEAD, 
DEFAULT_BUSY_WAIT_DURATION, DEFAULT_CACHE_FLUSH_INTERVAL, 
DEFAULT_FLUSH_PER_CHANGES, DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE, 
DEFAULT_MAX_CELL_SIZE, DEFAULT_ROW_PROCESSOR_TIMEOUT, 
DEFAULT_ROWLOCK_WAIT_DURATION, filteredReadRequestsCount, FIXED_OVERHEAD, 
flushesQueued, HBASE_MAX_CELL_SIZE_KEY, HBASE_REGIONSERVER_MINIBATCH_SIZE, 
lastReplayedCompactionSeqId, lastReplayedOpenRegionSeqId, 
LOAD_CFS_ON_DEMAND_CONFIG_KEY, lock, MAX_FLUSH_PER_CHANGES, 
maxBusyWaitDuration, maxBusyWaitMultiplier, maxCellSize, maxSeqIdInStores, 
MEMSTORE_FLUSH_PER_CHANGES, MEMSTORE_PERIODIC_FLUSH_INTERVAL, 
memstoreFlushSize, numMutationsWithoutWAL, readRequestsCount, 
rowProcessorExecutor, rowProcessorTimeout, rsServices, stores, 
SYSTEM_CACHE_FLUSH_INTERVAL
 , timestampSlop, writeRequestsCount, writestate
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/ipc/RpcConnection.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/RpcConnection.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/RpcConnection.html
index 10fa56f..2675977 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/RpcConnection.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/RpcConnection.html
@@ -180,7 +180,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 serverPrincipal
 
 
-protected org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer
+protected org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer
 timeoutTimer
 
 
@@ -208,8 +208,8 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 protected 
-RpcConnection(org.apache.hadoop.conf.Configuration conf,
-              org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer timeoutTimer,
+RpcConnection(org.apache.hadoop.conf.Configuration conf,
+              org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer timeoutTimer,
               ConnectionId remoteId,
               String clusterId,
               boolean isSecurityEnabled,
  booleanisSecurityEnabled,
@@ -407,7 +407,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 timeoutTimer
-protected final org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer timeoutTimer
+protected final org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer timeoutTimer
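
The relocation in this hunk only changes the package of Netty's HashedWheelTimer; the usage pattern stays the same. A small sketch of the timer-per-call idea, written against plain io.netty (a sketch of the concept, not HBase's RpcConnection code; HBase relocates the package under org.apache.hbase.thirdparty):

import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import java.util.concurrent.TimeUnit;

public class RpcTimeoutDemo {
  public static void main(String[] args) throws InterruptedException {
    HashedWheelTimer timer = new HashedWheelTimer();
    // Arm a timeout for a pretend RPC; the task fires only if nobody cancels it.
    Timeout pending = timer.newTimeout(
        t -> System.out.println("rpc timed out, failing the call"),
        500, TimeUnit.MILLISECONDS);
    boolean responseArrived = false; // pretend the server never answered
    if (responseArrived) {
      pending.cancel(); // normal path: the response beats the timer
    }
    Thread.sleep(1000); // give the wheel a chance to fire
    timer.stop();
  }
}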
 
 
 
@@ -454,14 +454,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Constructor Detail
-
+
 
 
 
 
 RpcConnection
 protected RpcConnection(org.apache.hadoop.conf.Configuration conf,
-    org.apache.hadoop.hbase.shaded.io.netty.util.HashedWheelTimer timeoutTimer,
+    org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer timeoutTimer,
     ConnectionId remoteId,
     String clusterId,
     boolean isSecurityEnabled,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
 
b/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
index 7e2fc03..da8021c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/ipc/RpcServer.BlockingServiceAndInterface.html
@@ -138,7 +138,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Field and Description
 
 
-private org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService
+private org.apache.hbase.thirdparty.com.google.protobuf.BlockingService
 service
 
 
@@ -160,7 +160,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Constructor and Description
 
 
-BlockingServiceAndInterface(org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService service,
+BlockingServiceAndInterface(org.apache.hbase.thirdparty.com.google.protobuf.BlockingService service,
                            Class<?> serviceInterface)
 
 
@@ -179,7 +179,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Method and Description
 
 
-org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService
+org.apache.hbase.thirdparty.com.google.protobuf.BlockingService
 getBlockingService()
 
 
@@ -214,7 +214,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 service
-private final org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService service
+private final org.apache.hbase.thirdparty.com.google.protobuf.BlockingService service
 
 
 
@@ -234,13 +234,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Constructor Detail
-
+
 
 
 
 
 BlockingServiceAndInterface
-public BlockingServiceAndInterface(org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService service,
+public BlockingServiceAndInterface(org.apache.hbase.thirdparty.com.google.protobuf.BlockingService service,
                                   Class<?> serviceInterface)
 
 
@@ -267,7 +267,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferCell.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferCell.html
 
b/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferCell.html
deleted file mode 100644
index 60e7263..000
--- 
a/devapidocs/org/apache/hadoop/hbase/filter/KeyOnlyFilter.KeyOnlyByteBufferCell.html
+++ /dev/null
@@ -1,896 +0,0 @@
-KeyOnlyFilter.KeyOnlyByteBufferCell (Apache HBase 3.0.0-SNAPSHOT API)
-org.apache.hadoop.hbase.filter
-Class 
KeyOnlyFilter.KeyOnlyByteBufferCell
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
-
-
-org.apache.hadoop.hbase.ByteBufferCell
-
-
-org.apache.hadoop.hbase.filter.KeyOnlyFilter.KeyOnlyByteBufferCell
-
-
-
-
-
-
-
-
-
-All Implemented Interfaces:
-Cell
-
-
-Enclosing class:
-KeyOnlyFilter
-
-
-
-static class KeyOnlyFilter.KeyOnlyByteBufferCell
-extends ByteBufferCell
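From the field and method summaries that follow, the class removed in this hunk is a decorator: it delegates all key fields to a wrapped cell and, per the lenAsVal flag, exposes either no value or the original value's length. A simplified standalone sketch of that shape (interfaces trimmed down; not the real Cell API):

interface SimpleCell {
  byte[] getRowArray();
  byte[] getValueArray();
}

class KeyOnlyCell implements SimpleCell {
  private final SimpleCell cell;   // the wrapped cell
  private final boolean lenAsVal;  // expose the value length instead of an empty value

  KeyOnlyCell(SimpleCell cell, boolean lenAsVal) {
    this.cell = cell;
    this.lenAsVal = lenAsVal;
  }

  @Override
  public byte[] getRowArray() {
    return cell.getRowArray(); // key parts pass straight through
  }

  @Override
  public byte[] getValueArray() {
    if (!lenAsVal) {
      return new byte[0]; // the value is stripped entirely
    }
    // Otherwise the value becomes the 4-byte length of the original value.
    return java.nio.ByteBuffer.allocate(4)
        .putInt(cell.getValueArray().length).array();
  }
}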
-
-
-
-
-
-
-
-
-
-
-
-Nested Class Summary
-
-
-
-
-Nested classes/interfaces inherited from 
interfaceorg.apache.hadoop.hbase.Cell
-Cell.DataType
-
-
-
-
-
-
-
-
-Field Summary
-
-Fields
-
-Modifier and Type
-Field and Description
-
-
-private ByteBufferCell
-cell
-
-
-private boolean
-lenAsVal
-
-
-
-
-
-
-
-
-
-Constructor Summary
-
-Constructors
-
-Constructor and Description
-
-
-KeyOnlyByteBufferCell(ByteBufferCell c, boolean lenAsVal)
-
-
-
-
-
-
-
-
-
-Method Summary
-
-
-Modifier and Type
-Method and Description
-
-
-byte[]
-getFamilyArray()
-Contiguous bytes composed of legal HDFS filename characters 
which may start at any index in the
- containing array.
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer
-getFamilyByteBuffer()
-
-
-byte
-getFamilyLength()
-
-
-int
-getFamilyOffset()
-
-
-int
-getFamilyPosition()
-
-
-byte[]
-getQualifierArray()
-Contiguous raw bytes that may start at any index in the 
containing array.
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer
-getQualifierByteBuffer()
-
-
-int
-getQualifierLength()
-
-
-int
-getQualifierOffset()
-
-
-int
-getQualifierPosition()
-
-
-byte[]
-getRowArray()
-Contiguous raw bytes that may start at any index in the 
containing array.
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer
-getRowByteBuffer()
-
-
-short
-getRowLength()
-
-
-int
-getRowOffset()
-
-
-int
-getRowPosition()
-
-
-long
-getSequenceId()
-A region-specific unique monotonically increasing sequence 
ID given to each Cell.
-
-
-
-byte[]
-getTagsArray()
-Contiguous raw bytes representing tags that may start at 
any index in the containing array.
-
-
-
-http://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer
-getTagsByteBuffer()
-
-
-int
-getTagsLength()
-HBase internally uses 2 bytes to store tags length in 
Cell.
-
-
-
-int
-getTagsOffset()
-
-
-int
-getTagsPosition()
-
-
-long
-getTimestamp()
-
-
-Cell.DataType
-getType()
-Returns the type of cell in a human readable format using 
Cell.DataType
-
-
-
-byte
-getTypeByte()
-
-
-byte[]
-getValueArray()
-Contiguous raw bytes that may start at any index in the 
containing array.
-

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
index f8eace7..66b6656 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
@@ -27,2569 +27,2540 @@
 019 */
 020package org.apache.hadoop.hbase;
 021
-022import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
-023import static 
org.apache.hadoop.hbase.util.Bytes.len;
-024
-025import java.io.DataInput;
-026import java.io.DataOutput;
-027import java.io.IOException;
-028import java.io.OutputStream;
-029import java.nio.ByteBuffer;
-030import java.util.ArrayList;
-031import java.util.Arrays;
-032import java.util.HashMap;
-033import java.util.Iterator;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Optional;
-037
-038import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
-039import 
org.apache.hadoop.hbase.util.Bytes;
-040import 
org.apache.hadoop.hbase.util.ClassSize;
-041import 
org.apache.hadoop.io.RawComparator;
-042import 
org.apache.yetus.audience.InterfaceAudience;
-043import org.slf4j.Logger;
-044import org.slf4j.LoggerFactory;
-045
-046import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-047
-048/**
-049 * An HBase Key/Value. This is the 
fundamental HBase Type.
+022import static 
org.apache.hadoop.hbase.util.Bytes.len;
+023
+024import java.io.DataInput;
+025import java.io.DataOutput;
+026import java.io.IOException;
+027import java.io.OutputStream;
+028import java.nio.ByteBuffer;
+029import java.util.ArrayList;
+030import java.util.Arrays;
+031import java.util.HashMap;
+032import java.util.Iterator;
+033import java.util.List;
+034import java.util.Map;
+035import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
+036import 
org.apache.hadoop.hbase.util.Bytes;
+037import 
org.apache.hadoop.hbase.util.ClassSize;
+038import 
org.apache.hadoop.io.RawComparator;
+039import 
org.apache.yetus.audience.InterfaceAudience;
+040import org.slf4j.Logger;
+041import org.slf4j.LoggerFactory;
+042
+043import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+044
/**
 * An HBase Key/Value. This is the fundamental HBase Type.
 * <p>
 * HBase applications and users should use the Cell interface and avoid directly using KeyValue and
 * member functions not defined in Cell.
 * <p>
 * If being used client-side, the primary methods to access individual fields are
 * {@link #getRowArray()}, {@link #getFamilyArray()}, {@link #getQualifierArray()},
 * {@link #getTimestamp()}, and {@link #getValueArray()}. These methods allocate new byte arrays
 * and return copies. Avoid their use server-side.
 * <p>
 * Instances of this class are immutable. They do not implement Comparable but Comparators are
 * provided. Comparators change with context, whether user table or a catalog table comparison. It is
 * critical you use the appropriate comparator. There are Comparators for normal HFiles, Meta's
 * Hfiles, and bloom filter keys.
 * <p>
 * KeyValue wraps a byte array and takes offsets and lengths into the passed array at where to start
 * interpreting the content as KeyValue. The KeyValue format inside a byte array is:
 * <keylength> <valuelength> <key> <value>. Key is further decomposed as:
 * <rowlength> <row> <columnfamilylength> <columnfamily> <columnqualifier> <timestamp> <keytype>.
 * The rowlength maximum is Short.MAX_SIZE, column family length maximum is Byte.MAX_SIZE, and
 * column qualifier + key length must be < Integer.MAX_SIZE. The column does not contain the
 * family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER}.
 * KeyValue can optionally contain Tags. When it contains tags, they are added in the byte array
 * after the value part. The format for this part is: <tagslength><tagsbytes>. tagslength maximum
 * is Short.MAX_SIZE. The tagsbytes contain one or more tags, whereas each tag is of the form
 * <taglength><tagtype><tagbytes>. tagtype is one byte and taglength maximum is Short.MAX_SIZE,
 * and it includes the 1-byte type length and the actual tag bytes length.
 */
@InterfaceAudience.Private
public class KeyValue implements ExtendedCell {
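The byte layout spelled out in the javadoc above can be made concrete with a small encoder. This is an illustrative sketch of the described format, not HBase's KeyValue code; the Put type code of 4 matches KeyValue.Type in the source, but treat it as an assumption here:

import java.nio.ByteBuffer;

public class KeyValueLayout {

  // <keylength:4> <valuelength:4> <key> <value>, where key =
  // <rowlength:2> <row> <cflength:1> <cf> <qualifier> <timestamp:8> <type:1>.
  static byte[] encode(byte[] row, byte[] cf, byte[] qual, long ts, byte type, byte[] value) {
    int keyLen = 2 + row.length + 1 + cf.length + qual.length + 8 + 1;
    ByteBuffer bb = ByteBuffer.allocate(4 + 4 + keyLen + value.length);
    bb.putInt(keyLen).putInt(value.length);
    bb.putShort((short) row.length).put(row);
    bb.put((byte) cf.length).put(cf).put(qual);
    bb.putLong(ts).put(type);
    bb.put(value);
    return bb.array();
  }

  public static void main(String[] args) {
    byte[] kv = encode("r1".getBytes(), "f".getBytes(), "q".getBytes(),
        4L, (byte) 4 /* Put */, "v".getBytes());
    // 4 + 4 + (2 + 2 + 1 + 1 + 1 + 8 + 1) + 1 = 25 bytes total.
    System.out.println("serialized length = " + kv.length);
  }
}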

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueTestUtil.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueTestUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueTestUtil.html
index 381bbfd..01af885 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueTestUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueTestUtil.html
@@ -30,170 +30,169 @@
 022import java.util.Collection;
 023import java.util.List;
 024
-025import org.apache.hadoop.hbase.util.Bytes;
-026import org.apache.hadoop.hbase.util.IterableUtils;
+025import org.apache.commons.collections4.IterableUtils;
+026import org.apache.hadoop.hbase.util.Bytes;
 027import org.apache.hadoop.hbase.util.Strings;
 028import org.apache.yetus.audience.InterfaceAudience;
-029
-030import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-031
-032@InterfaceAudience.Private
-033public class KeyValueTestUtil {
-034
-035  public static KeyValue create(
-036  String row,
-037  String family,
-038  String qualifier,
-039  long timestamp,
-040  String value)
-041  {
-042return create(row, family, qualifier, 
timestamp, KeyValue.Type.Put, value);
-043  }
-044
-045  public static KeyValue create(
-046  String row,
-047  String family,
-048  String qualifier,
-049  long timestamp,
-050  KeyValue.Type type,
-051  String value)
-052  {
-053  return new KeyValue(
-054  Bytes.toBytes(row),
-055  Bytes.toBytes(family),
-056  Bytes.toBytes(qualifier),
-057  timestamp,
-058  type,
-059  Bytes.toBytes(value)
-060  );
-061  }
-062
-063  public static ByteBuffer toByteBufferAndRewind(final Iterable<? extends KeyValue> kvs,
-064      boolean includeMemstoreTS) {
-065    int totalBytes = KeyValueUtil.totalLengthWithMvccVersion(kvs, includeMemstoreTS);
-066    ByteBuffer bb = ByteBuffer.allocate(totalBytes);
-067    for (KeyValue kv : IterableUtils.nullSafe(kvs)) {
-068      KeyValueUtil.appendToByteBuffer(bb, kv, includeMemstoreTS);
-069    }
-070    bb.rewind();
-071    return bb;
-072  }
-073
-074  /**
-075   * Checks whether KeyValues from kvCollection2 are contained in kvCollection1.
-076   *
-077   * The comparison is made without distinguishing MVCC version of the KeyValues
-078   *
-079   * @param kvCollection1
-080   * @param kvCollection2
-081   * @return true if KeyValues from kvCollection2 are contained in kvCollection1
-082   */
-083  public static boolean containsIgnoreMvccVersion(Collection<? extends Cell> kvCollection1,
-084      Collection<? extends Cell> kvCollection2) {
-085    for (Cell kv1 : kvCollection1) {
-086      boolean found = false;
-087      for (Cell kv2 : kvCollection2) {
-088        if (PrivateCellUtil.equalsIgnoreMvccVersion(kv1, kv2)) found = true;
-089      }
-090      if (!found) return false;
-091    }
-092    return true;
-093  }
-094
-095  public static List<KeyValue> rewindThenToList(final ByteBuffer bb,
-096      final boolean includesMemstoreTS, final boolean useTags) {
-097    bb.rewind();
-098    List<KeyValue> kvs = Lists.newArrayList();
-099    KeyValue kv = null;
-100    while (true) {
-101      kv = KeyValueUtil.nextShallowCopy(bb, includesMemstoreTS, useTags);
-102      if (kv == null) {
-103        break;
-104      }
-105      kvs.add(kv);
-106    }
-107    return kvs;
-108  }
+029import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+030
+031@InterfaceAudience.Private
+032public class KeyValueTestUtil {
+033
+034  public static KeyValue create(
+035  String row,
+036  String family,
+037  String qualifier,
+038  long timestamp,
+039  String value)
+040  {
+041return create(row, family, qualifier, 
timestamp, KeyValue.Type.Put, value);
+042  }
+043
+044  public static KeyValue create(
+045  String row,
+046  String family,
+047  String qualifier,
+048  long timestamp,
+049  KeyValue.Type type,
+050  String value)
+051  {
+052  return new KeyValue(
+053  Bytes.toBytes(row),
+054  Bytes.toBytes(family),
+055  Bytes.toBytes(qualifier),
+056  timestamp,
+057  type,
+058  Bytes.toBytes(value)
+059  );
+060  }
+061
+062  public static ByteBuffer toByteBufferAndRewind(final Iterable<? extends KeyValue> kvs,
+063      boolean includeMemstoreTS) {
+064    int totalBytes = KeyValueUtil.totalLengthWithMvccVersion(kvs, includeMemstoreTS);
+065    ByteBuffer bb = ByteBuffer.allocate(totalBytes);
+066    for (KeyValue kv : IterableUtils.emptyIfNull(kvs)) {
+067      KeyValueUtil.appendToByteBuffer(bb, kv, includeMemstoreTS);
+068    }
+069    bb.rewind();
+070    return bb;
+071  }
+072
+073  /**
+074   * Checks whether KeyValues from kvCollection2 are contained in kvCollection1.
+075   *
+076   * The 
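
Assuming the helper signatures quoted above, a round trip through a ByteBuffer would look like the following sketch (the row, family, qualifier, and value strings are made up):

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueTestUtil;

public class KeyValueTestUtilRoundTrip {
  public static void main(String[] args) {
    // Build two cells with the create() helpers shown above.
    KeyValue kv1 = KeyValueTestUtil.create("row1", "f", "q1", 1L, "v1");
    KeyValue kv2 = KeyValueTestUtil.create("row1", "f", "q2", 1L, KeyValue.Type.Put, "v2");
    List<KeyValue> original = Arrays.asList(kv1, kv2);

    // Serialize without MVCC versions, then deserialize (these cells carry no tags).
    ByteBuffer bb = KeyValueTestUtil.toByteBufferAndRewind(original, false);
    List<KeyValue> copy = KeyValueTestUtil.rewindThenToList(bb, false, false);

    // MVCC-insensitive containment check in both directions.
    System.out.println(KeyValueTestUtil.containsIgnoreMvccVersion(original, copy)
        && KeyValueTestUtil.containsIgnoreMvccVersion(copy, original));
  }
}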

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
index 6fecbc9..2accda0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ModifyTableFuture.html
@@ -34,4140 +34,4141 @@
 026import 
java.nio.charset.StandardCharsets;
 027import java.util.ArrayList;
 028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.EnumSet;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.LinkedList;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Set;
-037import java.util.concurrent.Callable;
-038import 
java.util.concurrent.ExecutionException;
-039import java.util.concurrent.Future;
-040import java.util.concurrent.TimeUnit;
-041import 
java.util.concurrent.TimeoutException;
-042import 
java.util.concurrent.atomic.AtomicInteger;
-043import 
java.util.concurrent.atomic.AtomicReference;
-044import java.util.regex.Pattern;
-045import java.util.stream.Collectors;
-046import java.util.stream.Stream;
-047import 
org.apache.hadoop.conf.Configuration;
-048import 
org.apache.hadoop.hbase.Abortable;
-049import 
org.apache.hadoop.hbase.CacheEvictionStats;
-050import 
org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import 
org.apache.hadoop.hbase.ClusterStatus;
-053import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-054import 
org.apache.hadoop.hbase.HBaseConfiguration;
-055import 
org.apache.hadoop.hbase.HConstants;
-056import 
org.apache.hadoop.hbase.HRegionInfo;
-057import 
org.apache.hadoop.hbase.HRegionLocation;
-058import 
org.apache.hadoop.hbase.HTableDescriptor;
-059import 
org.apache.hadoop.hbase.MasterNotRunningException;
-060import 
org.apache.hadoop.hbase.MetaTableAccessor;
-061import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-062import 
org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import 
org.apache.hadoop.hbase.NotServingRegionException;
-064import 
org.apache.hadoop.hbase.RegionLoad;
-065import 
org.apache.hadoop.hbase.RegionLocations;
-066import 
org.apache.hadoop.hbase.ServerName;
-067import 
org.apache.hadoop.hbase.TableExistsException;
-068import 
org.apache.hadoop.hbase.TableName;
-069import 
org.apache.hadoop.hbase.TableNotDisabledException;
-070import 
org.apache.hadoop.hbase.TableNotFoundException;
-071import 
org.apache.hadoop.hbase.UnknownRegionException;
-072import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-073import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-074import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-075import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-076import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-077import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-078import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-079import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-080import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-081import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-082import 
org.apache.hadoop.hbase.quotas.QuotaRetriever;
-083import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-084import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-085import 
org.apache.hadoop.hbase.replication.ReplicationException;
-086import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-087import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-088import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-090import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-091import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-092import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-093import 
org.apache.hadoop.hbase.util.Addressing;
-094import 
org.apache.hadoop.hbase.util.Bytes;
-095import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-096import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-097import 
org.apache.hadoop.hbase.util.Pair;
-098import 
org.apache.hadoop.ipc.RemoteException;
-099import 
org.apache.hadoop.util.StringUtils;
-100import 
org.apache.yetus.audience.InterfaceAudience;
-101import 
org.apache.yetus.audience.InterfaceStability;
-102import org.slf4j.Logger;
-103import org.slf4j.LoggerFactory;
-104
-105import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-106import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-107import 

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/bulk-loads.html
--
diff --git a/bulk-loads.html b/bulk-loads.html
index 044bffd..22930ed 100644
--- a/bulk-loads.html
+++ b/bulk-loads.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase   
   Bulk Loads in Apache HBase (TM)
@@ -311,7 +311,7 @@ under the License. -->
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.  

-      Last Published: 2017-12-21
+      Last Published: 2017-12-22
 
 
 



[16/51] [partial] hbase-site git commit: Published site at .

2017-12-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 209ea2d..11c5b71 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Dependency Information
 
@@ -318,7 +318,7 @@
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.  

-      Last Published: 2017-12-20
+      Last Published: 2017-12-21
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index e88f31c..6aadd88 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependency Management
 
@@ -335,501 +335,501 @@
 jar
 Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0.txt)

-commons-logging : commons-logging (http://commons.apache.org/proper/commons-logging/) 1.2, jar, The Apache Software License, Version 2.0
 io.dropwizard.metrics : metrics-core (http://metrics.codahale.com/metrics-core/) 3.2.1, jar, Apache License 2.0
 javax.servlet : javax.servlet-api (http://servlet-spec.java.net) 3.1.0, jar, CDDL + GPLv2 with classpath exception
 javax.ws.rs : javax.ws.rs-api (http://jax-rs-spec.java.net) 2.0.1, jar, CDDL 1.1, GPL2 w/ CPE
 javax.xml.bind : jaxb-api (http://jaxb.java.net/) 2.2.12, jar, CDDL 1.1, GPL2 w/ CPE
 junit : junit (http://junit.org) 4.12, jar, Eclipse Public License 1.0
 log4j : log4j (http://logging.apache.org/log4j/1.2/) 1.2.17, jar, The Apache Software License, Version 2.0
 net.spy : spymemcached (http://www.couchbase.org/code/couchbase/java) 2.12.2, jar, The Apache Software License, Version 2.0
 org.apache.avro : avro (http://avro.apache.org) 1.7.7, jar, The Apache Software License, Version 2.0
 org.apache.commons : commons-collections4 (http://commons.apache.org/proper/commons-collections/) 4.1, jar, Apache License, Version 2.0
 org.apache.commons : commons-crypto (http://commons.apache.org/proper/commons-crypto/) 1.0.0, jar, Apache License, Version 2.0
 org.apache.commons : commons-lang3 (http://commons.apache.org/proper/commons-lang/) 3.6, jar, Apache License, Version 2.0
 org.apache.commons : commons-math3 (http://commons.apache.org/proper/commons-math/) 3.6.1, jar, Apache License, Version 2.0
 org.apache.curator : curator-client (http://curator.apache.org/curator-client) 4.0.0, jar, The Apache Software License, Version 2.0
 org.apache.curator : curator-framework (http://curator.apache.org/curator-framework) 4.0.0, jar, The Apache Software License, Version 2.0
 org.apache.curator : curator-recipes (http://curator.apache.org/curator-recipes) 4.0.0, jar, The Apache Software License, Version 2.0
 org.apache.hadoop : hadoop-auth 2.7.4, jar, Apache License, Version 2.0
 org.apache.hadoop : hadoop-client 2.7.4, jar, Apache License, Version 2.0
 org.apache.hadoop : hadoop-common 2.7.4, jar, Apache License, Version 2.0
 org.apache.hadoop : hadoop-hdfs 2.7.4, jar, Apache License, Version 2.0
 org.apache.hadoop : hadoop-mapreduce-client-core 2.7.4, jar, Apache License, Version 2.0
 org.apache.hadoop : hadoop-mapreduce-client-jobclient 2.7.4, jar, Apache License, Version 2.0
 org.apache.hadoop : hadoop-minicluster 2.7.4, jar, Apache License, Version 2.0

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/filter/WhileMatchFilter.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/filter/WhileMatchFilter.html 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/WhileMatchFilter.html
index 78f344c..ffe8840 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/WhileMatchFilter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/WhileMatchFilter.html
@@ -57,117 +57,121 @@
 049return filter;
 050  }
 051
-052  public void reset() throws IOException 
{
-053this.filter.reset();
-054  }
-055
-056  private void changeFAR(boolean value) 
{
-057filterAllRemaining = 
filterAllRemaining || value;
-058  }
-059
-060  @Override
-061  public boolean filterAllRemaining() 
throws IOException {
-062return this.filterAllRemaining || 
this.filter.filterAllRemaining();
-063  }
-064
-065  @Override
-066  public boolean filterRowKey(byte[] 
buffer, int offset, int length) throws IOException {
-067boolean value = 
filter.filterRowKey(buffer, offset, length);
-068changeFAR(value);
-069return value;
-070  }
-071
-072  @Override
-073  public boolean filterRowKey(Cell cell) 
throws IOException {
-074if (filterAllRemaining()) return 
true;
-075boolean value = 
filter.filterRowKey(cell);
-076changeFAR(value);
-077return value;
-078  }
-079
-080  @Deprecated
-081  @Override
-082  public ReturnCode filterKeyValue(final 
Cell c) throws IOException {
-083return filterCell(c);
-084  }
-085
-086  @Override
-087  public ReturnCode filterCell(final Cell 
c) throws IOException {
-088ReturnCode code = 
filter.filterCell(c);
-089changeFAR(code != 
ReturnCode.INCLUDE);
-090return code;
-091  }
-092
-093  @Override
-094  public Cell transformCell(Cell v) 
throws IOException {
-095return filter.transformCell(v);
-096  }
-097
-098  @Override
-099  public boolean filterRow() throws 
IOException {
-100boolean filterRow = 
this.filter.filterRow();
-101changeFAR(filterRow);
-102return filterRow;
-103  }
-104  
-105  @Override
-106  public boolean hasFilterRow() {
-107return true;
-108  }
-109
-110  /**
-111   * @return The filter serialized using 
pb
-112   */
-113  public byte[] toByteArray() throws 
IOException {
-114FilterProtos.WhileMatchFilter.Builder 
builder =
-115  
FilterProtos.WhileMatchFilter.newBuilder();
-116
builder.setFilter(ProtobufUtil.toFilter(this.filter));
-117return 
builder.build().toByteArray();
-118  }
-119
-120  /**
-121   * @param pbBytes A pb serialized {@link WhileMatchFilter} instance
-122   * @return An instance of {@link WhileMatchFilter} made from <code>bytes</code>
-123   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException
-124   * @see #toByteArray
-125   */
-126  public static WhileMatchFilter 
parseFrom(final byte [] pbBytes)
-127  throws DeserializationException {
-128FilterProtos.WhileMatchFilter 
proto;
-129try {
-130  proto = 
FilterProtos.WhileMatchFilter.parseFrom(pbBytes);
-131} catch 
(InvalidProtocolBufferException e) {
-132  throw new 
DeserializationException(e);
-133}
-134try {
-135  return new 
WhileMatchFilter(ProtobufUtil.toFilter(proto.getFilter()));
-136} catch (IOException ioe) {
-137  throw new 
DeserializationException(ioe);
-138}
-139  }
-140
-141  /**
-142   * @param o the other filter to compare 
with
-143   * @return true if and only if the 
fields of the filter that are serialized
-144   * are equal to the corresponding 
fields in other.  Used for testing.
-145   */
-146  boolean areSerializedFieldsEqual(Filter 
o) {
-147if (o == this) return true;
-148if (!(o instanceof WhileMatchFilter)) 
return false;
-149
-150WhileMatchFilter other = 
(WhileMatchFilter)o;
-151return 
getFilter().areSerializedFieldsEqual(other.getFilter());
-152  }
-153
-154  public boolean isFamilyEssential(byte[] 
name) throws IOException {
-155return 
filter.isFamilyEssential(name);
-156  }
-157
-158  @Override
-159  public String toString() {
-160return 
this.getClass().getSimpleName() + " " + this.filter.toString();
-161  }
-162}
+052  @Override
+053  public void reset() throws IOException {
+054    this.filter.reset();
+055  }
+056
+057  private void changeFAR(boolean value) {
+058    filterAllRemaining = filterAllRemaining || value;
+059  }
+060
+061  @Override
+062  public boolean filterAllRemaining() throws IOException {
+063    return this.filterAllRemaining || this.filter.filterAllRemaining();
+064  }
+065
+066  @Override
+067  public boolean filterRowKey(byte[] buffer, int offset, int length) throws IOException {
+068    boolean value = filter.filterRowKey(buffer, offset, length);
+069    changeFAR(value);
+070    return value;
+071  }
+072
+073  @Override
+074  public boolean filterRowKey(Cell cell) throws IOException {
+075    if 
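
As a usage sketch (not taken from this page): WhileMatchFilter is typically wrapped around another filter so that the first non-matching row flips filterAllRemaining() and ends the scan early, instead of merely filtering out the rest of the table. This assumes the HBase 2.x CompareOperator/RowFilter client API, and the row-key bound is illustrative.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.RowFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class WhileMatchScan {
  public static Scan buildScan() {
    // Rows match while rowkey <= "row-099"; the first row past the bound
    // makes the wrapping WhileMatchFilter report filterAllRemaining() == true,
    // so the scan stops there instead of scanning the whole table.
    Filter stopCondition = new RowFilter(CompareOperator.LESS_OR_EQUAL,
        new BinaryComparator(Bytes.toBytes("row-099")));
    Scan scan = new Scan();
    scan.setFilter(new WhileMatchFilter(stopCondition));
    return scan;
  }
}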

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
index da55c9a..43d76a6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/Version.html
@@ -16,11 +16,11 @@
 008@InterfaceAudience.Private
 009public class Version {
 010  public static final String version = "3.0.0-SNAPSHOT";
-011  public static final String revision = "d5aefbd2c78463bf4b3815f38f43dd745035cc93";
+011  public static final String revision = "11e82de8a1d782be85a776ec08e8cd6a071185bf";
 012  public static final String user = "jenkins";
-013  public static final String date = "Thu Dec 14 14:42:24 UTC 2017";
+013  public static final String date = "Fri Dec 15 14:41:48 UTC 2017";
 014  public static final String url = "git://asf920.gq1.ygridcore.net/home/jenkins/jenkins-slave/workspace/hbase_generate_website/hbase";
-015  public static final String srcChecksum = "5a15e2a8f50f36fa000ed17bdec4fdf8";
+015  public static final String srcChecksum = "d9ae3041bfc94f76119bc777802f7080";
 016}
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/client/Put.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Put.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Put.html
index 75a155d..460d5a7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Put.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Put.html
@@ -40,350 +40,325 @@
 032import 
org.apache.hadoop.hbase.HConstants;
 033import 
org.apache.hadoop.hbase.IndividualBytesFieldCell;
 034import 
org.apache.hadoop.hbase.KeyValue;
-035import org.apache.hadoop.hbase.Tag;
-036import 
org.apache.hadoop.hbase.io.HeapSize;
-037import 
org.apache.hadoop.hbase.security.access.Permission;
-038import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
-039import 
org.apache.hadoop.hbase.util.Bytes;
-040import 
org.apache.yetus.audience.InterfaceAudience;
-041
-042/**
-043 * Used to perform Put operations for a single row.
-044 * <p>
-045 * To perform a Put, instantiate a Put object with the row to insert to, and
-046 * for each column to be inserted, execute {@link #addColumn(byte[], byte[],
-047 * byte[]) add} or {@link #addColumn(byte[], byte[], long, byte[]) add} if
-048 * setting the timestamp.
-049 */
-050@InterfaceAudience.Public
-051public class Put extends Mutation implements HeapSize, Comparable<Row> {
-052  /**
-053   * Create a Put operation for the 
specified row.
-054   * @param row row key
-055   */
-056  public Put(byte [] row) {
-057this(row, 
HConstants.LATEST_TIMESTAMP);
-058  }
-059
-060  /**
-061   * Create a Put operation for the 
specified row, using a given timestamp.
-062   *
-063   * @param row row key; we make a copy 
of what we are passed to keep local.
-064   * @param ts timestamp
-065   */
-066  public Put(byte[] row, long ts) {
-067this(row, 0, row.length, ts);
-068  }
-069
-070  /**
-071   * We make a copy of the passed in row 
key to keep local.
-072   * @param rowArray
-073   * @param rowOffset
-074   * @param rowLength
-075   */
-076  public Put(byte [] rowArray, int 
rowOffset, int rowLength) {
-077this(rowArray, rowOffset, rowLength, 
HConstants.LATEST_TIMESTAMP);
-078  }
-079
-080  /**
-081   * @param row row key; we make a copy 
of what we are passed to keep local.
-082   * @param ts  timestamp
-083   */
-084  public Put(ByteBuffer row, long ts) {
-085    if (ts < 0) {
-086      throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
-087    }
-088checkRow(row);
-089this.row = new 
byte[row.remaining()];
-090row.get(this.row);
-091this.ts = ts;
-092  }
-093
-094  /**
-095   * @param row row key; we make a copy 
of what we are passed to keep local.
-096   */
-097  public Put(ByteBuffer row) {
-098this(row, 
HConstants.LATEST_TIMESTAMP);
-099  }
-100
-101  /**
-102   * We make a copy of the passed in row 
key to keep local.
-103   * @param rowArray
-104   * @param rowOffset
-105   * @param rowLength
-106   * @param ts
-107   */
-108  public Put(byte [] rowArray, int 
rowOffset, int rowLength, long ts) {
-109checkRow(rowArray, rowOffset, 
rowLength);
-110this.row = Bytes.copy(rowArray, 
rowOffset, rowLength);
-111this.ts = ts;
-112    if (ts < 0) {
-113      throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
-114    }
-115  }
-116
-117  /**
-118   * Create a Put operation for an 
immutable row key.
-119   *
-120   * @param row row key
-121   * @param rowIsImmutable whether the 
input row is immutable.
-122   *   Set to true if 
the caller 
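
A short usage sketch of the API that javadoc describes; the column family, qualifiers, and timestamp are illustrative:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PutExample {
  public static Put buildPut() {
    // The row key is copied to keep it local, per the constructor javadoc.
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
    // Explicit-timestamp variant; timestamps must be >= 0 or construction throws.
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q2"), 1513346508000L, Bytes.toBytes("v2"));
    return put;
  }
}

The resulting Put would then be handed to Table.put (or batched through an AsyncTable), which is outside this snippet.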

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
index 7c59e27..c904c56 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.SplitTableRegionFuture.html
@@ -119,4048 +119,4054 @@
 111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index 1f2ab40..4da06d3 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -3142,7 +3142,7 @@ implements

 FIXED_OVERHEAD
-public static final long FIXED_OVERHEAD
+public static final long FIXED_OVERHEAD

@@ -3151,7 +3151,7 @@ implements

 DEEP_OVERHEAD
-public static final long DEEP_OVERHEAD
+public static final long DEEP_OVERHEAD

@@ -3160,7 +3160,7 @@ implements

 MOCKED_LIST
-private static final List<Cell> MOCKED_LIST
+private static final List<Cell> MOCKED_LIST
 A mocked list implementation - discards all updates.

@@ -6940,7 +6940,7 @@ public static org.apache.hadoop.fs.Path

 reckonDelta
-private static Cell reckonDelta(Cell delta,
+private static Cell reckonDelta(Cell delta,
                                 Cell currentCell,
                                 byte[] columnFamily,
                                 long now,
@@ -6959,7 +6959,7 @@ public static org.apache.hadoop.fs.Path

 getLongValue
-private static long getLongValue(Cell cell)
+private static long getLongValue(Cell cell)
                           throws DoNotRetryIOException

 Returns:
@@ -6975,7 +6975,7 @@ public static org.apache.hadoop.fs.Path

 get
-private List<Cell> get(Mutation mutation,
+private List<Cell> get(Mutation mutation,
                        HStore store,
                        List<Cell> coordinates,
                        IsolationLevel isolation,
@@ -7000,7 +7000,7 @@ public static org.apache.hadoop.fs.Path

 sort
-private static List<Cell> sort(List<Cell> cells,
+private static List<Cell> sort(List<Cell> cells,
                                CellComparator comparator)

 Returns:
@@ -7014,7 +7014,7 @@ public static org.apache.hadoop.fs.Path

 checkFamily
-void checkFamily(byte[] family)
+void checkFamily(byte[] family)
           throws NoSuchColumnFamilyException

 Throws:
@@ -7028,7 +7028,7 @@ public static org.apache.hadoop.fs.Path

 heapSize
-public long heapSize()
+public long heapSize()

 Specified by:
 heapSize in interface HeapSize
@@ -7044,7 +7044,7 @@ public static org.apache.hadoop.fs.Path

 registerService
-public boolean registerService(com.google.protobuf.Service instance)
+public boolean registerService(com.google.protobuf.Service instance)
 Registers a new protocol buffer Service subclass as a coprocessor endpoint to
 be available for handling Region#execService(com.google.protobuf.RpcController,
 org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall) calls.
@@ -7070,7 +7070,7 @@ public static org.apache.hadoop.fs.Path

 execService
-public com.google.protobuf.Message execService(com.google.protobuf.RpcController controller,
+public com.google.protobuf.Message execService(com.google.protobuf.RpcController controller,
                org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall call)
         throws IOException
 Executes a single protocol buffer coprocessor endpoint Service method using
@@ -7098,7 +7098,7 @@ public static org.apache.hadoop.fs.Path

 shouldForceSplit
-boolean shouldForceSplit()
+boolean shouldForceSplit()

@@ -7107,7 +7107,7 @@ public static org.apache.hadoop.fs.Path

 getExplicitSplitPoint
-byte[] getExplicitSplitPoint()
+byte[] getExplicitSplitPoint()

@@ -7116,7 +7116,7 @@ public static org.apache.hadoop.fs.Path

 forceSplit
-void forceSplit(byte[] sp)
+void forceSplit(byte[] sp)

@@ 
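
To make registerService/execService concrete, here is a hedged sketch of a region coprocessor endpoint. PingProtos and its request/response/service types are hypothetical stand-ins for a protobuf-generated service (they are not part of HBase); only registerService(com.google.protobuf.Service) is taken from the signatures above.

import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.regionserver.HRegion;

// PingProtos is a hypothetical protobuf-generated class; substitute your own stubs.
public class PingEndpoint extends PingProtos.PingService {
  @Override
  public void ping(RpcController controller, PingProtos.PingRequest request,
      RpcCallback<PingProtos.PingResponse> done) {
    // The endpoint's only method: answer with a canned response.
    done.run(PingProtos.PingResponse.newBuilder().setPong("pong").build());
  }

  // Per the javadoc above, registration returns false if a service with the
  // same name was already registered on this region.
  public static boolean install(HRegion region) {
    return region.registerService(new PingEndpoint());
  }
}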

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/class-use/SettableSequenceId.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/class-use/SettableSequenceId.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/SettableSequenceId.html
deleted file mode 100644
index b79b3f2..000
--- a/devapidocs/org/apache/hadoop/hbase/class-use/SettableSequenceId.html
+++ /dev/null
@@ -1,381 +0,0 @@
-Uses of Interface org.apache.hadoop.hbase.SettableSequenceId
-
-Packages that use SettableSequenceId:
-  org.apache.hadoop.hbase
-  org.apache.hadoop.hbase.io.encoding
-  org.apache.hadoop.hbase.regionserver
-  org.apache.hadoop.hbase.util
-
-Uses of SettableSequenceId in org.apache.hadoop.hbase
-
-Subinterfaces of SettableSequenceId in org.apache.hadoop.hbase:
-  interface ExtendedCell - Extension to Cell with server side required functions.
-
-Classes in org.apache.hadoop.hbase that implement SettableSequenceId:
-  class ByteBufferKeyValue - This Cell is an implementation of ByteBufferCell where the data resides in off heap/on heap ByteBuffer.
-  class IndividualBytesFieldCell
-  class KeyValue - An HBase Key/Value.
-  static class KeyValue.KeyOnlyKeyValue - A simple form of KeyValue that creates a keyvalue with only the key part of the byte[]. Mainly used in places where we need to compare two cells.
-  class NoTagsByteBufferKeyValue - An extension of the ByteBufferKeyValue where the tags length is always 0.
-  class NoTagsKeyValue - An extension of the KeyValue where the tags length is always 0.
-  private static class PrivateCellUtil.EmptyByteBufferCell - These cells are used in reseeks/seeks to improve the read performance.
-  private static class PrivateCellUtil.EmptyCell - These cells are used in reseeks/seeks to improve the read performance.
-  private static class PrivateCellUtil.FirstOnRowByteBufferCell
-  private static class PrivateCellUtil.FirstOnRowCell
-  private static class PrivateCellUtil.FirstOnRowColByteBufferCell
-  private static class PrivateCellUtil.FirstOnRowColCell
-  private static class PrivateCellUtil.FirstOnRowColTSByteBufferCell
-  private static class PrivateCellUtil.FirstOnRowColTSCell
-  private static class PrivateCellUtil.FirstOnRowDeleteFamilyCell
-  private static class PrivateCellUtil.LastOnRowByteBufferCell
-  private static class PrivateCellUtil.LastOnRowCell
-  private static class PrivateCellUtil.LastOnRowColByteBufferCell
-  private static class PrivateCellUtil.LastOnRowColCell
-  (package private) static class PrivateCellUtil.TagRewriteByteBufferCell
-  (package private) static class PrivateCellUtil.TagRewriteCell - This can be used when a Cell has to change with addition/removal of one or more tags.
-  (package private) static class PrivateCellUtil.ValueAndTagRewriteByteBufferCell
-  (package private) static class PrivateCellUtil.ValueAndTagRewriteCell
-  class SizeCachedKeyValue - This class is an extension to KeyValue where rowLen and keyLen are cached.
-  class SizeCachedNoTagsKeyValue - This class is an extension to ContentSizeCachedKeyValue where there are no tags in Cell.
-
-Uses of SettableSequenceId in org.apache.hadoop.hbase.io.encoding
-
-Classes in org.apache.hadoop.hbase.io.encoding that implement SettableSequenceId:
-  protected static class BufferedDataBlockEncoder.OffheapDecodedCell
-  protected static class BufferedDataBlockEncoder.OnheapDecodedCell - Copies only the key part of the keybuffer by doing a deep copy and passes the seeker state members for taking a clone.
-
-Uses of SettableSequenceId in org.apache.hadoop.hbase.regionserver
-
-Classes in org.apache.hadoop.hbase.regionserver that implement SettableSequenceId:
-  Modifier and Type
-  Class and 

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-annotations/dependency-info.html
--
diff --git a/hbase-annotations/dependency-info.html 
b/hbase-annotations/dependency-info.html
index cd557dd..73b04b8 100644
--- a/hbase-annotations/dependency-info.html
+++ b/hbase-annotations/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations  Dependency Information
 
@@ -147,7 +147,7 @@
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.  

-      Last Published: 2017-12-05
+      Last Published: 2017-12-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-annotations/dependency-management.html
--
diff --git a/hbase-annotations/dependency-management.html 
b/hbase-annotations/dependency-management.html
index 2ce3384..2ac5ed6 100644
--- a/hbase-annotations/dependency-management.html
+++ b/hbase-annotations/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations  Project Dependency 
Management
 
@@ -775,18 +775,24 @@
 test-jar
 Apache License, Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)

+org.apache.hbase : hbase-zookeeper (http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper) 3.0.0-SNAPSHOT, test-jar, Apache License, Version 2.0
 org.bouncycastle : bcprov-jdk16 (http://www.bouncycastle.org/java.html) 1.46, jar, Bouncy Castle Licence (http://www.bouncycastle.org/licence.html)
 org.hamcrest : hamcrest-core (https://github.com/hamcrest/JavaHamcrest/hamcrest-core) 1.3, jar, New BSD License (http://www.opensource.org/licenses/bsd-license.php)
 org.mockito : mockito-core (http://mockito.org) 2.1.0
@@ -804,7 +810,7 @@
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.  

-      Last Published: 2017-12-05
+      Last Published: 2017-12-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-annotations/index.html
--
diff --git a/hbase-annotations/index.html b/hbase-annotations/index.html
index 3f5ad69..e6e2d3b 100644
--- a/hbase-annotations/index.html
+++ b/hbase-annotations/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations  About
 
@@ -119,7 +119,7 @@
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.  

-      Last Published: 2017-12-05
+      Last Published: 2017-12-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-annotations/integration.html
--
diff --git a/hbase-annotations/integration.html 
b/hbase-annotations/integration.html
index ea953cf..65ed760 100644
--- a/hbase-annotations/integration.html
+++ b/hbase-annotations/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations  CI Management
 
@@ -126,7 +126,7 @@
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.  

-      Last Published: 2017-12-05
+      Last Published: 2017-12-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-annotations/issue-tracking.html
--
diff --git a/hbase-annotations/issue-tracking.html 
b/hbase-annotations/issue-tracking.html
index d02e8d8..443efdd 100644
--- a/hbase-annotations/issue-tracking.html
+++ b/hbase-annotations/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations  Issue Management
 
@@ -123,7 +123,7 @@
 The Apache Software Foundation (https://www.apache.org/).
 All rights reserved.  

-      Last Published: 2017-12-05
+      Last Published: 2017-12-06
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-annotations/license.html
--
diff --git a/hbase-annotations/license.html b/hbase-annotations/license.html
index 23d4722..c722bfd 100644
--- a/hbase-annotations/license.html
+++ b/hbase-annotations/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index 64ba75d..6cb35af 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -56,2149 +56,2149 @@
 048import 
java.util.function.ToLongFunction;
 049import java.util.stream.Collectors;
 050import java.util.stream.LongStream;
-051
-052import org.apache.commons.logging.Log;
-053import 
org.apache.commons.logging.LogFactory;
-054import 
org.apache.hadoop.conf.Configuration;
-055import org.apache.hadoop.fs.FileSystem;
-056import org.apache.hadoop.fs.Path;
-057import org.apache.hadoop.hbase.Cell;
-058import 
org.apache.hadoop.hbase.CellComparator;
-059import 
org.apache.hadoop.hbase.CellUtil;
-060import 
org.apache.hadoop.hbase.CompoundConfiguration;
-061import 
org.apache.hadoop.hbase.HConstants;
-062import 
org.apache.hadoop.hbase.MemoryCompactionPolicy;
-063import 
org.apache.hadoop.hbase.TableName;
-064import 
org.apache.hadoop.hbase.backup.FailedArchiveException;
-065import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-066import 
org.apache.hadoop.hbase.client.RegionInfo;
-067import 
org.apache.hadoop.hbase.client.Scan;
-068import 
org.apache.hadoop.hbase.conf.ConfigurationManager;
-069import 
org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-070import 
org.apache.hadoop.hbase.io.HeapSize;
-071import 
org.apache.hadoop.hbase.io.compress.Compression;
-072import 
org.apache.hadoop.hbase.io.crypto.Encryption;
-073import 
org.apache.hadoop.hbase.io.hfile.CacheConfig;
-074import 
org.apache.hadoop.hbase.io.hfile.HFile;
-075import 
org.apache.hadoop.hbase.io.hfile.HFileContext;
-076import 
org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-077import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-078import 
org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
-079import 
org.apache.hadoop.hbase.io.hfile.HFileScanner;
-080import 
org.apache.hadoop.hbase.io.hfile.InvalidHFileException;
-081import 
org.apache.hadoop.hbase.monitoring.MonitoredTask;
-082import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
-083import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-084import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
-085import 
org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
-086import 
org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
-087import 
org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
-088import 
org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
-089import 
org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
-090import 
org.apache.hadoop.hbase.regionserver.wal.WALUtil;
-091import 
org.apache.hadoop.hbase.security.EncryptionUtil;
-092import 
org.apache.hadoop.hbase.security.User;
-093import 
org.apache.hadoop.hbase.util.Bytes;
-094import 
org.apache.hadoop.hbase.util.ChecksumType;
-095import 
org.apache.hadoop.hbase.util.ClassSize;
-096import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-097import 
org.apache.hadoop.hbase.util.Pair;
-098import 
org.apache.hadoop.hbase.util.ReflectionUtils;
-099import 
org.apache.hadoop.util.StringUtils;
-100import 
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-101import 
org.apache.yetus.audience.InterfaceAudience;
-102
-103import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-104import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-105import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableCollection;
-106import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
-107import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-108import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
-111
-112/**
-113 * A Store holds a column family in a Region.  Its a memstore and a set of zero
-114 * or more StoreFiles, which stretch backwards over time.
-115 *
-116 * <p>There's no reason to consider append-logging at this level; all logging
-117 * and locking is handled at the HRegion level.  Store just provides
-118 * services to manage sets of StoreFiles.  One of the most important of those
-119 * services is compaction services where files are aggregated once they pass
-120 * a configurable threshold.
-121 *
-122 * 
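
The "configurable threshold" mentioned in that javadoc corresponds, under the shipped defaults, to hbase.hstore.compactionThreshold (default 3: a minor compaction is considered once a Store accumulates that many StoreFiles). A minimal sketch of raising it programmatically, assuming client-side Configuration is where you tune it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThreshold {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of StoreFiles in a single Store before a minor
    // compaction is considered; 3 is the shipped default.
    conf.setInt("hbase.hstore.compactionThreshold", 5);
    return conf;
  }
}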

[16/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
index de6cb11..dd54dd2 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.State.html
@@ -25,563 +25,558 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
-022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
-023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
-026import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+020import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.HEART_BEAT_SEQNO;
+021import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.READ_TIMEOUT;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease;
+024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
+026import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.WRITER_IDLE;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 028
-029
-030import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-031import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-032import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-033import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler.Sharable;
-034import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-035import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.PromiseCombiner;
-044
-045import java.io.IOException;
-046import java.nio.ByteBuffer;
-047import java.util.ArrayDeque;
-048import java.util.Collection;
-049import java.util.Collections;
-050import java.util.Deque;
-051import java.util.IdentityHashMap;
-052import java.util.List;
-053import java.util.Set;
-054import 
java.util.concurrent.CompletableFuture;
-055import java.util.concurrent.TimeUnit;
-056import java.util.function.Supplier;
-057
-058import 
org.apache.hadoop.conf.Configuration;
-059import 
org.apache.hadoop.crypto.Encryptor;
-060import org.apache.hadoop.fs.Path;
+029import java.io.IOException;
+030import java.io.InterruptedIOException;
+031import java.nio.ByteBuffer;
+032import java.util.Collection;
+033import java.util.Collections;
+034import java.util.Iterator;
+035import java.util.List;
+036import java.util.Set;
+037import 
java.util.concurrent.CompletableFuture;
+038import 
java.util.concurrent.ConcurrentHashMap;
+039import 
java.util.concurrent.ConcurrentLinkedDeque;
+040import 
java.util.concurrent.ExecutionException;
+041import java.util.concurrent.TimeUnit;
+042import java.util.function.Supplier;
+043
+044import 
org.apache.hadoop.conf.Configuration;
+045import 
org.apache.hadoop.crypto.Encryptor;
+046import org.apache.hadoop.fs.Path;
+047import 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose;
+048import 
org.apache.hadoop.hbase.util.CancelableProgressable;
+049import 
org.apache.hadoop.hbase.util.FSUtils;
+050import 
org.apache.hadoop.hdfs.DFSClient;
+051import 
org.apache.hadoop.hdfs.DistributedFileSystem;

[16/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
index c2c122a..7cece5c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CheckAndMutateBuilder.html
@@ -439,11 +439,11 @@
 431  }
 432
 433  /**
-434   * Method that does a batch call on Deletes, Gets, Puts, Increments and Appends. The ordering of
-435   * execution of the actions is not defined. Meaning if you do a Put and a Get in the same
-436   * {@link #batch} call, you will not necessarily be guaranteed that the Get returns what the Put
-437   * had put.
-438   * @param actions list of Get, Put, Delete, Increment, Append objects
+434   * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends and RowMutations. The
+435   * ordering of execution of the actions is not defined. Meaning if you do a Put and a Get in the
+436   * same {@link #batch} call, you will not necessarily be guaranteed that the Get returns what the
+437   * Put had put.
+438   * @param actions list of Get, Put, Delete, Increment, Append, and RowMutations objects
 439   * @return A list of {@link CompletableFuture}s that represent the result for each action.
 440   */
 441  <T> List<CompletableFuture<T>> batch(List<? extends Row> actions);
@@ -451,7 +451,7 @@
 443  /**
 444   * A simple version of batch. It will fail if there are any failures and you will get the whole
 445   * result list at once if the operation is succeeded.
-446   * @param actions list of Get, Put, Delete, Increment, Append objects
+446   * @param actions list of Get, Put, Delete, Increment, Append and RowMutations objects
 447   * @return A list of the result for the actions. Wrapped by a {@link CompletableFuture}.
 448   */
 449  default <T> CompletableFuture<List<T>> batchAll(List<? extends Row> actions) {
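
A usage sketch against the signatures above; the table, family, and row names are illustrative, and batchAll is used so all per-action results come back in one future:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchAllExample {
  public static CompletableFuture<List<Object>> run(AsyncConnection conn) {
    AsyncTable<?> table = conn.getTable(TableName.valueOf("t1"));
    // Mixed actions; per the javadoc above, execution order is undefined,
    // so this Get is not guaranteed to observe this Put.
    List<Row> actions = Arrays.asList(
        new Put(Bytes.toBytes("row1"))
            .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")),
        new Get(Bytes.toBytes("row1")));
    return table.batchAll(actions);
  }
}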

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
index c2c122a..7cece5c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorCallback.html
@@ -439,11 +439,11 @@
 431  }
 432
 433  /**
-434   * Method that does a batch call on 
Deletes, Gets, Puts, Increments and Appends. The ordering of
-435   * execution of the actions is not 
defined. Meaning if you do a Put and a Get in the same
-436   * {@link #batch} call, you will not 
necessarily be guaranteed that the Get returns what the Put
-437   * had put.
-438   * @param actions list of Get, Put, 
Delete, Increment, Append objects
+434   * Method that does a batch call on 
Deletes, Gets, Puts, Increments, Appends and RowMutations. The
+435   * ordering of execution of the actions 
is not defined. Meaning if you do a Put and a Get in the
+436   * same {@link #batch} call, you will 
not necessarily be guaranteed that the Get returns what the
+437   * Put had put.
+438   * @param actions list of Get, Put, 
Delete, Increment, Append, and RowMutations objects
 439   * @return A list of {@link 
CompletableFuture}s that represent the result for each action.
 440   */
 441  T 
ListCompletableFutureT batch(List? extends Row 
actions);
@@ -451,7 +451,7 @@
 443  /**
 444   * A simple version of batch. It will fail if there are any failures and you will get the whole
 445   * result list at once if the operation succeeds.
-446   * @param actions list of Get, Put, Delete, Increment, Append objects
+446   * @param actions list of Get, Put, Delete, Increment, Append and RowMutations objects
 447   * @return A list of the result for the actions. Wrapped by a {@link CompletableFuture}.
 448   */
 449  default <T> CompletableFuture<List<T>> batchAll(List<? extends Row> actions) {
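A companion sketch for batchAll, under the same hypothetical `conn`/`demo`/`cf` assumptions and imports as the earlier sketch: it yields one future for the whole list, failing outright if any single action fails.

// Sketch only: batchAll completes with the full result list at once, or
// exceptionally if any action failed.
static void batchAllSketch(AsyncTable<AdvancedScanResultConsumer> table) {
  byte[] cf = Bytes.toBytes("cf");
  List<Row> actions = Arrays.<Row>asList(
      new Get(Bytes.toBytes("row2")),
      new Put(Bytes.toBytes("row3")).addColumn(cf, Bytes.toBytes("a"), Bytes.toBytes("x")));

  table.<Object>batchAll(actions)
      .thenAccept(results -> results.forEach(System.out::println))
      .exceptionally(t -> { t.printStackTrace(); return null; });
}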

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html
index c2c122a..7cece5c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncTable.CoprocessorServiceBuilder.html

[16/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 29ea7b3..6ed75c9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -1313,7093 +1313,7082 @@
 1305
 1306  @Override
 1307  public boolean isSplittable() {
-1308    boolean result = isAvailable() && !hasReferences();
-1309    LOG.info("ASKED IF SPLITTABLE " + result + " " + getRegionInfo().getShortNameToLog(),
-1310      new Throwable("LOGGING: REMOVE"));
-1311    // REMOVE BELOW
-1312    LOG.info("DEBUG LIST ALL FILES");
-1313    for (HStore store : this.stores.values()) {
-1314      LOG.info("store " + store.getColumnFamilyName());
-1315      for (HStoreFile sf : store.getStorefiles()) {
-1316        LOG.info(sf.toStringDetailed());
-1317      }
-1318    }
-1319    return result;
-1320  }
-1321
-1322  @Override
-1323  public boolean isMergeable() {
-1324    if (!isAvailable()) {
-1325      LOG.debug("Region " + this
-1326          + " is not mergeable because it is closing or closed");
-1327      return false;
-1328    }
-1329    if (hasReferences()) {
-1330      LOG.debug("Region " + this
-1331          + " is not mergeable because it has references");
-1332      return false;
-1333    }
-1334
-1335    return true;
+1308    return isAvailable() && !hasReferences();
+1309  }
+1310
+1311  @Override
+1312  public boolean isMergeable() {
+1313    if (!isAvailable()) {
+1314      LOG.debug("Region " + this
+1315          + " is not mergeable because it is closing or closed");
+1316      return false;
+1317    }
+1318    if (hasReferences()) {
+1319      LOG.debug("Region " + this
+1320          + " is not mergeable because it has references");
+1321      return false;
+1322    }
+1323
+1324    return true;
+1325  }
+1326
+1327  public boolean areWritesEnabled() {
+1328    synchronized(this.writestate) {
+1329      return this.writestate.writesEnabled;
+1330    }
+1331  }
+1332
+1333  @VisibleForTesting
+1334  public MultiVersionConcurrencyControl getMVCC() {
+1335    return mvcc;
 1336  }
 1337
-1338  public boolean areWritesEnabled() {
-1339    synchronized(this.writestate) {
-1340      return this.writestate.writesEnabled;
-1341    }
-1342  }
-1343
-1344  @VisibleForTesting
-1345  public MultiVersionConcurrencyControl getMVCC() {
-1346    return mvcc;
-1347  }
-1348
-1349  @Override
-1350  public long getMaxFlushedSeqId() {
-1351    return maxFlushedSeqId;
+1338  @Override
+1339  public long getMaxFlushedSeqId() {
+1340    return maxFlushedSeqId;
+1341  }
+1342
+1343  /**
+1344   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
+1345   */
+1346  public long getReadPoint(IsolationLevel isolationLevel) {
+1347    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
+1348      // This scan can read even uncommitted transactions
+1349      return Long.MAX_VALUE;
+1350    }
+1351    return mvcc.getReadPoint();
 1352  }
 1353
-1354  /**
-1355   * @return readpoint considering given IsolationLevel. Pass {@code null} for default
-1356   */
-1357  public long getReadPoint(IsolationLevel isolationLevel) {
-1358    if (isolationLevel != null && isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
-1359      // This scan can read even uncommitted transactions
-1360      return Long.MAX_VALUE;
-1361    }
-1362    return mvcc.getReadPoint();
-1363  }
-1364
-1365  public boolean isLoadingCfsOnDemandDefault() {
-1366    return this.isLoadingCfsOnDemandDefault;
-1367  }
-1368
-1369  /**
-1370   * Close down this HRegion.  Flush the cache, shut down each HStore, don't
-1371   * service any more calls.
-1372   *
-1373   * <p>This method could take some time to execute, so don't call it from a
-1374   * time-sensitive thread.
-1375   *
-1376   * @return Vector of all the storage files that the HRegion's component
-1377   * HStores make use of.  It's a list of all StoreFile objects. Returns empty
-1378   * vector if already closed and null if judged that it should not close.
-1379   *
-1380   * @throws IOException e
-1381   * @throws DroppedSnapshotException Thrown when replay of wal is required
-1382   * because a Snapshot was not properly persisted. The region is put in closing mode, and the
-1383   * caller MUST abort after this.
-1384   */
-1385  public Map<byte[], List<HStoreFile>> close() throws IOException {
-1386    return close(false);
-1387  }
-1388
-1389  private final Object closeLock = new Object();
-1390
-1391  /** Conf key for the periodic flush interval */
-1392  public 
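The getReadPoint logic above is driven by the isolation level a client sets on its query: READ_UNCOMMITTED is what makes the server-side method return Long.MAX_VALUE instead of the MVCC read point. A hedged client-side sketch, assuming a hypothetical Connection `connection`, table `demo`, and family `cf`:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: connection, table and family names are assumptions.
static void scanUncommitted(Connection connection) throws IOException {
  Scan scan = new Scan()
      .addFamily(Bytes.toBytes("cf"))
      .setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); // server may serve uncommitted cells

  try (Table table = connection.getTable(TableName.valueOf("demo"));
       ResultScanner scanner = table.getScanner(scan)) {
    for (Result result : scanner) {
      System.out.println(result);
    }
  }
}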

[16/51] [partial] hbase-site git commit: Published site at .

2017-11-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.html b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.html
index d0ae041..6b6d668 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Private
-public class VisibilityUtils
+public class VisibilityUtils
 extends Object
 Utility method to support visibility
 
@@ -306,7 +306,7 @@ extends Object
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
@@ -315,7 +315,7 @@ extends Object
 
 VISIBILITY_LABEL_GENERATOR_CLASS
-public static final String VISIBILITY_LABEL_GENERATOR_CLASS
+public static final String VISIBILITY_LABEL_GENERATOR_CLASS
 
 See Also:
 Constant Field Values
 
@@ -328,7 +328,7 @@ extends Object
 
 SYSTEM_LABEL
-public static final String SYSTEM_LABEL
+public static final String SYSTEM_LABEL
 
 See Also:
 Constant Field Values
 
@@ -341,7 +341,7 @@ extends Object
 
 SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG
-public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG
+public static final Tag SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG
 
@@ -350,7 +350,7 @@ extends Object
 
 COMMA
-private static final String COMMA
+private static final String COMMA
 
 See Also:
 Constant Field Values
 
@@ -363,7 +363,7 @@ extends Object
 
 EXP_PARSER
-private static final ExpressionParser EXP_PARSER
+private static final ExpressionParser EXP_PARSER
 
@@ -372,7 +372,7 @@ extends Object
 
 EXP_EXPANDER
-private static final ExpressionExpander EXP_EXPANDER
+private static final ExpressionExpander EXP_EXPANDER
 
@@ -389,7 +389,7 @@ extends Object
 
 VisibilityUtils
-public VisibilityUtils()
+public VisibilityUtils()
 
@@ -406,7 +406,7 @@ extends Object
 
 getDataToWriteToZooKeeper
-public static byte[] getDataToWriteToZooKeeper(Map<String,Integer> existingLabels)
+public static byte[] getDataToWriteToZooKeeper(Map<String,Integer> existingLabels)
 Creates the labels data to be written to zookeeper.
 
 Parameters:
 
@@ -422,7 +422,7 @@ extends Object
 
 getUserAuthsDataToWriteToZooKeeper
-public static byte[] getUserAuthsDataToWriteToZooKeeper(Map<String,
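As documented in the hunk above, getDataToWriteToZooKeeper serializes the label-to-ordinal map for the labels znode. A small hedged sketch of calling it, with labels and ordinals invented for illustration (though "system" conventionally holds ordinal 1):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.security.visibility.VisibilityUtils;

// Sketch only: an invented label map, serialized the way the visibility
// label service would before writing it to ZooKeeper.
Map<String, Integer> existingLabels = new HashMap<>();
existingLabels.put("system", 1);
existingLabels.put("secret", 2);
existingLabels.put("topsecret", 3);

byte[] zkData = VisibilityUtils.getDataToWriteToZooKeeper(existingLabels);
// zkData is what the labels service would store under the labels znode.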
