[19/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
index 214b37c..4d1bf1e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionState.State.html
@@ -67,381 +67,390 @@
     // apply it to a region in this state, as it may lead to data loss as we
     // may have some data in recovered edits.
 
+    public boolean matches(State... expected) {
+      for (State state : expected) {
+        if (this == state) {
+          return true;
+        }
+      }
+      return false;
+    }
+
     /**
      * Convert to protobuf ClusterStatusProtos.RegionState.State
      */
     public ClusterStatusProtos.RegionState.State convert() {
       ClusterStatusProtos.RegionState.State rs;
       switch (this) {
         case OFFLINE:
           rs = ClusterStatusProtos.RegionState.State.OFFLINE;
           break;
         case OPENING:
           rs = ClusterStatusProtos.RegionState.State.OPENING;
           break;
         case OPEN:
           rs = ClusterStatusProtos.RegionState.State.OPEN;
           break;
         case CLOSING:
           rs = ClusterStatusProtos.RegionState.State.CLOSING;
           break;
         case CLOSED:
           rs = ClusterStatusProtos.RegionState.State.CLOSED;
           break;
         case SPLITTING:
           rs = ClusterStatusProtos.RegionState.State.SPLITTING;
           break;
         case SPLIT:
           rs = ClusterStatusProtos.RegionState.State.SPLIT;
           break;
         case FAILED_OPEN:
           rs = ClusterStatusProtos.RegionState.State.FAILED_OPEN;
           break;
         case FAILED_CLOSE:
           rs = ClusterStatusProtos.RegionState.State.FAILED_CLOSE;
           break;
         case MERGING:
           rs = ClusterStatusProtos.RegionState.State.MERGING;
           break;
         case MERGED:
           rs = ClusterStatusProtos.RegionState.State.MERGED;
           break;
         case SPLITTING_NEW:
           rs = ClusterStatusProtos.RegionState.State.SPLITTING_NEW;
           break;
         case MERGING_NEW:
           rs = ClusterStatusProtos.RegionState.State.MERGING_NEW;
           break;
         case ABNORMALLY_CLOSED:
           rs = ClusterStatusProtos.RegionState.State.ABNORMALLY_CLOSED;
           break;
         default:
           throw new IllegalStateException("");
       }
       return rs;
     }
 
     /**
      * Convert a protobuf HBaseProtos.RegionState.State to a RegionState.State
      *
      * @return the RegionState.State
      */
     public static State convert(ClusterStatusProtos.RegionState.State protoState) {
       State state;
       switch (protoState) {
         case OFFLINE:
           state = OFFLINE;
           break;
         case PENDING_OPEN:
         case OPENING:
           state = OPENING;
           break;
         case OPEN:
           state = OPEN;
           break;
         case PENDING_CLOSE:
         case CLOSING:
           state = CLOSING;
           break;
         case CLOSED:
           state = CLOSED;
           break;
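(excerpt truncated)

The new matches helper added above is a plain varargs membership test; a hypothetical call site (not from this commit) would read:

    // One varargs test instead of chained equality checks.
    RegionState.State state = regionState.getState();
    if (state.matches(RegionState.State.OPEN, RegionState.State.OPENING)) {
      // region is open or on its way to open
    }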

[19/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/master/SplitWALManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/SplitWALManager.html b/devapidocs/org/apache/hadoop/hbase/master/SplitWALManager.html
new file mode 100644
index 000..e2036ad
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/master/SplitWALManager.html
@@ -0,0 +1,598 @@
SplitWALManager (Apache HBase 3.0.0-SNAPSHOT API)
org.apache.hadoop.hbase.master
Class SplitWALManager

java.lang.Object
  org.apache.hadoop.hbase.master.SplitWALManager

@InterfaceAudience.Private
public class SplitWALManager
extends Object
Creates a SplitWALProcedure for each WAL that needs to be split. Manages the workers for each
SplitWALProcedure. The total number of workers is (number of online servers) *
(HBASE_SPLIT_WAL_MAX_SPLITTER). Helps assign and release workers for split tasks, and provides
helper methods to delete split WAL files and directories.

The user can get the SplitWALProcedures via splitWALs(crashedServer, splitMeta), get the files
that need to be split via getWALsToSplit(crashedServer, splitMeta), delete a splitting WAL and
directory via deleteSplitWAL(wal) and deleteSplitWAL(crashedServer), check whether splitting the
WALs of a crashed server succeeded via isSplitWALFinished(walPath), and acquire and release a
worker for splitting a WAL via acquireSplitWALWorker(procedure) and
releaseSplitWALWorker(worker, scheduler).

This class replaces the zk-based WAL splitting code; MasterWalManager, SplitLogManager,
ZKSplitLog and ZKSplitLogManagerCoordination can be removed after we switch to procedure-based
WAL splitting.
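As a rough, hedged outline of the lifecycle this javadoc describes (method names are quoted from
the text above; the boolean splitMeta flag, the scheduler handle, and the surrounding procedure
are assumptions, not verified signatures):

    // Rough sketch of the described flow; parameter shapes are assumptions.
    SplitWALManager splitWALManager = new SplitWALManager(master);
    // One SplitWALProcedure per WAL of the crashed server (false: non-meta WALs).
    List<Procedure> procs = splitWALManager.splitWALs(crashedServer, false);
    // Later, from within a SplitWALProcedure: borrow a worker, then return it.
    ServerName worker = splitWALManager.acquireSplitWALWorker(procedure);
    if (splitWALManager.isSplitWALFinished(walPath)) {
      splitWALManager.deleteSplitWAL(walPath);
      splitWALManager.releaseSplitWALWorker(worker, scheduler);
    }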
Nested Class Summary

private static class SplitWALManager.SplitWorkerAssigner
  Helps assign and release a worker for each WAL splitting task. For each worker, the number of
  concurrently running splitting tasks should be no more than maxSplitTasks. If a task fails to
  acquire a worker, it suspends and waits for a worker to become available.
Field Summary

private org.apache.hadoop.conf.Configuration conf
private org.apache.hadoop.fs.FileSystem fs
private static org.slf4j.Logger LOG
private MasterServices master
private org.apache.hadoop.fs.Path rootDir
private SplitWALManager.SplitWorkerAssigner splitWorkerAssigner
Constructor Summary

SplitWALManager(MasterServices master)
Method Summary

ServerName acquireSplitWALWorker(Procedure<?> procedure)
  Try to acquire a worker from the online servers which are executing …
void addUsedSplitWALWorker(ServerName worker)
  When the master restarts, there will be a new splitWorkerAssigner.
(package private) List<Procedure> createSplitWALProcedures(
    List<org.apache.hadoop.fs.FileStatus> splittingWALs, ServerName crashedServer)
void …
[19/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.BuilderBase.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.BuilderBase.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.BuilderBase.html
index 2e150bc..0b315b8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.BuilderBase.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.BuilderBase.html
@@ -25,22 +25,22 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
-import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkArgument;
+import static org.apache.hbase.thirdparty.com.google.common.base.Preconditions.checkNotNull;
 
-import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
-
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
 
@@ -83,432 +83,441 @@
 
     private RegionLocateType locateType = RegionLocateType.CURRENT;
 
+    private int replicaId = RegionReplicaUtil.DEFAULT_REPLICA_ID;
+
     public SingleRequestCallerBuilder<T> table(TableName tableName) {
       this.tableName = tableName;
       return this;
     }
 
     public SingleRequestCallerBuilder<T> row(byte[] row) {
       this.row = row;
       return this;
     }
 
     public SingleRequestCallerBuilder<T> action(
         AsyncSingleRequestRpcRetryingCaller.Callable<T> callable) {
       this.callable = callable;
       return this;
     }
 
     public SingleRequestCallerBuilder<T> operationTimeout(long operationTimeout, TimeUnit unit) {
       this.operationTimeoutNs = unit.toNanos(operationTimeout);
       return this;
     }
 
     public SingleRequestCallerBuilder<T> rpcTimeout(long rpcTimeout, TimeUnit unit) {
       this.rpcTimeoutNs = unit.toNanos(rpcTimeout);
       return this;
     }
 
     public SingleRequestCallerBuilder<T> locateType(RegionLocateType locateType) {
       this.locateType = locateType;
       return this;
     }
 
     public SingleRequestCallerBuilder<T> pause(long pause, TimeUnit unit) {
       this.pauseNs = unit.toNanos(pause);
       return this;
     }
 
     public SingleRequestCallerBuilder<T> maxAttempts(int maxAttempts) {
       this.maxAttempts = maxAttempts;
       return this;
     }
 
     public SingleRequestCallerBuilder<T> startLogErrorsCnt(int startLogErrorsCnt) {
       this.startLogErrorsCnt = startLogErrorsCnt;
       return this;
     }
 
-    public AsyncSingleRequestRpcRetryingCaller<T> build() {
-      return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn,
-          checkNotNull(tableName, "tableName is null"), checkNotNull(row, "row is null"),
-          checkNotNull(locateType, "locateType is null"), checkNotNull(callable, "action is null"),
-          pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
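(excerpt truncated before the replacement build())

For orientation, a hedged sketch of how the fluent chain reads (the callerFactory handle, the
terminal call(), and the issueGet helper are assumptions from context; the intermediate setters
are the ones shown in the diff):

    // Sketch only: the wiring around the builder is assumed.
    CompletableFuture<Result> future = callerFactory.<Result> single()
        .table(TableName.valueOf("test"))
        .row(Bytes.toBytes("row-1"))
        .locateType(RegionLocateType.CURRENT)
        .operationTimeout(30, TimeUnit.SECONDS)
        .rpcTimeout(10, TimeUnit.SECONDS)
        .maxAttempts(3)
        .action((controller, loc, stub) -> issueGet(controller, loc, stub))
        .call();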

[19/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.html
new file mode 100644
index 000..5b5b199
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.html
@@ -0,0 +1,1419 @@
(standard Apache License 2.0 header)
package org.apache.hadoop.hbase.thrift;

import static org.apache.hadoop.hbase.thrift.Constants.COALESCE_INC_KEY;
import static org.apache.hadoop.hbase.util.Bytes.getBytes;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilder;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.OperationWithAttributes;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.ParseFilter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.hadoop.hbase.thrift.generated.IOError;
import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
import org.apache.hadoop.hbase.thrift.generated.Mutation;
import org.apache.hadoop.hbase.thrift.generated.TAppend;
import org.apache.hadoop.hbase.thrift.generated.TCell;
import org.apache.hadoop.hbase.thrift.generated.TIncrement;
import org.apache.hadoop.hbase.thrift.generated.TRegionInfo;
import org.apache.hadoop.hbase.thrift.generated.TRowResult;
import org.apache.hadoop.hbase.thrift.generated.TScan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.thrift.TException;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.base.Throwables;

/**
 * The HBaseServiceHandler is a glue object that connects Thrift RPC calls to the
 * HBase client API primarily defined in the …
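The excerpt cuts off inside the class javadoc. As a hypothetical illustration of the "glue"
pattern it names (the method shape and the getTable/getIOError helpers are assumptions, not code
from this page), such a handler method converts Thrift ByteBuffers to byte[] and delegates to
the client API:

    // Hypothetical sketch of the Thrift-to-client glue (not from this page).
    public List<TCell> get(ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
        Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
      try (Table table = getTable(tableName)) {   // assumed helper
        Get get = new Get(getBytes(row));         // static import above
        // ... restrict 'get' to the requested column and attributes ...
        Result result = table.get(get);
        return ThriftUtilities.cellFromHBase(result.rawCells());
      } catch (IOException e) {
        throw getIOError(e);                      // assumed helper
      }
    }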

[19/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html b/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
index a8d8cac..c61a06c 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.EngineType.html
@@ -343,6 +343,6 @@ not permitted.)
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.JavaRegexEngine.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.JavaRegexEngine.html b/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.JavaRegexEngine.html
index eabe691..e3d1a1c 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.JavaRegexEngine.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.JavaRegexEngine.html
@@ -459,6 +459,6 @@ implements
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.JoniRegexEngine.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.JoniRegexEngine.html b/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.JoniRegexEngine.html
index 022096c..b482f75 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.JoniRegexEngine.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.JoniRegexEngine.html
@@ -515,6 +515,6 @@ implements
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html b/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html
index a667e8b..30eaaa2 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/RegexStringComparator.html
@@ -617,6 +617,6 @@ extends
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/filter/RowFilter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/RowFilter.html b/devapidocs/org/apache/hadoop/hbase/filter/RowFilter.html
index c9ada05..aa71bc1 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/RowFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/RowFilter.html
@@ -705,6 +705,6 @@ public
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.html b/devapidocs/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.html
index 37a1476..7ec1160 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.html
@@ -718,6 +718,6 @@ protected
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.html

[19/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/client/ClientSideRegionScanner.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ClientSideRegionScanner.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ClientSideRegionScanner.html
index 0ffc8e0..a960a57 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ClientSideRegionScanner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ClientSideRegionScanner.html
@@ -34,99 +34,107 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.hadoop.hbase.mob.MobFileCache;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
  * A client scanner for a region opened for read-only on the client side. Assumes region data
  * is not changing.
  */
 @InterfaceAudience.Private
 public class ClientSideRegionScanner extends AbstractClientScanner {
 
   private static final Logger LOG = LoggerFactory.getLogger(ClientSideRegionScanner.class);
 
   private HRegion region;
   RegionScanner scanner;
   List<Cell> values;
 
   public ClientSideRegionScanner(Configuration conf, FileSystem fs,
       Path rootDir, TableDescriptor htd, RegionInfo hri, Scan scan, ScanMetrics scanMetrics)
       throws IOException {
     // region is immutable, set isolation level
     scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
 
     htd = TableDescriptorBuilder.newBuilder(htd).setReadOnly(true).build();
 
     // open region from the snapshot directory
-    this.region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, null);
+    region = HRegion.newHRegion(FSUtils.getTableDir(rootDir, htd.getTableName()), null, fs, conf,
+      hri, htd, null);
+    // we won't initialize the MobFileCache when not running in RS process. so provided an
+    // initialized cache. Consider the case: an CF was set from an mob to non-mob. if we only
+    // initialize cache for MOB region, NPE from HMobStore will still happen. So Initialize the
+    // cache for every region although it may hasn't any mob CF, BTW the cache is very light-weight.
+    region.setMobFileCache(new MobFileCache(conf));
+    region.initialize();
 
     // create an internal region scanner
     this.scanner = region.getScanner(scan);
     values = new ArrayList<>();
 
     if (scanMetrics == null) {
       initScanMetrics(scan);
     } else {
       this.scanMetrics = scanMetrics;
     }
     region.startRegionOperation();
   }
 
   @Override
   public Result next() throws IOException {
     values.clear();
     scanner.nextRaw(values);
     if (values.isEmpty()) {
       // we are done
       return null;
     }
 
     Result result = Result.create(values);
     if (this.scanMetrics != null) {
       long resultSize = 0;
       for (Cell cell : values) {
         resultSize += PrivateCellUtil.estimatedSerializedSizeOf(cell);
       }
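(excerpt truncated)

A minimal usage sketch assembled from the constructor and next() above (the snapshot-derived
conf/fs/rootDir/htd/hri inputs are assumed; imports omitted):

    // Scan a snapshot-restored region directly from the client, read-only.
    static void scanRegion(Configuration conf, FileSystem fs, Path rootDir,
        TableDescriptor htd, RegionInfo hri) throws IOException {
      try (ClientSideRegionScanner scanner =
          new ClientSideRegionScanner(conf, fs, rootDir, htd, hri, new Scan(), null)) {
        for (Result r = scanner.next(); r != null; r = scanner.next()) {
          // process each row; region data is assumed immutable
        }
      }
    }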

[19/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.html b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.html
index d9d843f..21793b2 100644
--- a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheViewTmpl.html
@@ -114,7 +114,7 @@
 
 
 
 public class BlockCacheViewTmpl
 extends org.jamon.AbstractTemplateProxy
 
@@ -209,26 +209,29 @@
 org.jamon.Renderer
-makeRenderer(CacheConfig cacheConfig, org.apache.hadoop.conf.Configuration conf, String bcn, String bcv)
+makeRenderer(CacheConfig cacheConfig, org.apache.hadoop.conf.Configuration conf, String bcn, String bcv, BlockCache blockCache)
 
 void
-render(Writer jamonWriter, CacheConfig cacheConfig, org.apache.hadoop.conf.Configuration conf, String bcn, String bcv)
+render(Writer jamonWriter, CacheConfig cacheConfig, org.apache.hadoop.conf.Configuration conf, String bcn, String bcv, BlockCache blockCache)
 
 void
-renderNoFlush(Writer jamonWriter, CacheConfig cacheConfig, org.apache.hadoop.conf.Configuration conf, String bcn, String bcv)
+renderNoFlush(Writer jamonWriter, CacheConfig cacheConfig, org.apache.hadoop.conf.Configuration conf, String bcn, String bcv, BlockCache blockCache)
 
 (The remaining hunks only refresh generated anchors; these signatures are unchanged:)
 public BlockCacheViewTmpl(org.jamon.TemplateManager p_manager)
 protected BlockCacheViewTmpl(String p_path)
 public BlockCacheViewTmpl()
 protected org.jamon.AbstractTemplateProxy.ImplData makeImplData()
 public BlockCacheViewTmpl.ImplData getImplData()
 public org.jamon.AbstractTemplateImpl constructImpl(Class<? extends org.jamon.AbstractTemplateImpl> p_class)
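(excerpt truncated)

Per the updated signatures above, callers now pass the BlockCache alongside CacheConfig; a
hedged sketch (the surrounding servlet variables out/cacheConfig/conf/bcn/bcv/blockCache are
assumptions):

    // Callers must now hand the template the BlockCache as a sixth argument.
    BlockCacheViewTmpl tmpl = new BlockCacheViewTmpl();
    tmpl.render(out, cacheConfig, conf, bcn, bcv, blockCache);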

[19/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
@@ -142,5192 +142,5186 @@
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.yetus.audience.InterfaceStability;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-
-/**
- * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
- * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
- * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
- * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
- * <p>
- * Region consistency checks verify that hbase:meta, region deployment on region
- * servers and the state of data in HDFS (.regioninfo files) all are in
- * accordance.
- * <p>
- * Table integrity checks verify that all possible row keys resolve to exactly
- * one region of a table.  This means there are no individual degenerate
- * or backwards regions; no holes between regions; and that there are no
- * overlapping regions.
- * <p>
- * The general repair strategy works in two phases:
- * <ol>
- * <li> Repair Table Integrity on HDFS. (merge or fabricate regions)
- * <li> Repair Region Consistency with hbase:meta and assignments
- * </ol>
- * <p>
- * For table integrity repairs, the tables' region directories are scanned
- * for .regioninfo files.  Each table's integrity is then verified.  If there
- * are any orphan regions (regions with no .regioninfo files) or holes, new
- * regions are fabricated.  Backwards regions are sidelined as well as empty
- * degenerate (endkey==startkey) regions.  If there are any overlapping regions,
- * a new region is created and all data is merged into the new region.
- * <p>
- * Table integrity repairs deal solely with HDFS and could potentially be done
- * offline -- the hbase region servers or master do not need to be running.
- * This phase can eventually be used to completely reconstruct the hbase:meta table in
- * an offline fashion.
- * <p>
- * Region consistency requires three conditions -- 1) valid .regioninfo file
- * present in an HDFS region dir,  2) valid row with .regioninfo data in META,
- * and 3) a region is deployed only at the regionserver that was assigned to
- * with proper state in the master.
- * <p>
- * Region consistency repairs require hbase to be online so that hbck can
- * contact the HBase master and region servers.  The hbck#connect() method must
- * first be called successfully.  Much of the region consistency information
- * is transient and less risky to repair.
- * <p>
- * If hbck is …

[19/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
index 4b4692b..d1b845f 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperation.html
@@ -117,7 +117,7 @@
 
 
 
 private abstract static class HRegion.BatchOperation<T>
 extends Object
 
 Class that tracks the progress of a batch operations, accumulating status codes and tracking
 the index at which processing is proceeding. These batch operations may get split into …
 
 (The hunks in this page only refresh generated anchors; the member signatures are unchanged:)
 
 protected final T[] operations
 protected final OperationStatus[] retCodeDetails
 protected final WALEdit[] walEditsFromCoprocessors
 protected final Map<byte[], List<Cell>>[] familyCellMaps
 protected final HRegion region
 protected int nextIndexToProcess
 protected final HRegion.ObservedExceptionsInBatch observedExceptions
 protected Durability durability
 protected boolean atomic
 
 public BatchOperation(HRegion region, T[] operations)
 
 public void visitBatchOperations(boolean pendingOnly, int lastIndexExclusive,
     HRegion.BatchOperation.Visitor visitor) throws IOException
 
 public abstract Mutation getMutation(int index)
 public abstract long getNonceGroup(int index)
 public abstract long getNonce(int index)
 public abstract Mutation[] getMutationsForCoprocs()
   This method is potentially expensive and useful mostly for …
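Since the page lists signatures only, here is a hedged sketch of the visitor hook in use, as
HRegion itself does internally; the single-method shape of HRegion.BatchOperation.Visitor
(boolean visit(int index)) and the numOperations bound are assumptions from context:

    // Walk the not-yet-processed operations; returning true keeps visiting.
    batchOp.visitBatchOperations(true /* pendingOnly */, numOperations, index -> {
      Mutation mutation = batchOp.getMutation(index);
      // inspect or pre-process the pending mutation here
      return true;
    });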

[19/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/src-html/org/apache/hadoop/hbase/security/access/Permission.Action.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/Permission.Action.html b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/Permission.Action.html
index 042bf4a..cb2cfdc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/security/access/Permission.Action.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/security/access/Permission.Action.html
@@ -30,194 +30,226 @@
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.Map;
-
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.VersionedWritable;
-
-import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-
-/**
- * Base permissions instance representing the ability to perform a given set
- * of actions.
- *
- * @see TablePermission
- */
-@InterfaceAudience.Public
-public class Permission extends VersionedWritable {
-  protected static final byte VERSION = 0;
-
-  @InterfaceAudience.Public
-  public enum Action {
-    READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A');
-
-    private final byte code;
-    Action(char code) {
-      this.code = (byte)code;
-    }
-
-    public byte code() { return code; }
-  }
-
-  private static final Logger LOG = LoggerFactory.getLogger(Permission.class);
-  protected static final Map<Byte,Action> ACTION_BY_CODE = Maps.newHashMap();
-
-  protected Action[] actions;
+import java.util.EnumSet;
+import java.util.Map;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.VersionedWritable;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
+
+/**
+ * Base permissions instance representing the ability to perform a given set
+ * of actions.
+ *
+ * @see TablePermission
+ */
+@InterfaceAudience.Public
+public class Permission extends VersionedWritable {
+  protected static final byte VERSION = 0;
+
+  @InterfaceAudience.Public
+  public enum Action {
+    READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A');
+
+    private final byte code;
+    Action(char code) {
+      this.code = (byte) code;
+    }
+
+    public byte code() { return code; }
+  }
+
+  @InterfaceAudience.Private
+  protected enum Scope {
+    GLOBAL('G'), NAMESPACE('N'), TABLE('T'), EMPTY('E');
 
-  static {
-    for (Action a : Action.values()) {
-      ACTION_BY_CODE.put(a.code(), a);
+    private final byte code;
+    Scope(char code) {
+      this.code = (byte) code;
     }
-  }
-
-  /** Empty constructor for Writable implementation.  <b>Do not use.</b> */
-  public Permission() {
-    super();
-  }
-
-  public Permission(Action... assigned) {
-    if (assigned != null && assigned.length > 0) {
-      actions = Arrays.copyOf(assigned, assigned.length);
-    }
-  }
-
-  public Permission(byte[] actionCodes) {
-    if (actionCodes != null) {
-      Action acts[] = new Action[actionCodes.length];
-      int j = 0;
-      for (int i = 0; i < actionCodes.length; i++) {
-        byte b = actionCodes[i];
-        Action a = ACTION_BY_CODE.get(b);
-        if (a == null) {
-          LOG.error("Ignoring unknown action code '" +
-              Bytes.toStringBinary(new byte[]{b}) + "'");
-          continue;
-        }
-        acts[j++] = a;
-      }
-      this.actions = Arrays.copyOf(acts, j);
-    }
+
+    public byte code() {
+      return code;
+    }
+  }
+
+  private static final Logger LOG = LoggerFactory.getLogger(Permission.class);
+
+  protected static final Map<Byte, Action> ACTION_BY_CODE;
+  protected static final Map<Byte, Scope> SCOPE_BY_CODE;
+
+  protected EnumSet<Action> actions = EnumSet.noneOf(Action.class);
+  protected Scope scope = Scope.EMPTY;
+
+  static {
+    ACTION_BY_CODE = ImmutableMap.of(
+      Action.READ.code, Action.READ,
+      Action.WRITE.code, Action.WRITE,
+      Action.EXEC.code, Action.EXEC,
+      Action.CREATE.code, Action.CREATE,
+      Action.ADMIN.code, Action.ADMIN
+    );
+
+    SCOPE_BY_CODE = ImmutableMap.of(
+      Scope.GLOBAL.code, Scope.GLOBAL,
+      Scope.NAMESPACE.code, Scope.NAMESPACE,
+      Scope.TABLE.code, Scope.TABLE,
+      Scope.EMPTY.code, Scope.EMPTY
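(excerpt truncated)

For orientation, both construction paths shown in the diff in use (Bytes is the HBase utility
already imported above):

    // Explicit actions, or a byte[] of one-character codes decoded via
    // ACTION_BY_CODE ('R' -> READ, 'W' -> WRITE, ...).
    Permission p1 = new Permission(Permission.Action.READ, Permission.Action.WRITE);
    Permission p2 = new Permission(Bytes.toBytes("RW"));
    // Unknown codes are logged ("Ignoring unknown action code ...") and skipped.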

[19/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
new file mode 100644
index 000..bf81ebb
--- /dev/null
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.SystemExitWhenAbortTimeout.html
@@ -0,0 +1,3912 @@
(standard Apache License 2.0 header)

package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.lang.Thread.UncaughtExceptionHandler;
import java.lang.management.MemoryType;
import java.lang.management.MemoryUsage;
import java.lang.reflect.Constructor;
import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.SortedMap;
import java.util.Timer;
import java.util.TimerTask;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
import javax.management.MalformedObjectNameException;
import javax.servlet.http.HttpServlet;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.SystemUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.CacheEvictionStats;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ClockOutOfSyncException;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HealthCheckChore;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.ZNodeClearer;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.client.locking.LockServiceClient;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;
import …

[19/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html
index c7d99b2..9d1542c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaRackCandidateGenerator.html
@@ -382,1357 +382,1365 @@
     for (int i = 0; i < this.curFunctionCosts.length; i++) {
       curFunctionCosts[i] = tempFunctionCosts[i];
     }
-    LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-        + functionCost());
+    double initCost = currentCost;
+    double newCost = currentCost;
 
-    double initCost = currentCost;
-    double newCost = currentCost;
-
-    long computedMaxSteps;
-    if (runMaxSteps) {
-      computedMaxSteps = Math.max(this.maxSteps,
-          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-    } else {
-      computedMaxSteps = Math.min(this.maxSteps,
-          ((long)cluster.numRegions * (long)this.stepsPerRegion * (long)cluster.numServers));
-    }
-    // Perform a stochastic walk to see if we can get a good fit.
-    long step;
-
-    for (step = 0; step < computedMaxSteps; step++) {
-      Cluster.Action action = nextAction(cluster);
-
-      if (action.type == Type.NULL) {
-        continue;
-      }
-
-      cluster.doAction(action);
-      updateCostsWithAction(cluster, action);
-
-      newCost = computeCost(cluster, currentCost);
-
-      // Should this be kept?
-      if (newCost < currentCost) {
-        currentCost = newCost;
-
-        // save for JMX
-        curOverallCost = currentCost;
-        for (int i = 0; i < this.curFunctionCosts.length; i++) {
-          curFunctionCosts[i] = tempFunctionCosts[i];
-        }
-      } else {
-        // Put things back the way they were before.
-        // TODO: undo by remembering old values
-        Action undoAction = action.undoAction();
-        cluster.doAction(undoAction);
-        updateCostsWithAction(cluster, undoAction);
-      }
-
-      if (EnvironmentEdgeManager.currentTime() - startTime > maxRunningTime) {
-        break;
-      }
-    }
-    long endTime = EnvironmentEdgeManager.currentTime();
-
-    metricsBalancer.balanceCluster(endTime - startTime);
-
-    // update costs metrics
-    updateStochasticCosts(tableName, curOverallCost, curFunctionCosts);
-    if (initCost > currentCost) {
-      plans = createRegionPlans(cluster);
-      LOG.info("Finished computing new load balance plan. Computation took {}" +
-        " to try {} different iterations.  Found a solution that moves " +
-        "{} regions; Going from a computed cost of {}" +
-        " to a new cost of {}", java.time.Duration.ofMillis(endTime - startTime),
-        step, plans.size(), initCost, currentCost);
-      return plans;
-    }
-    LOG.info("Could not find a better load balance plan.  Tried {} different configurations in " +
-      "{}, and did not find anything with a computed cost less than {}", step,
-      java.time.Duration.ofMillis(endTime - startTime), initCost);
-    return null;
-  }
-
-  /**
-   * update costs to JMX
-   */
-  private void updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) {
-    if (tableName == null) return;
-
-    // check if the metricsBalancer is MetricsStochasticBalancer before casting
-    if (metricsBalancer instanceof MetricsStochasticBalancer) {
-      MetricsStochasticBalancer balancer = (MetricsStochasticBalancer) metricsBalancer;
-      // overall cost
-      balancer.updateStochasticCost(tableName.getNameAsString(),
-        "Overall", "Overall cost", overall);
-
-      // each cost function
-      for (int i = 0; i < costFunctions.length; i++) {
-        CostFunction costFunction = costFunctions[i];
-        String costFunctionName = costFunction.getClass().getSimpleName();
-        Double costPercent = (overall == 0) ? 0 : (subCosts[i] / overall);
-        // TODO: cost function may need a specific description
-        balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-          "The percent of " …

[19/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureTree.TestProcedure.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureTree.TestProcedure.html
 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureTree.TestProcedure.html
new file mode 100644
index 000..2e5d0ec
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureTree.TestProcedure.html
@@ -0,0 +1,463 @@
+TestWALProcedureTree.TestProcedure (Apache HBase 3.0.0-SNAPSHOT Test API)
+org.apache.hadoop.hbase.procedure2.store.wal
+Class 
TestWALProcedureTree.TestProcedure
+
+
+java.lang.Object
+  org.apache.hadoop.hbase.procedure2.Procedure<java.lang.Void>
+    org.apache.hadoop.hbase.procedure2.store.wal.TestWALProcedureTree.TestProcedure
+
+All Implemented Interfaces:
+java.lang.Comparable<org.apache.hadoop.hbase.procedure2.Procedure<java.lang.Void>>
+
+Enclosing class:
+TestWALProcedureTree
+
+public static final class TestWALProcedureTree.TestProcedure
+extends org.apache.hadoop.hbase.procedure2.Procedure<java.lang.Void>
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+org.apache.hadoop.hbase.procedure2.Procedure.LockState
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+NO_PROC_ID, NO_TIMEOUT
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+TestProcedure()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All Methods | Instance Methods | Concrete Methods
+
+Modifier and Type                                               Method and Description
+protected boolean                                               abort(Void env)
+void                                                            addStackIndex(int index)
+protected void                                                  deserializeStateData(org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer serializer)
+protected org.apache.hadoop.hbase.procedure2.Procedure<Void>[]  execute(Void env)
+protected void                                                  rollback(Void env)
+protected void                                                  serializeStateData(org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer serializer)
+void                                                            setParentProcId(long parentProcId)
+void                                                            setProcId(long procId)
+
+
+
+
+
+
+Methods inherited from class org.apache.hadoop.hbase.procedure2.Procedure
+acquireLock, afterReplay, beforeReplay, bypass, compareTo, 
completionCleanup, doExecute, doRollback, elapsedTime, getChildrenLatch, 
getException, getLastUpdate, getNonceKey, getOwner, getParentProcId, 
getProcedureMetrics, getProcId, getProcIdHashCode, getProcName, getResult, 
getRootProcedureId, getRootProcId, getStackIndexes, getState, 

[19/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
index 3fa4076..8079e92 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/package-tree.html
@@ -216,8 +216,8 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
 org.apache.hadoop.hbase.master.procedure.MetaProcedureInterface.MetaOperationType
-org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
 org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType
+org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface.ServerOperationType
 org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
index 9576138..8967bb1 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.html
@@ -330,7 +330,7 @@ implements Procedure
-addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId,
 getProcIdHashCode, getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent,
 incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex,
 setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner,
 setOwner, setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime,
 setTimeout, shouldWaitClientAck,
 skipPersistence,
 toString,
 toStringClass,
 toStringClassDetails,
 toStringDetails,
 toStringSimpleSB,
 updateMetricsOnFinish, updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
+addStackIndex,
 afterReplay,
 beforeReplay,
 bypass,
 compareTo,
 completionCleanup,
 doExecute,
 doRollback,
 elapsedTime,
 getChildrenLatch,
 getException,
 getLastUpdate,
 getNonceKey,
 getOwner,
 getParentProcId,
 getProcedureMetrics,
 getProcId, getProcIdHashCode,
 getProcName,
 getResult,
 getRootProcedureId,
 getRootProcId,
 getStackIndexes,
 getState,
 getSubmittedTime,
 getTimeout,
 getTimeoutTimestamp,
 hasChildren,
 hasException,
 hasLock,
 hasOwner,
 hasParent,
 hasTimeout,
 haveSameParent, incChildrenLatch,
 isBypass,
 isFailed,
 isFinished,
 isInitializing,
 isRunnable,
 isSuccess,
 isWaiting,
 removeStackIndex, setAbortFailure,
 setChildrenLatch,
 setFailure,
 setFailure,
 setLastUpdate,
 setNonceKey,
 setOwner, setOwner,
 setParentProcId,
 setProcId,
 setResult,
 setRootProcId,
 setStackIndexes,
 setState,
 setSubmittedTime, setTimeout,
 shouldWaitClientAck,
 skipPersistence,
 toString,
 toStringClass,
 toStringClassDetails,
 toStringDetails,
 toStringSimpleSB, updateMetricsOnFinish,
 updateMetricsOnSubmit,
 updateTimestamp,
 wasExecuted
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.html 
b/devapidocs/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.html
index 3faea50..1e82772 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.html
@@ -347,7 +347,7 @@ extends Procedure
-addStackIndex,
 afterReplay,
 beforeReplay,
 compareTo,
 

[19/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/class-use/RestoreSnapshotFromClientAfterSplittingRegionsTestBase.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/RestoreSnapshotFromClientAfterSplittingRegionsTestBase.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/RestoreSnapshotFromClientAfterSplittingRegionsTestBase.html
new file mode 100644
index 000..84f8136
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/RestoreSnapshotFromClientAfterSplittingRegionsTestBase.html
@@ -0,0 +1,169 @@
+Uses of Class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientAfterSplittingRegionsTestBase (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+Uses of Class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientAfterSplittingRegionsTestBase
+
+
+
+
+
+Packages that use RestoreSnapshotFromClientAfterSplittingRegionsTestBase
+
+Package
+Description
+
+
+
+org.apache.hadoop.hbase.client
+
+
+
+
+
+
+
+
+
+
+Uses of RestoreSnapshotFromClientAfterSplittingRegionsTestBase
 in org.apache.hadoop.hbase.client
+
+Subclasses of RestoreSnapshotFromClientAfterSplittingRegionsTestBase
 in org.apache.hadoop.hbase.client
+
+Modifier and Type
+Class and Description
+
+
+
+class
+TestMobRestoreSnapshotFromClientAfterSplittingRegions
+
+
+class
+TestRestoreSnapshotFromClientAfterSplittingRegions
+
+
+
+
+
+
+
+
+
+
+
+
+Copyright © 2007-2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/client/class-use/RestoreSnapshotFromClientAfterTruncateTestBase.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/class-use/RestoreSnapshotFromClientAfterTruncateTestBase.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/RestoreSnapshotFromClientAfterTruncateTestBase.html
new file mode 100644
index 000..3d8c231
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/class-use/RestoreSnapshotFromClientAfterTruncateTestBase.html
@@ -0,0 +1,169 @@
+Uses of Class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientAfterTruncateTestBase (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+Uses of Class org.apache.hadoop.hbase.client.RestoreSnapshotFromClientAfterTruncateTestBase
+
+
+
+
+
+Packages that use RestoreSnapshotFromClientAfterTruncateTestBase
+
+Package
+Description
+
+
+
+org.apache.hadoop.hbase.client
+
+
+
+
+
+
+
+
+
+
+Uses of RestoreSnapshotFromClientAfterTruncateTestBase
 in org.apache.hadoop.hbase.client
+
+Subclasses of RestoreSnapshotFromClientAfterTruncateTestBase
 in org.apache.hadoop.hbase.client
+
+Modifier and Type
+Class and Description
+
+
+
+class
+TestMobRestoreSnapshotFromClientAfterTruncate
+
+
+class

[19/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.DeleteState.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.DeleteState.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.DeleteState.html
index bd1b3f6..0c69df9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.DeleteState.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.DeleteState.html
@@ -31,430 +31,441 @@
 023import java.util.Iterator;
 024import java.util.Map;
 025import java.util.TreeMap;
-026
+026import java.util.stream.LongStream;
 027import 
org.apache.yetus.audience.InterfaceAudience;
 028import 
org.apache.yetus.audience.InterfaceStability;
-029import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
-030
-031/**
-032 * Keeps track of live procedures.
-033 *
-034 * It can be used by the ProcedureStore 
to identify which procedures are already
-035 * deleted/completed to avoid the 
deserialization step on restart
-036 */
-037@InterfaceAudience.Private
-038@InterfaceStability.Evolving
-039public class ProcedureStoreTracker {
-040  // Key is procedure id corresponding to 
first bit of the bitmap.
-041  private final TreeMap<Long, BitSetNode> map = new TreeMap<>();
-042
-043  /**
-044   * If true, do not remove bits 
corresponding to deleted procedures. Note that this can result
-045   * in huge bitmaps overtime.
-046   * Currently, it's set to true only 
when building tracker state from logs during recovery. During
-047   * recovery, if we are sure that a 
procedure has been deleted, reading its old update entries
-048   * can be skipped.
-049   */
-050  private boolean keepDeletes = false;
-051  /**
-052   * If true, it means tracker has 
incomplete information about the active/deleted procedures.
-053   * It's set to true only when 
recovering from old logs. See {@link #isDeleted(long)} docs to
-054   * understand it's real use.
-055   */
-056  boolean partial = false;
-057
-058  private long minModifiedProcId = 
Long.MAX_VALUE;
-059  private long maxModifiedProcId = 
Long.MIN_VALUE;
-060
-061  public enum DeleteState { YES, NO, 
MAYBE }
-062
-063  public void 
resetToProto(ProcedureProtos.ProcedureStoreTracker trackerProtoBuf) {
-064reset();
-065for 
(ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode: 
trackerProtoBuf.getNodeList()) {
-066  final BitSetNode node = new 
BitSetNode(protoNode);
-067  map.put(node.getStart(), node);
-068}
-069  }
-070
-071  /**
-072   * Resets internal state to same as 
given {@code tracker}. Does deep copy of the bitmap.
-073   */
-074  public void 
resetTo(ProcedureStoreTracker tracker) {
-075resetTo(tracker, false);
-076  }
-077
-078  /**
-079   * Resets internal state to same as 
given {@code tracker}, and change the deleted flag according
-080   * to the modified flag if {@code 
resetDelete} is true. Does deep copy of the bitmap.
-081   * <p/>
-082   * The {@code resetDelete} will be set 
to true when building cleanup tracker, please see the
-083   * comments in {@link 
BitSetNode#BitSetNode(BitSetNode, boolean)} to learn how we change the
-084   * deleted flag if {@code resetDelete} 
is true.
-085   */
-086  public void 
resetTo(ProcedureStoreTracker tracker, boolean resetDelete) {
-087reset();
-088this.partial = tracker.partial;
-089this.minModifiedProcId = 
tracker.minModifiedProcId;
-090this.maxModifiedProcId = 
tracker.maxModifiedProcId;
-091this.keepDeletes = 
tracker.keepDeletes;
-092    for (Map.Entry<Long, BitSetNode> entry : tracker.map.entrySet()) {
-093  map.put(entry.getKey(), new 
BitSetNode(entry.getValue(), resetDelete));
-094}
-095  }
-096
-097  public void insert(long procId) {
-098insert(null, procId);
-099  }
-100
-101  public void insert(long[] procIds) {
-102    for (int i = 0; i < procIds.length; ++i) {
-103  insert(procIds[i]);
-104}
-105  }
-106
-107  public void insert(long procId, long[] 
subProcIds) {
-108BitSetNode node = update(null, 
procId);
-109    for (int i = 0; i < subProcIds.length; ++i) {
-110  node = insert(node, 
subProcIds[i]);
-111}
-112  }
-113
-114  private BitSetNode insert(BitSetNode 
node, long procId) {
-115if (node == null || 
!node.contains(procId)) {
-116  node = getOrCreateNode(procId);
-117}
-118node.insertOrUpdate(procId);
-119trackProcIds(procId);
-120return node;
-121  }
-122
-123  public void update(long procId) {
-124update(null, procId);
-125  }
-126
-127  private BitSetNode update(BitSetNode 
node, long procId) {
-128node = lookupClosestNode(node, 
procId);
-129assert node != null : "expected node 
to update procId=" + procId;
-130assert node.contains(procId) : 
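
The class comment above explains the idea: one bitmap node per range of procedure ids, so "is this procedure live/deleted?" becomes a bit test instead of a WAL replay on restart. A toy Java sketch of that scheme (RANGE and the single-BitSet node are illustrative only; the real BitSetNode carries separate modified and deleted bitmaps):

import java.util.BitSet;
import java.util.TreeMap;

// Hedged sketch, not the HBase implementation.
final class ToyProcIdTracker {
  private static final int RANGE = 1024;                 // ids covered per bitmap node
  private final TreeMap<Long, BitSet> map = new TreeMap<>();

  void insert(long procId) {
    long start = (procId / RANGE) * RANGE;               // start of the id's range
    map.computeIfAbsent(start, s -> new BitSet(RANGE))
       .set((int) (procId - start));
  }

  void delete(long procId) {
    long start = (procId / RANGE) * RANGE;
    BitSet bits = map.get(start);
    if (bits != null) {
      bits.clear((int) (procId - start));
      if (bits.isEmpty()) {
        map.remove(start);                               // drop empty nodes
      }
    }
  }

  boolean isLive(long procId) {
    long start = (procId / RANGE) * RANGE;
    BitSet bits = map.get(start);
    return bits != null && bits.get((int) (procId - start));
  }
}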

[19/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
index 43c66a8..061ce80 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.WorkerThread.html
@@ -23,2136 +23,2142 @@
 015 * See the License for the specific 
language governing permissions and
 016 * limitations under the License.
 017 */
-018
-019package 
org.apache.hadoop.hbase.procedure2;
-020
-021import java.io.IOException;
-022import java.util.ArrayDeque;
-023import java.util.ArrayList;
-024import java.util.Arrays;
-025import java.util.Collection;
-026import java.util.Deque;
-027import java.util.HashSet;
-028import java.util.Iterator;
-029import java.util.List;
-030import java.util.Map;
-031import java.util.Objects;
-032import java.util.Set;
-033import 
java.util.concurrent.ConcurrentHashMap;
-034import 
java.util.concurrent.CopyOnWriteArrayList;
-035import java.util.concurrent.TimeUnit;
-036import 
java.util.concurrent.atomic.AtomicBoolean;
-037import 
java.util.concurrent.atomic.AtomicInteger;
-038import 
java.util.concurrent.atomic.AtomicLong;
-039import java.util.stream.Collectors;
-040import java.util.stream.Stream;
-041
-042import 
org.apache.hadoop.conf.Configuration;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
-045import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-046import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
-047import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
-048import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
-049import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-050import 
org.apache.hadoop.hbase.security.User;
-051import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-052import 
org.apache.hadoop.hbase.util.IdLock;
-053import 
org.apache.hadoop.hbase.util.NonceKey;
-054import 
org.apache.hadoop.hbase.util.Threads;
-055import 
org.apache.yetus.audience.InterfaceAudience;
-056import org.slf4j.Logger;
-057import org.slf4j.LoggerFactory;
-058
-059import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-060import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+018package 
org.apache.hadoop.hbase.procedure2;
+019
+020import java.io.IOException;
+021import java.util.ArrayDeque;
+022import java.util.ArrayList;
+023import java.util.Arrays;
+024import java.util.Collection;
+025import java.util.Deque;
+026import java.util.HashSet;
+027import java.util.Iterator;
+028import java.util.List;
+029import java.util.Map;
+030import java.util.Objects;
+031import java.util.Set;
+032import 
java.util.concurrent.ConcurrentHashMap;
+033import 
java.util.concurrent.CopyOnWriteArrayList;
+034import java.util.concurrent.TimeUnit;
+035import 
java.util.concurrent.atomic.AtomicBoolean;
+036import 
java.util.concurrent.atomic.AtomicInteger;
+037import 
java.util.concurrent.atomic.AtomicLong;
+038import java.util.stream.Collectors;
+039import java.util.stream.Stream;
+040import 
org.apache.hadoop.conf.Configuration;
+041import 
org.apache.hadoop.hbase.HConstants;
+042import 
org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+043import 
org.apache.hadoop.hbase.log.HBaseMarkers;
+044import 
org.apache.hadoop.hbase.procedure2.Procedure.LockState;
+045import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
+046import 
org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
+047import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
+048import 
org.apache.hadoop.hbase.security.User;
+049import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+050import 
org.apache.hadoop.hbase.util.IdLock;
+051import 
org.apache.hadoop.hbase.util.NonceKey;
+052import 
org.apache.hadoop.hbase.util.Threads;
+053import 
org.apache.yetus.audience.InterfaceAudience;
+054import org.slf4j.Logger;
+055import org.slf4j.LoggerFactory;
+056
+057import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+058import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+059
+060import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 061
-062import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
-063
-064/**
-065 * Thread Pool that executes the 
submitted procedures.
-066 * The executor has a ProcedureStore 
associated.
-067 * Each operation is logged and on 
restart the pending procedures are resumed.
-068 *
-069 * Unless the 
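
Based only on the class comment above, a hedged sketch of what one executor worker thread's loop amounts to: take a runnable step from a scheduler, run it, and keep polling until shutdown. The scheduler type and the persistence step are stand-ins, not the real ProcedureExecutor/WorkerThread internals:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

final class WorkerLoopSketch implements Runnable {
  private final BlockingQueue<Runnable> scheduler;
  private final AtomicBoolean running;

  WorkerLoopSketch(BlockingQueue<Runnable> scheduler, AtomicBoolean running) {
    this.scheduler = scheduler;
    this.running = running;
  }

  @Override
  public void run() {
    while (running.get()) {
      try {
        Runnable step = scheduler.poll(1, TimeUnit.SECONDS); // wait for work
        if (step == null) {
          continue;                                          // idle; poll again
        }
        step.run(); // execute one procedure step; the real executor also logs
                    // the resulting state to the ProcedureStore for recovery
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}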

[19/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
index 976894f..721035e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.MasterStoppedException.html
@@ -3020,926 +3020,927 @@
 3012}
 3013  }
 3014
-3015  void checkServiceStarted() throws 
ServerNotRunningYetException {
-3016if (!serviceStarted) {
-3017  throw new 
ServerNotRunningYetException("Server is not running yet");
-3018}
-3019  }
-3020
-3021  public static class 
MasterStoppedException extends DoNotRetryIOException {
-3022MasterStoppedException() {
-3023  super();
-3024}
-3025  }
-3026
-3027  void checkInitialized() throws 
PleaseHoldException, ServerNotRunningYetException,
-3028  MasterNotRunningException, 
MasterStoppedException {
-3029checkServiceStarted();
-3030if (!isInitialized()) {
-3031  throw new 
PleaseHoldException("Master is initializing");
-3032}
-3033if (isStopped()) {
-3034  throw new 
MasterStoppedException();
-3035}
-3036  }
-3037
-3038  /**
-3039   * Report whether this master is 
currently the active master or not.
-3040   * If not active master, we are parked 
on ZK waiting to become active.
-3041   *
-3042   * This method is used for testing.
-3043   *
-3044   * @return true if active master, 
false if not.
-3045   */
-3046  @Override
-3047  public boolean isActiveMaster() {
-3048return activeMaster;
-3049  }
-3050
-3051  /**
-3052   * Report whether this master has 
completed with its initialization and is
-3053   * ready.  If ready, the master is 
also the active master.  A standby master
-3054   * is never ready.
-3055   *
-3056   * This method is used for testing.
-3057   *
-3058   * @return true if master is ready to 
go, false if not.
-3059   */
-3060  @Override
-3061  public boolean isInitialized() {
-3062return initialized.isReady();
-3063  }
-3064
-3065  /**
-3066   * Report whether this master is in 
maintenance mode.
-3067   *
-3068   * @return true if master is in 
maintenanceMode
-3069   */
-3070  @Override
-3071  public boolean isInMaintenanceMode() 
throws IOException {
-3072if (!isInitialized()) {
-3073  throw new 
PleaseHoldException("Master is initializing");
-3074}
-3075return 
maintenanceModeTracker.isInMaintenanceMode();
-3076  }
-3077
-3078  @VisibleForTesting
-3079  public void setInitialized(boolean 
isInitialized) {
-3080
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
-3081  }
-3082
-3083  @Override
-3084  public ProcedureEvent<?> getInitializedEvent() {
-3085return initialized;
-3086  }
-3087
-3088  /**
-3089   * Compute the average load across all 
region servers.
-3090   * Currently, this uses a very naive 
computation - just uses the number of
-3091   * regions being served, ignoring 
stats about number of requests.
-3092   * @return the average load
-3093   */
-3094  public double getAverageLoad() {
-3095if (this.assignmentManager == null) 
{
-3096  return 0;
-3097}
-3098
-3099RegionStates regionStates = 
this.assignmentManager.getRegionStates();
-3100if (regionStates == null) {
-3101  return 0;
-3102}
-3103return 
regionStates.getAverageLoad();
-3104  }
-3105
-3106  /*
-3107   * @return the count of region split 
plans executed
-3108   */
-3109  public long getSplitPlanCount() {
-3110return splitPlanCount;
-3111  }
-3112
-3113  /*
-3114   * @return the count of region merge 
plans executed
-3115   */
-3116  public long getMergePlanCount() {
-3117return mergePlanCount;
-3118  }
-3119
-3120  @Override
-3121  public boolean registerService(Service 
instance) {
-3122/*
-3123 * No stacking of instances is 
allowed for a single service name
-3124 */
-3125Descriptors.ServiceDescriptor 
serviceDesc = instance.getDescriptorForType();
-3126String serviceName = 
CoprocessorRpcUtils.getServiceName(serviceDesc);
-3127if 
(coprocessorServiceHandlers.containsKey(serviceName)) {
-3128  LOG.error("Coprocessor service 
"+serviceName+
-3129  " already registered, 
rejecting request from "+instance
-3130  );
-3131  return false;
-3132}
-3133
-3134
coprocessorServiceHandlers.put(serviceName, instance);
-3135if (LOG.isDebugEnabled()) {
-3136  LOG.debug("Registered master 
coprocessor service: service="+serviceName);
-3137}
-3138return true;
-3139  }
-3140
-3141  /**
-3142   * Utility for constructing an 
instance of the passed HMaster class.
-3143   * @param masterClass
-3144   * @return HMaster instance.
-3145   */
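
The checkServiceStarted/checkInitialized pair earlier in this hunk is a guard pattern: each master state maps to a distinct exception so callers know whether to retry. A self-contained sketch of the shape, with plain IOExceptions standing in for ServerNotRunningYetException, PleaseHoldException, and MasterStoppedException:

import java.io.IOException;

// Hedged sketch: only the guard ordering matters, not the exception types.
final class MasterGuardsSketch {
  volatile boolean serviceStarted;
  volatile boolean initialized;
  volatile boolean stopped;

  void checkInitialized() throws IOException {
    if (!serviceStarted) {
      throw new IOException("Server is not running yet"); // retryable
    }
    if (!initialized) {
      throw new IOException("Master is initializing");    // retryable
    }
    if (stopped) {
      throw new IOException("Master stopped");            // fatal, do not retry
    }
  }
}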

[19/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index 8cc5add..34858d6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -2188,1428 +2188,1428 @@
 2180  }
 2181
 2182  @Override
-2183  public void postOpenDeployTasks(final 
PostOpenDeployContext context)
-2184  throws KeeperException, 
IOException {
-2185HRegion r = context.getRegion();
-2186long masterSystemTime = 
context.getMasterSystemTime();
-2187rpcServices.checkOpen();
-2188LOG.info("Post open deploy tasks for 
" + r.getRegionInfo().getRegionNameAsString());
-2189// Do checks to see if we need to 
compact (references or too many files)
-2190for (HStore s : r.stores.values()) 
{
-2191  if (s.hasReferences() || 
s.needsCompaction()) {
-2192
this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
-2193  }
-2194}
-2195long openSeqNum = 
r.getOpenSeqNum();
-2196if (openSeqNum == 
HConstants.NO_SEQNUM) {
-2197  // If we opened a region, we 
should have read some sequence number from it.
-2198  LOG.error("No sequence number 
found when opening " +
-2199
r.getRegionInfo().getRegionNameAsString());
-2200  openSeqNum = 0;
-2201}
-2202
-2203// Notify master
-2204    if (!reportRegionStateTransition(new RegionStateTransitionContext(
-2205        TransitionCode.OPENED, openSeqNum, masterSystemTime, r.getRegionInfo()))) {
-2206  throw new IOException("Failed to 
report opened region to master: "
-2207+ 
r.getRegionInfo().getRegionNameAsString());
-2208}
-2209
-2210triggerFlushInPrimaryRegion(r);
-2211
-2212LOG.debug("Finished post open deploy 
task for " + r.getRegionInfo().getRegionNameAsString());
-2213  }
-2214
-2215  @Override
-2216  public boolean 
reportRegionStateTransition(final RegionStateTransitionContext context) {
-2217TransitionCode code = 
context.getCode();
-2218long openSeqNum = 
context.getOpenSeqNum();
-2219long masterSystemTime = 
context.getMasterSystemTime();
-2220RegionInfo[] hris = 
context.getHris();
-2221
-2222    if (TEST_SKIP_REPORTING_TRANSITION) {
-2223  // This is for testing only in 
case there is no master
-2224  // to handle the region transition 
report at all.
-2225  if (code == TransitionCode.OPENED) 
{
-2226        Preconditions.checkArgument(hris != null && hris.length == 1);
-2227if (hris[0].isMetaRegion()) {
-2228  try {
-2229
MetaTableLocator.setMetaLocation(getZooKeeper(), serverName,
-2230
hris[0].getReplicaId(),State.OPEN);
-2231  } catch (KeeperException e) 
{
-2232LOG.info("Failed to update 
meta location", e);
-2233return false;
-2234  }
-2235} else {
-2236  try {
-2237
MetaTableAccessor.updateRegionLocation(clusterConnection,
-2238  hris[0], serverName, 
openSeqNum, masterSystemTime);
-2239  } catch (IOException e) {
-2240LOG.info("Failed to update 
meta", e);
-2241return false;
-2242  }
-2243}
-2244  }
-2245  return true;
-2246}
-2247
-2248
ReportRegionStateTransitionRequest.Builder builder =
-2249  
ReportRegionStateTransitionRequest.newBuilder();
-2250
builder.setServer(ProtobufUtil.toServerName(serverName));
-2251RegionStateTransition.Builder 
transition = builder.addTransitionBuilder();
-2252
transition.setTransitionCode(code);
-2253    if (code == TransitionCode.OPENED && openSeqNum >= 0) {
-2254  
transition.setOpenSeqNum(openSeqNum);
-2255}
-2256for (RegionInfo hri: hris) {
-2257  
transition.addRegionInfo(ProtobufUtil.toRegionInfo(hri));
-2258}
-2259ReportRegionStateTransitionRequest 
request = builder.build();
-2260int tries = 0;
-2261long pauseTime = 
INIT_PAUSE_TIME_MS;
-2262// Keep looping till we get an 
error. We want to send reports even though server is going down.
-2263// Only go down if clusterConnection 
is null. It is set to null almost as last thing as the
-2264// HRegionServer does down.
-2265    while (this.clusterConnection != null && !this.clusterConnection.isClosed()) {
-2266  
RegionServerStatusService.BlockingInterface rss = rssStub;
-2267  try {
-2268if (rss == null) {
-2269  
createRegionServerStatusStub();
-2270  continue;
-2271}
-2272
ReportRegionStateTransitionResponse response =
-2273  
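
reportRegionStateTransition above loops until the master acknowledges the report or the cluster connection is torn down. A generic Java sketch of that retry shape (connectionOpen/reportOnce are hypothetical hooks, not the HRegionServer API; the real loop also recreates its master stub on failure and uses INIT_PAUSE_TIME_MS):

import java.util.function.BooleanSupplier;

final class ReportRetrySketch {
  static void reportUntilAcked(BooleanSupplier connectionOpen, BooleanSupplier reportOnce) {
    long pauseTime = 100;                             // stand-in for INIT_PAUSE_TIME_MS
    while (connectionOpen.getAsBoolean()) {
      if (reportOnce.getAsBoolean()) {
        return;                                       // master accepted the transition
      }
      try {
        Thread.sleep(pauseTime);                      // back off before retrying
        pauseTime = Math.min(pauseTime * 2, 10_000L); // capped exponential backoff
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}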

[19/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
index a5789e0..93a57cb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.html
@@ -238,4120 +238,4119 @@
 230 * @see Admin
 231 */
 232@InterfaceAudience.Private
-233@InterfaceStability.Evolving
-234public class HBaseAdmin implements Admin 
{
-235  private static final Logger LOG = 
LoggerFactory.getLogger(HBaseAdmin.class);
-236
-237  private ClusterConnection connection;
-238
-239  private final Configuration conf;
-240  private final long pause;
-241  private final int numRetries;
-242  private final int syncWaitTimeout;
-243  private boolean aborted;
-244  private int operationTimeout;
-245  private int rpcTimeout;
-246
-247  private RpcRetryingCallerFactory 
rpcCallerFactory;
-248  private RpcControllerFactory 
rpcControllerFactory;
-249
-250  private NonceGenerator ng;
-251
-252  @Override
-253  public int getOperationTimeout() {
-254return operationTimeout;
-255  }
-256
-257  HBaseAdmin(ClusterConnection 
connection) throws IOException {
-258this.conf = 
connection.getConfiguration();
-259this.connection = connection;
-260
-261// TODO: receive 
ConnectionConfiguration here rather than re-parsing these configs every time.
-262this.pause = 
this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
-263
HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-264this.numRetries = 
this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-265
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-266this.operationTimeout = 
this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-267
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-268this.rpcTimeout = 
this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-269
HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-270this.syncWaitTimeout = 
this.conf.getInt(
-271      "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min
-272
-273this.rpcCallerFactory = 
connection.getRpcRetryingCallerFactory();
-274this.rpcControllerFactory = 
connection.getRpcControllerFactory();
-275
-276this.ng = 
this.connection.getNonceGenerator();
-277  }
-278
-279  @Override
-280  public void abort(String why, Throwable 
e) {
-281// Currently does nothing but throw 
the passed message and exception
-282this.aborted = true;
-283throw new RuntimeException(why, e);
-284  }
-285
-286  @Override
-287  public boolean isAborted() {
-288return this.aborted;
-289  }
-290
-291  @Override
-292  public boolean abortProcedure(final 
long procId, final boolean mayInterruptIfRunning)
-293  throws IOException {
-294return 
get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout,
-295  TimeUnit.MILLISECONDS);
-296  }
-297
-298  @Override
-299  public Future<Boolean> abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning)
-300  throws IOException {
-301Boolean abortProcResponse =
-302        executeCallable(new MasterCallable<AbortProcedureResponse>(getConnection(),
-303            getRpcControllerFactory()) {
-304  @Override
-305  protected AbortProcedureResponse 
rpcCall() throws Exception {
-306AbortProcedureRequest 
abortProcRequest =
-307
AbortProcedureRequest.newBuilder().setProcId(procId).build();
-308return 
master.abortProcedure(getRpcController(), abortProcRequest);
-309  }
-310}).getIsProcedureAborted();
-311return new AbortProcedureFuture(this, 
procId, abortProcResponse);
-312  }
-313
-314  @Override
-315  public List<TableDescriptor> listTableDescriptors() throws IOException {
-316return 
listTableDescriptors((Pattern)null, false);
-317  }
-318
-319  @Override
-320  public List<TableDescriptor> listTableDescriptors(Pattern pattern) throws IOException {
-321return listTableDescriptors(pattern, 
false);
-322  }
-323
-324  @Override
-325  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
-326  throws IOException {
-327    return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
-328getRpcControllerFactory()) {
-329  @Override
-330      protected List<TableDescriptor> rpcCall() throws Exception {
-331GetTableDescriptorsRequest req 
=
-332
RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
-333return 
ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-334req));
-335  }
-336});
-337  }
-338
-339  @Override
-340  public TableDescriptor 
getDescriptor(TableName tableName)
-341  throws 
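
Nearly every admin RPC in this class is funneled through executeCallable(new MasterCallable<...>(...) { ... }), so one helper owns the retry and timeout policy. A stripped-down sketch of that funnel (the retry semantics here are illustrative; the real path goes through RpcRetryingCallerFactory):

import java.util.concurrent.Callable;

final class ExecuteCallableSketch {
  static <V> V executeCallable(Callable<V> callable, int maxRetries) throws Exception {
    Exception last = null;
    for (int attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        return callable.call();       // the wrapped rpcCall()
      } catch (Exception e) {
        last = e;                     // remember and retry
      }
    }
    throw last;                       // retries exhausted
  }
}

Each admin method then only supplies the request-building and response-unmarshalling code, as in the listTableDescriptors callable above.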

[19/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
index fe4e081..eecf20f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionStat.html
@@ -44,1858 +44,1838 @@
 036import 
org.apache.hadoop.hbase.HBaseIOException;
 037import 
org.apache.hadoop.hbase.HConstants;
 038import 
org.apache.hadoop.hbase.PleaseHoldException;
-039import 
org.apache.hadoop.hbase.RegionException;
-040import 
org.apache.hadoop.hbase.ServerName;
-041import 
org.apache.hadoop.hbase.TableName;
-042import 
org.apache.hadoop.hbase.UnknownRegionException;
-043import 
org.apache.hadoop.hbase.YouAreDeadException;
-044import 
org.apache.hadoop.hbase.client.DoNotRetryRegionException;
-045import 
org.apache.hadoop.hbase.client.RegionInfo;
-046import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
-047import 
org.apache.hadoop.hbase.client.Result;
-048import 
org.apache.hadoop.hbase.client.TableState;
-049import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
-050import 
org.apache.hadoop.hbase.favored.FavoredNodesManager;
-051import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-052import 
org.apache.hadoop.hbase.master.LoadBalancer;
-053import 
org.apache.hadoop.hbase.master.MasterServices;
-054import 
org.apache.hadoop.hbase.master.MetricsAssignmentManager;
-055import 
org.apache.hadoop.hbase.master.NoSuchProcedureException;
-056import 
org.apache.hadoop.hbase.master.RegionPlan;
-057import 
org.apache.hadoop.hbase.master.RegionState;
-058import 
org.apache.hadoop.hbase.master.RegionState.State;
-059import 
org.apache.hadoop.hbase.master.ServerListener;
-060import 
org.apache.hadoop.hbase.master.TableStateManager;
-061import 
org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
-062import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-063import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
-064import 
org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
-065import 
org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
-066import 
org.apache.hadoop.hbase.procedure2.Procedure;
-067import 
org.apache.hadoop.hbase.procedure2.ProcedureEvent;
-068import 
org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-069import 
org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
-070import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-071import 
org.apache.hadoop.hbase.regionserver.SequenceId;
-072import 
org.apache.hadoop.hbase.util.Bytes;
-073import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-074import 
org.apache.hadoop.hbase.util.HasThread;
-075import 
org.apache.hadoop.hbase.util.Pair;
-076import 
org.apache.hadoop.hbase.util.Threads;
-077import 
org.apache.hadoop.hbase.util.VersionInfo;
-078import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-079import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-080import 
org.apache.yetus.audience.InterfaceAudience;
-081import 
org.apache.zookeeper.KeeperException;
-082import org.slf4j.Logger;
-083import org.slf4j.LoggerFactory;
+039import 
org.apache.hadoop.hbase.ServerName;
+040import 
org.apache.hadoop.hbase.TableName;
+041import 
org.apache.hadoop.hbase.UnknownRegionException;
+042import 
org.apache.hadoop.hbase.YouAreDeadException;
+043import 
org.apache.hadoop.hbase.client.DoNotRetryRegionException;
+044import 
org.apache.hadoop.hbase.client.RegionInfo;
+045import 
org.apache.hadoop.hbase.client.RegionInfoBuilder;
+046import 
org.apache.hadoop.hbase.client.Result;
+047import 
org.apache.hadoop.hbase.client.TableState;
+048import 
org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
+049import 
org.apache.hadoop.hbase.favored.FavoredNodesManager;
+050import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
+051import 
org.apache.hadoop.hbase.master.LoadBalancer;
+052import 
org.apache.hadoop.hbase.master.MasterServices;
+053import 
org.apache.hadoop.hbase.master.MetricsAssignmentManager;
+054import 
org.apache.hadoop.hbase.master.RegionPlan;
+055import 
org.apache.hadoop.hbase.master.RegionState;
+056import 
org.apache.hadoop.hbase.master.RegionState.State;
+057import 
org.apache.hadoop.hbase.master.ServerListener;
+058import 
org.apache.hadoop.hbase.master.TableStateManager;
+059import 
org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
+060import 
org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+061import 

[19/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
index 67b7e3a..7ba3a64 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/package-tree.html
@@ -192,8 +192,8 @@
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
 org.apache.hadoop.hbase.security.AuthMethod
-org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection
 org.apache.hadoop.hbase.security.SaslStatus
+org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/org/apache/hadoop/hbase/util/IdLock.Entry.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/IdLock.Entry.html 
b/devapidocs/org/apache/hadoop/hbase/util/IdLock.Entry.html
index 967a2e7..94b2436 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/IdLock.Entry.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/IdLock.Entry.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static final class IdLock.Entry
+public static final class IdLock.Entry
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 An entry returned to the client as a lock object
 
@@ -212,7 +212,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 id
-private final long id
+private final long id
 
 
 
@@ -221,7 +221,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 numWaiters
-private int numWaiters
+private int numWaiters
 
 
 
@@ -230,7 +230,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 locked
-private boolean locked
+private boolean locked
 
 
 
@@ -247,7 +247,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 Entry
-private Entry(long id)
+private Entry(long id)
 
 
 
@@ -264,7 +264,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 toString
-public String toString()
+public String toString()
 
 Overrides:
toString in class java.lang.Object

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/org/apache/hadoop/hbase/util/IdLock.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/IdLock.html 
b/devapidocs/org/apache/hadoop/hbase/util/IdLock.html
index 99efa36..ebae49c 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/IdLock.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/IdLock.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class IdLock
+public class IdLock
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Allows multiple concurrent clients to lock on a numeric id 
with a minimal
  memory overhead. The intended usage is as follows:
@@ -218,6 +218,13 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
+IdLock.Entry
+tryLockEntry(long id, long time)
+Blocks until the lock corresponding to the given id is acquired.
+
+
+
 void
 waitForWaiters(long id, int numWaiters)
@@ -250,7 +257,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 map
-privatehttps://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ConcurrentMap.html?is-external=true;
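
The javadoc's usage example is truncated in the diff above; the intended per-id lock/release pattern looks like this sketch (getLockEntry and releaseLockEntry are existing IdLock methods; tryLockEntry, added by this change, is the timed variant):

IdLock idLock = new IdLock();
long fileId = 42L;                                      // any numeric id to serialize on
IdLock.Entry lockEntry = idLock.getLockEntry(fileId);   // blocks until acquired
try {
  // ... user code that must run exclusively for this id ...
} finally {
  idLock.releaseLockEntry(lockEntry);                   // always release
}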
 

[19/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 999eaf7..5d35ced 100644
--- a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":10,"i6":10,"i7":10,"i8":9,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":41,"i26":41,"i27":10,"i28":10,"i29":10,"i30":42,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":9,"i37":9,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":10,"i44":9,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":42,"i67":42,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":9,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":9,"i92":9,"i93":10,"i94":9,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":9,"i105":9,"i106":9,"i107":42,"i108":10,"i109":10,"i110":9,"i1
 
11":42,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":9,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":9,"i136":9,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":9,"i143":9,"i144":10,"i145":9,"i146":10,"i147":10,"i148":10,"i149":10,"i150":9,"i151":9,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":9,"i193":10,"i194":10,"i195":10,"i196":10,"i197":10,"i198":10,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10};
+var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":10,"i6":10,"i7":10,"i8":9,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":42,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":41,"i26":41,"i27":10,"i28":10,"i29":10,"i30":42,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":9,"i37":9,"i38":9,"i39":9,"i40":9,"i41":9,"i42":9,"i43":10,"i44":9,"i45":9,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":42,"i67":42,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":9,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":9,"i92":9,"i93":10,"i94":9,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":9,"i105":9,"i106":9,"i107":42,"i108":10,"i109":10,"i110":10,"i
 
111":9,"i112":42,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":9,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":9,"i137":9,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":9,"i144":9,"i145":10,"i146":9,"i147":10,"i148":10,"i149":10,"i150":10,"i151":9,"i152":9,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":42,"i161":10,"i162":42,"i163":42,"i164":42,"i165":42,"i166":42,"i167":42,"i168":42,"i169":42,"i170":42,"i171":42,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":42,"i180":42,"i181":42,"i182":10,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":9,"i197":10,"i198":10,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i209":10,"i210":10,"i211":10
 };
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class HBaseTestingUtility
+public class HBaseTestingUtility
 extends HBaseZKTestingUtility
 Facility for testing HBase. Replacement for
  old HBaseTestCase and 

[19/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/DependentColumnFilter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
index 7d9d2af..4421ed5 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/DependentColumnFilter.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":10,"i3":10,"i4":10,"i5":42,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":9,"i14":10,"i15":10,"i16":10};
+var methods = 
{"i0":10,"i1":9,"i2":10,"i3":10,"i4":10,"i5":10,"i6":42,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":9,"i16":10,"i17":10,"i18":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -125,7 +125,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class DependentColumnFilter
+public class DependentColumnFilter
 extends CompareFilter
 A filter for adding inter-column timestamp matching
  Only cells with a correspondingly timestamped entry in
@@ -285,24 +285,28 @@ extends 
 boolean
+equals(Object obj)
+
+
+boolean
 filterAllRemaining()
 Filters that never filter all remaining can inherit this 
implementation that
  never stops the filter early.
 
 
-
+
 Filter.ReturnCode
 filterCell(Cellc)
 A way to filter based on the column family, column 
qualifier and/or the column value.
 
 
-
+
 Filter.ReturnCode
 filterKeyValue(Cellc)
 Deprecated.
 
 
-
+
 boolean
 filterRow()
 Filters that never filter by rows based on previously 
gathered state from
@@ -310,14 +314,14 @@ extends 
+
 void
 filterRowCells(List<Cell> kvs)
 Filters that never filter by modifying the returned List of 
Cells can
  inherit this implementation that does nothing.
 
 
-
+
 boolean
 filterRowKey(byte[] buffer, int offset,
@@ -326,43 +330,47 @@ extends 
+
 boolean
 getDropDependentColumn()
 
-
+
 byte[]
 getFamily()
 
-
+
 byte[]
 getQualifier()
 
-
+
 boolean
 hasFilterRow()
 Filters that never filter by modifying the returned List of 
Cells can
  inherit this implementation that does nothing.
 
 
-
+
+int
+hashCode()
+
+
 static DependentColumnFilter
 parseFrom(byte[] pbBytes)
 
-
+
 void
 reset()
 Filters that are purely stateless and do nothing in their 
reset() methods can inherit
  this null/empty implementation.
 
 
-
+
 byte[]
 toByteArray()
 Return length 0 byte array for Filters that don't require 
special serialization
 
 
-
+
 String
 toString()
 Return filter's info for debugging and logging 
purpose.
@@ -395,7 +403,7 @@ extends java.lang.Object
-clone, equals, finalize, getClass, hashCode, notify, notifyAll, wait, wait, wait
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in 

[19/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index db8431b..a8cb7c4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -885,7766 +885,7797 @@
 877   * @return What the next sequence 
(edit) id should be.
 878   * @throws IOException e
 879   */
-880  private long initialize(final 
CancelableProgressable reporter) throws IOException {
-881
-882//Refuse to open the region if there 
is no column family in the table
-883if 
(htableDescriptor.getColumnFamilyCount() == 0) {
-884  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
-885  " should have at least one 
column family.");
-886}
-887
-888MonitoredTask status = 
TaskMonitor.get().createStatus("Initializing region " + this);
-889long nextSeqId = -1;
-890try {
-891  nextSeqId = 
initializeRegionInternals(reporter, status);
-892  return nextSeqId;
-893} finally {
-894  // nextSeqid will be -1 if the 
initialization fails.
-895  // At least it will be 0 
otherwise.
-896  if (nextSeqId == -1) {
-897status.abort("Exception during 
region " + getRegionInfo().getRegionNameAsString() +
-898  " initialization.");
-899  }
-900}
-901  }
-902
-903  private long 
initializeRegionInternals(final CancelableProgressable reporter,
-904  final MonitoredTask status) throws 
IOException {
-905if (coprocessorHost != null) {
-906  status.setStatus("Running 
coprocessor pre-open hook");
-907  coprocessorHost.preOpen();
-908}
-909
-910// Write HRI to a file in case we 
need to recover hbase:meta
-911// Only the primary replica should 
write .regioninfo
-912if 
(this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
-913  status.setStatus("Writing region 
info on filesystem");
-914  fs.checkRegionInfoOnFilesystem();
-915}
-916
-917// Initialize all the HStores
-918status.setStatus("Initializing all 
the Stores");
-919long maxSeqId = 
initializeStores(reporter, status);
-920this.mvcc.advanceTo(maxSeqId);
-921if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-922  CollectionHStore stores = 
this.stores.values();
-923  try {
-924// update the stores that we are 
replaying
-925LOG.debug("replaying wal for " + 
this.getRegionInfo().getEncodedName());
-926
stores.forEach(HStore::startReplayingFromWAL);
-927// Recover any edits if 
available.
-928maxSeqId = Math.max(maxSeqId,
-929  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-930// Make sure mvcc is up to max.
-931this.mvcc.advanceTo(maxSeqId);
-932  } finally {
-933LOG.debug("stopping wal replay 
for " + this.getRegionInfo().getEncodedName());
-934// update the stores that we are 
done replaying
-935
stores.forEach(HStore::stopReplayingFromWAL);
-936  }
-937}
-938this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-939
-940
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-941this.writestate.flushRequested = 
false;
-942this.writestate.compacting.set(0);
-943
-944if (this.writestate.writesEnabled) 
{
-945  LOG.debug("Cleaning up temporary 
data for " + this.getRegionInfo().getEncodedName());
-946  // Remove temporary data left over 
from old regions
-947  status.setStatus("Cleaning up 
temporary data from old regions");
-948  fs.cleanupTempDir();
-949}
-950
-951if (this.writestate.writesEnabled) 
{
-952  status.setStatus("Cleaning up 
detritus from prior splits");
-953  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-954  // these directories here on open.  
We may be opening a region that was
-955  // being split but we crashed in 
the middle of it all.
-956  LOG.debug("Cleaning up detritus for 
" + this.getRegionInfo().getEncodedName());
-957  fs.cleanupAnySplitDetritus();
-958  fs.cleanupMergesDir();
-959}
+880  @VisibleForTesting
+881  long initialize(final 
CancelableProgressable reporter) throws IOException {
+882
+883//Refuse to open the region if there 
is no column family in the table
+884if 
(htableDescriptor.getColumnFamilyCount() == 0) {
+885  throw new 
DoNotRetryIOException("Table " + 
htableDescriptor.getTableName().getNameAsString()+
+886  " should have at least one 
column family.");
+887}
+888
+889
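
The hunk above widens initialize from private to package-private (annotated @VisibleForTesting) while keeping the guard that refuses to open a region whose table declares no column family. A stripped-down sketch of that guard-and-track shape, using stand-in names rather than the real HRegion collaborators:

    import java.io.IOException;

    public class InitGuardSketch {
      interface Status { void setStatus(String s); void abort(String s); }

      // Stand-in for HRegion.initialize(): reject empty tables up front,
      // then abort the monitored task if no sequence id was produced.
      static long initialize(int columnFamilyCount, Status status) throws IOException {
        if (columnFamilyCount == 0) {
          throw new IOException("Table should have at least one column family.");
        }
        long nextSeqId = -1;
        try {
          status.setStatus("Initializing region");
          nextSeqId = 1; // stand-in for initializeRegionInternals(reporter, status)
          return nextSeqId;
        } finally {
          if (nextSeqId == -1) { // still -1 only if initialization failed
            status.abort("Exception during region initialization.");
          }
        }
      }
    }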

[19/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerState.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerState.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerState.html
deleted file mode 100644
index e26f23a..000
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerState.html
+++ /dev/null
@@ -1,213 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-Uses of Class 
org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState (Apache 
HBase 3.0.0-SNAPSHOT API)
-
-
-
-
-
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev
-Next
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-
-
-
-Uses of 
Classorg.apache.hadoop.hbase.master.assignment.RegionStates.ServerState
-
-
-
-
-
-Packages that use RegionStates.ServerState
-
-Package
-Description
-
-
-
-org.apache.hadoop.hbase.master.assignment
-
-
-
-
-
-
-
-
-
-
-Uses of RegionStates.ServerState in org.apache.hadoop.hbase.master.assignment
-
-Fields in org.apache.hadoop.hbase.master.assignment
 declared as RegionStates.ServerState
-
-Modifier and Type
-Field and Description
-
-
-
-private RegionStates.ServerState
-RegionStates.ServerStateNode.state
-
-
-
-
-Methods in org.apache.hadoop.hbase.master.assignment
 that return RegionStates.ServerState
-
-Modifier and Type
-Method and Description
-
-
-
-RegionStates.ServerState
-RegionStates.ServerStateNode.getState()
-
-
-static RegionStates.ServerState
-RegionStates.ServerState.valueOf(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
-Returns the enum constant of this type with the specified 
name.
-
-
-
-static RegionStates.ServerState[]
-RegionStates.ServerState.values()
-Returns an array containing the constants of this enum 
type, in
-the order they are declared.
-
-
-
-
-
-Methods in org.apache.hadoop.hbase.master.assignment
 with parameters of type RegionStates.ServerState
-
-Modifier and Type
-Method and Description
-
-
-
-boolean
-RegionStates.ServerStateNode.isInState(RegionStates.ServerState...expected)
-
-
-private void
-RegionStates.setServerState(ServerNameserverName,
-  RegionStates.ServerStatestate)
-
-
-private void
-RegionStates.ServerStateNode.setState(RegionStates.ServerStatestate)
-
-
-
-
-
-
-
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev
-Next
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-
-
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights 
reserved.
-
-

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerStateNode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerStateNode.html
 
b/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerStateNode.html
deleted file mode 100644
index 46ea3a4..000
--- 
a/devapidocs/org/apache/hadoop/hbase/master/assignment/class-use/RegionStates.ServerStateNode.html
+++ /dev/null
@@ -1,231 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-Uses of Class 
org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode (Apache 
HBase 3.0.0-SNAPSHOT API)
-
-
-
-
-
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev
-Next
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
 var methods = 
{"i0":42,"i1":42,"i2":42,"i3":42,"i4":42,"i5":42,"i6":42,"i7":42,"i8":42,"i9":42,"i10":42,"i11":42,"i12":42,"i13":42,"i14":42,"i15":42,"i16":42,"i17":42,"i18":42,"i19":42,"i20":42,"i21":42,"i22":42,"i23":42,"i24":42,"i25":42,"i26":42,"i27":42,"i28":42,"i29":42,"i30":42,"i31":42,"i32":42,"i33":42,"i34":42,"i35":42,"i36":42,"i37":42,"i38":42,"i39":42,"i40":42,"i41":42,"i42":42,"i43":42,"i44":42,"i45":42,"i46":42,"i47":42};
-var tabs = {65535:["t0","所有方法"],2:["t2","实例方法"],8:["t4","具体方法"],32:["t6","已过时的方法"]};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+PrevClass
+NextClass
 
 
-框架
-无框架
+Frames
+NoFrames
 
 
-所有类
+AllClasses
 
 
 

[19/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/RetryImmediatelyException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/RetryImmediatelyException.html 
b/apidocs/org/apache/hadoop/hbase/RetryImmediatelyException.html
index 36052a7..08c03f4 100644
--- a/apidocs/org/apache/hadoop/hbase/RetryImmediatelyException.html
+++ b/apidocs/org/apache/hadoop/hbase/RetryImmediatelyException.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd;>
 
-
+
 
 
 
@@ -20,38 +20,38 @@
 //-->
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-PrevClass
-NextClass
+上一个类
+下一个类
 
 
-Frames
-NoFrames
+框架
+无框架
 
 
-AllClasses
+所有类
 
 
 
 
org.apache.hadoop.hbase
-

Class RetryImmediatelyException

+

类 RetryImmediatelyException


[19/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

2018-07-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/ReplicationStatusTmpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/ReplicationStatusTmpl.html
 
b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/ReplicationStatusTmpl.html
new file mode 100644
index 000..9c85343
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/ReplicationStatusTmpl.html
@@ -0,0 +1,450 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+ReplicationStatusTmpl (Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase.tmpl.regionserver
+Class 
ReplicationStatusTmpl
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.jamon.AbstractTemplateProxy
+
+
+org.apache.hadoop.hbase.tmpl.regionserver.ReplicationStatusTmpl
+
+
+
+
+
+
+
+
+
+
+public class ReplicationStatusTmpl
+extends org.jamon.AbstractTemplateProxy
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+Nested Classes
+
+Modifier and Type
+Class and Description
+
+
+static class
+ReplicationStatusTmpl.ImplData
+
+
+static interface
+ReplicationStatusTmpl.Intf
+
+
+
+
+
+
+Nested classes/interfaces inherited from 
classorg.jamon.AbstractTemplateProxy
+org.jamon.AbstractTemplateProxy.ImplDataCompatibleT extends 
org.jamon.AbstractTemplateProxy.ImplData, 
org.jamon.AbstractTemplateProxy.ReplacementConstructor
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Modifier
+Constructor and Description
+
+
+
+ReplicationStatusTmpl()
+
+
+protected 
+ReplicationStatusTmpl(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringp_path)
+
+
+
+ReplicationStatusTmpl(org.jamon.TemplateManagerp_manager)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+protected org.jamon.AbstractTemplateImpl
+constructImpl()
+
+
+org.jamon.AbstractTemplateImpl
+constructImpl(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class? extends 
org.jamon.AbstractTemplateImplp_class)
+
+
+ReplicationStatusTmpl.ImplData
+getImplData()
+
+
+protected 
org.jamon.AbstractTemplateProxy.ImplData
+makeImplData()
+
+
+org.jamon.Renderer
+makeRenderer(HRegionServerregionServer)
+
+
+void
+render(https://docs.oracle.com/javase/8/docs/api/java/io/Writer.html?is-external=true;
 title="class or interface in java.io">WriterjamonWriter,
+  HRegionServerregionServer)
+
+
+void
+renderNoFlush(https://docs.oracle.com/javase/8/docs/api/java/io/Writer.html?is-external=true;
 title="class or interface in java.io">WriterjamonWriter,
+ HRegionServerregionServer)
+
+
+
+
+
+
+Methods inherited from classorg.jamon.AbstractTemplateProxy
+getTemplateManager, reset
+
+
+
+
+
+Methods inherited from classjava.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, 

[19/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

2018-06-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
index 8ac7885..5454963 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HConstants.html
@@ -538,835 +538,842 @@
 530  // Other constants
 531
 532  /**
-533   * An empty instance.
+533   * An empty byte array instance.
 534   */
 535  public static final byte [] 
EMPTY_BYTE_ARRAY = new byte [0];
 536
-537  public static final ByteBuffer 
EMPTY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_BYTE_ARRAY);
-538
-539  /**
-540   * Used by scanners, etc when they want 
to start at the beginning of a region
-541   */
-542  public static final byte [] 
EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
+537  /**
+538   * An empty string instance.
+539   */
+540  public static final String EMPTY_STRING 
= "";
+541
+542  public static final ByteBuffer 
EMPTY_BYTE_BUFFER = ByteBuffer.wrap(EMPTY_BYTE_ARRAY);
 543
 544  /**
-545   * Last row in a table.
+545   * Used by scanners, etc when they want 
to start at the beginning of a region
 546   */
-547  public static final byte [] 
EMPTY_END_ROW = EMPTY_START_ROW;
+547  public static final byte [] 
EMPTY_START_ROW = EMPTY_BYTE_ARRAY;
 548
 549  /**
-550* Used by scanners and others when 
they're trying to detect the end of a
-551* table
-552*/
-553  public static final byte [] LAST_ROW = 
EMPTY_BYTE_ARRAY;
-554
-555  /**
-556   * Max length a row can have because of 
the limitation in TFile.
-557   */
-558  public static final int MAX_ROW_LENGTH 
= Short.MAX_VALUE;
+550   * Last row in a table.
+551   */
+552  public static final byte [] 
EMPTY_END_ROW = EMPTY_START_ROW;
+553
+554  /**
+555* Used by scanners and others when 
they're trying to detect the end of a
+556* table
+557*/
+558  public static final byte [] LAST_ROW = 
EMPTY_BYTE_ARRAY;
 559
 560  /**
-561   * Timestamp to use when we want to 
refer to the latest cell.
-562   *
-563   * On client side, this is the 
timestamp set by default when no timestamp is specified,
-564   * to refer to the latest.
-565   * On server side, this acts as a 
notation.
-566   * (1) For a cell of Put, which has 
this notation,
-567   * its timestamp will be replaced 
with server's current time.
-568   * (2) For a cell of Delete, which has 
this notation,
-569   * A. If the cell is of {@link 
KeyValue.Type#Delete}, HBase issues a Get operation firstly.
-570   *a. When the count of cell it 
gets is less than the count of cell to delete,
-571   *   the timestamp of Delete 
cell will be replaced with server's current time.
-572   *b. When the count of cell it 
gets is equal to the count of cell to delete,
-573   *   the timestamp of Delete 
cell will be replaced with the latest timestamp of cell it
-574   *   gets.
-575   *   (c. It is invalid and an 
exception will be thrown,
-576   *   if the count of cell it 
gets is greater than the count of cell to delete,
-577   *   as the max version of Get 
is set to the count of cell to delete.)
-578   * B. If the cell is of other 
Delete types, like {@link KeyValue.Type#DeleteFamilyVersion},
-579   *{@link 
KeyValue.Type#DeleteColumn}, or {@link KeyValue.Type#DeleteFamily},
-580   *the timestamp of Delete cell 
will be replaced with server's current time.
-581   *
-582   * So that is why it is named as 
"latest" but assigned as the max value of Long.
-583   */
-584  public static final long 
LATEST_TIMESTAMP = Long.MAX_VALUE;
-585
-586  /**
-587   * Timestamp to use when we want to 
refer to the oldest cell.
-588   * Special! Used in fake Cells only. 
Should never be the timestamp on an actual Cell returned to
-589   * a client.
-590   * @deprecated Should not be public 
since hbase-1.3.0. For internal use only. Move internal to
-591   *   Scanners flagged as special 
timestamp value never to be returned as timestamp on a Cell.
-592   */
-593  @Deprecated
-594  public static final long 
OLDEST_TIMESTAMP = Long.MIN_VALUE;
-595
-596  /**
-597   * LATEST_TIMESTAMP in bytes form
-598   */
-599  public static final byte [] 
LATEST_TIMESTAMP_BYTES = {
-600// big-endian
-601(byte) (LATEST_TIMESTAMP  
56),
-602(byte) (LATEST_TIMESTAMP  
48),
-603(byte) (LATEST_TIMESTAMP  
40),
-604(byte) (LATEST_TIMESTAMP  
32),
-605(byte) (LATEST_TIMESTAMP  
24),
-606(byte) (LATEST_TIMESTAMP  
16),
-607(byte) (LATEST_TIMESTAMP  
8),
-608(byte) LATEST_TIMESTAMP,
-609  };
-610
-611  /**
-612   * Define for 'return-all-versions'.
-613   */
-614  public static final int ALL_VERSIONS = 
Integer.MAX_VALUE;
+561   * Max length a row can have because of 
the limitation in TFile.
+562   */
+563  public 
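
The constants above pin LATEST_TIMESTAMP to Long.MAX_VALUE and spell out its big-endian byte form; the blank runs before each shift amount in LATEST_TIMESTAMP_BYTES are HTML-stripped ">>>" operators. A quick self-contained check of that arithmetic:

    public class TimestampBytesCheck {
      public static void main(String[] args) {
        long latest = Long.MAX_VALUE; // HConstants.LATEST_TIMESTAMP
        byte[] bytes = new byte[8];
        for (int i = 0; i < 8; i++) {
          // big-endian: most significant byte first, as in the shifts above
          bytes[i] = (byte) (latest >>> (56 - 8 * i));
        }
        // Long.MAX_VALUE is 0x7FFFFFFFFFFFFFFF: first byte 0x7F, rest 0xFF.
        System.out.printf("%02X %02X%n", bytes[0] & 0xFF, bytes[7] & 0xFF);
      }
    }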

[19/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

2018-06-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
index 503565a..1d8ae44 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
@@ -676,12 +676,21 @@
 
 
 default void
+MasterObserver.postTransitReplicationPeerSyncReplicationState(ObserverContextMasterCoprocessorEnvironmentctx,
+  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
+  SyncReplicationStatefrom,
+  SyncReplicationStateto)
+Called after transiting the current cluster state for the 
specified synchronous replication peer
+
+
+
+default void
 MasterObserver.postTruncateTable(ObserverContextMasterCoprocessorEnvironmentctx,
  TableNametableName)
 Called after the truncateTable operation has been 
requested.
 
 
-
+
 default void
 MasterObserver.postUnassign(ObserverContextMasterCoprocessorEnvironmentctx,
 RegionInforegionInfo,
@@ -689,7 +698,7 @@
 Called after the region unassignment has been 
requested.
 
 
-
+
 default void
 MasterObserver.postUpdateReplicationPeerConfig(ObserverContextMasterCoprocessorEnvironmentctx,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
@@ -697,14 +706,14 @@
 Called after update peerConfig for the specified peer
 
 
-
+
 default void
 MasterObserver.preAbortProcedure(ObserverContextMasterCoprocessorEnvironmentctx,
  longprocId)
 Called before a abortProcedure request has been 
processed.
 
 
-
+
 default void
 MasterObserver.preAddReplicationPeer(ObserverContextMasterCoprocessorEnvironmentctx,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringpeerId,
@@ -712,48 +721,48 @@
 Called before add a replication peer
 
 
-
+
 default void
 MasterObserver.preAddRSGroup(ObserverContextMasterCoprocessorEnvironmentctx,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
 Called before a new region server group is added
 
 
-
+
 default void
 MasterObserver.preAssign(ObserverContextMasterCoprocessorEnvironmentctx,
  RegionInforegionInfo)
 Called prior to assigning a specific region.
 
 
-
+
 default void
 MasterObserver.preBalance(ObserverContextMasterCoprocessorEnvironmentctx)
 Called prior to requesting rebalancing of the cluster 
regions, though after
  the initial checks for regions in transition and the balance switch 
flag.
 
 
-
+
 default void
 MasterObserver.preBalanceRSGroup(ObserverContextMasterCoprocessorEnvironmentctx,
  https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringgroupName)
 Called before a region server group is removed
 
 
-
+
 default void
 MasterObserver.preBalanceSwitch(ObserverContextMasterCoprocessorEnvironmentctx,
 booleannewValue)
 Called prior to modifying the flag used to enable/disable 
region balancing.
 
 
-
+
 default void
 MasterObserver.preClearDeadServers(ObserverContextMasterCoprocessorEnvironmentctx)
 Called before clear dead region servers.
 
 
-
+
 default void
 MasterObserver.preCloneSnapshot(ObserverContextMasterCoprocessorEnvironmentctx,
 SnapshotDescriptionsnapshot,
@@ -761,7 +770,7 @@
 Called before a snapshot is cloned.
 
 
-
+
 default void
 MasterObserver.preCreateNamespace(ObserverContextMasterCoprocessorEnvironmentctx,
   NamespaceDescriptorns)
@@ -769,7 +778,7 @@
  HMaster.
 
 
-
+
 default void
 MasterObserver.preCreateTable(ObserverContextMasterCoprocessorEnvironmentctx,
   TableDescriptordesc,
@@ -778,7 +787,7 @@
  HMaster.
 
 
-
+
 default void
 MasterObserver.preCreateTableAction(ObserverContextMasterCoprocessorEnvironmentctx,
 TableDescriptordesc,
@@ -787,7 +796,7 @@
  HMaster.
 
 
-
+
 default void
 MasterObserver.preDecommissionRegionServers(ObserverContextMasterCoprocessorEnvironmentctx,
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerNameservers,
@@ -795,7 +804,7 @@
 Called before decommission region 
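
Every hook in this listing is a default method, so an observer implementation overrides only what it needs. A minimal hedged sketch wiring one of them, following the preCreateTable signature shown above (the RegionInfo[] parameter carries the new table's region layout):

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    public class CreateTableAuditObserver implements MasterCoprocessor, MasterObserver {
      @Override
      public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
      }

      @Override
      public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
          TableDescriptor desc, RegionInfo[] regions) throws IOException {
        // Purely illustrative: log the table name before creation proceeds.
        System.out.println("About to create table " + desc.getTableName());
      }
    }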

[19/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

2018-06-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 3cc87d3..afd1ecc 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -121,7 +121,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-class HRegion.RegionScannerImpl
+class HRegion.RegionScannerImpl
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements RegionScanner, Shipper, RpcCallback
 RegionScannerImpl is used to combine scanners from multiple 
Stores (aka column families).
@@ -425,7 +425,7 @@ implements 
 
 storeHeap
-KeyValueHeap storeHeap
+KeyValueHeap storeHeap
 
 
 
@@ -434,7 +434,7 @@ implements 
 
 joinedHeap
-KeyValueHeap joinedHeap
+KeyValueHeap joinedHeap
 Heap of key-values that are not essential for the provided 
filters and are thus read
  on demand, if on-demand column family loading is enabled.
 
@@ -445,7 +445,7 @@ implements 
 
 joinedContinuationRow
-protectedCell joinedContinuationRow
+protectedCell joinedContinuationRow
 If the joined heap data gathering is interrupted due to 
scan limits, this will
  contain the row for which we are populating the values.
 
@@ -456,7 +456,7 @@ implements 
 
 filterClosed
-privateboolean filterClosed
+privateboolean filterClosed
 
 
 
@@ -465,7 +465,7 @@ implements 
 
 stopRow
-protected finalbyte[] stopRow
+protected finalbyte[] stopRow
 
 
 
@@ -474,7 +474,7 @@ implements 
 
 includeStopRow
-protected finalboolean includeStopRow
+protected finalboolean includeStopRow
 
 
 
@@ -483,7 +483,7 @@ implements 
 
 region
-protected finalHRegion region
+protected finalHRegion region
 
 
 
@@ -492,7 +492,7 @@ implements 
 
 comparator
-protected finalCellComparator comparator
+protected finalCellComparator comparator
 
 
 
@@ -501,7 +501,7 @@ implements 
 
 readPt
-private finallong readPt
+private finallong readPt
 
 
 
@@ -510,7 +510,7 @@ implements 
 
 maxResultSize
-private finallong maxResultSize
+private finallong maxResultSize
 
 
 
@@ -519,7 +519,7 @@ implements 
 
 defaultScannerContext
-private finalScannerContext defaultScannerContext
+private finalScannerContext defaultScannerContext
 
 
 
@@ -528,7 +528,7 @@ implements 
 
 filter
-private finalFilterWrapper filter
+private finalFilterWrapper filter
 
 
 
@@ -545,7 +545,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scanscan,
+RegionScannerImpl(Scanscan,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners,
   HRegionregion)
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
@@ -561,7 +561,7 @@ implements 
 
 RegionScannerImpl
-RegionScannerImpl(Scanscan,
+RegionScannerImpl(Scanscan,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners,
   HRegionregion,
   longnonceGroup,
@@ -587,7 +587,7 @@ implements 
 
 getRegionInfo
-publicRegionInfogetRegionInfo()
+publicRegionInfogetRegionInfo()
 
 Specified by:
 getRegionInfoin
 interfaceRegionScanner
@@ -602,7 +602,7 @@ implements 
 
 initializeScanners
-protectedvoidinitializeScanners(Scanscan,
+protectedvoidinitializeScanners(Scanscan,
   https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScanneradditionalScanners)
throws https://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -617,7 +617,7 @@ implements 
 
 initializeKVHeap
-protectedvoidinitializeKVHeap(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerscanners,
+protectedvoidinitializeKVHeap(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerscanners,
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListKeyValueScannerjoinedScanners,
 HRegionregion)
  throws 

[19/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

2018-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationFuture.html
index b6e7636..592c2cc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ReplicationFuture.html
@@ -356,3901 +356,3924 @@
 348  public FutureVoid 
modifyTableAsync(TableDescriptor td) throws IOException {
 349ModifyTableResponse response = 
executeCallable(
 350  new 
MasterCallableModifyTableResponse(getConnection(), 
getRpcControllerFactory()) {
-351@Override
-352protected ModifyTableResponse 
rpcCall() throws Exception {
-353  
setPriority(td.getTableName());
-354  ModifyTableRequest request = 
RequestConverter.buildModifyTableRequest(
-355td.getTableName(), td, 
ng.getNonceGroup(), ng.newNonce());
-356  return 
master.modifyTable(getRpcController(), request);
-357}
-358  });
-359return new ModifyTableFuture(this, 
td.getTableName(), response);
-360  }
-361
-362  @Override
-363  public ListTableDescriptor 
listTableDescriptorsByNamespace(byte[] name) throws IOException {
-364return executeCallable(new 
MasterCallableListTableDescriptor(getConnection(),
-365getRpcControllerFactory()) {
-366  @Override
-367  protected 
ListTableDescriptor rpcCall() throws Exception {
-368return 
master.listTableDescriptorsByNamespace(getRpcController(),
-369
ListTableDescriptorsByNamespaceRequest.newBuilder()
-370  
.setNamespaceName(Bytes.toString(name)).build())
-371.getTableSchemaList()
-372.stream()
-373
.map(ProtobufUtil::toTableDescriptor)
-374
.collect(Collectors.toList());
-375  }
-376});
-377  }
-378
-379  @Override
-380  public ListTableDescriptor 
listTableDescriptors(ListTableName tableNames) throws IOException {
-381return executeCallable(new 
MasterCallableListTableDescriptor(getConnection(),
-382getRpcControllerFactory()) {
-383  @Override
-384  protected 
ListTableDescriptor rpcCall() throws Exception {
-385GetTableDescriptorsRequest req 
=
-386
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-387  return 
ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-388  req));
-389  }
-390});
-391  }
-392
-393  @Override
-394  public ListRegionInfo 
getRegions(final ServerName sn) throws IOException {
-395AdminService.BlockingInterface admin 
= this.connection.getAdmin(sn);
-396// TODO: There is no timeout on this 
controller. Set one!
-397HBaseRpcController controller = 
rpcControllerFactory.newController();
-398return 
ProtobufUtil.getOnlineRegions(controller, admin);
-399  }
-400
-401  @Override
-402  public ListRegionInfo 
getRegions(TableName tableName) throws IOException {
-403if 
(TableName.isMetaTableName(tableName)) {
-404  return 
Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-405} else {
-406  return 
MetaTableAccessor.getTableRegions(connection, tableName, true);
-407}
-408  }
-409
-410  private static class 
AbortProcedureFuture extends ProcedureFutureBoolean {
-411private boolean isAbortInProgress;
-412
-413public AbortProcedureFuture(
-414final HBaseAdmin admin,
-415final Long procId,
-416final Boolean abortProcResponse) 
{
-417  super(admin, procId);
-418  this.isAbortInProgress = 
abortProcResponse;
-419}
-420
-421@Override
-422public Boolean get(long timeout, 
TimeUnit unit)
-423throws InterruptedException, 
ExecutionException, TimeoutException {
-424  if (!this.isAbortInProgress) {
-425return false;
-426  }
-427  super.get(timeout, unit);
-428  return true;
-429}
-430  }
-431
-432  /** @return Connection used by this 
object. */
-433  @Override
-434  public Connection getConnection() {
-435return connection;
-436  }
-437
-438  @Override
-439  public boolean tableExists(final 
TableName tableName) throws IOException {
-440return executeCallable(new 
RpcRetryingCallableBoolean() {
-441  @Override
-442  protected Boolean rpcCall(int 
callTimeout) throws Exception {
-443return 
MetaTableAccessor.tableExists(connection, tableName);
-444  }
-445});
-446  }
-447
-448  @Override
-449  public HTableDescriptor[] listTables() 
throws IOException {
-450return listTables((Pattern)null, 
false);
-451  }
-452
-453  @Override
-454  public HTableDescriptor[] 
listTables(Pattern pattern) throws IOException {
-455   
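
All of the rewritten methods above sit behind the public Admin interface; a small client-side sketch exercising two of them (the table name "demo" is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AdminUsageSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("demo");
          if (admin.tableExists(tn)) {
            // getRegions(TableName) is among the calls touched by this diff.
            System.out.println("regions: " + admin.getRegions(tn).size());
          }
        }
      }
    }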

[19/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

2018-06-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.ReplicationBarrierResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.ReplicationBarrierResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.ReplicationBarrierResult.html
index fea2b5a..c7a6cc4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.ReplicationBarrierResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.ReplicationBarrierResult.html
@@ -1354,816 +1354,824 @@
 1346   */
 1347  public static void 
putsToMetaTable(final Connection connection, final ListPut ps)
 1348  throws IOException {
-1349try (Table t = 
getMetaHTable(connection)) {
-1350  debugLogMutations(ps);
-1351  t.put(ps);
-1352}
-1353  }
-1354
-1355  /**
-1356   * Delete the passed 
coded/code from the codehbase:meta/code 
table.
-1357   * @param connection connection we're 
using
-1358   * @param d Delete to add to 
hbase:meta
-1359   */
-1360  private static void 
deleteFromMetaTable(final Connection connection, final Delete d)
-1361  throws IOException {
-1362ListDelete dels = new 
ArrayList(1);
-1363dels.add(d);
-1364deleteFromMetaTable(connection, 
dels);
-1365  }
-1366
-1367  /**
-1368   * Delete the passed 
codedeletes/code from the codehbase:meta/code 
table.
-1369   * @param connection connection we're 
using
-1370   * @param deletes Deletes to add to 
hbase:meta  This list should support #remove.
-1371   */
-1372  private static void 
deleteFromMetaTable(final Connection connection, final ListDelete 
deletes)
-1373  throws IOException {
-1374try (Table t = 
getMetaHTable(connection)) {
-1375  debugLogMutations(deletes);
-1376  t.delete(deletes);
-1377}
-1378  }
-1379
-1380  /**
-1381   * Deletes some replica columns 
corresponding to replicas for the passed rows
-1382   * @param metaRows rows in 
hbase:meta
-1383   * @param replicaIndexToDeleteFrom the 
replica ID we would start deleting from
-1384   * @param numReplicasToRemove how many 
replicas to remove
-1385   * @param connection connection we're 
using to access meta table
-1386   */
-1387  public static void 
removeRegionReplicasFromMeta(Setbyte[] metaRows,
-1388int replicaIndexToDeleteFrom, int 
numReplicasToRemove, Connection connection)
-1389  throws IOException {
-1390int absoluteIndex = 
replicaIndexToDeleteFrom + numReplicasToRemove;
-1391for (byte[] row : metaRows) {
-1392  long now = 
EnvironmentEdgeManager.currentTime();
-1393  Delete deleteReplicaLocations = 
new Delete(row);
-1394  for (int i = 
replicaIndexToDeleteFrom; i  absoluteIndex; i++) {
-1395
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1396  getServerColumn(i), now);
-1397
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1398  getSeqNumColumn(i), now);
-1399
deleteReplicaLocations.addColumns(getCatalogFamily(),
-1400  getStartCodeColumn(i), now);
-1401  }
-1402  deleteFromMetaTable(connection, 
deleteReplicaLocations);
-1403}
-1404  }
-1405
-1406  /**
-1407   * Execute the passed 
codemutations/code against codehbase:meta/code 
table.
-1408   * @param connection connection we're 
using
-1409   * @param mutations Puts and Deletes 
to execute on hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void 
mutateMetaTable(final Connection connection,
-1413 
final ListMutation mutations)
-1414throws IOException {
-1415Table t = 
getMetaHTable(connection);
-1416try {
-1417  debugLogMutations(mutations);
-1418  t.batch(mutations, null);
-1419} catch (InterruptedException e) {
-1420  InterruptedIOException ie = new 
InterruptedIOException(e.getMessage());
-1421  ie.initCause(e);
-1422  throw ie;
-1423} finally {
-1424  t.close();
-1425}
-1426  }
-1427
-1428  private static void 
addRegionStateToPut(Put put, RegionState.State state) throws IOException {
-1429
put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-1430.setRow(put.getRow())
-1431
.setFamily(HConstants.CATALOG_FAMILY)
-1432
.setQualifier(getRegionStateColumn())
-1433
.setTimestamp(put.getTimestamp())
-1434.setType(Cell.Type.Put)
-1435
.setValue(Bytes.toBytes(state.name()))
-1436.build());
-1437  }
-1438
-1439  /**
-1440   * Adds daughter region infos to 
hbase:meta row for the specified region. Note that this does not
-1441   * add its daughter's as different 
rows, but adds information about the daughters in the same row
-1442   * as the parent. Use
-1443   * {@link #splitRegion(Connection, 
RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)}
-1444   * if you want to do that.
-1445   * @param connection 
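
addRegionStateToPut above shows the CellBuilder pattern used for meta edits: the new cell is built against the Put's own row and timestamp. A standalone sketch of the same shape; the "state" qualifier stands in for getRegionStateColumn():

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellBuilderFactory;
    import org.apache.hadoop.hbase.CellBuilderType;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionStatePutSketch {
      // Attach a region-state cell to an existing Put, mirroring the
      // SHALLOW_COPY builder chain in the method above.
      static Put addState(Put put, String state) throws IOException {
        put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
            .setRow(put.getRow())
            .setFamily(HConstants.CATALOG_FAMILY)
            .setQualifier(Bytes.toBytes("state"))
            .setTimestamp(put.getTimestamp())
            .setType(Cell.Type.Put)
            .setValue(Bytes.toBytes(state))
            .build());
        return put;
      }
    }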

[19/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

2018-06-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionServerTracker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionServerTracker.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionServerTracker.html
index d60bbd0..cc0dba8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionServerTracker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/RegionServerTracker.html
@@ -7,179 +7,190 @@
 
 
 001/**
-002 *
-003 * Licensed to the Apache Software 
Foundation (ASF) under one
-004 * or more contributor license 
agreements.  See the NOTICE file
-005 * distributed with this work for 
additional information
-006 * regarding copyright ownership.  The 
ASF licenses this file
-007 * to you under the Apache License, 
Version 2.0 (the
-008 * "License"); you may not use this file 
except in compliance
-009 * with the License.  You may obtain a 
copy of the License at
-010 *
-011 * 
http://www.apache.org/licenses/LICENSE-2.0
-012 *
-013 * Unless required by applicable law or 
agreed to in writing, software
-014 * distributed under the License is 
distributed on an "AS IS" BASIS,
-015 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
-016 * See the License for the specific 
language governing permissions and
-017 * limitations under the License.
-018 */
-019package org.apache.hadoop.hbase.master;
-020
-021import java.io.IOException;
-022import java.io.InterruptedIOException;
-023import java.util.ArrayList;
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase.master;
+019
+020import java.io.IOException;
+021import java.io.InterruptedIOException;
+022import java.util.HashSet;
+023import java.util.Iterator;
 024import java.util.List;
-025import java.util.NavigableMap;
-026import java.util.TreeMap;
-027
-028import 
org.apache.hadoop.hbase.ServerName;
-029import 
org.apache.hadoop.hbase.zookeeper.ZKListener;
-030import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-031import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-032import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-033import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-034import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
-035import 
org.apache.yetus.audience.InterfaceAudience;
-036import 
org.apache.zookeeper.KeeperException;
-037import org.slf4j.Logger;
-038import org.slf4j.LoggerFactory;
-039
-040/**
-041 * Tracks the online region servers via 
ZK.
-042 *
-043 * pHandling of new RSs checking 
in is done via RPC.  This class
-044 * is only responsible for watching for 
expired nodes.  It handles
-045 * listening for changes in the RS node 
list and watching each node.
-046 *
-047 * pIf an RS node gets deleted, 
this automatically handles calling of
-048 * {@link 
ServerManager#expireServer(ServerName)}
-049 */
-050@InterfaceAudience.Private
-051public class RegionServerTracker extends 
ZKListener {
-052  private static final Logger LOG = 
LoggerFactory.getLogger(RegionServerTracker.class);
-053  private final 
NavigableMapServerName, RegionServerInfo regionServers = new 
TreeMap();
-054  private ServerManager serverManager;
-055  private MasterServices server;
-056
-057  public RegionServerTracker(ZKWatcher 
watcher,
-058  MasterServices server, 
ServerManager serverManager) {
-059super(watcher);
-060this.server = server;
-061this.serverManager = serverManager;
-062  }
-063
-064  /**
-065   * Starts the tracking of online 
RegionServers.
-066   *
-067   * pAll RSs will be tracked 
after this method is called.
-068   *
-069   * @throws KeeperException
-070   * @throws IOException
-071   */
-072  public void start() throws 
KeeperException, IOException {
-073watcher.registerListener(this);
-074ListString servers =
-075  
ZKUtil.listChildrenAndWatchThem(watcher, watcher.getZNodePaths().rsZNode);
-076refresh(servers);
-077  }
-078
-079  private void refresh(final 
ListString servers) throws IOException {
-080
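
Per the class comment above, the tracker's job when an RS znode disappears is to expire that server. A hedged sketch of the listener callback that realizes it; getNodeName and ServerName.valueOf are standard helpers, but treat the exact body as illustrative rather than the shipped implementation:

    // Inside a ZKListener subclass such as RegionServerTracker, which
    // already holds the watcher and serverManager fields shown above:
    @Override
    public void nodeDeleted(String path) {
      if (path.startsWith(watcher.getZNodePaths().rsZNode)) {
        String name = ZKUtil.getNodeName(path); // child name under the rs znode
        serverManager.expireServer(ServerName.valueOf(name));
      }
    }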

[19/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.AssignmentProcedureEvent.html
@@ -316,687 +316,728 @@
 308}
 309  }
 310
-311  public enum ServerState { ONLINE, 
SPLITTING, OFFLINE }
-312  public static class ServerStateNode 
implements ComparableServerStateNode {
-313private final ServerReportEvent 
reportEvent;
-314
-315private final 
SetRegionStateNode regions;
-316private final ServerName 
serverName;
-317
-318private volatile ServerState state = 
ServerState.ONLINE;
-319private volatile int versionNumber = 
0;
-320
-321public ServerStateNode(final 
ServerName serverName) {
-322  this.serverName = serverName;
-323  this.regions = 
ConcurrentHashMap.newKeySet();
-324  this.reportEvent = new 
ServerReportEvent(serverName);
-325}
-326
-327public ServerName getServerName() {
-328  return serverName;
-329}
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315/**
+316 * Initial state. Available.
+317 */
+318ONLINE,
+319
+320/**
+321 * Server expired/crashed. Currently 
undergoing WAL splitting.
+322 */
+323SPLITTING,
+324
+325/**
+326 * WAL splitting done.
+327 */
+328OFFLINE
+329  }
 330
-331public ServerState getState() {
-332  return state;
-333}
-334
-335public int getVersionNumber() {
-336  return versionNumber;
-337}
-338
-339public ProcedureEvent? 
getReportEvent() {
-340  return reportEvent;
-341}
+331  /**
+332   * State of Server; list of hosted 
regions, etc.
+333   */
+334  public static class ServerStateNode 
implements ComparableServerStateNode {
+335private final ServerReportEvent 
reportEvent;
+336
+337private final 
SetRegionStateNode regions;
+338private final ServerName 
serverName;
+339
+340private volatile ServerState state = 
ServerState.ONLINE;
+341private volatile int versionNumber = 
0;
 342
-343public boolean isInState(final 
ServerState... expected) {
-344  boolean expectedState = false;
-345  if (expected != null) {
-346for (int i = 0; i  
expected.length; ++i) {
-347  expectedState |= (state == 
expected[i]);
-348}
-349  }
-350  return expectedState;
+343public ServerStateNode(final 
ServerName serverName) {
+344  this.serverName = serverName;
+345  this.regions = 
ConcurrentHashMap.newKeySet();
+346  this.reportEvent = new 
ServerReportEvent(serverName);
+347}
+348
+349public ServerName getServerName() {
+350  return serverName;
 351}
 352
-353public void setState(final 
ServerState state) {
-354  this.state = state;
+353public ServerState getState() {
+354  return state;
 355}
 356
-357public void setVersionNumber(final 
int versionNumber) {
-358  this.versionNumber = 
versionNumber;
+357public int getVersionNumber() {
+358  return versionNumber;
 359}
 360
-361public SetRegionStateNode 
getRegions() {
-362  return regions;
+361public ProcedureEvent? 
getReportEvent() {
+362  return reportEvent;
 363}
 364
-365public int getRegionCount() {
-366  return regions.size();
+365public boolean isOffline() {
+366  return 
this.state.equals(ServerState.OFFLINE);
 367}
 368
-369public ArrayListRegionInfo 
getRegionInfoList() {
-370  ArrayListRegionInfo hris = 
new ArrayListRegionInfo(regions.size());
-371  for (RegionStateNode region: 
regions) {
-372
hris.add(region.getRegionInfo());
-373  }
-374  return hris;
-375}
-376
-377public void addRegion(final 
RegionStateNode regionNode) {
-378  this.regions.add(regionNode);
-379}
-380
-381public void removeRegion(final 
RegionStateNode regionNode) {
-382  this.regions.remove(regionNode);
-383}
-384
-385@Override
-386public int compareTo(final 
ServerStateNode other) {
-387  return 
getServerName().compareTo(other.getServerName());
-388}
-389
-390@Override
-391public int hashCode() {
-392  return 
getServerName().hashCode();
+369public boolean isInState(final 
ServerState... expected) {
+370  boolean expectedState = false;
+371  if (expected != null) {
+372for (int i = 0; i  
expected.length; ++i) {
+373  expectedState |= (state == 
expected[i]);
+374}
+375  
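
isInState above is a varargs OR over the expected states, so a caller can test a ServerStateNode against several acceptable states in one call; illustrative usage against a node named serverNode:

    // Equivalent to (state == SPLITTING || state == OFFLINE).
    boolean walSplitStartedOrDone = serverNode.isInState(
        RegionStates.ServerState.SPLITTING, RegionStates.ServerState.OFFLINE);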

[19/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

2018-05-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
index 3da432b..d30fa8f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatchOperation.html
@@ -928,7690 +928,7698 @@
 920  CollectionHStore stores = 
this.stores.values();
 921  try {
 922// update the stores that we are 
replaying
-923
stores.forEach(HStore::startReplayingFromWAL);
-924// Recover any edits if 
available.
-925maxSeqId = Math.max(maxSeqId,
-926  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-927// Make sure mvcc is up to max.
-928this.mvcc.advanceTo(maxSeqId);
-929  } finally {
-930// update the stores that we are 
done replaying
-931
stores.forEach(HStore::stopReplayingFromWAL);
-932  }
-933}
-934this.lastReplayedOpenRegionSeqId = 
maxSeqId;
-935
-936
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-937this.writestate.flushRequested = 
false;
-938this.writestate.compacting.set(0);
-939
-940if (this.writestate.writesEnabled) 
{
-941  // Remove temporary data left over 
from old regions
-942  status.setStatus("Cleaning up 
temporary data from old regions");
-943  fs.cleanupTempDir();
-944}
-945
-946if (this.writestate.writesEnabled) 
{
-947  status.setStatus("Cleaning up 
detritus from prior splits");
-948  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-949  // these directories here on open.  
We may be opening a region that was
-950  // being split but we crashed in 
the middle of it all.
-951  fs.cleanupAnySplitDetritus();
-952  fs.cleanupMergesDir();
-953}
-954
-955// Initialize split policy
-956this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-957
-958// Initialize flush policy
-959this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-960
-961long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-962for (HStore store: stores.values()) 
{
-963  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-964}
-965
-966// Use maximum of log sequenceid or 
that which was found in stores
-967// (particularly if no recovered 
edits, seqid will be -1).
-968long maxSeqIdFromFile =
-969  
WALSplitter.getMaxRegionSequenceId(fs.getFileSystem(), fs.getRegionDir());
-970long nextSeqId = Math.max(maxSeqId, 
maxSeqIdFromFile) + 1;
-971if (writestate.writesEnabled) {
-972  
WALSplitter.writeRegionSequenceIdFile(fs.getFileSystem(), fs.getRegionDir(), 
nextSeqId - 1);
-973}
-974
-975LOG.info("Opened {}; next 
sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqId);
-976
-977// A region can be reopened if failed 
a split; reset flags
-978this.closing.set(false);
-979this.closed.set(false);
-980
-981if (coprocessorHost != null) {
-982  status.setStatus("Running 
coprocessor post-open hooks");
-983  coprocessorHost.postOpen();
-984}
+923LOG.debug("replaying wal for " + 
this.getRegionInfo().getEncodedName());
+924
stores.forEach(HStore::startReplayingFromWAL);
+925// Recover any edits if 
available.
+926maxSeqId = Math.max(maxSeqId,
+927  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+928// Make sure mvcc is up to max.
+929this.mvcc.advanceTo(maxSeqId);
+930  } finally {
+931LOG.debug("stopping wal replay 
for " + this.getRegionInfo().getEncodedName());
+932// update the stores that we are 
done replaying
+933
stores.forEach(HStore::stopReplayingFromWAL);
+934  }
+935}
+936this.lastReplayedOpenRegionSeqId = 
maxSeqId;
+937
+938
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+939this.writestate.flushRequested = 
false;
+940this.writestate.compacting.set(0);
+941
+942if (this.writestate.writesEnabled) 
{
+943  LOG.debug("Cleaning up temporary 
data for " + this.getRegionInfo().getEncodedName());
+944  // Remove temporary data left over 
from old regions
+945  status.setStatus("Cleaning up 
temporary data from old regions");
+946  fs.cleanupTempDir();
+947}
+948
+949if (this.writestate.writesEnabled) 
{
+950  status.setStatus("Cleaning up 
detritus from prior splits");
+951  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
+952  // these 
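
The hunk above adds debug logging around WAL replay but keeps the essential bracketing: every store is flipped into replay mode, and the finally block guarantees the flip back even if replaying recovered edits throws. A stripped-down sketch of that shape with stand-in types:

    import java.util.List;

    public class ReplayBracketSketch {
      interface Store { void startReplayingFromWAL(); void stopReplayingFromWAL(); }

      static long replay(List<Store> stores, long maxSeqId) {
        try {
          stores.forEach(Store::startReplayingFromWAL);
          return Math.max(maxSeqId, 0L); // stand-in for replayRecoveredEditsIfAny(...)
        } finally {
          // Runs on success and on failure alike, mirroring the diff above.
          stores.forEach(Store::stopReplayingFromWAL);
        }
      }
    }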

[19/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

2018-05-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
index a99b4a7..119472c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.LogsComparator.html
@@ -109,528 +109,529 @@
 101  protected FileSystem fs;
 102  // id of this cluster
 103  private UUID clusterId;
-104  // id of the other cluster
-105  private UUID peerClusterId;
-106  // total number of edits we replicated
-107  private AtomicLong totalReplicatedEdits = new AtomicLong(0);
-108  // The znode we currently play with
-109  protected String queueId;
-110  // Maximum number of retries before taking bold actions
-111  private int maxRetriesMultiplier;
-112  // Indicates if this particular source is running
-113  private volatile boolean sourceRunning = false;
-114  // Metrics for this source
-115  private MetricsSource metrics;
-116  // WARN threshold for the number of queued logs, defaults to 2
-117  private int logQueueWarnThreshold;
-118  // ReplicationEndpoint which will handle the actual replication
-119  private volatile ReplicationEndpoint replicationEndpoint;
-120  // A filter (or a chain of filters) for the WAL entries.
-121  protected WALEntryFilter walEntryFilter;
-122  // throttler
-123  private ReplicationThrottler throttler;
-124  private long defaultBandwidth;
-125  private long currentBandwidth;
-126  private WALFileLengthProvider walFileLengthProvider;
-127  protected final ConcurrentHashMap<String, ReplicationSourceShipper> workerThreads =
-128      new ConcurrentHashMap<>();
+104  // total number of edits we replicated
+105  private AtomicLong totalReplicatedEdits = new AtomicLong(0);
+106  // The znode we currently play with
+107  protected String queueId;
+108  // Maximum number of retries before taking bold actions
+109  private int maxRetriesMultiplier;
+110  // Indicates if this particular source is running
+111  private volatile boolean sourceRunning = false;
+112  // Metrics for this source
+113  private MetricsSource metrics;
+114  // WARN threshold for the number of queued logs, defaults to 2
+115  private int logQueueWarnThreshold;
+116  // ReplicationEndpoint which will handle the actual replication
+117  private volatile ReplicationEndpoint replicationEndpoint;
+118  // A filter (or a chain of filters) for the WAL entries.
+119  protected volatile WALEntryFilter walEntryFilter;
+120  // throttler
+121  private ReplicationThrottler throttler;
+122  private long defaultBandwidth;
+123  private long currentBandwidth;
+124  private WALFileLengthProvider walFileLengthProvider;
+125  protected final ConcurrentHashMap<String, ReplicationSourceShipper> workerThreads =
+126      new ConcurrentHashMap<>();
+127
+128  private AtomicLong totalBufferUsed;
 129
-130  private AtomicLong totalBufferUsed;
-131
-132  public static final String WAIT_ON_ENDPOINT_SECONDS =
-133    "hbase.replication.wait.on.endpoint.seconds";
-134  public static final int DEFAULT_WAIT_ON_ENDPOINT_SECONDS = 30;
-135  private int waitOnEndpointSeconds = -1;
+130  public static final String WAIT_ON_ENDPOINT_SECONDS =
+131    "hbase.replication.wait.on.endpoint.seconds";
+132  public static final int DEFAULT_WAIT_ON_ENDPOINT_SECONDS = 30;
+133  private int waitOnEndpointSeconds = -1;
+134
+135  private Thread initThread;
 136
-137  private Thread initThread;
-138
-139  /**
-140   * Instantiation method used by region servers
-141   * @param conf configuration to use
-142   * @param fs file system to use
-143   * @param manager replication manager to ping to
-144   * @param server the server for this region server
-145   * @param queueId the id of our replication queue
-146   * @param clusterId unique UUID for the cluster
-147   * @param metrics metrics for replication source
-148   */
-149  @Override
-150  public void init(Configuration conf, FileSystem fs, ReplicationSourceManager manager,
-151      ReplicationQueueStorage queueStorage, ReplicationPeer replicationPeer, Server server,
-152      String queueId, UUID clusterId, WALFileLengthProvider walFileLengthProvider,
-153      MetricsSource metrics) throws IOException {
-154    this.server = server;
-155    this.conf = HBaseConfiguration.create(conf);
-156    this.waitOnEndpointSeconds =
-157      this.conf.getInt(WAIT_ON_ENDPOINT_SECONDS, DEFAULT_WAIT_ON_ENDPOINT_SECONDS);
-158    decorateConf();
-159    this.sleepForRetries =
-160        this.conf.getLong("replication.source.sleepforretries",
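
One detail worth noting in the + side above: walEntryFilter becomes volatile because the source is now initialized on a separate thread (see the new initThread field) while shipper threads read the field. A sketch of that safe-publication pattern; every name except walEntryFilter is a hypothetical stand-in:

import java.util.concurrent.TimeUnit;

class FilterPublishSketch {
  interface WALEntryFilter { boolean accept(String entry); }

  private volatile WALEntryFilter walEntryFilter; // written by init thread, read by shippers

  void startInit() {
    Thread initThread = new Thread(() -> {
      // ... set up the endpoint, then publish the filter in one volatile write
      walEntryFilter = e -> !e.isEmpty();
    }, "ReplicationSource-init");
    initThread.start();
  }

  boolean ship(String entry) throws InterruptedException {
    WALEntryFilter filter = walEntryFilter;
    while (filter == null) {            // not yet published; wait briefly
      TimeUnit.MILLISECONDS.sleep(10);
      filter = walEntryFilter;
    }
    return filter.accept(entry);
  }
}

Without volatile, a shipper thread could observe a stale null (or a partially constructed filter) indefinitely.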

[19/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
index 6313ac8..381917f 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";

-static class PerformanceEvaluation.TestOptions
+static class PerformanceEvaluation.TestOptions
 extends Object
 Wraps up options passed to PerformanceEvaluation.
  This makes tracking all these arguments a little easier.
@@ -695,7 +695,7 @@ extends Object

 cmdName
-String cmdName
+String cmdName

@@ -704,7 +704,7 @@ extends Object

 nomapred
-boolean nomapred
+boolean nomapred

@@ -713,7 +713,7 @@ extends Object

 filterAll
-boolean filterAll
+boolean filterAll

@@ -722,7 +722,7 @@ extends Object

 startRow
-int startRow
+int startRow

@@ -731,7 +731,7 @@ extends Object

 size
-float size
+float size

@@ -740,7 +740,7 @@ extends Object

 perClientRunRows
-int perClientRunRows
+int perClientRunRows

@@ -749,7 +749,7 @@ extends Object

 numClientThreads
-int numClientThreads
+int numClientThreads

@@ -758,7 +758,7 @@ extends Object

 totalRows
-int totalRows
+int totalRows

@@ -767,7 +767,7 @@ extends Object

 measureAfter
-int measureAfter
+int measureAfter

@@ -776,7 +776,7 @@ extends Object

 sampleRate
-float sampleRate
+float sampleRate

@@ -785,7 +785,7 @@ extends Object

 traceRate
-double traceRate
+double traceRate

@@ -794,7 +794,7 @@ extends Object

 tableName
-String tableName
+String tableName

@@ -803,7 +803,7 @@ extends Object

 flushCommits
-boolean flushCommits
+boolean flushCommits

@@ -812,7 +812,7 @@ extends Object

 writeToWAL
-boolean writeToWAL
+boolean writeToWAL

@@ -821,7 +821,7 @@ extends Object

 autoFlush
-boolean autoFlush
+boolean autoFlush

@@ -830,7 +830,7 @@ extends Object

 oneCon
-boolean oneCon
+boolean oneCon

@@ -839,7 +839,7 @@ extends Object

 useTags
-boolean useTags
+boolean useTags

@@ -848,7 +848,7 @@ extends Object

 noOfTags
-int noOfTags
+int noOfTags

@@ -857,7 +857,7 @@ extends Object

 reportLatency
-boolean reportLatency
+boolean reportLatency

@@ -866,7 +866,7 @@ extends Object

 multiGet
-int multiGet
+int multiGet

@@ -875,7 +875,7 @@ extends Object

 randomSleep
-int randomSleep
+int randomSleep

@@ -884,7 +884,7 @@ extends Object

 inMemoryCF
-boolean inMemoryCF
+boolean inMemoryCF

@@ -893,7 +893,7 @@ extends Object

 presplitRegions
-int presplitRegions
+int presplitRegions

@@ -902,7 +902,7 @@ extends
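
TestOptions, per its own Javadoc above, is a plain options object: dozens of run settings travel together instead of as long parameter lists. A minimal sketch in the same spirit, reproducing only a few of the fields listed above:

class TestOptionsSketch {
  String cmdName;              // which test to run, e.g. randomRead
  boolean nomapred;            // run multithreaded instead of as a MapReduce job
  int startRow;
  int perClientRunRows;
  float sampleRate;
  String tableName = "TestTable";
}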

[19/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

2018-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 4a879bb..7d27402 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -300,7 +300,7 @@
 292  private Map<String, com.google.protobuf.Service> coprocessorServiceHandlers = Maps.newHashMap();
 293
 294  // Track data size in all memstores
-295  private final MemStoreSizing memStoreSize = new MemStoreSizing();
+295  private final MemStoreSizing memStoreSizing = new ThreadSafeMemStoreSizing();
 296  private final RegionServicesForStores regionServicesForStores = new RegionServicesForStores(this);
 297
 298  // Debug possible data loss due to WAL off
@@ -1218,7389 +1218,7399 @@
 1210   * Increase the size of mem store in this region and the size of global mem
 1211   * store
 1212   */
-1213  public void incMemStoreSize(MemStoreSize memStoreSize) {
-1214    if (this.rsAccounting != null) {
-1215      rsAccounting.incGlobalMemStoreSize(memStoreSize);
-1216    }
-1217    long dataSize;
-1218    synchronized (this.memStoreSize) {
-1219      this.memStoreSize.incMemStoreSize(memStoreSize);
-1220      dataSize = this.memStoreSize.getDataSize();
-1221    }
-1222    checkNegativeMemStoreDataSize(dataSize, memStoreSize.getDataSize());
-1223  }
-1224
-1225  public void decrMemStoreSize(MemStoreSize memStoreSize) {
-1226    if (this.rsAccounting != null) {
-1227      rsAccounting.decGlobalMemStoreSize(memStoreSize);
-1228    }
-1229    long size;
-1230    synchronized (this.memStoreSize) {
-1231      this.memStoreSize.decMemStoreSize(memStoreSize);
-1232      size = this.memStoreSize.getDataSize();
+1213  void incMemStoreSize(MemStoreSize mss) {
+1214    incMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1215  }
+1216
+1217  void incMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1218    if (this.rsAccounting != null) {
+1219      rsAccounting.incGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1220    }
+1221    long dataSize =
+1222        this.memStoreSizing.incMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1223    checkNegativeMemStoreDataSize(dataSize, dataSizeDelta);
+1224  }
+1225
+1226  void decrMemStoreSize(MemStoreSize mss) {
+1227    decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1228  }
+1229
+1230  void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1231    if (this.rsAccounting != null) {
+1232      rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
 1233    }
-1234    checkNegativeMemStoreDataSize(size, memStoreSize.getDataSize());
-1235  }
-1236
-1237  private void checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) {
-1238    // This is extremely bad if we make memStoreSize negative. Log as much info on the offending
-1239    // caller as possible. (memStoreSize might be a negative value already -- freeing memory)
-1240    if (memStoreDataSize < 0) {
-1241      LOG.error("Asked to modify this region's (" + this.toString()
-1242          + ") memStoreSize to a negative value which is incorrect. Current memStoreSize="
-1243          + (memStoreDataSize - delta) + ", delta=" + delta, new Exception());
-1244    }
-1245  }
-1246
-1247  @Override
-1248  public RegionInfo getRegionInfo() {
-1249    return this.fs.getRegionInfo();
-1250  }
-1251
-1252  /**
-1253   * @return Instance of {@link RegionServerServices} used by this HRegion.
-1254   * Can be null.
-1255   */
-1256  RegionServerServices getRegionServerServices() {
-1257    return this.rsServices;
-1258  }
-1259
-1260  @Override
-1261  public long getReadRequestsCount() {
-1262    return readRequestsCount.sum();
-1263  }
-1264
-1265  @Override
-1266  public long getFilteredReadRequestsCount() {
-1267    return filteredReadRequestsCount.sum();
-1268  }
-1269
-1270  @Override
-1271  public long getWriteRequestsCount() {
-1272    return writeRequestsCount.sum();
-1273  }
-1274
-1275  @Override
-1276  public long getMemStoreDataSize() {
-1277    return memStoreSize.getDataSize();
-1278  }
-1279
-1280  @Override
-1281  public long getMemStoreHeapSize() {
-1282    return memStoreSize.getHeapSize();
-1283  }
-1284
-1285  @Override
-1286  public long getMemStoreOffHeapSize() {
-1287    return memStoreSize.getOffHeapSize();
-1288  }
-1289
-1290  /** @return store services for this region, to access services required by store level needs */
-1291
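
The + side above replaces "synchronized, mutate, then read" with a ThreadSafeMemStoreSizing whose inc/dec methods return the updated data size in one step. A sketch of that idea using AtomicLong; this is an illustration of the technique, not the HBase implementation:

import java.util.concurrent.atomic.AtomicLong;

class SizingSketch {
  private final AtomicLong dataSize = new AtomicLong();
  private final AtomicLong heapSize = new AtomicLong();
  private final AtomicLong offHeapSize = new AtomicLong();

  /** Applies the deltas and returns the new data size atomically. */
  long incMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta) {
    heapSize.addAndGet(heapDelta);
    offHeapSize.addAndGet(offHeapDelta);
    return dataSize.addAndGet(dataDelta); // callers can check this for negatives lock-free
  }

  long decMemStoreSize(long dataDelta, long heapDelta, long offHeapDelta) {
    return incMemStoreSize(-dataDelta, -heapDelta, -offHeapDelta);
  }
}

Returning the post-update value is what lets checkNegativeMemStoreDataSize run without any lock: the read and the write are a single atomic operation.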

[19/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

2018-05-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
index 2510283..418c60c 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
@@ -77,77 +77,77 @@
 069import org.apache.hadoop.hbase.client.RowMutations;
 070import org.apache.hadoop.hbase.client.Scan;
 071import org.apache.hadoop.hbase.client.Table;
-072import org.apache.hadoop.hbase.filter.BinaryComparator;
-073import org.apache.hadoop.hbase.filter.Filter;
-074import org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import org.apache.hadoop.hbase.filter.FilterList;
-076import org.apache.hadoop.hbase.filter.PageFilter;
-077import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import org.apache.hadoop.hbase.io.compress.Compression;
-080import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import org.apache.hadoop.hbase.regionserver.BloomType;
-084import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import org.apache.hadoop.hbase.trace.TraceUtil;
-088import org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import org.apache.hadoop.hbase.util.Bytes;
-090import org.apache.hadoop.hbase.util.Hash;
-091import org.apache.hadoop.hbase.util.MurmurHash;
-092import org.apache.hadoop.hbase.util.Pair;
-093import org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import org.apache.hadoop.mapreduce.Mapper;
-098import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import org.apache.hadoop.util.ToolRunner;
-103import org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import org.apache.htrace.core.TraceScope;
-106import org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase performance and scalability.  Runs a HBase
-114 * client that steps through one of a set of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random writes test, etc.). Pass on the
-116 * command-line which test to run and how many clients are participating in
-117 * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * <p>This class sets up and runs the evaluation programs described in
-120 * Section 7, <i>Performance Evaluation</i>, of the <a
-121 * href="http://labs.google.com/papers/bigtable.html">Bigtable</a>
-122 * paper, pages 8-10.
-123 *
-124 * <p>By default, runs as a mapreduce job where each mapper runs a single test
-125 * client. Can also run as a non-mapreduce, multithreaded application by
-126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless
-127 * specified otherwise.
-128 */
-129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-130public class PerformanceEvaluation extends Configured implements Tool {
-131  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
-132  static final String RANDOM_READ = "randomRead";
-133  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-134  private static final ObjectMapper MAPPER = new ObjectMapper();
-135  static {
-136    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-137  }
-138
-139  public static final String TABLE_NAME = "TestTable";
-140  public static final byte[] FAMILY_NAME = Bytes.toBytes("info");
-141  public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0);
-142  public static final byte [] QUALIFIER_NAME = COLUMN_ZERO;
+072import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+073import org.apache.hadoop.hbase.filter.BinaryComparator;
+074import org.apache.hadoop.hbase.filter.Filter;
+075import
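
Since PerformanceEvaluation extends Configured and implements Tool (see the class declaration above), it is launched through ToolRunner. A hedged sketch of that launch pattern; EvalSketch is a hypothetical stand-in, not the real class:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class EvalSketch extends Configured implements Tool {
  @Override
  public int run(String[] args) {
    // args would carry the test name (e.g. randomRead) and flags such as --nomapred
    System.out.println("would run: " + String.join(" ", args));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner strips generic Hadoop options and hands the remainder to run()
    System.exit(ToolRunner.run(new Configuration(), new EvalSketch(), args));
  }
}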

[19/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

2018-05-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
index e1bc325..63e7421 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.WorkItemHdfsDir.html
@@ -66,5125 +66,5224 @@
 058import java.util.concurrent.TimeoutException;
 059import java.util.concurrent.atomic.AtomicBoolean;
 060import java.util.concurrent.atomic.AtomicInteger;
-061import org.apache.commons.io.IOUtils;
-062import org.apache.commons.lang3.RandomStringUtils;
-063import org.apache.commons.lang3.StringUtils;
-064import org.apache.hadoop.conf.Configuration;
-065import org.apache.hadoop.conf.Configured;
-066import org.apache.hadoop.fs.FSDataOutputStream;
-067import org.apache.hadoop.fs.FileStatus;
-068import org.apache.hadoop.fs.FileSystem;
-069import org.apache.hadoop.fs.Path;
-070import org.apache.hadoop.fs.permission.FsAction;
-071import org.apache.hadoop.fs.permission.FsPermission;
-072import org.apache.hadoop.hbase.Abortable;
-073import org.apache.hadoop.hbase.Cell;
-074import org.apache.hadoop.hbase.CellUtil;
-075import org.apache.hadoop.hbase.ClusterMetrics;
-076import org.apache.hadoop.hbase.ClusterMetrics.Option;
-077import org.apache.hadoop.hbase.HBaseConfiguration;
-078import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-079import org.apache.hadoop.hbase.HConstants;
-080import org.apache.hadoop.hbase.HRegionInfo;
-081import org.apache.hadoop.hbase.HRegionLocation;
-082import org.apache.hadoop.hbase.KeyValue;
-083import org.apache.hadoop.hbase.MasterNotRunningException;
-084import org.apache.hadoop.hbase.MetaTableAccessor;
-085import org.apache.hadoop.hbase.RegionLocations;
-086import org.apache.hadoop.hbase.ServerName;
-087import org.apache.hadoop.hbase.TableName;
-088import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-089import org.apache.hadoop.hbase.client.Admin;
-090import org.apache.hadoop.hbase.client.ClusterConnection;
-091import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-092import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-093import org.apache.hadoop.hbase.client.Connection;
-094import org.apache.hadoop.hbase.client.ConnectionFactory;
-095import org.apache.hadoop.hbase.client.Delete;
-096import org.apache.hadoop.hbase.client.Get;
-097import org.apache.hadoop.hbase.client.Put;
-098import org.apache.hadoop.hbase.client.RegionInfo;
-099import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-100import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-101import org.apache.hadoop.hbase.client.Result;
-102import org.apache.hadoop.hbase.client.RowMutations;
-103import org.apache.hadoop.hbase.client.Table;
-104import org.apache.hadoop.hbase.client.TableDescriptor;
-105import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-106import org.apache.hadoop.hbase.client.TableState;
-107import org.apache.hadoop.hbase.io.FileLink;
-108import org.apache.hadoop.hbase.io.HFileLink;
-109import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-110import org.apache.hadoop.hbase.io.hfile.HFile;
-111import org.apache.hadoop.hbase.log.HBaseMarkers;
-112import org.apache.hadoop.hbase.master.MasterFileSystem;
-113import org.apache.hadoop.hbase.master.RegionState;
-114import org.apache.hadoop.hbase.regionserver.HRegion;
-115import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-116import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-117import org.apache.hadoop.hbase.replication.ReplicationException;
-118import org.apache.hadoop.hbase.security.AccessDeniedException;
-119import org.apache.hadoop.hbase.security.UserProvider;
-120import org.apache.hadoop.hbase.util.Bytes.ByteArrayComparator;
-121import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
-122import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
-123import org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
-124import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
-125import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-126import org.apache.hadoop.hbase.wal.WAL;
-127import org.apache.hadoop.hbase.wal.WALFactory;
-128import org.apache.hadoop.hbase.wal.WALSplitter;
-129import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-130import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-131import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-132import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-133import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-134import org.apache.hadoop.ipc.RemoteException;
-135import

[19/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.CheckRegionConsistencyWorkItem.html
@@ -2113,3031 +2113,3033 @@
 2105            errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106                tableName + " unable to delete dangling table state " + tableState);
 2107          }
-2108        } else {
-2109          errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110              tableName + " has dangling table state " + tableState);
-2111        }
-2112      }
-2113    }
-2114    // check that all tables have states
-2115    for (TableName tableName : tablesInfo.keySet()) {
-2116      if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-2117        if (fixMeta) {
-2118          MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED);
-2119          TableState newState = MetaTableAccessor.getTableState(connection, tableName);
-2120          if (newState == null) {
-2121            errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122                "Unable to change state for table " + tableName + " in meta ");
-2123          }
-2124        } else {
-2125          errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126              tableName + " has no state in meta ");
-2127        }
-2128      }
-2129    }
-2130  }
-2131
-2132  private void preCheckPermission() throws IOException, AccessDeniedException {
-2133    if (shouldIgnorePreCheckPermission()) {
-2134      return;
-2135    }
-2136
-2137    Path hbaseDir = FSUtils.getRootDir(getConf());
-2138    FileSystem fs = hbaseDir.getFileSystem(getConf());
-2139    UserProvider userProvider = UserProvider.instantiate(getConf());
-2140    UserGroupInformation ugi = userProvider.getCurrent().getUGI();
-2141    FileStatus[] files = fs.listStatus(hbaseDir);
-2142    for (FileStatus file : files) {
-2143      try {
-2144        FSUtils.checkAccess(ugi, file, FsAction.WRITE);
-2145      } catch (AccessDeniedException ace) {
-2146        LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
-2147        errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
-2148          + " does not have write perms to " + file.getPath()
-2149          + ". Please rerun hbck as hdfs user " + file.getOwner());
-2150        throw ace;
-2151      }
-2152    }
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo hi) throws IOException {
-2159    deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] metaKey) throws IOException {
-2166    Delete d = new Delete(metaKey);
-2167    meta.delete(d);
-2168    LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo hi) throws IOException {
-2175    RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
-2176    Delete d = new Delete(hi.metaEntry.getRegionName());
-2177    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179    mutations.add(d);
-2180
-2181    RegionInfo hri = RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182        .setOffline(false)
-2183        .setSplit(false)
-2184        .build();
-2185    Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
-2186    mutations.add(p);
-2187
-2188    meta.mutateRow(mutations);
-2189    LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This backwards-compatibility wrapper for permanently offlining a region
-2194   * that should not be alive.  If the region server does not support the
-2195   * "offline" method, it will use the closest unassign method instead.  This
-2196   * will basically work until one attempts to disable or delete the affected
-2197   * table.  The problem has to do with in-memory only master state, so
-2198   * restarting the HMaster or failing over to another should fix this.
-2199   */
-2200  private void offline(byte[] regionName) throws IOException {
-2201    String regionString =
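
resetSplitParent above groups a Delete and a Put on the same meta row into one RowMutations and applies both atomically with mutateRow. A sketch of that pattern against an arbitrary table; the family, qualifier and value names here are illustrative only:

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

class ResetRowSketch {
  static void resetRow(Table table, byte[] row) throws java.io.IOException {
    RowMutations mutations = new RowMutations(row);
    Delete d = new Delete(row);
    d.addColumn(Bytes.toBytes("info"), Bytes.toBytes("splitA")); // drop a stale marker
    mutations.add(d);
    Put p = new Put(row);
    p.addColumn(Bytes.toBytes("info"), Bytes.toBytes("state"), Bytes.toBytes("OPEN"));
    mutations.add(p);
    table.mutateRow(mutations); // both changes commit together, or neither does
  }
}

Doing the Delete and Put as separate calls would leave a window where a reader sees the row with the marker removed but the new state not yet written.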

[19/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

2018-03-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index d7aa8b1..98a45a0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
@@ -680,1330 +680,1333 @@
 672    }
 673    List<HRegionLocation> locations = new ArrayList<>();
 674    for (RegionInfo regionInfo : regions) {
-675      RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
-676      if (list != null) {
-677        for (HRegionLocation loc : list.getRegionLocations()) {
-678          if (loc != null) {
-679            locations.add(loc);
-680          }
-681        }
-682      }
-683    }
-684    return locations;
-685  }
-686
-687  @Override
-688  public HRegionLocation locateRegion(final TableName tableName, final byte[] row)
-689      throws IOException {
-690    RegionLocations locations = locateRegion(tableName, row, true, true);
-691    return locations == null ? null : locations.getRegionLocation();
-692  }
-693
-694  @Override
-695  public HRegionLocation relocateRegion(final TableName tableName, final byte[] row)
-696      throws IOException {
-697    RegionLocations locations =
-698      relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-699    return locations == null ? null
-700      : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
-701  }
-702
-703  @Override
-704  public RegionLocations relocateRegion(final TableName tableName,
-705      final byte [] row, int replicaId) throws IOException{
-706    // Since this is an explicit request not to use any caching, finding
-707    // disabled tables should not be desirable.  This will ensure that an exception is thrown when
-708    // the first time a disabled table is interacted with.
-709    if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) {
-710      throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
-711    }
-712
-713    return locateRegion(tableName, row, false, true, replicaId);
-714  }
+675      if (!RegionReplicaUtil.isDefaultReplica(regionInfo)) {
+676        continue;
+677      }
+678      RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
+679      if (list != null) {
+680        for (HRegionLocation loc : list.getRegionLocations()) {
+681          if (loc != null) {
+682            locations.add(loc);
+683          }
+684        }
+685      }
+686    }
+687    return locations;
+688  }
+689
+690  @Override
+691  public HRegionLocation locateRegion(final TableName tableName, final byte[] row)
+692      throws IOException {
+693    RegionLocations locations = locateRegion(tableName, row, true, true);
+694    return locations == null ? null : locations.getRegionLocation();
+695  }
+696
+697  @Override
+698  public HRegionLocation relocateRegion(final TableName tableName, final byte[] row)
+699      throws IOException {
+700    RegionLocations locations =
+701      relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID);
+702    return locations == null ? null
+703      : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
+704  }
+705
+706  @Override
+707  public RegionLocations relocateRegion(final TableName tableName,
+708      final byte [] row, int replicaId) throws IOException{
+709    // Since this is an explicit request not to use any caching, finding
+710    // disabled tables should not be desirable.  This will ensure that an exception is thrown when
+711    // the first time a disabled table is interacted with.
+712    if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) {
+713      throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
+714    }
 715
-716  @Override
-717  public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
-718      boolean retry) throws IOException {
-719    return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID);
-720  }
-721
-722  @Override
-723  public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache,
-724      boolean retry, int replicaId) throws IOException {
-725    checkClosed();
-726    if (tableName == null || tableName.getName().length == 0) {
-727      throw new IllegalArgumentException("table name cannot be null or zero length");
-728    }
-729    if
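
The + side above skips non-default replicas before locating: a start-key lookup already returns the locations of every replica of that region, so iterating the secondaries as well would add duplicates. A sketch of the filter, with Region as a simplified stand-in for RegionInfo/RegionReplicaUtil:

import java.util.ArrayList;
import java.util.List;

class ReplicaFilterSketch {
  static class Region {
    final int replicaId;
    Region(int replicaId) { this.replicaId = replicaId; }
    boolean isDefaultReplica() { return replicaId == 0; }
  }

  static List<Region> defaultReplicasOnly(List<Region> regions) {
    List<Region> out = new ArrayList<>();
    for (Region region : regions) {
      if (!region.isDefaultReplica()) {
        continue; // secondary replica: already covered by the default replica's lookup
      }
      out.add(region);
    }
    return out;
  }
}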

[19/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

2018-03-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
index 22280e4..cf0947e 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Cell.html
@@ -778,13 +778,6 @@ service.

 int
-CellComparator.compare(Cell leftCell, Cell rightCell)
-Lexicographically compares two cells.
-
 KeyValue.MetaComparator.compare(Cell left, Cell right)
 Deprecated.
@@ -792,7 +785,7 @@ service.
  table.

 int
 KeyValue.KVComparator.compare(Cell left, Cell right)
 Deprecated.
  rowkey, colfam/qual, timestamp, type, mvcc

+int
+CellComparator.compare(Cell leftCell, Cell rightCell)
+Lexicographically compares two cells.

 int
 CellComparatorImpl.compare(Cell a,
@@ -1032,18 +1032,18 @@ service.

 int
-CellComparator.compareRows(Cell leftCell, Cell rightCell)
-Lexicographically compares the rows of two cells.
-
 KeyValue.KVComparator.compareRows(Cell left, Cell right)
 Deprecated.

+int
+CellComparator.compareRows(Cell leftCell, Cell rightCell)
+Lexicographically compares the rows of two cells.

 int
 CellComparatorImpl.compareRows(Cell left,
@@ -1058,18 +1058,18 @@ service.

 int
-CellComparator.compareTimestamps(Cell leftCell, Cell rightCell)
-Compares cell's timestamps in DESCENDING order.
-
 KeyValue.KVComparator.compareTimestamps(Cell left, Cell right)
 Deprecated.

+int
+CellComparator.compareTimestamps(Cell leftCell, Cell rightCell)
+Compares cell's timestamps in DESCENDING order.

 int
 CellComparatorImpl.compareTimestamps(Cell left,
@@ -2417,11 +2417,11 @@ service.

 private Cell
-AllowPartialScanResultCache.lastCell
+BatchScanResultCache.lastCell

 private Cell
-BatchScanResultCache.lastCell
+AllowPartialScanResultCache.lastCell

@@ -2529,25 +2529,25 @@ service.

-Append
-Append.add(Cell cell)
-Add column and value to this Append operation.
+Increment
+Increment.add(Cell cell)
+Add the specified KeyValue to this operation.

-(package private) Mutation
-Mutation.add(Cell cell)
-
 Delete
 Delete.add(Cell cell)
 Add an existing delete marker to this Delete object.

+(package private) Mutation
+Mutation.add(Cell cell)

-Increment
-Increment.add(Cell cell)
-Add the specified KeyValue to this operation.
+Append
+Append.add(Cell cell)
+Add column and value to this Append operation.

@@ -2645,38 +2645,38 @@ service.
   boolean mayHaveMoreCellsInRow)

-Append
-Append.setFamilyCellMap(NavigableMap<byte[], List<Cell>> map)
+Increment
+Increment.setFamilyCellMap(NavigableMap<byte[], List<Cell>> map)
 Deprecated.
 As of release 2.0.0, this will be removed in HBase 3.0.0.
- Use Append.Append(byte[], long, NavigableMap) instead
+ Use Increment.Increment(byte[], long, NavigableMap) instead

-Mutation
-Mutation.setFamilyCellMap(NavigableMap<byte[], List<Cell>> map)
+Delete
+Delete.setFamilyCellMap(NavigableMap<byte[], List<Cell>> map)
 Deprecated.
 As of release 2.0.0, this will be removed in HBase 3.0.0.
- Use Mutation.Mutation(byte[], long, NavigableMap) instead
+ Use Delete.Delete(byte[], long, NavigableMap) instead

-Delete
-Delete.setFamilyCellMap(NavigableMap<byte[], List<Cell>> map)
+Mutation
+Mutation.setFamilyCellMap(NavigableMap<byte[], List<Cell>>
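
The comparator entries above state the ordering contract: rows compare lexicographically ascending, while compareTimestamps sorts DESCENDING so newer cells come first within a row. A sketch of that combined ordering; SimpleCell is a hypothetical stand-in for Cell:

import java.util.Comparator;

class CellOrderSketch {
  static class SimpleCell {
    final byte[] row;
    final long timestamp;
    SimpleCell(byte[] row, long timestamp) { this.row = row; this.timestamp = timestamp; }
  }

  static final Comparator<SimpleCell> ORDER = (a, b) -> {
    int d = compareBytes(a.row, b.row);            // rows: ascending, lexicographic
    if (d != 0) {
      return d;
    }
    return Long.compare(b.timestamp, a.timestamp); // timestamps: descending (newest first)
  };

  static int compareBytes(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);       // compare bytes as unsigned values
      if (d != 0) {
        return d;
      }
    }
    return a.length - b.length;                    // shorter prefix sorts first
  }
}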

[19/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

2018-03-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
index f794fc9..21dd94d 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
@@ -106,7 +106,7 @@

 private RegionLocateType
-AsyncSingleRequestRpcRetryingCaller.locateType
+AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType

 RegionLocateType
@@ -114,7 +114,7 @@

 private RegionLocateType
-AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType
+AsyncSingleRequestRpcRetryingCaller.locateType

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
index f6e7bf3..195c3ee 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
@@ -230,14 +230,14 @@ service.

 private RegionLocator
-HFileOutputFormat2.TableInfo.regionLocator
-
-private RegionLocator
 TableInputFormatBase.regionLocator
 The RegionLocator of the table.

+private RegionLocator
+HFileOutputFormat2.TableInfo.regionLocator

@@ -248,15 +248,15 @@ service.

-RegionLocator
-HFileOutputFormat2.TableInfo.getRegionLocator()
-
 protected RegionLocator
 TableInputFormatBase.getRegionLocator()
 Allows subclasses to get the RegionLocator.

+RegionLocator
+HFileOutputFormat2.TableInfo.getRegionLocator()
 
 
 



[19/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

2018-03-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
index 40bbdbf..e4177f7 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionInfo.html
@@ -495,7 +495,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
+MetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -504,7 +504,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
+AsyncMetaTableAccessor.getRegionLocation(Result r, RegionInfo regionInfo, int replicaId)
 Returns the HRegionLocation parsed from the given meta row Result
@@ -941,7 +941,9 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

 CompletableFuture<List<RegionInfo>>
-AsyncHBaseAdmin.getRegions(ServerName serverName)
+AsyncAdmin.getRegions(ServerName serverName)
+Get all the online regions on a region server.
+

 List<RegionInfo>
@@ -950,22 +952,22 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

-List<RegionInfo>
-HBaseAdmin.getRegions(ServerName sn)
+CompletableFuture<List<RegionInfo>>
+RawAsyncHBaseAdmin.getRegions(ServerName serverName)

-CompletableFuture<List<RegionInfo>>
-AsyncAdmin.getRegions(ServerName serverName)
-Get all the online regions on a region server.
-
+List<RegionInfo>
+HBaseAdmin.getRegions(ServerName sn)

 CompletableFuture<List<RegionInfo>>
-AsyncHBaseAdmin.getRegions(TableName tableName)
+AsyncAdmin.getRegions(TableName tableName)
+Get the regions of a given table.
+

 List<RegionInfo>
@@ -974,18 +976,16 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.

-List<RegionInfo>
-HBaseAdmin.getRegions(TableName tableName)
+CompletableFuture<List<RegionInfo>>
+RawAsyncHBaseAdmin.getRegions(TableName tableName)

-CompletableFuture<List<RegionInfo>>
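
The table above contrasts the blocking Admin surface with the CompletableFuture-based AsyncAdmin surface. A sketch of calling both, using only the getRegions signatures shown in the entries; the printing is illustrative:

import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.RegionInfo;

class GetRegionsSketch {
  static void printRegions(Admin admin, AsyncAdmin asyncAdmin, ServerName sn)
      throws java.io.IOException {
    // Blocking call: the caller's thread waits for the region list.
    List<RegionInfo> regions = admin.getRegions(sn);
    regions.forEach(r -> System.out.println(r.getRegionNameAsString()));

    // Async call: the future completes later, off the caller's thread.
    asyncAdmin.getRegions(sn)
        .thenAccept(rs -> rs.forEach(r -> System.out.println(r.getRegionNameAsString())));
  }
}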

[19/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

2018-03-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index ecf500c..0cd5a4e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -238,8355 +238,8368 @@
 230  public static final String HBASE_MAX_CELL_SIZE_KEY = "hbase.server.keyvalue.maxsize";
 231  public static final int DEFAULT_MAX_CELL_SIZE = 10485760;
 232
-233  public static final String HBASE_REGIONSERVER_MINIBATCH_SIZE =
-234      "hbase.regionserver.minibatch.size";
-235  public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
-236
-237  /**
-238   * This is the global default value for durability. All tables/mutations not
-239   * defining a durability or using USE_DEFAULT will default to this value.
-240   */
-241  private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL;
+233  /**
+234   * This is the global default value for durability. All tables/mutations not
+235   * defining a durability or using USE_DEFAULT will default to this value.
+236   */
+237  private static final Durability DEFAULT_DURABILITY = Durability.SYNC_WAL;
+238
+239  public static final String HBASE_REGIONSERVER_MINIBATCH_SIZE =
+240      "hbase.regionserver.minibatch.size";
+241  public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE = 2;
 242
-243  final AtomicBoolean closed = new AtomicBoolean(false);
-244
-245  /* Closing can take some time; use the closing flag if there is stuff we don't
-246   * want to do while in closing state; e.g. like offer this region up to the
-247   * master as a region to close if the carrying regionserver is overloaded.
-248   * Once set, it is never cleared.
-249   */
-250  final AtomicBoolean closing = new AtomicBoolean(false);
-251
-252  /**
-253   * The max sequence id of flushed data on this region. There is no edit in memory that is
-254   * less than this sequence id.
-255   */
-256  private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM;
-257
-258  /**
-259   * Record the sequence id of last flush operation. Can be in advance of
-260   * {@link #maxFlushedSeqId} when flushing a single column family. In this case,
-261   * {@link #maxFlushedSeqId} will be older than the oldest edit in memory.
-262   */
-263  private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM;
-264
-265  /**
-266   * The sequence id of the last replayed open region event from the primary region. This is used
-267   * to skip entries before this due to the possibility of replay edits coming out of order from
-268   * replication.
-269   */
-270  protected volatile long lastReplayedOpenRegionSeqId = -1L;
-271  protected volatile long lastReplayedCompactionSeqId = -1L;
-272
-273  //
-274  // Members
-275  //
-276
-277  // map from a locked row to the context for that lock including:
-278  // - CountDownLatch for threads waiting on that row
-279  // - the thread that owns the lock (allow reentrancy)
-280  // - reference count of (reentrant) locks held by the thread
-281  // - the row itself
-282  private final ConcurrentHashMap<HashedBytes, RowLockContext> lockedRows =
-283      new ConcurrentHashMap<>();
-284
-285  protected final Map<byte[], HStore> stores =
-286      new ConcurrentSkipListMap<>(Bytes.BYTES_RAWCOMPARATOR);
+243  public static final String WAL_HSYNC_CONF_KEY = "hbase.wal.hsync";
+244  public static final boolean DEFAULT_WAL_HSYNC = false;
+245
+246  final AtomicBoolean closed = new AtomicBoolean(false);
+247
+248  /* Closing can take some time; use the closing flag if there is stuff we don't
+249   * want to do while in closing state; e.g. like offer this region up to the
+250   * master as a region to close if the carrying regionserver is overloaded.
+251   * Once set, it is never cleared.
+252   */
+253  final AtomicBoolean closing = new AtomicBoolean(false);
+254
+255  /**
+256   * The max sequence id of flushed data on this region. There is no edit in memory that is
+257   * less than this sequence id.
+258   */
+259  private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM;
+260
+261  /**
+262   * Record the sequence id of last flush operation. Can be in advance of
+263   * {@link #maxFlushedSeqId} when flushing a single column family. In this case,
+264   * {@link #maxFlushedSeqId} will be older than the oldest edit in memory.
+265   */
+266  private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM;
+267
+268  /**
+269   * The sequence id of the last replayed open

[19/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
index 74fbf67..33418d0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
@@ -27,287 +27,296 @@
 019package org.apache.hadoop.hbase.io.hfile.bucket;
 020
 021import java.io.File;
-022import java.io.FileNotFoundException;
-023import java.io.IOException;
-024import java.io.RandomAccessFile;
-025import java.nio.ByteBuffer;
-026import java.nio.channels.ClosedChannelException;
-027import java.nio.channels.FileChannel;
-028import java.util.Arrays;
-029import org.apache.hadoop.hbase.io.hfile.Cacheable;
-030import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-031import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-032import org.apache.hadoop.hbase.nio.ByteBuff;
-033import org.apache.hadoop.hbase.nio.SingleByteBuff;
-034import org.apache.hadoop.util.StringUtils;
-035import org.apache.yetus.audience.InterfaceAudience;
-036import org.slf4j.Logger;
-037import org.slf4j.LoggerFactory;
-038
-039import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-040import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-041
-042/**
-043 * IO engine that stores data to a file on the local file system.
-044 */
-045@InterfaceAudience.Private
-046public class FileIOEngine implements IOEngine {
-047  private static final Logger LOG = LoggerFactory.getLogger(FileIOEngine.class);
-048  public static final String FILE_DELIMITER = ",";
-049  private final String[] filePaths;
-050  private final FileChannel[] fileChannels;
-051  private final RandomAccessFile[] rafs;
-052
-053  private final long sizePerFile;
-054  private final long capacity;
-055
-056  private FileReadAccessor readAccessor = new FileReadAccessor();
-057  private FileWriteAccessor writeAccessor = new FileWriteAccessor();
-058
-059  public FileIOEngine(long capacity, boolean maintainPersistence, String... filePaths)
-060      throws IOException {
-061    this.sizePerFile = capacity / filePaths.length;
-062    this.capacity = this.sizePerFile * filePaths.length;
-063    this.filePaths = filePaths;
-064    this.fileChannels = new FileChannel[filePaths.length];
-065    if (!maintainPersistence) {
-066      for (String filePath : filePaths) {
-067        File file = new File(filePath);
-068        if (file.exists()) {
-069          if (LOG.isDebugEnabled()) {
-070            LOG.debug("File " + filePath + " already exists. Deleting!!");
-071          }
-072          file.delete();
-073          // If deletion fails still we can manage with the writes
-074        }
-075      }
-076    }
-077    this.rafs = new RandomAccessFile[filePaths.length];
-078    for (int i = 0; i < filePaths.length; i++) {
-079      String filePath = filePaths[i];
-080      try {
-081        rafs[i] = new RandomAccessFile(filePath, "rw");
-082        long totalSpace = new File(filePath).getTotalSpace();
-083        if (totalSpace < sizePerFile) {
-084          // The next setting length will throw exception,logging this message
-085          // is just used for the detail reason of exception,
-086          String msg = "Only " + StringUtils.byteDesc(totalSpace)
-087              + " total space under " + filePath + ", not enough for requested "
-088              + StringUtils.byteDesc(sizePerFile);
-089          LOG.warn(msg);
-090        }
-091        rafs[i].setLength(sizePerFile);
-092        fileChannels[i] = rafs[i].getChannel();
-093        LOG.info("Allocating cache " + StringUtils.byteDesc(sizePerFile)
-094            + ", on the path:" + filePath);
-095      } catch (IOException fex) {
-096        LOG.error("Failed allocating cache on " + filePath, fex);
-097        shutdown();
-098        throw fex;
-099      }
-100    }
-101  }
-102
-103  @Override
-104  public String toString() {
-105    return "ioengine=" + this.getClass().getSimpleName() + ", paths="
-106        + Arrays.asList(filePaths) + ", capacity=" + String.format("%,d", this.capacity);
-107  }
-108
-109  /**
-110   * File IO engine is always able to support persistent storage for the cache
-111   * @return true
-112   */
-113  @Override
-114  public boolean isPersistent() {
-115    return true;
-116  }
-117
-118  /**
-119   * Transfers data from file to the given byte buffer
-120   * @param offset The offset in the file where the first byte to be read
-121   * @param length The length of buffer that should be allocated for reading
-122
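
The constructor above stripes the cache capacity evenly across the given files, rounding down so every file gets the same length. A sketch of that bookkeeping plus a hypothetical helper mapping a global offset to a (file index, local offset) pair; the helper is not shown in the hunk and is only an illustration:

class StripeSketch {
  final long sizePerFile;
  final long capacity;
  final int numFiles;

  StripeSketch(long requestedCapacity, int numFiles) {
    this.numFiles = numFiles;
    this.sizePerFile = requestedCapacity / numFiles; // per-file allocation
    this.capacity = this.sizePerFile * numFiles;     // rounded-down usable total
  }

  // Hypothetical: translate a global cache offset into file coordinates.
  long[] toFileOffset(long globalOffset) {
    int fileIndex = (int) (globalOffset / sizePerFile);
    long localOffset = globalOffset % sizePerFile;
    return new long[] { fileIndex, localOffset };
  }
}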

[19/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

2018-03-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
index 05c0542..2d09bf8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/TableDescriptorBuilder.ModifyableTableDescriptor.html
@@ -35,1393 +35,1419 @@
 027import java.util.HashSet;
 028import java.util.List;
 029import java.util.Map;
-030import java.util.Set;
-031import java.util.TreeMap;
-032import java.util.TreeSet;
-033import java.util.function.Function;
-034import java.util.regex.Matcher;
-035import org.apache.hadoop.fs.Path;
-036import 
org.apache.hadoop.hbase.Coprocessor;
-037import 
org.apache.hadoop.hbase.HConstants;
-038import 
org.apache.hadoop.hbase.TableName;
-039import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-040import 
org.apache.hadoop.hbase.security.User;
-041import 
org.apache.hadoop.hbase.util.Bytes;
-042import 
org.apache.yetus.audience.InterfaceAudience;
-043import org.slf4j.Logger;
-044import org.slf4j.LoggerFactory;
-045
-046import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-047import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-048
-049/**
-050 * @since 2.0.0
-051 */
-052@InterfaceAudience.Public
-053public class TableDescriptorBuilder {
-054  public static final Logger LOG = 
LoggerFactory.getLogger(TableDescriptorBuilder.class);
-055  @InterfaceAudience.Private
-056  public static final String SPLIT_POLICY 
= "SPLIT_POLICY";
-057  private static final Bytes 
SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
-058  /**
-059   * Used by HBase Shell interface to 
access this metadata
-060   * attribute which denotes the maximum 
size of the store file after which a
-061   * region split occurs.
-062   */
-063  @InterfaceAudience.Private
-064  public static final String MAX_FILESIZE 
= "MAX_FILESIZE";
-065  private static final Bytes 
MAX_FILESIZE_KEY
-066  = new 
Bytes(Bytes.toBytes(MAX_FILESIZE));
-067
-068  @InterfaceAudience.Private
-069  public static final String OWNER = 
"OWNER";
+030import java.util.Objects;
+031import java.util.Optional;
+032import java.util.Set;
+033import java.util.TreeMap;
+034import java.util.TreeSet;
+035import java.util.function.Function;
+036import java.util.regex.Matcher;
+037import java.util.regex.Pattern;
+038import 
org.apache.hadoop.hbase.Coprocessor;
+039import 
org.apache.hadoop.hbase.HConstants;
+040import 
org.apache.hadoop.hbase.TableName;
+041import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
+042import 
org.apache.hadoop.hbase.security.User;
+043import 
org.apache.hadoop.hbase.util.Bytes;
+044import 
org.apache.yetus.audience.InterfaceAudience;
+045import org.slf4j.Logger;
+046import org.slf4j.LoggerFactory;
+047
+048import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+049import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+050
+051/**
+052 * @since 2.0.0
+053 */
+054@InterfaceAudience.Public
+055public class TableDescriptorBuilder {
+056  public static final Logger LOG = 
LoggerFactory.getLogger(TableDescriptorBuilder.class);
+057  @InterfaceAudience.Private
+058  public static final String SPLIT_POLICY 
= "SPLIT_POLICY";
+059  private static final Bytes 
SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY));
+060  /**
+061   * Used by HBase Shell interface to 
access this metadata
+062   * attribute which denotes the maximum 
size of the store file after which a
+063   * region split occurs.
+064   */
+065  @InterfaceAudience.Private
+066  public static final String MAX_FILESIZE 
= "MAX_FILESIZE";
+067  private static final Bytes 
MAX_FILESIZE_KEY
+068  = new 
Bytes(Bytes.toBytes(MAX_FILESIZE));
+069
 070  @InterfaceAudience.Private
-071  public static final Bytes OWNER_KEY
-072  = new 
Bytes(Bytes.toBytes(OWNER));
-073
-074  /**
-075   * Used by rest interface to access 
this metadata attribute
-076   * which denotes if the table is Read 
Only.
-077   */
-078  @InterfaceAudience.Private
-079  public static final String READONLY = 
"READONLY";
-080  private static final Bytes 
READONLY_KEY
-081  = new 
Bytes(Bytes.toBytes(READONLY));
-082
-083  /**
-084   * Used by HBase Shell interface to 
access this metadata
-085   * attribute which denotes if the table 
is compaction enabled.
-086   */
-087  @InterfaceAudience.Private
-088  public static final String 
COMPACTION_ENABLED = "COMPACTION_ENABLED";
-089  private static final Bytes 
COMPACTION_ENABLED_KEY
-090  = new 
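For readers following along: the metadata keys in this diff (SPLIT_POLICY, MAX_FILESIZE, READONLY, COMPACTION_ENABLED) are normally set through the builder rather than through the raw Bytes keys. A small usage sketch, assuming the HBase 2.0 client API and a hypothetical table name "demo":

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class BuildDescriptor {
  public static void main(String[] args) {
    // setMaxFileSize/setReadOnly/setCompactionEnabled store their values under
    // the MAX_FILESIZE/READONLY/COMPACTION_ENABLED metadata keys shown above.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setMaxFileSize(10L * 1024 * 1024 * 1024) // split regions past ~10 GB store files
        .setReadOnly(false)
        .setCompactionEnabled(true)
        .build();
    System.out.println(td);
  }
}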

[19/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

2018-03-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/nio/MultiByteBuff.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/nio/MultiByteBuff.html 
b/devapidocs/org/apache/hadoop/hbase/nio/MultiByteBuff.html
index e2c6511..b425534 100644
--- a/devapidocs/org/apache/hadoop/hbase/nio/MultiByteBuff.html
+++ b/devapidocs/org/apache/hadoop/hbase/nio/MultiByteBuff.html
@@ -912,7 +912,7 @@ extends 
 
 getShort
-private short getShort(int index,
+private short getShort(int index,
 int itemIndex)
 
 
@@ -922,7 +922,7 @@ extends 
 
 getLong
-private long getLong(int index,
+private long getLong(int index,
  int itemIndex)
 
 
@@ -932,7 +932,7 @@ extends 
 
 getLong
-public long getLong(int index)
+public long getLong(int index)
 Fetches the long at the given index. Does not change 
position of the underlying ByteBuffers
 
 Specified by:
@@ -950,7 +950,7 @@ extends 
 
 getLongAfterPosition
-public long getLongAfterPosition(int offset)
+public long getLongAfterPosition(int offset)
 Description copied from 
class:ByteBuff
 Fetches the long value at the given offset from current 
position. Does not change position
  of the underlying ByteBuffers.
@@ -968,7 +968,7 @@ extends 
 
 position
-public int position()
+public int position()
 
 Specified by:
 positionin
 classByteBuff
@@ -983,7 +983,7 @@ extends 
 
 position
-public MultiByteBuff position(int position)
+public MultiByteBuff position(int position)
 Sets this MBB's position to the given value.
 
 Specified by:
@@ -1001,7 +1001,7 @@ extends 
 
 rewind
-public MultiByteBuff rewind()
+public MultiByteBuff rewind()
 Rewinds this MBB and the position is set to 0
 
 Specified by:
@@ -1017,7 +1017,7 @@ extends 
 
 mark
-public MultiByteBuff mark()
+public MultiByteBuff mark()
 Marks the current position of the MBB
 
 Specified by:
@@ -1033,7 +1033,7 @@ extends 
 
 reset
-public MultiByteBuff reset()
+public MultiByteBuff reset()
 Similar to https://docs.oracle.com/javase/8/docs/api/java/nio/ByteBuffer.html?is-external=true;
 title="class or interface in java.nio">ByteBuffer.reset(), 
ensures that this MBB
  is reset back to last marked position.
 
@@ -1050,7 +1050,7 @@ extends 
 
 remaining
-public int remaining()
+public int remaining()
 Returns the number of elements between the current position 
and the
  limit.
 
@@ -1067,7 +1067,7 @@ extends 
 
 hasRemaining
-public final boolean hasRemaining()
+public final boolean hasRemaining()
 Returns true if there are elements between the current 
position and the limit
 
 Specified by:
@@ -1083,7 +1083,7 @@ extends 
 
 get
-public byte get()
+public byte get()
 A relative method that returns byte at the current 
position.  Increments the
  current position by the size of a byte.
 
@@ -1100,7 +1100,7 @@ extends 
 
 getShort
-public short getShort()
+public short getShort()
 Returns the short value at the current position. Also 
advances the position by the size
  of short
 
@@ -1117,7 +1117,7 @@ extends 
 
 getInt
-public int getInt()
+public int getInt()
 Returns the int value at the current position. Also 
advances the position by the size of int
 
 Specified by:
@@ -1133,7 +1133,7 @@ extends 
 
 getLong
-public long getLong()
+public long getLong()
 Returns the long value at the current position. Also 
advances the position by the size of long
 
 Specified by:
@@ -1149,7 +1149,7 @@ extends 
 
 get
-public void get(byte[] dst)
+public void get(byte[] dst)
 Copies the content from this MBB's current position to the 
byte array and fills it. Also
  advances the position of the MBB by the length of the byte[].
 
@@ -1166,7 +1166,7 @@ extends 
 
 get
-public void get(byte[] dst,
+public void get(byte[] dst,
 int offset,
 int length)
 Copies the specified number of bytes from this MBB's 
current position to the byte[]'s offset.
@@ -1187,7 +1187,7 @@ extends 
 
 get
-public void get(int sourceOffset,
+public void get(int sourceOffset,
 byte[] dst,
 int offset,
 int length)
@@ -1211,7 +1211,7 @@ extends 
 
 limit
-public MultiByteBuff limit(int limit)
+public MultiByteBuff limit(int limit)
 Marks the limit of this MBB.
 
 Specified by:
@@ -1229,7 +1229,7 @@ extends 
 
 limit
-public int limit()
+public int limit()
 Returns the limit of this MBB
 
 Specified by:
@@ -1245,7 +1245,7 @@ extends 
 
 slice
-public MultiByteBuff slice()
+public MultiByteBuff slice()
 Returns an MBB which is a sliced version of this MBB. The 
position, limit and mark
  of the new MBB will be independent of those of the original MBB.
  The content of the new MBB will start at this MBB's current position
@@ -1263,7 +1263,7 @@ extends 
 
 duplicate
-public MultiByteBuff duplicate()
+public MultiByteBuff duplicate()
 Returns an MBB which is a duplicate version of this MBB. 
The position, limit and mark
  of the new MBB will be independent of those of the original MBB.
  The content of the new MBB will start at this MBB's current 
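To make the absolute-versus-relative access described above concrete, a short sketch against this internal (@InterfaceAudience.Private) class, assuming two backing ByteBuffers:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.MultiByteBuff;

public class MbbDemo {
  public static void main(String[] args) {
    // One logical buffer backed by two physical ByteBuffers.
    MultiByteBuff mbb = new MultiByteBuff(ByteBuffer.allocate(8), ByteBuffer.allocate(8));
    for (int i = 0; i < 16; i++) {
      mbb.put((byte) i);
    }
    mbb.rewind();                 // position back to 0
    long first = mbb.getLong();   // relative read: advances position by 8
    long second = mbb.getLong(8); // absolute read: position is untouched
    System.out.println(first + " " + second + " pos=" + mbb.position());
  }
}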

[19/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

2018-03-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
index 00622a7..4d6f99a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
@@ -126,15 +126,15 @@
 
 
 private RpcRetryingCallerFactory
-ConnectionImplementation.rpcCallerFactory
+RegionCoprocessorRpcChannel.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-HTable.rpcCallerFactory
+ConnectionImplementation.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-RegionCoprocessorRpcChannel.rpcCallerFactory
+HTable.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
@@ -155,21 +155,21 @@
 
 
 RpcRetryingCallerFactory
-ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
-Returns a new RpcRetryingCallerFactory from the given 
Configuration.
-
+ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
 
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
+ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
+Returns a new RpcRetryingCallerFactory from the given 
Configuration.
+
 
 
 RpcRetryingCallerFactory
-ClusterConnection.getRpcRetryingCallerFactory()
+ConnectionImplementation.getRpcRetryingCallerFactory()
 
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getRpcRetryingCallerFactory()
+ClusterConnection.getRpcRetryingCallerFactory()
 
 
 static RpcRetryingCallerFactory

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
index d833faa..f5a73bc 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
@@ -283,14 +283,6 @@ service.
 
 
 private Scan
-AsyncScanSingleRegionRpcRetryingCaller.scan
-
-
-protected Scan
-ScannerCallable.scan
-
-
-private Scan
 ScannerCallableWithReplicas.scan
 
 
@@ -307,6 +299,14 @@ service.
 
 
 private Scan
+AsyncScanSingleRegionRpcRetryingCaller.scan
+
+
+protected Scan
+ScannerCallable.scan
+
+
+private Scan
 TableSnapshotScanner.scan
 
 
@@ -339,11 +339,11 @@ service.
 
 
 protected Scan
-ScannerCallable.getScan()
+ClientScanner.getScan()
 
 
 protected Scan
-ClientScanner.getScan()
+ScannerCallable.getScan()
 
 
 Scan
@@ -638,29 +638,29 @@ service.
 
 
 ResultScanner
-RawAsyncTableImpl.getScanner(Scan scan)
-
-
-ResultScanner
-HTable.getScanner(Scan scan)
-The underlying HTable must 
not be closed.
+AsyncTable.getScanner(Scan scan)
+Returns a scanner on the current table as specified by the 
Scan 
object.
 
 
-
+
 ResultScanner
 Table.getScanner(Scan scan)
 Returns a scanner on the current table as specified by the 
Scan
  object.
 
 
-
+
 ResultScanner
 AsyncTableImpl.getScanner(Scan scan)
 
+
+ResultScanner
+RawAsyncTableImpl.getScanner(Scan scan)
+
 
 ResultScanner
-AsyncTable.getScanner(Scanscan)
-Returns a scanner on the current table as specified by the 
Scan 
object.
+HTable.getScanner(Scanscan)
+The underlying HTable must 
not be closed.
 
 
 
@@ -703,7 +703,9 @@ service.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
-RawAsyncTableImpl.scanAll(Scan scan)
+AsyncTable.scanAll(Scan scan)
+Return all the results that match the given scan 
object.
+
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
@@ -711,9 +713,7 @@ service.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttps://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
-AsyncTable.scanAll(Scan scan)
-Return all the results that match the given scan 
object.
-
+RawAsyncTableImpl.scanAll(Scan scan)
 
 
 private Scan
@@ 
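The getScanner contract shuffled around in this hunk is the same on Table and its async variants. A minimal synchronous sketch, assuming an already-open Connection and a hypothetical table "demo":

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanDemo {
  static void scan(Connection conn) throws Exception {
    // Both Table and ResultScanner are Closeable; keep the Table open while scanning.
    try (Table table = conn.getTable(TableName.valueOf("demo"));
         ResultScanner scanner = table.getScanner(new Scan().addFamily(Bytes.toBytes("cf")))) {
      for (Result r : scanner) { // rows are streamed lazily from the region servers
        System.out.println(Bytes.toString(r.getRow()));
      }
    }
  }
}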

[19/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

2018-03-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
index 21dd94d..f794fc9 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocateType.html
@@ -106,7 +106,7 @@
 
 
 private RegionLocateType
-AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType
+AsyncSingleRequestRpcRetryingCaller.locateType
 
 
 RegionLocateType
@@ -114,7 +114,7 @@
 
 
 private RegionLocateType
-AsyncSingleRequestRpcRetryingCaller.locateType
+AsyncRpcRetryingCallerFactory.SingleRequestCallerBuilder.locateType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
index 195c3ee..f6e7bf3 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/RegionLocator.html
@@ -230,13 +230,13 @@ service.
 
 
 private RegionLocator
-TableInputFormatBase.regionLocator
-The RegionLocator of the 
table.
-
+HFileOutputFormat2.TableInfo.regionLocator
 
 
 private RegionLocator
-HFileOutputFormat2.TableInfo.regionLocator
+TableInputFormatBase.regionLocator
+The RegionLocator of the 
table.
+
 
 
 
@@ -248,15 +248,15 @@ service.
 
 
 
+RegionLocator
+HFileOutputFormat2.TableInfo.getRegionLocator()
+
+
 protected RegionLocator
 TableInputFormatBase.getRegionLocator()
 Allows subclasses to get the RegionLocator.
 
 
-
-RegionLocator
-HFileOutputFormat2.TableInfo.getRegionLocator()
-
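A short sketch of the RegionLocator usage referenced above, assuming an open Connection and a hypothetical table name:

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class Regions {
  static void list(Connection conn) throws Exception {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("demo"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Each location pairs a region with the server currently hosting it.
        System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
      }
    }
  }
}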
 
 
 



[19/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

2018-03-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
index ea5dd32..d19914b 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/HTableDescriptor.html
@@ -33,9 +33,9 @@
 025import java.util.Set;
 026import java.util.stream.Collectors;
 027import java.util.stream.Stream;
-028
-029import org.apache.hadoop.fs.Path;
-030import 
org.apache.yetus.audience.InterfaceAudience;
+028import org.apache.hadoop.fs.Path;
+029import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+030import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor;
 031import 
org.apache.hadoop.hbase.client.Durability;
 032import 
org.apache.hadoop.hbase.client.TableDescriptor;
 033import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
@@ -43,859 +43,866 @@
 035import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
 036import 
org.apache.hadoop.hbase.security.User;
 037import 
org.apache.hadoop.hbase.util.Bytes;
-038import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-039import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor;
-040
-041/**
-042 * HTableDescriptor contains the details 
about an HBase table  such as the descriptors of
-043 * all the column families, is the table 
a catalog table, <code>hbase:meta</code>,
-044 * if the table is read only, the maximum 
size of the memstore,
-045 * when the region split should occur, 
coprocessors associated with it etc...
-046 * @deprecated As of release 2.0.0, this 
will be removed in HBase 3.0.0.
-047 * Use {@link 
TableDescriptorBuilder} to build {@link HTableDescriptor}.
-048 */
-049@Deprecated
-050@InterfaceAudience.Public
-051public class HTableDescriptor implements 
TableDescriptor, Comparable<HTableDescriptor> {
-052  public static final String SPLIT_POLICY 
= TableDescriptorBuilder.SPLIT_POLICY;
-053  public static final String MAX_FILESIZE 
= TableDescriptorBuilder.MAX_FILESIZE;
-054  public static final String OWNER = 
TableDescriptorBuilder.OWNER;
-055  public static final Bytes OWNER_KEY = 
TableDescriptorBuilder.OWNER_KEY;
-056  public static final String READONLY = 
TableDescriptorBuilder.READONLY;
-057  public static final String 
COMPACTION_ENABLED = TableDescriptorBuilder.COMPACTION_ENABLED;
-058  public static final String 
MEMSTORE_FLUSHSIZE = TableDescriptorBuilder.MEMSTORE_FLUSHSIZE;
-059  public static final String FLUSH_POLICY 
= TableDescriptorBuilder.FLUSH_POLICY;
-060  public static final String IS_ROOT = 
"IS_ROOT";
-061  public static final String IS_META = 
TableDescriptorBuilder.IS_META;
-062  public static final String DURABILITY = 
TableDescriptorBuilder.DURABILITY;
-063  public static final String 
REGION_REPLICATION = TableDescriptorBuilder.REGION_REPLICATION;
-064  public static final String 
REGION_MEMSTORE_REPLICATION = 
TableDescriptorBuilder.REGION_MEMSTORE_REPLICATION;
-065  public static final String 
NORMALIZATION_ENABLED = TableDescriptorBuilder.NORMALIZATION_ENABLED;
-066  public static final String PRIORITY = 
TableDescriptorBuilder.PRIORITY;
-067  public static final boolean 
DEFAULT_READONLY = TableDescriptorBuilder.DEFAULT_READONLY;
-068  public static final boolean 
DEFAULT_COMPACTION_ENABLED = 
TableDescriptorBuilder.DEFAULT_COMPACTION_ENABLED;
-069  public static final boolean 
DEFAULT_NORMALIZATION_ENABLED = 
TableDescriptorBuilder.DEFAULT_NORMALIZATION_ENABLED;
-070  public static final long 
DEFAULT_MEMSTORE_FLUSH_SIZE = 
TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE;
-071  public static final int 
DEFAULT_REGION_REPLICATION = 
TableDescriptorBuilder.DEFAULT_REGION_REPLICATION;
-072  public static final boolean 
DEFAULT_REGION_MEMSTORE_REPLICATION = 
TableDescriptorBuilder.DEFAULT_REGION_MEMSTORE_REPLICATION;
-073  protected final 
ModifyableTableDescriptor delegatee;
-074
-075  /**
-076   * Construct a table descriptor 
specifying a TableName object
-077   * @param name Table name.
-078   * @see <a 
href="https://issues.apache.org/jira/browse/HBASE-174">HADOOP-1581 HBASE: 
(HBASE-174) Un-openable tablename bug</a>
-079   */
-080  public HTableDescriptor(final TableName 
name) {
-081this(new 
ModifyableTableDescriptor(name));
-082  }
-083
-084  /**
-085   * Construct a table descriptor by 
cloning the descriptor passed as a parameter.
-086   * <p>
-087   * Makes a deep copy of the supplied 
descriptor.
-088   * Can make a modifiable descriptor 
from an ImmutableHTableDescriptor.
-089   * @param desc The descriptor.
-090   */
-091  public HTableDescriptor(final 
HTableDescriptor desc) {
-092this(desc, true);
-093  }
-094
-095  protected 
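Per the @deprecated note in this hunk, a migration sketch (imports from org.apache.hadoop.hbase and org.apache.hadoop.hbase.client assumed, table name hypothetical):

// Pre-2.0 construction (deprecated since 2.0.0, removed in 3.0.0):
HTableDescriptor legacy = new HTableDescriptor(TableName.valueOf("demo"));

// Replacement suggested by the @deprecated note above:
TableDescriptor modern = TableDescriptorBuilder
    .newBuilder(TableName.valueOf("demo"))
    .build();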

[19/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

2018-03-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/AsyncTableBuilder.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncTableBuilder.html 
b/apidocs/org/apache/hadoop/hbase/client/AsyncTableBuilder.html
index 3e502b3..93c85f0 100644
--- a/apidocs/org/apache/hadoop/hbase/client/AsyncTableBuilder.html
+++ b/apidocs/org/apache/hadoop/hbase/client/AsyncTableBuilder.html
@@ -151,35 +151,35 @@ public interface 
 AsyncTableBuilder<C>
 setOperationTimeout(long timeout,
-   http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+   https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for a whole operation such as get, put or 
delete.
 
 
 
 AsyncTableBuilder<C>
 setReadRpcTimeout(long timeout,
- http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+ https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for each read(get, scan) rpc request.
 
 
 
 AsyncTableBuilder<C>
 setRetryPause(long pause,
- http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+ https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set the base pause time for retrying.
 
 
 
 AsyncTableBuilder<C>
 setRpcTimeout(long timeout,
- http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+ https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for each rpc request.
 
 
 
 AsyncTableBuilder<C>
 setScanTimeout(long timeout,
-  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+  https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 As now we have heartbeat support for scan, ideally a scan 
will never time out unless the RS
 crashes.
 
@@ -193,7 +193,7 @@ public interface 
 AsyncTableBuilder<C>
 setWriteRpcTimeout(long timeout,
-  http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+  https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for each write(put, delete) rpc request.
 
 
@@ -219,7 +219,7 @@ public interface 
 setOperationTimeout
 AsyncTableBuilder<C> setOperationTimeout(long timeout,
- http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+ https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 Set timeout for a whole operation such as get, put or 
delete. Notice that scan will not be
 affected by this value, see scanTimeoutNs.
  
@@ -240,7 +240,7 @@ public interface 
 setScanTimeout
 AsyncTableBuilder<C> setScanTimeout(long timeout,
-http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true;
 title="class or interface in 
java.util.concurrent">TimeUnit unit)
 As now we have heartbeat support for scan, ideally a scan 
will never time out unless the RS
 crashes. The RS will always return something before the rpc timed out or scan 
timed out to tell
  the client that it is still alive. The scan timeout is used as operation 
timeout for every
@@ -258,7 +258,7 @@ public interface 
 setRpcTimeout
 AsyncTableBuilder<C> setRpcTimeout(long timeout,
-   
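Putting the builder methods above together, a usage sketch assuming an open AsyncConnection and a hypothetical table "demo":

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;

public class TimeoutsDemo {
  static AsyncTable<?> build(AsyncConnection conn) {
    // Per the javadoc above, scans are governed by setScanTimeout,
    // not by setOperationTimeout.
    return conn.getTableBuilder(TableName.valueOf("demo"))
        .setOperationTimeout(30, TimeUnit.SECONDS)
        .setRpcTimeout(10, TimeUnit.SECONDS)
        .setScanTimeout(5, TimeUnit.MINUTES)
        .setRetryPause(100, TimeUnit.MILLISECONDS)
        .build();
  }
}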

[19/51] [partial] hbase-site git commit: Published site at .

2018-02-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index bb5c354..73aa8fe 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -105,14 +105,14 @@
 
 org.apache.hadoop.hbase.procedure2.util.DelayedUtil.DelayedContainerWithTimestampT
 
-org.apache.hadoop.hbase.procedure2.ProcedureExecutor.DelayedProcedure
+org.apache.hadoop.hbase.procedure2.DelayedProcedure
 org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.BufferNode 
(implements org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteNodeTEnv,TRemote)
 org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.DelayedTask
 
 
 
 
-org.apache.hadoop.hbase.procedure2.ProcedureExecutor.InlineChore (implements 
java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable)
+org.apache.hadoop.hbase.procedure2.InlineChore (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable)
 
 org.apache.hadoop.hbase.procedure2.ProcedureExecutor.WorkerMonitor
 
@@ -146,13 +146,17 @@
 org.apache.hadoop.hbase.procedure2.RootProcedureState
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html?is-external=true;
 title="class or interface in java.lang">Thread (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Runnable.html?is-external=true;
 title="class or interface in java.lang">Runnable)
 
-org.apache.hadoop.hbase.procedure2.ProcedureExecutor.StoppableThread
+org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.TimeoutExecutorThread
+org.apache.hadoop.hbase.procedure2.StoppableThread
+
+org.apache.hadoop.hbase.procedure2.ProcedureExecutor.WorkerThread
 
-org.apache.hadoop.hbase.procedure2.ProcedureExecutor.TimeoutExecutorThread
-org.apache.hadoop.hbase.procedure2.ProcedureExecutor.WorkerThread
+org.apache.hadoop.hbase.procedure2.ProcedureExecutor.KeepAliveWorkerThread
+
+
+org.apache.hadoop.hbase.procedure2.TimeoutExecutorThread
 
 
-org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.TimeoutExecutorThread
 
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Throwable.html?is-external=true;
 title="class or interface in java.lang">Throwable (implements java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
@@ -208,11 +212,11 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.procedure2.Procedure.LockState
-org.apache.hadoop.hbase.procedure2.LockType
-org.apache.hadoop.hbase.procedure2.LockedResourceType
 org.apache.hadoop.hbase.procedure2.RootProcedureState.State
 org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
+org.apache.hadoop.hbase.procedure2.LockType
+org.apache.hadoop.hbase.procedure2.Procedure.LockState
+org.apache.hadoop.hbase.procedure2.LockedResourceType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/org/apache/hadoop/hbase/procedure2/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-use.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/package-use.html
index aefd9d8..775f6eb 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-use.html
@@ -482,6 +482,14 @@
 BadProcedureException
 
 
+DelayedProcedure
+
+
+InlineChore
+Inline Chores (executors internal chores).
+
+
+
 LockedResource
 
 
@@ -519,37 +527,28 @@
 ProcedureException
 
 
-ProcedureExecutor.CompletedProcedureRetainer
+ProcedureExecutor
+Thread Pool that executes the submitted procedures.
+
 
 
-ProcedureExecutor.DelayedProcedure
+ProcedureExecutor.CompletedProcedureRetainer
 
 
-ProcedureExecutor.InlineChore
-
-
 ProcedureExecutor.ProcedureExecutorListener
 
-
-ProcedureExecutor.StoppableThread
-
 
 ProcedureExecutor.Testing
 
 
-ProcedureExecutor.TimeoutExecutorThread
-Runs task on a period such as check for stuck workers.
-
-
-
 ProcedureExecutor.WorkerThread
 
-
+
 ProcedureInMemoryChore
 Special procedure 

[19/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
index 42ad63c..85d3a6a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.html
@@ -61,220 +61,262 @@
 053  protected 
CellChunkImmutableSegment(CellComparator comparator, MemStoreSegmentsIterator 
iterator,
 054  MemStoreLAB memStoreLAB, int 
numOfCells, MemStoreCompactionStrategy.Action action) {
 055super(null, comparator, memStoreLAB); 
// initialize the CellSet with NULL
-056incSize(0, DEEP_OVERHEAD_CCM); // 
initiate the heapSize with the size of the segment metadata
-057// build the new CellSet based on 
CellArrayMap and update the CellSet of the new Segment
-058initializeCellSet(numOfCells, 
iterator, action);
-059  }
-060
-061  
/**
-062   * C-tor to be used when new 
CellChunkImmutableSegment is built as a result of flattening
-063   * of CSLMImmutableSegment
-064   * The given iterator returns the Cells 
that "survived" the compaction.
-065   */
-066  protected 
CellChunkImmutableSegment(CSLMImmutableSegment segment,
-067  MemStoreSizing memstoreSizing, 
MemStoreCompactionStrategy.Action action) {
-068super(segment); // initiailize the 
upper class
-069
incSize(0,-CSLMImmutableSegment.DEEP_OVERHEAD_CSLM + 
CellChunkImmutableSegment.DEEP_OVERHEAD_CCM);
-070int numOfCells = 
segment.getCellsCount();
-071// build the new CellSet based on 
CellChunkMap
-072reinitializeCellSet(numOfCells, 
segment.getScanner(Long.MAX_VALUE), segment.getCellSet(),
-073action);
-074// arrange the meta-data size, 
decrease all meta-data sizes related to SkipList;
-075// add sizes of CellChunkMap entry, 
decrease also Cell object sizes
-076// (reinitializeCellSet doesn't take 
the care for the sizes)
-077long newSegmentSizeDelta = 
numOfCells*(indexEntrySize()-ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY);
-078
-079incSize(0, newSegmentSizeDelta);
-080memstoreSizing.incMemStoreSize(0, 
newSegmentSizeDelta);
-081  }
-082
-083  @Override
-084  protected long indexEntrySize() {
-085return ((long) 
ClassSize.CELL_CHUNK_MAP_ENTRY - KeyValue.FIXED_OVERHEAD);
-086  }
-087
-088  @Override
-089  protected boolean canBeFlattened() {
-090return false;
-091  }
-092
-093  /  PRIVATE METHODS  
/
-094  
/**/
-095  // Create CellSet based on CellChunkMap 
from compacting iterator
-096  private void initializeCellSet(int 
numOfCells, MemStoreSegmentsIterator iterator,
-097  MemStoreCompactionStrategy.Action 
action) {
-098
-099// calculate how many chunks we will 
need for index
-100int chunkSize = 
ChunkCreator.getInstance().getChunkSize();
-101int numOfCellsInChunk = 
CellChunkMap.NUM_OF_CELL_REPS_IN_CHUNK;
-102int numberOfChunks = 
calculateNumberOfChunks(numOfCells, numOfCellsInChunk);
-103int numOfCellsAfterCompaction = 0;
-104int currentChunkIdx = 0;
-105int offsetInCurentChunk = 
ChunkCreator.SIZEOF_CHUNK_HEADER;
-106int numUniqueKeys=0;
-107Cell prev = null;
-108// all index Chunks are allocated 
from ChunkCreator
-109Chunk[] chunks = new 
Chunk[numberOfChunks];
-110for (int i=0; i < numberOfChunks; 
i++) {
-111  chunks[i] = 
this.getMemStoreLAB().getNewExternalChunk();
-112}
-113while (iterator.hasNext()) {
// the iterator hides the elimination logic for compaction
-114  boolean alreadyCopied = false;
-115  Cell c = iterator.next();
-116  numOfCellsAfterCompaction++;
-117  assert(c instanceof 
ExtendedCell);
-118  if (((ExtendedCell)c).getChunkId() 
== ExtendedCell.CELL_NOT_BASED_ON_CHUNK) {
-119// CellChunkMap assumes all cells 
are allocated on MSLAB.
-120// Therefore, cells which are not 
allocated on MSLAB initially,
-121// are copied into MSLAB here.
-122c = copyCellIntoMSLAB(c);
-123alreadyCopied = true;
-124  }
-125  if (offsetInCurentChunk + 
ClassSize.CELL_CHUNK_MAP_ENTRY > chunkSize) {
-126currentChunkIdx++;  
// continue to the next index chunk
-127offsetInCurentChunk = 
ChunkCreator.SIZEOF_CHUNK_HEADER;
-128  }
-129  if (action == 
MemStoreCompactionStrategy.Action.COMPACT && !alreadyCopied) {
-130// for compaction copy cell to 
the new segment (MSLAB copy)
-131c = maybeCloneWithAllocator(c, 
false);
-132  }
-133  

[19/51] [partial] hbase-site git commit: Published site at .

2018-02-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
index bd13b53..802b925 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
@@ -900,7600 +900,7598 @@
 892if 
(this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
 893  status.setStatus("Writing region 
info on filesystem");
 894  fs.checkRegionInfoOnFilesystem();
-895} else {
-896  if (LOG.isDebugEnabled()) {
-897LOG.debug("Skipping creation of 
.regioninfo file for " + this.getRegionInfo());
-898  }
-899}
-900
-901// Initialize all the HStores
-902status.setStatus("Initializing all 
the Stores");
-903long maxSeqId = 
initializeStores(reporter, status);
-904this.mvcc.advanceTo(maxSeqId);
-905if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-906  Collection<HStore> stores = 
this.stores.values();
-907  try {
-908// update the stores that we are 
replaying
-909
stores.forEach(HStore::startReplayingFromWAL);
-910// Recover any edits if 
available.
-911maxSeqId = Math.max(maxSeqId,
-912  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-913// Make sure mvcc is up to max.
-914this.mvcc.advanceTo(maxSeqId);
-915  } finally {
-916// update the stores that we are 
done replaying
-917
stores.forEach(HStore::stopReplayingFromWAL);
-918  }
-919}
-920this.lastReplayedOpenRegionSeqId = 
maxSeqId;
+895}
+896
+897// Initialize all the HStores
+898status.setStatus("Initializing all 
the Stores");
+899long maxSeqId = 
initializeStores(reporter, status);
+900this.mvcc.advanceTo(maxSeqId);
+901if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+902  Collection<HStore> stores = 
this.stores.values();
+903  try {
+904// update the stores that we are 
replaying
+905
stores.forEach(HStore::startReplayingFromWAL);
+906// Recover any edits if 
available.
+907maxSeqId = Math.max(maxSeqId,
+908  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+909// Make sure mvcc is up to max.
+910this.mvcc.advanceTo(maxSeqId);
+911  } finally {
+912// update the stores that we are 
done replaying
+913
stores.forEach(HStore::stopReplayingFromWAL);
+914  }
+915}
+916this.lastReplayedOpenRegionSeqId = 
maxSeqId;
+917
+918
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+919this.writestate.flushRequested = 
false;
+920this.writestate.compacting.set(0);
 921
-922
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-923this.writestate.flushRequested = 
false;
-924this.writestate.compacting.set(0);
-925
-926if (this.writestate.writesEnabled) 
{
-927  // Remove temporary data left over 
from old regions
-928  status.setStatus("Cleaning up 
temporary data from old regions");
-929  fs.cleanupTempDir();
-930}
-931
-932if (this.writestate.writesEnabled) 
{
-933  status.setStatus("Cleaning up 
detritus from prior splits");
-934  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-935  // these directories here on open.  
We may be opening a region that was
-936  // being split but we crashed in 
the middle of it all.
-937  fs.cleanupAnySplitDetritus();
-938  fs.cleanupMergesDir();
-939}
-940
-941// Initialize split policy
-942this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-943
-944// Initialize flush policy
-945this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-946
-947long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-948for (HStore store: stores.values()) 
{
-949  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-950}
-951
-952// Use maximum of log sequenceid or 
that which was found in stores
-953// (particularly if no recovered 
edits, seqid will be -1).
-954long nextSeqid = maxSeqId;
-955if (this.writestate.writesEnabled) 
{
-956  nextSeqid = 
WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
-957  this.fs.getRegionDir(), 
nextSeqid, 1);
-958} else {
-959  nextSeqid++;
-960}
-961
-962LOG.info("Onlined " + 
this.getRegionInfo().getShortNameToLog() +
-963  "; next sequenceid=" + 
nextSeqid);
+922if (this.writestate.writesEnabled) 
{
+923  // 

[19/51] [partial] hbase-site git commit: Published site at .

2018-02-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
index bb2794a..0c342b2 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.ReturnCode.html
@@ -151,115 +151,115 @@
 
 
 Filter.ReturnCode
-ColumnPrefixFilter.filterCell(Cellcell)
+FilterListWithAND.filterCell(Cellc)
 
 
 Filter.ReturnCode
-ColumnCountGetFilter.filterCell(Cellc)
+ValueFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-RowFilter.filterCell(Cellv)
+SkipFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-FuzzyRowFilter.filterCell(Cellc)
+FamilyFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-Filter.filterCell(Cellc)
-A way to filter based on the column family, column 
qualifier and/or the column value.
-
+ColumnPrefixFilter.filterCell(Cellcell)
 
 
 Filter.ReturnCode
-RandomRowFilter.filterCell(Cellc)
+PageFilter.filterCell(Cellignored)
 
 
 Filter.ReturnCode
-FirstKeyOnlyFilter.filterCell(Cellc)
+RowFilter.filterCell(Cellv)
 
 
 Filter.ReturnCode
-SkipFilter.filterCell(Cellc)
+ColumnRangeFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-TimestampsFilter.filterCell(Cellc)
+ColumnCountGetFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-ValueFilter.filterCell(Cellc)
+MultipleColumnPrefixFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-KeyOnlyFilter.filterCell(Cellignored)
+ColumnPaginationFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-FamilyFilter.filterCell(Cellc)
+DependentColumnFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-QualifierFilter.filterCell(Cellc)
+FilterListWithOR.filterCell(Cellc)
 
 
 Filter.ReturnCode
-FilterList.filterCell(Cellc)
+InclusiveStopFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-ColumnRangeFilter.filterCell(Cellc)
+KeyOnlyFilter.filterCell(Cellignored)
 
 
 Filter.ReturnCode
-ColumnPaginationFilter.filterCell(Cellc)
+MultiRowRangeFilter.filterCell(Cellignored)
 
 
 Filter.ReturnCode
-FilterListWithAND.filterCell(Cellc)
+Filter.filterCell(Cell c)
+A way to filter based on the column family, column 
qualifier and/or the column value.
+
 
 
 Filter.ReturnCode
-WhileMatchFilter.filterCell(Cellc)
+FirstKeyOnlyFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-MultiRowRangeFilter.filterCell(Cellignored)
+WhileMatchFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-PrefixFilter.filterCell(Cellc)
+FirstKeyValueMatchingQualifiersFilter.filterCell(Cellc)
+Deprecated.
+
 
 
 Filter.ReturnCode
-DependentColumnFilter.filterCell(Cellc)
+TimestampsFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-FirstKeyValueMatchingQualifiersFilter.filterCell(Cellc)
-Deprecated.
-
+FuzzyRowFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-PageFilter.filterCell(Cellignored)
+FilterList.filterCell(Cellc)
 
 
 Filter.ReturnCode
-FilterListWithOR.filterCell(Cellc)
+RandomRowFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-InclusiveStopFilter.filterCell(Cellc)
+PrefixFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-MultipleColumnPrefixFilter.filterCell(Cellc)
+SingleColumnValueFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
-SingleColumnValueFilter.filterCell(Cellc)
+QualifierFilter.filterCell(Cellc)
 
 
 Filter.ReturnCode
@@ -275,158 +275,158 @@
 
 
 Filter.ReturnCode
-ColumnPrefixFilter.filterKeyValue(Cellc)
+ValueFilter.filterKeyValue(Cellc)
 Deprecated.
 
 
 
 Filter.ReturnCode
-ColumnCountGetFilter.filterKeyValue(Cellc)
+SkipFilter.filterKeyValue(Cellc)
 Deprecated.
 
 
 
 Filter.ReturnCode
-RowFilter.filterKeyValue(Cellc)
-Deprecated.
-
+FilterListBase.filterKeyValue(Cellc)
 
 
 Filter.ReturnCode
-FuzzyRowFilter.filterKeyValue(Cellc)
+FamilyFilter.filterKeyValue(Cellc)
 Deprecated.
 
 
 
 Filter.ReturnCode
-Filter.filterKeyValue(Cell c)
-Deprecated.
-As of release 2.0.0, this 
will be removed in HBase 3.0.0.
- Instead use filterCell(Cell)
-
+ColumnPrefixFilter.filterKeyValue(Cellc)
+Deprecated.
 
 
 
 Filter.ReturnCode
-RandomRowFilter.filterKeyValue(Cellc)
+PageFilter.filterKeyValue(Cellc)
 Deprecated.
 
 
 
 Filter.ReturnCode
-FirstKeyOnlyFilter.filterKeyValue(Cellc)
+RowFilter.filterKeyValue(Cellc)
 Deprecated.
 
 
 
 Filter.ReturnCode
-SkipFilter.filterKeyValue(Cellc)
+ColumnRangeFilter.filterKeyValue(Cellc)
 Deprecated.
 
 
 
 Filter.ReturnCode
-TimestampsFilter.filterKeyValue(Cellc)
+ColumnCountGetFilter.filterKeyValue(Cellc)
 Deprecated.
 
 
 
 Filter.ReturnCode
-ValueFilter.filterKeyValue(Cellc)
+MultipleColumnPrefixFilter.filterKeyValue(Cellc)
 Deprecated.
 
 
 
 Filter.ReturnCode
-KeyOnlyFilter.filterKeyValue(Cellignored)
+ColumnPaginationFilter.filterKeyValue(Cellc)
 Deprecated.
 
 
 
 Filter.ReturnCode
-FamilyFilter.filterKeyValue(Cellc)
+DependentColumnFilter.filterKeyValue(Cellc)
 Deprecated.
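For context on the filterCell contract being reshuffled above, a sketch of a custom filter, assuming FilterBase as the base class and a hypothetical qualifier to drop; a deployable filter would also need the protobuf serialization hooks (toByteArray/parseFrom), omitted here:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.util.Bytes;

public class DropQualifierFilter extends FilterBase {
  private final byte[] qualifier = Bytes.toBytes("secret"); // hypothetical column to hide

  @Override
  public ReturnCode filterCell(Cell c) {
    // SKIP drops just this cell; INCLUDE keeps it.
    return CellUtil.matchingQualifier(c, qualifier) ? ReturnCode.SKIP : ReturnCode.INCLUDE;
  }
}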
 
 
 
 

[19/51] [partial] hbase-site git commit: Published site at .

2018-02-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
index d481372..5e1590b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/RpcRetryingCallerFactory.html
@@ -126,15 +126,15 @@
 
 
 private RpcRetryingCallerFactory
-RegionCoprocessorRpcChannel.rpcCallerFactory
+ConnectionImplementation.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-ConnectionImplementation.rpcCallerFactory
+HTable.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
-HTable.rpcCallerFactory
+RegionCoprocessorRpcChannel.rpcCallerFactory
 
 
 private RpcRetryingCallerFactory
@@ -155,21 +155,21 @@
 
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
-
-
-RpcRetryingCallerFactory
 ClusterConnection.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
 Returns a new RpcRetryingCallerFactory from the given 
Configuration.
 
 
+
+RpcRetryingCallerFactory
+ConnectionImplementation.getNewRpcRetryingCallerFactory(org.apache.hadoop.conf.Configuration conf)
+
 
 RpcRetryingCallerFactory
-ConnectionImplementation.getRpcRetryingCallerFactory()
+ClusterConnection.getRpcRetryingCallerFactory()
 
 
 RpcRetryingCallerFactory
-ClusterConnection.getRpcRetryingCallerFactory()
+ConnectionImplementation.getRpcRetryingCallerFactory()
 
 
 static RpcRetryingCallerFactory

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
index 6384833..018438c 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Scan.html
@@ -283,27 +283,27 @@ service.
 
 
 private Scan
-ScannerCallableWithReplicas.scan
+AsyncScanSingleRegionRpcRetryingCaller.scan
 
 
 protected Scan
-ClientScanner.scan
+ScannerCallable.scan
 
 
 private Scan
-AsyncClientScanner.scan
+ScannerCallableWithReplicas.scan
 
 
-private Scan
-AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.scan
+protected Scan
+ClientScanner.scan
 
 
 private Scan
-AsyncScanSingleRegionRpcRetryingCaller.scan
+AsyncClientScanner.scan
 
 
-protected Scan
-ScannerCallable.scan
+private Scan
+AsyncRpcRetryingCallerFactory.ScanSingleRegionCallerBuilder.scan
 
 
 private Scan
@@ -339,11 +339,11 @@ service.
 
 
 protected Scan
-ClientScanner.getScan()
+ScannerCallable.getScan()
 
 
 protected Scan
-ScannerCallable.getScan()
+ClientScanner.getScan()
 
 
 Scan
@@ -638,29 +638,29 @@ service.
 
 
 ResultScanner
-AsyncTable.getScanner(Scanscan)
-Returns a scanner on the current table as specified by the 
Scan 
object.
-
+RawAsyncTableImpl.getScanner(Scanscan)
 
 
 ResultScanner
-Table.getScanner(Scanscan)
-Returns a scanner on the current table as specified by the 
Scan
- object.
+HTable.getScanner(Scanscan)
+The underlying HTable must 
not be closed.
 
 
 
 ResultScanner
-AsyncTableImpl.getScanner(Scanscan)
+Table.getScanner(Scanscan)
+Returns a scanner on the current table as specified by the 
Scan
+ object.
+
 
 
 ResultScanner
-RawAsyncTableImpl.getScanner(Scanscan)
+AsyncTableImpl.getScanner(Scanscan)
 
 
 ResultScanner
-HTable.getScanner(Scanscan)
-The underlying HTable must 
not be closed.
+AsyncTable.getScanner(Scanscan)
+Returns a scanner on the current table as specified by the 
Scan 
object.
 
 
 
@@ -703,9 +703,7 @@ service.
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
-AsyncTable.scanAll(Scanscan)
-Return all the results that match the given scan 
object.
-
+RawAsyncTableImpl.scanAll(Scanscan)
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
@@ -713,7 +711,9 @@ service.
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true;
 title="class or interface in 
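A short sketch of the scanAll contract listed above, assuming an open AsyncConnection; note that scanAll buffers the whole result list in memory, so the Scan should be bounded:

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;

public class ScanAllDemo {
  static void scanAll(AsyncConnection conn) {
    conn.getTable(TableName.valueOf("demo"))
        .scanAll(new Scan().setLimit(100)) // buffers everything, so bound the scan
        .thenAccept((List<Result> rs) -> System.out.println(rs.size() + " rows"));
  }
}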

[19/51] [partial] hbase-site git commit: Published site at .

2018-02-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
index 61695fd..bf8d672 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
@@ -113,17 +113,17 @@
 
 
 
+private Batch.CallbackCResult
+AsyncRequestFutureImpl.callback
+
+
 private Batch.CallbackT
 AsyncProcessTask.callback
 
-
+
 private Batch.CallbackT
 AsyncProcessTask.Builder.callback
 
-
-private Batch.CallbackCResult
-AsyncRequestFutureImpl.callback
-
 
 
 
@@ -148,50 +148,42 @@
 
 
 Rvoid
-HTable.batchCallback(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions,
- http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[]results,
- Batch.CallbackRcallback)
-
-
-Rvoid
 Table.batchCallback(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions,
  http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[]results,
  Batch.CallbackRcallback)
 Same as Table.batch(List,
 Object[]), but with a callback.
 
 
+
+Rvoid
+HTable.batchCallback(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions,
+ http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[]results,
+ Batch.CallbackRcallback)
+
 
 R extends 
com.google.protobuf.Messagevoid
-HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
+Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
com.google.protobuf.Messagerequest,
byte[]startKey,
byte[]endKey,
RresponsePrototype,
-   Batch.CallbackRcallback)
+   Batch.CallbackRcallback)
+Creates an instance of the given Service 
subclass for each table
+ region spanning the range from the startKey row to 
endKey row (inclusive), all
+ the invocations to the same region server will be batched into one call.
+
 
 
 R extends 
com.google.protobuf.Messagevoid
-Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
+HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptormethodDescriptor,
com.google.protobuf.Messagerequest,
byte[]startKey,
byte[]endKey,
RresponsePrototype,
-   Batch.CallbackRcallback)
-Creates an instance of the given Service 
subclass for each table
- region spanning the range from the startKey row to 
endKey row (inclusive), all
- the invocations to the same region server will be batched into one call.
-
+   Batch.CallbackRcallback)
 
 
 T extends 
com.google.protobuf.Service,Rvoid
-HTable.coprocessorService(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">ClassTservice,
-  byte[]startKey,
-  byte[]endKey,
-  Batch.CallT,Rcallable,
-  Batch.CallbackRcallback)
-
-
-T extends 
com.google.protobuf.Service,Rvoid
 Table.coprocessorService(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">ClassTservice,
   byte[]startKey,
   byte[]endKey,
@@ -203,6 +195,14 @@
  with each Service instance.
 
 
+
+T extends 
com.google.protobuf.Service,Rvoid
+HTable.coprocessorService(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">ClassTservice,
+  byte[]startKey,
+  byte[]endKey,
+  Batch.CallT,Rcallable,
+  Batch.CallbackRcallback)
+
 
 static Rvoid
 HTable.doBatchWithCallback(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">List? extends Rowactions,
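A usage sketch of the batchCallback contract shown above, assuming an open Table and a list of Gets:

import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchDemo {
  static void run(Table table, List<Get> gets) throws Exception {
    Object[] results = new Object[gets.size()];
    // The callback fires once per action as each region server's answer arrives.
    table.batchCallback(gets, results,
        (Batch.Callback<Result>) (region, row, result) ->
            System.out.println(Bytes.toString(row) + " -> " + result));
  }
}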


[19/51] [partial] hbase-site git commit: Published site at .

2018-02-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
index 81b1f23..78d979d 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Result.html
@@ -292,7 +292,7 @@ service.
 
 
 private static HRegionLocation
-AsyncMetaTableAccessor.getRegionLocation(Resultr,
+MetaTableAccessor.getRegionLocation(Resultr,
  RegionInforegionInfo,
  intreplicaId)
 Returns the HRegionLocation parsed from the given meta row 
Result
@@ -301,7 +301,7 @@ service.
 
 
 private static HRegionLocation
-MetaTableAccessor.getRegionLocation(Resultr,
+AsyncMetaTableAccessor.getRegionLocation(Resultr,
  RegionInforegionInfo,
  intreplicaId)
 Returns the HRegionLocation parsed from the given meta row 
Result
@@ -309,14 +309,14 @@ service.
 
 
 
-private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">OptionalRegionLocations
-AsyncMetaTableAccessor.getRegionLocations(Resultr)
+static RegionLocations
+MetaTableAccessor.getRegionLocations(Resultr)
 Returns an HRegionLocationList extracted from the 
result.
 
 
 
-static RegionLocations
-MetaTableAccessor.getRegionLocations(Resultr)
+private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">OptionalRegionLocations
+AsyncMetaTableAccessor.getRegionLocations(Resultr)
 Returns an HRegionLocationList extracted from the 
result.
 
 
@@ -326,42 +326,42 @@ service.
 
 
 private static long
-AsyncMetaTableAccessor.getSeqNumDuringOpen(Resultr,
+MetaTableAccessor.getSeqNumDuringOpen(Resultr,
intreplicaId)
 The latest seqnum that the server writing to meta observed 
when opening the region.
 
 
 
 private static long
-MetaTableAccessor.getSeqNumDuringOpen(Resultr,
+AsyncMetaTableAccessor.getSeqNumDuringOpen(Resultr,
intreplicaId)
 The latest seqnum that the server writing to meta observed 
when opening the region.
 
 
 
-private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">OptionalServerName
-AsyncMetaTableAccessor.getServerName(Resultr,
+static ServerName
+MetaTableAccessor.getServerName(Resultr,
  intreplicaId)
 Returns a ServerName from catalog table Result.
 
 
 
-static ServerName
-MetaTableAccessor.getServerName(Resultr,
+private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">OptionalServerName
+AsyncMetaTableAccessor.getServerName(Resultr,
  intreplicaId)
 Returns a ServerName from catalog table Result.
 
 
 
-private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">OptionalTableState
-AsyncMetaTableAccessor.getTableState(Resultr)
-
-
 static TableState
 MetaTableAccessor.getTableState(Resultr)
 Decode table state from META Result.
 
 
+
+private static http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true;
 title="class or interface in java.util">OptionalTableState
+AsyncMetaTableAccessor.getTableState(Resultr)
+
 
 void
 AsyncMetaTableAccessor.MetaTableScanResultConsumer.onNext(Result[]results,
@@ -457,13 +457,13 @@ service.
 ClientScanner.cache
 
 
-private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
-CompleteScanResultCache.partialResults
-
-
 private http://docs.oracle.com/javase/8/docs/api/java/util/Deque.html?is-external=true;
 title="class or interface in java.util">DequeResult
 BatchScanResultCache.partialResults
 
+
+private http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListResult
+CompleteScanResultCache.partialResults
+
 
 private http://docs.oracle.com/javase/8/docs/api/java/util/Queue.html?is-external=true;
 title="class or interface in java.util">QueueResult
 AsyncTableResultScanner.queue
@@ -486,7 +486,7 @@ service.
 
 
 Result[]
-AllowPartialScanResultCache.addAndGet(Result[]results,
+BatchScanResultCache.addAndGet(Result[]results,
  booleanisHeartbeatMessage)
 
 
@@ -496,24 +496,20 @@ service.
 
 
 Result[]
-BatchScanResultCache.addAndGet(Result[]results,
+AllowPartialScanResultCache.addAndGet(Result[]results,
  booleanisHeartbeatMessage)
 
 
 Result
-Table.append(Append append)
-Appends values to one or more columns within a single 
row.
-
+HTable.append(Append append)
 
 
 Result
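A sketch of the append contract listed above, assuming an open Table and hypothetical row/family/qualifier names:

import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendDemo {
  static void run(Table table) throws Exception {
    Append append = new Append(Bytes.toBytes("row1"))
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes("|entry"));
    Result r = table.append(append); // atomic server-side append to the stored value
    System.out.println(r.isEmpty() ? "(no result returned)" : r);
  }
}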

[19/51] [partial] hbase-site git commit: Published site at .

2018-02-10 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c83a37c8/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
index 61695fd..bf8d672 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
@@ -113,17 +113,17 @@
 
 
 
+private Batch.Callback<CResult>
+AsyncRequestFutureImpl.callback
+
+
 private Batch.Callback<T>
 AsyncProcessTask.callback
 
-
+
 private Batch.Callback<T>
 AsyncProcessTask.Builder.callback
 
-
-private Batch.Callback<CResult>
-AsyncRequestFutureImpl.callback
-
 
 
 
@@ -148,50 +148,42 @@
 
 
 <R> void
-HTable.batchCallback(List<? extends Row> actions,
-          Object[] results,
-          Batch.Callback<R> callback)
-
-
-<R> void
 Table.batchCallback(List<? extends Row> actions,
          Object[] results,
          Batch.Callback<R> callback)
 Same as Table.batch(List, Object[]), but with a callback.
 
 
+
+<R> void
+HTable.batchCallback(List<? extends Row> actions,
+          Object[] results,
+          Batch.Callback<R> callback)
+
 
 <R extends com.google.protobuf.Message> void
-HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
+Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
               com.google.protobuf.Message request,
               byte[] startKey,
               byte[] endKey,
               R responsePrototype,
-              Batch.Callback<R> callback)
+              Batch.Callback<R> callback)
+Creates an instance of the given Service subclass for each table
+ region spanning the range from the startKey row to endKey row (inclusive); all
+ invocations to the same region server are batched into one call.
+
 
 
 <R extends com.google.protobuf.Message> void
-Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
+HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
               com.google.protobuf.Message request,
               byte[] startKey,
               byte[] endKey,
               R responsePrototype,
-              Batch.Callback<R> callback)
-Creates an instance of the given Service subclass for each table
- region spanning the range from the startKey row to endKey row (inclusive); all
- invocations to the same region server are batched into one call.
-
+              Batch.Callback<R> callback)
 
 
 <T extends com.google.protobuf.Service, R> void
-HTable.coprocessorService(Class<T> service,
-          byte[] startKey,
-          byte[] endKey,
-          Batch.Call<T,R> callable,
-          Batch.Callback<R> callback)
-
-
-<T extends com.google.protobuf.Service, R> void
 Table.coprocessorService(Class<T> service,
          byte[] startKey,
          byte[] endKey,
@@ -203,6 +195,14 @@
  with each Service instance.
 
 
+
+<T extends com.google.protobuf.Service, R> void
+HTable.coprocessorService(Class<T> service,
+          byte[] startKey,
+          byte[] endKey,
+          Batch.Call<T,R> callable,
+          Batch.Callback<R> callback)
+
 
 static <R> void
 HTable.doBatchWithCallback(List<? extends Row> actions,

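For context on the batchCallback entries above: the callback variant behaves like
Table.batch(List, Object[]) but also reports each operation as it completes. A hedged
usage sketch (table and column names are illustrative, not from this diff):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BatchCallbackSketch {
  static void runBatch(Connection conn) throws Exception {
    List<Row> actions = new ArrayList<>();
    actions.add(new Get(Bytes.toBytes("row1")));
    actions.add(new Put(Bytes.toBytes("row2"))
        .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    Object[] results = new Object[actions.size()];
    try (Table table = conn.getTable(TableName.valueOf("t"))) {
      // Batch.Callback.update(region, row, result) fires once per completed operation.
      table.batchCallback(actions, results,
          (byte[] region, byte[] row, Object result) ->
              System.out.println(Bytes.toStringBinary(row) + " -> " + result));
    }
  }
}
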

[19/51] [partial] hbase-site git commit: Published site at .

2018-02-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
index a07a830..80108a2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
@@ -144,14 +144,16 @@
 
 
 
-static HTableDescriptor
-HTableDescriptor.parseFrom(byte[] bytes)
+static HColumnDescriptor
+HColumnDescriptor.parseFrom(byte[] bytes)
 Deprecated.
 
 
 
-static ClusterId
-ClusterId.parseFrom(byte[] bytes)
+static HTableDescriptor
+HTableDescriptor.parseFrom(byte[] bytes)
+Deprecated.
+
 
 
 static HRegionInfo
@@ -163,10 +165,8 @@
 
 
 
-static HColumnDescriptor
-HColumnDescriptor.parseFrom(byte[] bytes)
-Deprecated.
-
+static ClusterId
+ClusterId.parseFrom(byte[] bytes)
 
 
 static SplitLogTask
@@ -220,17 +220,17 @@
 TableDescriptorBuilder.ModifyableTableDescriptor.parseFrom(byte[] bytes)
 
 
-static RegionInfo
-RegionInfo.parseFrom(byte[] bytes)
-
-
 static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.parseFrom(byte[] pbBytes)
 
-
+
 private static ColumnFamilyDescriptor
 ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor.parseFrom(byte[] bytes)
 
+
+static RegionInfo
+RegionInfo.parseFrom(byte[] bytes)
+
 
 static RegionInfo
 RegionInfo.parseFrom(byte[] bytes,
@@ -305,111 +305,111 @@
 ByteArrayComparable.parseFrom(byte[] pbBytes)
 
 
-static SingleColumnValueExcludeFilter
-SingleColumnValueExcludeFilter.parseFrom(byte[] pbBytes)
+static ColumnPrefixFilter
+ColumnPrefixFilter.parseFrom(byte[] pbBytes)
 
 
-static ValueFilter
-ValueFilter.parseFrom(byte[] pbBytes)
+static ColumnCountGetFilter
+ColumnCountGetFilter.parseFrom(byte[] pbBytes)
 
 
-static SkipFilter
-SkipFilter.parseFrom(byte[] pbBytes)
+static RowFilter
+RowFilter.parseFrom(byte[] pbBytes)
 
 
-static FamilyFilter
-FamilyFilter.parseFrom(byte[] pbBytes)
+static FuzzyRowFilter
+FuzzyRowFilter.parseFrom(byte[] pbBytes)
 
 
-static BinaryPrefixComparator
-BinaryPrefixComparator.parseFrom(byte[] pbBytes)
+static BinaryComparator
+BinaryComparator.parseFrom(byte[] pbBytes)
 
 
-static NullComparator
-NullComparator.parseFrom(byte[] pbBytes)
+static RegexStringComparator
+RegexStringComparator.parseFrom(byte[] pbBytes)
 
 
-static BigDecimalComparator
-BigDecimalComparator.parseFrom(byte[] pbBytes)
+static Filter
+Filter.parseFrom(byte[] pbBytes)
+Concrete implementers can signal a failure condition in their code by throwing an
+ IOException.
+
 
 
-static ColumnPrefixFilter
-ColumnPrefixFilter.parseFrom(byte[] pbBytes)
+static RandomRowFilter
+RandomRowFilter.parseFrom(byte[] pbBytes)
 
 
-static PageFilter
-PageFilter.parseFrom(byte[] pbBytes)
+static FirstKeyOnlyFilter
+FirstKeyOnlyFilter.parseFrom(byte[] pbBytes)
 
 
-static BitComparator
-BitComparator.parseFrom(byte[] pbBytes)
+static SkipFilter
+SkipFilter.parseFrom(byte[] pbBytes)
 
 
-static RowFilter
-RowFilter.parseFrom(byte[] pbBytes)
+static BinaryPrefixComparator
+BinaryPrefixComparator.parseFrom(byte[] pbBytes)
 
 
-static ColumnRangeFilter
-ColumnRangeFilter.parseFrom(byte[] pbBytes)
+static TimestampsFilter
+TimestampsFilter.parseFrom(byte[] pbBytes)
 
 
-static ColumnCountGetFilter
-ColumnCountGetFilter.parseFrom(byte[] pbBytes)
+static ValueFilter
+ValueFilter.parseFrom(byte[] pbBytes)
 
 
-static SubstringComparator
-SubstringComparator.parseFrom(byte[] pbBytes)
+static KeyOnlyFilter
+KeyOnlyFilter.parseFrom(byte[] pbBytes)
 
 
-static MultipleColumnPrefixFilter
-MultipleColumnPrefixFilter.parseFrom(byte[] pbBytes)
+static FamilyFilter
+FamilyFilter.parseFrom(byte[] pbBytes)
 
 
-static ColumnPaginationFilter
-ColumnPaginationFilter.parseFrom(byte[] pbBytes)
+static QualifierFilter
+QualifierFilter.parseFrom(byte[] pbBytes)
 
 
-static DependentColumnFilter
-DependentColumnFilter.parseFrom(byte[] pbBytes)
+static FilterList
+FilterList.parseFrom(byte[] pbBytes)
 
 
-static BinaryComparator
-BinaryComparator.parseFrom(byte[] pbBytes)
+static BigDecimalComparator
+BigDecimalComparator.parseFrom(byte[] pbBytes)
 
 
-static InclusiveStopFilter
-InclusiveStopFilter.parseFrom(byte[] pbBytes)
+static ColumnRangeFilter
+ColumnRangeFilter.parseFrom(byte[] pbBytes)
 
 
-static KeyOnlyFilter
-KeyOnlyFilter.parseFrom(byte[] pbBytes)
+static ColumnPaginationFilter
+ColumnPaginationFilter.parseFrom(byte[] pbBytes)
 
 
-static MultiRowRangeFilter
-MultiRowRangeFilter.parseFrom(byte[] pbBytes)
+static SubstringComparator
+SubstringComparator.parseFrom(byte[] pbBytes)
 
 
-static Filter
-Filter.parseFrom(byte[] pbBytes)
-Concrete implementers can signal a 

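The parseFrom reshuffle above is easiest to read against the round-trip contract these
methods share: toByteArray() produces a protobuf encoding and the concrete class's
static parseFrom(byte[]) restores it, signalling failure via DeserializationException.
A minimal sketch using one of the filters listed above (the prefix value is illustrative):

import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public final class FilterRoundTrip {
  public static void main(String[] args) throws Exception {
    ColumnPrefixFilter original = new ColumnPrefixFilter(Bytes.toBytes("col-"));
    byte[] pbBytes = original.toByteArray();              // protobuf-encoded form
    ColumnPrefixFilter restored = ColumnPrefixFilter.parseFrom(pbBytes);
    System.out.println(Bytes.toString(restored.getPrefix())); // prints: col-
  }
}
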
[19/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
index 5844c3b..80259dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.CreateAsyncCallback.html
@@ -159,7 +159,7 @@
 151}
 152Task task = 
findOrCreateOrphanTask(path);
 153if (task.isOrphan() && (task.incarnation.get() == 0)) {
-154  LOG.info("resubmitting unassigned 
orphan task " + path);
+154  LOG.info("Resubmitting unassigned 
orphan task " + path);
 155  // ignore failure to resubmit. The 
timeout-monitor will handle it later
 156  // albeit in a more crude fashion
 157  resubmitTask(path, task, FORCE);
@@ -210,7 +210,7 @@
 202  
SplitLogCounters.tot_mgr_resubmit_force.increment();
 203  version = -1;
 204}
-205LOG.info("resubmitting task " + 
path);
+205LOG.info("Resubmitting task " + 
path);
 206task.incarnation.incrementAndGet();
 207boolean result = resubmit(path, 
version);
 208if (!result) {
@@ -288,7 +288,7 @@
 280
SplitLogCounters.tot_mgr_rescan_deleted.increment();
 281  }
 282  
SplitLogCounters.tot_mgr_missing_state_in_delete.increment();
-283  LOG.debug("deleted task without in 
memory state " + path);
+283  LOG.debug("Deleted task without in 
memory state " + path);
 284  return;
 285}
 286synchronized (task) {
@@ -336,13 +336,13 @@
 328  }
 329
 330  private void createNodeSuccess(String 
path) {
-331LOG.debug("put up splitlog task at 
znode " + path);
+331LOG.debug("Put up splitlog task at 
znode " + path);
 332getDataSetWatch(path, zkretries);
 333  }
 334
 335  private void createNodeFailure(String 
path) {
 336// TODO the Manager should split the 
log locally instead of giving up
-337LOG.warn("failed to create task node" 
+ path);
+337LOG.warn("Failed to create task node 
" + path);
 338setDone(path, FAILURE);
 339  }
 340
@@ -368,15 +368,15 @@
 360data = 
ZKMetadata.removeMetaData(data);
 361SplitLogTask slt = 
SplitLogTask.parseFrom(data);
 362if (slt.isUnassigned()) {
-363  LOG.debug("task not yet acquired " 
+ path + " ver = " + version);
+363  LOG.debug("Task not yet acquired " 
+ path + ", ver=" + version);
 364  handleUnassignedTask(path);
 365} else if (slt.isOwned()) {
 366  heartbeat(path, version, 
slt.getServerName());
 367} else if (slt.isResigned()) {
-368  LOG.info("task " + path + " entered 
state: " + slt.toString());
+368  LOG.info("Task " + path + " entered 
state=" + slt.toString());
 369  resubmitOrFail(path, FORCE);
 370} else if (slt.isDone()) {
-371  LOG.info("task " + path + " entered 
state: " + slt.toString());
+371  LOG.info("Task " + path + " entered 
state=" + slt.toString());
 372  if (taskFinisher != null && !ZKSplitLog.isRescanNode(watcher, path)) {
 373if 
(taskFinisher.finish(slt.getServerName(), ZKSplitLog.getFileName(path)) == 
Status.DONE) {
 374  setDone(path, SUCCESS);
@@ -387,7 +387,7 @@
 379setDone(path, SUCCESS);
 380  }
 381} else if (slt.isErr()) {
-382  LOG.info("task " + path + " entered 
state: " + slt.toString());
+382  LOG.info("Task " + path + " entered 
state=" + slt.toString());
 383  resubmitOrFail(path, CHECK);
 384} else {
 385  LOG.error(HBaseMarkers.FATAL, 
"logic error - unexpected zk state for path = "
@@ -403,7 +403,7 @@
 395  }
 396
 397  private void 
getDataSetWatchFailure(String path) {
-398LOG.warn("failed to set data watch " 
+ path);
+398LOG.warn("Failed to set data watch " 
+ path);
 399setDone(path, FAILURE);
 400  }
 401
@@ -412,7 +412,7 @@
 404if (task == null) {
 405  if 
(!ZKSplitLog.isRescanNode(watcher, path)) {
 406
SplitLogCounters.tot_mgr_unacquired_orphan_done.increment();
-407LOG.debug("unacquired orphan task 
is done " + path);
+407LOG.debug("Unacquired orphan task 
is done " + path);
 408  }
 409} else {
 410  synchronized (task) {
@@ -449,7 +449,7 @@
 441
 442  private Task 
findOrCreateOrphanTask(String path) {
 443return computeIfAbsent(details.getTasks(), path, Task::new, () -> {
-444  LOG.info("creating orphan task " + 
path);
+444  LOG.info("Creating orphan task " + 
path);
 445  
SplitLogCounters.tot_mgr_orphan_task_acquired.increment();
 446});
 447  }
@@ -458,7 +458,7 @@
 450Task task = 
findOrCreateOrphanTask(path);
 451if 

[19/51] [partial] hbase-site git commit: Published site at .

2018-02-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/client/RowMutations.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RowMutations.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RowMutations.html
index 416bf21..6b01ff4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RowMutations.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RowMutations.html
@@ -30,9 +30,9 @@
 022import java.util.Arrays;
 023import java.util.Collections;
 024import java.util.List;
-025
-026import 
org.apache.yetus.audience.InterfaceAudience;
-027import 
org.apache.hadoop.hbase.util.Bytes;
+025import 
org.apache.hadoop.hbase.util.Bytes;
+026import 
org.apache.hadoop.hbase.util.CollectionUtils;
+027import 
org.apache.yetus.audience.InterfaceAudience;
 028
 029/**
 030 * Performs multiple mutations atomically 
on a single row.
@@ -46,110 +46,148 @@
 038 */
 039@InterfaceAudience.Public
 040public class RowMutations implements Row 
{
-041  private final List<Mutation> mutations;
-042  private final byte [] row;
-043
-044  public RowMutations(byte [] row) {
-045this(row, -1);
-046  }
-047  /**
-048   * Create an atomic mutation for the 
specified row.
-049   * @param row row key
-050   * @param initialCapacity the initial 
capacity of the RowMutations
-051   */
-052  public RowMutations(byte [] row, int 
initialCapacity) {
-053Mutation.checkRow(row);
-054this.row = Bytes.copy(row);
-055if (initialCapacity <= 0) {
-056  this.mutations = new ArrayList<>();
-057} else {
-058  this.mutations = new ArrayList<>(initialCapacity);
-059}
-060  }
-061
+041
+042  /**
+043   * Create a {@link RowMutations} with 
the specified mutations.
+044   * @param mutations the mutations to 
send
+045   * @return RowMutations
+046   * @throws IOException if any row in 
mutations is different to another
+047   */
+048  public static RowMutations of(List<? extends Mutation> mutations) throws IOException {
+049if 
(CollectionUtils.isEmpty(mutations)) {
+050  throw new 
IllegalArgumentException("Can't instantiate a RowMutations by empty list");
+051}
+052return new 
RowMutations(mutations.get(0).getRow(), mutations.size())
+053.add(mutations);
+054  }
+055
+056  private final List<Mutation> mutations;
+057  private final byte [] row;
+058
+059  public RowMutations(byte [] row) {
+060this(row, -1);
+061  }
 062  /**
-063   * Add a {@link Put} operation to the 
list of mutations
-064   * @param p The {@link Put} to add
-065   * @throws IOException
+063   * Create an atomic mutation for the 
specified row.
+064   * @param row row key
+065   * @param initialCapacity the initial 
capacity of the RowMutations
 066   */
-067  public void add(Put p) throws 
IOException {
-068internalAdd(p);
-069  }
-070
-071  /**
-072   * Add a {@link Delete} operation to 
the list of mutations
-073   * @param d The {@link Delete} to add
-074   * @throws IOException
-075   */
-076  public void add(Delete d) throws 
IOException {
-077internalAdd(d);
-078  }
-079
-080  private void internalAdd(Mutation m) 
throws IOException {
-081int res = Bytes.compareTo(this.row, 
m.getRow());
-082if (res != 0) {
-083  throw new WrongRowIOException("The row in the recently added Put/Delete <" +
-084  Bytes.toStringBinary(m.getRow()) + "> doesn't match the original one <" +
-085  Bytes.toStringBinary(this.row) + ">");
-086}
-087mutations.add(m);
-088  }
-089
-090  /**
-091   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0.
-092   * Use {@link 
Row#COMPARATOR} instead
-093   */
-094  @Deprecated
-095  @Override
-096  public int compareTo(Row i) {
-097return Bytes.compareTo(this.getRow(), 
i.getRow());
+067  public RowMutations(byte [] row, int 
initialCapacity) {
+068this.row = 
Bytes.copy(Mutation.checkRow(row));
+069if (initialCapacity <= 0) {
+070  this.mutations = new ArrayList<>();
+071} else {
+072  this.mutations = new ArrayList<>(initialCapacity);
+073}
+074  }
+075
+076  /**
+077   * Add a {@link Put} operation to the 
list of mutations
+078   * @param p The {@link Put} to add
+079   * @throws IOException if the row of 
added mutation doesn't match the original row
+080   * @deprecated since 2.0 version and 
will be removed in 3.0 version.
+081   * use {@link 
#add(Mutation)}
+082   */
+083  @Deprecated
+084  public void add(Put p) throws 
IOException {
+085add((Mutation) p);
+086  }
+087
+088  /**
+089   * Add a {@link Delete} operation to 
the list of mutations
+090   * @param d The {@link Delete} to add
+091   * @throws IOException if the row of 
added mutation doesn't match the original row
+092   * @deprecated since 2.0 version and 
will be removed in 3.0 version.
+093   * use {@link 
#add(Mutation)}
+094   */
+095  

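A hedged usage sketch of the RowMutations API this diff reworks: the new of() factory
builds an atomic multi-mutation from an existing list (rejecting mixed rows), and
Table.mutateRow applies it atomically on the region server. Table and column names
are illustrative.

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class RowMutationsSketch {
  static void atomicUpdate(Table table) throws IOException {
    byte[] row = Bytes.toBytes("row-1");
    Put put = new Put(row)
        .addColumn(Bytes.toBytes("f"), Bytes.toBytes("new"), Bytes.toBytes("v"));
    Delete delete = new Delete(row)
        .addColumns(Bytes.toBytes("f"), Bytes.toBytes("old"));
    // Factory added in this change; throws if any mutation targets another row.
    RowMutations rm = RowMutations.of(Arrays.asList(put, delete));
    table.mutateRow(rm); // both mutations applied atomically
  }
}
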
[19/51] [partial] hbase-site git commit: Published site at .

2018-01-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.html
index 816b330..512ff39 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncClusterAdminApi.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestAsyncClusterAdminApi
+public class TestAsyncClusterAdminApi
 extends TestAsyncAdminBase
 
 
@@ -135,14 +135,18 @@ extends Field and Description
 
 
+static HBaseClassTestRule
+CLASS_RULE
+
+
 private http://docs.oracle.com/javase/8/docs/api/java/nio/file/Path.html?is-external=true;
 title="class or interface in java.nio.file">Path
 cnf2Path
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/nio/file/Path.html?is-external=true;
 title="class or interface in java.nio.file">Path
 cnf3Path
 
-
+
 private http://docs.oracle.com/javase/8/docs/api/java/nio/file/Path.html?is-external=true;
 title="class or interface in java.nio.file">Path
 cnfPath
 
@@ -273,13 +277,22 @@ extends 
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+
 
 
 
 
 
 cnfPath
-private final Path cnfPath
+private final Path cnfPath
 
 
 
@@ -288,7 +301,7 @@ extends 
 
 cnf2Path
-private final Path cnf2Path
+private final Path cnf2Path
 
 
 
@@ -297,7 +310,7 @@ extends 
 
 cnf3Path
-private final Path cnf3Path
+private final Path cnf3Path
 
 
 
@@ -314,7 +327,7 @@ extends 
 
 TestAsyncClusterAdminApi
-public TestAsyncClusterAdminApi()
+public TestAsyncClusterAdminApi()
 
 
 
@@ -331,7 +344,7 @@ extends 
 
 setUpBeforeClass
-public static void setUpBeforeClass()
+public static void setUpBeforeClass()
              throws Exception
 
 Throws:
@@ -345,7 +358,7 @@ extends 
 
 testGetMasterInfoPort
-public void testGetMasterInfoPort()
+public void testGetMasterInfoPort()
              throws Exception
 
 Throws:
@@ -359,7 +372,7 @@ extends 
 
 testRegionServerOnlineConfigChange
-public void testRegionServerOnlineConfigChange()
+public void testRegionServerOnlineConfigChange()
              throws Exception
 
 Throws:
@@ -373,7 +386,7 @@ extends 
 
 testMasterOnlineConfigChange
-public void testMasterOnlineConfigChange()
+public void testMasterOnlineConfigChange()
              throws Exception
 
 Throws:
@@ -387,7 +400,7 @@ extends 
 
 testAllClusterOnlineConfigChange
-public void testAllClusterOnlineConfigChange()
+public void testAllClusterOnlineConfigChange()
              throws IOException
 
 Throws:
@@ -401,7 +414,7 @@ extends 
 
 replaceHBaseSiteXML
-private void replaceHBaseSiteXML()
+private void replaceHBaseSiteXML()
              throws IOException
 
 Throws:
@@ -415,7 +428,7 @@ extends 
 
 restoreHBaseSiteXML
-private void restoreHBaseSiteXML()
+private void restoreHBaseSiteXML()
              throws IOException
 
 Throws:
@@ -429,7 +442,7 @@ extends 
 
 testRollWALWALWriter
-public void testRollWALWALWriter()
+public void testRollWALWALWriter()

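The CLASS_RULE field added above follows the standard HBase test convention: every
test class declares an HBaseClassTestRule as a JUnit @ClassRule so per-class timeouts
are enforced. A minimal sketch of the pattern (the class and category here are
illustrative, not part of this diff):

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;

@Category(MediumTests.class)
public class TestSomething {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestSomething.class);
}
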
[19/51] [partial] hbase-site git commit: Published site at .

2018-01-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManager.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManager.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManager.html
index a708c67..84b2841 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/BackupManager.html
@@ -89,426 +89,420 @@
 081this.conf = conf;
 082this.conn = conn;
 083this.systemTable = new 
BackupSystemTable(conn);
-084
-085  }
-086
-087  /**
-088   * Returns backup info
-089   */
-090  protected BackupInfo getBackupInfo() 
{
-091return backupInfo;
-092  }
-093
-094  /**
-095   * This method modifies the master's 
configuration in order to inject backup-related features
-096   * (TESTs only)
-097   * @param conf configuration
-098   */
-099  @VisibleForTesting
-100  public static void 
decorateMasterConfiguration(Configuration conf) {
-101if (!isBackupEnabled(conf)) {
-102  return;
-103}
-104// Add WAL archive cleaner plug-in
-105String plugins = 
conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
-106String cleanerClass = 
BackupLogCleaner.class.getCanonicalName();
-107if (!plugins.contains(cleanerClass)) 
{
-108  
conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + 
cleanerClass);
-109}
-110
-111String classes = 
conf.get(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY);
-112String masterProcedureClass = 
LogRollMasterProcedureManager.class.getName();
-113if (classes == null) {
-114  
conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, 
masterProcedureClass);
-115} else if 
(!classes.contains(masterProcedureClass)) {
-116  
conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, classes + "," + 
masterProcedureClass);
+084  }
+085
+086  /**
+087   * Returns backup info
+088   */
+089  protected BackupInfo getBackupInfo() 
{
+090return backupInfo;
+091  }
+092
+093  /**
+094   * This method modifies the master's 
configuration in order to inject backup-related features
+095   * (TESTs only)
+096   * @param conf configuration
+097   */
+098  @VisibleForTesting
+099  public static void 
decorateMasterConfiguration(Configuration conf) {
+100if (!isBackupEnabled(conf)) {
+101  return;
+102}
+103// Add WAL archive cleaner plug-in
+104String plugins = 
conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
+105String cleanerClass = 
BackupLogCleaner.class.getCanonicalName();
+106if (!plugins.contains(cleanerClass)) 
{
+107  
conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + 
cleanerClass);
+108}
+109
+110String classes = 
conf.get(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY);
+111String masterProcedureClass = 
LogRollMasterProcedureManager.class.getName();
+112if (classes == null) {
+113  
conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, 
masterProcedureClass);
+114} else if 
(!classes.contains(masterProcedureClass)) {
+115  
conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, classes + ","
+116  + masterProcedureClass);
 117}
 118
 119if (LOG.isDebugEnabled()) {
 120  LOG.debug("Added log cleaner: " + 
cleanerClass + "\n" + "Added master procedure manager: "
 121  + masterProcedureClass);
 122}
-123
-124  }
-125
-126  /**
-127   * This method modifies the Region 
Server configuration in order to inject backup-related features
-128   * TESTs only.
-129   * @param conf configuration
-130   */
-131  @VisibleForTesting
-132  public static void 
decorateRegionServerConfiguration(Configuration conf) {
-133if (!isBackupEnabled(conf)) {
-134  return;
-135}
-136
-137String classes = 
conf.get(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY);
-138String regionProcedureClass = 
LogRollRegionServerProcedureManager.class.getName();
-139if (classes == null) {
-140  
conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, 
regionProcedureClass);
-141} else if 
(!classes.contains(regionProcedureClass)) {
-142  
conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, classes + ","
-143  + regionProcedureClass);
-144}
-145String coproc = 
conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY);
-146String regionObserverClass = 
BackupObserver.class.getName();
-147
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, (coproc == null ? "" : 
coproc + ",") +
-148regionObserverClass);
-149if (LOG.isDebugEnabled()) {
-150  LOG.debug("Added region procedure 
manager: " + regionProcedureClass +
-151". Added region observer: " + 
regionObserverClass);
-152}
-153  }
-154
-155  public static boolean 
isBackupEnabled(Configuration conf) {

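As the javadoc above says, decorateMasterConfiguration injects backup support by
appending plugin classes to the master's comma-separated plugin lists. A hedged sketch
of the equivalent explicit configuration (keys and class names as they appear in this
source; the helper method is illustrative):

import org.apache.hadoop.conf.Configuration;

public final class BackupConfSketch {
  static void enableBackupOnMaster(Configuration conf) {
    appendIfMissing(conf, "hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.backup.master.BackupLogCleaner");
    appendIfMissing(conf, "hbase.procedure.master.classes",
        "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager");
  }

  // Append a value to a comma-separated list property without clobbering it.
  static void appendIfMissing(Configuration conf, String key, String value) {
    String current = conf.get(key);
    if (current == null || current.isEmpty()) {
      conf.set(key, value);
    } else if (!current.contains(value)) {
      conf.set(key, current + "," + value);
    }
  }
}
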
[19/51] [partial] hbase-site git commit: Published site at .

2018-01-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/devapidocs/src-html/org/apache/hadoop/hbase/rest/MultiRowResource.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rest/MultiRowResource.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/rest/MultiRowResource.html
index 340a4ae..60a1376 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/rest/MultiRowResource.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/rest/MultiRowResource.html
@@ -35,100 +35,101 @@
 027import javax.ws.rs.core.Response;
 028import javax.ws.rs.core.UriInfo;
 029
-030import 
org.apache.yetus.audience.InterfaceAudience;
-031import org.slf4j.Logger;
-032import org.slf4j.LoggerFactory;
-033import org.apache.hadoop.hbase.Cell;
-034import 
org.apache.hadoop.hbase.CellUtil;
-035import 
org.apache.hadoop.hbase.rest.model.CellModel;
-036import 
org.apache.hadoop.hbase.rest.model.CellSetModel;
-037import 
org.apache.hadoop.hbase.rest.model.RowModel;
-038
-039@InterfaceAudience.Private
-040public class MultiRowResource extends 
ResourceBase implements Constants {
-041  private static final Logger LOG = 
LoggerFactory.getLogger(MultiRowResource.class);
-042
-043  TableResource tableResource;
-044  Integer versions = null;
-045  String[] columns = null;
-046
-047  /**
-048   * Constructor
-049   *
-050   * @param tableResource
-051   * @param versions
-052   * @throws java.io.IOException
-053   */
-054  public MultiRowResource(TableResource 
tableResource, String versions, String columnsStr)
-055  throws IOException {
-056super();
-057this.tableResource = tableResource;
-058
-059if (columnsStr != null && !columnsStr.equals("")) {
-060  this.columns = 
columnsStr.split(",");
-061}
-062
-063if (versions != null) {
-064  this.versions = 
Integer.valueOf(versions);
-065
-066}
-067  }
-068
-069  @GET
-070  @Produces({ MIMETYPE_XML, 
MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF })
-071  public Response get(final @Context 
UriInfo uriInfo) {
-072MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
-073
-074
servlet.getMetrics().incrementRequests(1);
-075try {
-076  CellSetModel model = new 
CellSetModel();
-077  for (String rk : 
params.get(ROW_KEYS_PARAM_NAME)) {
-078RowSpec rowSpec = new 
RowSpec(rk);
-079
-080if (this.versions != null) {
-081  
rowSpec.setMaxVersions(this.versions);
-082}
-083
-084if (this.columns != null) {
-085  for (int i = 0; i < this.columns.length; i++) {
-086
rowSpec.addColumn(this.columns[i].getBytes());
-087  }
-088}
-089
-090ResultGenerator generator =
-091  
ResultGenerator.fromRowSpec(this.tableResource.getName(), rowSpec, null,
-092
!params.containsKey(NOCACHE_PARAM_NAME));
-093Cell value = null;
-094RowModel rowModel = new 
RowModel(rk);
-095if (generator.hasNext()) {
-096  while ((value = 
generator.next()) != null) {
-097rowModel.addCell(new 
CellModel(CellUtil.cloneFamily(value), CellUtil
-098.cloneQualifier(value), 
value.getTimestamp(), CellUtil.cloneValue(value)));
-099  }
-100  model.addRow(rowModel);
-101} else {
-102  if (LOG.isTraceEnabled()) {
-103LOG.trace("The row : " + rk + 
" not found in the table.");
-104  }
-105}
-106  }
-107
-108  if (model.getRows().isEmpty()) {
-109  //If no rows found.
-110
servlet.getMetrics().incrementFailedGetRequests(1);
-111return 
Response.status(Response.Status.NOT_FOUND)
-112
.type(MIMETYPE_TEXT).entity("No rows found." + CRLF)
-113.build();
-114  } else {
-115
servlet.getMetrics().incrementSucessfulGetRequests(1);
-116return 
Response.ok(model).build();
-117  }
-118} catch (IOException e) {
-119  
servlet.getMetrics().incrementFailedGetRequests(1);
-120  return processException(e);
-121}
-122  }
-123}
+030import org.apache.hadoop.hbase.Cell;
+031import 
org.apache.hadoop.hbase.CellUtil;
+032import 
org.apache.hadoop.hbase.rest.model.CellModel;
+033import 
org.apache.hadoop.hbase.rest.model.CellSetModel;
+034import 
org.apache.hadoop.hbase.rest.model.RowModel;
+035import 
org.apache.hadoop.hbase.util.Bytes;
+036import 
org.apache.yetus.audience.InterfaceAudience;
+037import org.slf4j.Logger;
+038import org.slf4j.LoggerFactory;
+039
+040@InterfaceAudience.Private
+041public class MultiRowResource extends 
ResourceBase implements Constants {
+042  private static final Logger LOG = 
LoggerFactory.getLogger(MultiRowResource.class);
+043
+044  TableResource tableResource;
+045  Integer versions = null;
+046  String[] columns = null;
+047
+048  /**
+049   * Constructor
+050   *
+051   * @param tableResource
+052   * @param versions
+053   * @throws java.io.IOException
+054   */

[19/51] [partial] hbase-site git commit: Published site at .
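For orientation, MultiRowResource backs the REST multi-row GET endpoint, which takes
repeated row parameters and returns a CellSet (404 when no row matches). A hedged
client-side sketch using only the JDK; the host, port, table name and the /multiget
path are assumptions for illustration:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public final class MultiGetClientSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8080/mytable/multiget?row=row1&row=row2");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json"); // MIMETYPE_JSON
    if (conn.getResponseCode() == 200) {
      try (BufferedReader in = new BufferedReader(
          new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
        in.lines().forEach(System.out::println); // the CellSet model as JSON
      }
    } else {
      System.out.println("No rows found: HTTP " + conn.getResponseCode());
    }
  }
}
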

2018-01-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/RegionServerTracker.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/RegionServerTracker.html 
b/devapidocs/org/apache/hadoop/hbase/master/RegionServerTracker.html
index 9c3b029..4da4882 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/RegionServerTracker.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/RegionServerTracker.html
@@ -280,7 +280,7 @@ extends 
 
 regionServers
-private NavigableMap<ServerName,org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo> regionServers
+private final NavigableMap<ServerName,org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo> regionServers
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html 
b/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html
index 756eebd..0385115 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/ServerManager.html
@@ -824,7 +824,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 registerListener
-public void registerListener(ServerListener listener)
+public void registerListener(ServerListener listener)
 Add the listener to the notification list.
 
 Parameters:
@@ -838,7 +838,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 unregisterListener
-public boolean unregisterListener(ServerListener listener)
+public boolean unregisterListener(ServerListener listener)
 Remove the listener from the notification list.
 
 Parameters:
@@ -852,7 +852,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionServerStartup
-ServerName regionServerStartup(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request,
+ServerName regionServerStartup(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest request,
                InetAddress ia)
             throws IOException
 Let the server manager know a new regionserver has come 
online
@@ -873,7 +873,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 updateLastFlushedSequenceIds
-private void updateLastFlushedSequenceIds(ServerName sn,
+private void updateLastFlushedSequenceIds(ServerName sn,
                ServerLoad hsl)
 Updates last flushed sequence Ids for the regions on server 
sn
 
@@ -889,7 +889,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 regionServerReport
-public void regionServerReport(ServerName sn,
+public void regionServerReport(ServerName sn,
                ServerLoad sl)
 throws YouAreDeadException
 
@@ -904,7 +904,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 checkAndRecordNewServer
-boolean checkAndRecordNewServer(ServerName serverName,
+boolean checkAndRecordNewServer(ServerName serverName,
                ServerLoad sl)
 Checks whether a server of the same host and port already exists;
  if not, or if the existing one has a smaller start code, records it.
@@ -923,7 +923,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 checkClockSkew
-private void checkClockSkew(ServerName serverName,
+private void checkClockSkew(ServerName serverName,
                long serverCurrentTime)
             throws ClockOutOfSyncException
 Checks the clock skew between the server and the master. If the clock skew exceeds the
@@ -944,7 +944,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 checkIsDead
-private void checkIsDead(ServerName serverName,
+private void checkIsDead(ServerName serverName,
                String what)
   throws YouAreDeadException
 If this server is on the dead list, reject it with a 
YouAreDeadException.
@@ -965,7 +965,7 @@ extends 

[19/51] [partial] hbase-site git commit: Published site at .

2018-01-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
--
diff --git a/testapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html 
b/testapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
index 1636aa6..f79f186 100644
--- a/testapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
+++ b/testapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
@@ -86,811 +86,824 @@
 078   */
 079  public MiniHBaseCluster(Configuration 
conf, int numMasters, int numRegionServers)
 080  throws IOException, 
InterruptedException {
-081this(conf, numMasters, 
numRegionServers, null, null, null);
+081this(conf, numMasters, 
numRegionServers, null, null);
 082  }
 083
 084  /**
-085   * @param rsPorts Ports that 
RegionServer should use; pass ports if you want to test cluster
-086   *   restart where for sure the 
regionservers come up on same address+port (but
-087   *   just with different startcode); by 
default mini hbase clusters choose new
-088   *   arbitrary ports on each cluster 
start.
-089   * @throws IOException
-090   * @throws InterruptedException
-091   */
-092  public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers,
-093 List<Integer> rsPorts,
-094 Class<? extends HMaster> masterClass,
-095 Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass)
-096  throws IOException, 
InterruptedException {
-097super(conf);
-098conf.set(HConstants.MASTER_PORT, 
"0");
-099if 
(conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1) {
-100  
conf.set(HConstants.MASTER_INFO_PORT, "0");
-101}
-102
-103// Hadoop 2
-104
CompatibilityFactory.getInstance(MetricsAssertHelper.class).init();
-105
-106init(numMasters, numRegionServers, 
rsPorts, masterClass, regionserverClass);
-107this.initialClusterStatus = 
getClusterStatus();
-108  }
-109
-110  public Configuration getConfiguration() 
{
-111return this.conf;
-112  }
-113
-114  /**
-115   * Subclass so can get at protected 
methods (none at moment).  Also, creates
-116   * a FileSystem instance per 
instantiation.  Adds a shutdown own FileSystem
-117   * on the way out. Shuts down own 
Filesystem only, not All filesystems as
-118   * the FileSystem system exit hook 
does.
-119   */
-120  public static class 
MiniHBaseClusterRegionServer extends HRegionServer {
-121private Thread shutdownThread = 
null;
-122private User user = null;
-123/**
-124 * List of RegionServers killed so 
far. ServerName also comprises startCode of a server,
-125 * so any restarted instances of the 
same server will have different ServerName and will not
-126 * coincide with past dead ones. So 
there's no need to cleanup this list.
-127 */
-128static Set<ServerName> killedServers = new HashSet<>();
-129
-130public 
MiniHBaseClusterRegionServer(Configuration conf)
-131throws IOException, 
InterruptedException {
-132  super(conf);
-133  this.user = User.getCurrent();
-134}
-135
-136/*
-137 * @param c
-138 * @param currentfs We return this if 
we did not make a new one.
-139 * @param uniqueName Same name used 
to help identify the created fs.
-140 * @return A new fs instance if we 
are up on DistributeFileSystem.
-141 * @throws IOException
-142 */
-143
-144@Override
-145protected void 
handleReportForDutyResponse(
-146final RegionServerStartupResponse 
c) throws IOException {
-147  
super.handleReportForDutyResponse(c);
-148  // Run this thread to shutdown our 
filesystem on way out.
-149  this.shutdownThread = new 
SingleFileSystemShutdownThread(getFileSystem());
-150}
-151
-152@Override
-153public void run() {
-154  try {
-155this.user.runAs(new PrivilegedAction<Object>() {
-156  public Object run() {
-157runRegionServer();
-158return null;
-159  }
-160});
-161  } catch (Throwable t) {
-162LOG.error("Exception in run", 
t);
-163  } finally {
-164// Run this on the way out.
-165if (this.shutdownThread != null) 
{
-166  this.shutdownThread.start();
-167  
Threads.shutdown(this.shutdownThread, 3);
-168}
-169  }
-170}
-171
-172private void runRegionServer() {
-173  super.run();
-174}
-175
-176@Override
-177protected void kill() {
-178  
killedServers.add(getServerName());
-179  super.kill();
-180}
-181
-182@Override
-183public void abort(final String 
reason, final Throwable cause) {
-184  this.user.runAs(new PrivilegedAction<Object>() {
-185public Object run() {
-186  abortRegionServer(reason, 
cause);
-187  return null;
-188}
-189  });
-190}
-191
-192private void 

[19/51] [partial] hbase-site git commit: Published site at .
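A hedged sketch of the constructor documented above: bring up a mini cluster with
fixed region-server ports so a restart reuses the same address:port. In practice this
is usually driven through HBaseTestingUtility; the port numbers here are illustrative
and a working HDFS/ZooKeeper test setup is assumed.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MiniHBaseCluster;

public final class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    MiniHBaseCluster cluster = new MiniHBaseCluster(
        conf,
        1,                            // masters
        2,                            // region servers
        Arrays.asList(16201, 16202),  // fixed RS ports; null => arbitrary ports
        null,                         // default HMaster class
        null);                        // default region server class
    try {
      System.out.println("initial status: " + cluster.getClusterStatus());
    } finally {
      cluster.shutdown();
    }
  }
}
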

2018-01-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/TestCISleep.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestCISleep.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestCISleep.html
new file mode 100644
index 000..37187bc
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestCISleep.html
@@ -0,0 +1,394 @@
+TestCISleep (Apache HBase 3.0.0-SNAPSHOT Test API)
+org.apache.hadoop.hbase.client
+Class TestCISleep
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.client.AbstractTestCITimeout
+
+
+org.apache.hadoop.hbase.client.TestCISleep
+
+
+
+
+
+
+
+
+
+
+public class TestCISleep
+extends AbstractTestCITimeout
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
classorg.apache.hadoop.hbase.client.AbstractTestCITimeout
+AbstractTestCITimeout.SleepAndFailFirstTime,
 AbstractTestCITimeout.SleepCoprocessor
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+private static org.slf4j.Logger
+LOG
+
+
+private 
org.apache.hadoop.hbase.TableName
+tableName
+
+
+
+
+
+
+Fields inherited from classorg.apache.hadoop.hbase.client.AbstractTestCITimeout
+FAM_NAM,
 name,
 TEST_UTIL
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+TestCISleep()
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+void
+setUp()
+
+
+void
+testCallableSleep()
+
+
+void
+testRpcRetryingCallerSleep()
+Test starting from index 0 when RpcRetryingCaller calculates the backoff time.
+
+
+
+
+
+
+
+Methods inherited from classorg.apache.hadoop.hbase.client.AbstractTestCITimeout
+setUpBeforeClass,
 tearDownAfterClass
+
+
+
+
+
+Methods inherited from class java.lang.Object
+clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Field Detail
+
+
+
+
+
+LOG
+private static org.slf4j.Logger LOG
+
+
+
+
+
+
+
+tableName
+privateorg.apache.hadoop.hbase.TableName tableName
+

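testRpcRetryingCallerSleep exercises the retry backoff computation. A hedged sketch of
that computation with jitter omitted: the pause grows by a fixed multiplier table
indexed by the retry count, starting at index 0 (the off-by-one the test guards
against). The multiplier values follow HConstants.RETRY_BACKOFF.

public final class BackoffSketch {
  static final int[] RETRY_BACKOFF = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};

  static long pauseTime(long basePauseMs, int tries) {
    int index = Math.min(tries, RETRY_BACKOFF.length - 1); // cap at the last slot
    return basePauseMs * RETRY_BACKOFF[index];
  }

  public static void main(String[] args) {
    for (int tries = 0; tries < 5; tries++) {
      System.out.println("try " + tries + " -> " + pauseTime(100, tries) + " ms");
    }
  }
}
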
[19/51] [partial] hbase-site git commit: Published site at .

2018-01-11 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/integration.html
--
diff --git a/hbase-build-configuration/integration.html 
b/hbase-build-configuration/integration.html
index 0a415f7..e2dd2e2 100644
--- a/hbase-build-configuration/integration.html
+++ b/hbase-build-configuration/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  CI Management
 
@@ -126,7 +126,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/issue-tracking.html
--
diff --git a/hbase-build-configuration/issue-tracking.html 
b/hbase-build-configuration/issue-tracking.html
index 2604681..0062a19 100644
--- a/hbase-build-configuration/issue-tracking.html
+++ b/hbase-build-configuration/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Issue Management
 
@@ -123,7 +123,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/license.html
--
diff --git a/hbase-build-configuration/license.html 
b/hbase-build-configuration/license.html
index 6ae2a03..989d0f7 100644
--- a/hbase-build-configuration/license.html
+++ b/hbase-build-configuration/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Licenses
 
@@ -326,7 +326,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/mail-lists.html
--
diff --git a/hbase-build-configuration/mail-lists.html 
b/hbase-build-configuration/mail-lists.html
index b98183f..8e37d21 100644
--- a/hbase-build-configuration/mail-lists.html
+++ b/hbase-build-configuration/mail-lists.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Mailing 
Lists
 
@@ -176,7 +176,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/plugin-management.html
--
diff --git a/hbase-build-configuration/plugin-management.html 
b/hbase-build-configuration/plugin-management.html
index 1c1296b..34048dc 100644
--- a/hbase-build-configuration/plugin-management.html
+++ b/hbase-build-configuration/plugin-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Plugin 
Management
 
@@ -271,7 +271,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/hbase-build-configuration/plugins.html
--
diff --git a/hbase-build-configuration/plugins.html 
b/hbase-build-configuration/plugins.html
index 9f411cd..e4617dc 100644
--- a/hbase-build-configuration/plugins.html
+++ b/hbase-build-configuration/plugins.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Plugins
 
@@ -214,7 +214,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-01-10
+  Last Published: 
2018-01-11
 
 
 


[19/51] [partial] hbase-site git commit: Published site at .

2018-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
index b03d0d0..6b07909 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/MasterCoprocessorEnvironment.html
@@ -1363,35 +1363,24 @@
 
 
 void
-AccessController.postSetSplitOrMergeEnabled(ObserverContext<MasterCoprocessorEnvironment> ctx,
-                      boolean newValue,
-                      MasterSwitchType switchType)
-
-
-void
 AccessController.postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx)
 
-
+
 void
 AccessController.postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
          TableName tableName)
 
-
+
 void
 AccessController.preAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx,
          long procId)
 
-
+
 void
 AccessController.preAddReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
          String peerId,
          ReplicationPeerConfig peerConfig)
 
-
-void
-AccessController.preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
-         String name)
-
 
 void
 AccessController.preAssign(ObserverContext<MasterCoprocessorEnvironment> c,
@@ -1403,30 +1392,25 @@
 
 
 void
-AccessController.preBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
-         String groupName)
-
-
-void
 AccessController.preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
          boolean newValue)
 
-
+
 void
 AccessController.preClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
 
-
+
 void
 AccessController.preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
          SnapshotDescription snapshot,
          TableDescriptor hTableDescriptor)
 
-
+
 void
 AccessController.preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
          NamespaceDescriptor ns)
 
-
+
 void
 AccessController.preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
          TableDescriptor desc,
@@ -1434,146 +1418,127 @@
 Observer implementations
 
 
-
+
 void
 CoprocessorWhitelistMasterObserver.preCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
          TableDescriptor htd,
          RegionInfo[] regions)
 
-
+
 void
 AccessController.preDecommissionRegionServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
          List<ServerName> servers,
          boolean offload)
 
-
+
 void
 AccessController.preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
          String namespace)
 
-
+
 void
 AccessController.preDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
          SnapshotDescription snapshot)
 
-
+
 void
 AccessController.preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
          TableName tableName)
 
-
+
 void
 AccessController.preDisableReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
          String peerId)
 
-
+
 void
 AccessController.preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c,
          TableName tableName)
 
-
+
 void
 AccessController.preEnableReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
          String peerId)
 
-
+
 void
 AccessController.preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c,
          TableName tableName)
 
-
+
 void
 AccessController.preGetLocks(ObserverContext<MasterCoprocessorEnvironment> ctx)
 
-
+
 void
 AccessController.preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx,
          String
[19/51] [partial] hbase-site git commit: Published site at .

2018-01-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/master/ClusterStatusPublisher.MulticastPublisher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/ClusterStatusPublisher.MulticastPublisher.html
 
b/devapidocs/org/apache/hadoop/hbase/master/ClusterStatusPublisher.MulticastPublisher.html
index f2817b6..2610912 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/ClusterStatusPublisher.MulticastPublisher.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/ClusterStatusPublisher.MulticastPublisher.html
@@ -50,7 +50,7 @@ var activeTableTab = "activeTableTab";
 
 
 PrevClass
-NextClass
+NextClass
 
 
 Frames
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.LimitedPrivate(value="Configuration")
-public static class ClusterStatusPublisher.MulticastPublisher
+public static class ClusterStatusPublisher.MulticastPublisher
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 implements ClusterStatusPublisher.Publisher
 
@@ -141,7 +141,7 @@ implements 
 private static class
-ClusterStatusPublisher.MulticastPublisher.ClusterStatusEncoder
+ClusterStatusPublisher.MulticastPublisher.ClusterMetricsEncoder
 
 
 private static class
@@ -212,7 +212,7 @@ implements 
 void
-publish(ClusterStatus cs)
+publish(ClusterMetrics cs)
 
 
 
@@ -242,7 +242,7 @@ implements 
 
 channel
-private org.apache.hbase.thirdparty.io.netty.channel.socket.DatagramChannel channel
+private org.apache.hbase.thirdparty.io.netty.channel.socket.DatagramChannel channel
 
 
 
@@ -251,7 +251,7 @@ implements 
 
 group
-private final org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup group
+private final org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup group
 
 
 
@@ -268,7 +268,7 @@ implements 
 
 MulticastPublisher
-publicMulticastPublisher()
+publicMulticastPublisher()
 
 
 
@@ -285,7 +285,7 @@ implements 
 
 connect
-publicvoidconnect(org.apache.hadoop.conf.Configurationconf)
+publicvoidconnect(org.apache.hadoop.conf.Configurationconf)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
 Specified by:
@@ -295,16 +295,16 @@

 publish
-public void publish(ClusterStatus cs)
+public void publish(ClusterMetrics cs)

 Specified by:
 publish in interface ClusterStatusPublisher.Publisher

@@ -314,7 +314,7 @@

 close
 public void close()

 Specified by:
 close in interface Closeable

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/master/ClusterStatusPublisher.Publisher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/ClusterStatusPublisher.Publisher.html
 
b/devapidocs/org/apache/hadoop/hbase/master/ClusterStatusPublisher.Publisher.html
index e9838f8..d4a2c86 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/ClusterStatusPublisher.Publisher.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/ClusterStatusPublisher.Publisher.html
@@ -113,7 +113,7 @@

 public static interface ClusterStatusPublisher.Publisher
 extends Closeable

@@ -143,7 +143,7 @@

 void
-publish(ClusterStatus cs)
+publish(ClusterMetrics cs)

@@ -166,7 +166,7 @@

 connect
 void connect(org.apache.hadoop.conf.Configuration conf)
      throws IOException

 Throws:
@@ -174,13 +174,13 @@

 publish
-void publish(ClusterStatus cs)
+void publish(ClusterMetrics cs)

@@ -189,7 +189,7 @@

 close
 void close()

 Specified by:
 close in interface AutoCloseable
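The hunks above change the published payload type from ClusterStatus to ClusterMetrics while leaving the Publisher lifecycle intact: connect(Configuration) once, publish(ClusterMetrics) per snapshot, close() via Closeable. A minimal standalone sketch of that contract, assuming an hbase-client dependency; LoggingPublisher is a hypothetical name, and the real Publisher is a private nested interface, so this only mirrors its shape:

import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;

public class LoggingPublisher implements Closeable {
  private boolean connected;

  // connect() acquires resources; MulticastPublisher opens a netty
  // DatagramChannel here, this sketch only flips a flag.
  public void connect(Configuration conf) throws IOException {
    connected = true;
  }

  // publish() emits one cluster snapshot per call.
  public void publish(ClusterMetrics cs) {
    if (!connected) {
      throw new IllegalStateException("publish() before connect()");
    }
    System.out.println("cluster metrics: " + cs);
  }

  @Override
  public void close() {
    connected = false;
  }
}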

[19/51] [partial] hbase-site git commit: Published site at .

2018-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
index 5b3b750..a1f3f7e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.SplitTableRegionProcedureBiConsumer.html
@@ -97,3307 +97,3304 @@
 089import 
org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 090import 
org.apache.hbase.thirdparty.io.netty.util.Timeout;
 091import 
org.apache.hbase.thirdparty.io.netty.util.TimerTask;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-093import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-094import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-095import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
-096import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
-097import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-098import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-099import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-100import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-101import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-102import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-103import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-104import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-105import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-106import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
-107import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-108import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-109import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-110import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse;
-111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
-113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersResponse;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DecommissionRegionServersResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-133import 

[19/51] [partial] hbase-site git commit: Published site at .

2018-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.html 
b/devapidocs/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.html
index 096659e..f3985d2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.html
@@ -309,6 +309,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/IOException.ht
 
 
 
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/exceptions/DeserializationException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/exceptions/DeserializationException.html 
b/devapidocs/org/apache/hadoop/hbase/exceptions/DeserializationException.html
index 95cea2a..803efb5 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/exceptions/DeserializationException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/exceptions/DeserializationException.html
@@ -303,6 +303,6 @@
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.html 
b/devapidocs/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.html
index 53a42bb..e72bc5b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/exceptions/FailedSanityCheckException.html
@@ -353,6 +353,6 @@
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/exceptions/HBaseException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/exceptions/HBaseException.html 
b/devapidocs/org/apache/hadoop/hbase/exceptions/HBaseException.html
index f054f49..b7cd61a 100644
--- a/devapidocs/org/apache/hadoop/hbase/exceptions/HBaseException.html
+++ b/devapidocs/org/apache/hadoop/hbase/exceptions/HBaseException.html
@@ -303,6 +303,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Exception.ht
 
 
 
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/exceptions/IllegalArgumentIOException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/exceptions/IllegalArgumentIOException.html 
b/devapidocs/org/apache/hadoop/hbase/exceptions/IllegalArgumentIOException.html
index 6b82661..59da83e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/exceptions/IllegalArgumentIOException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/exceptions/IllegalArgumentIOException.html
@@ -303,6 +303,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/io/IOException.ht
 
 
 
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/exceptions/MergeRegionException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/exceptions/MergeRegionException.html 
b/devapidocs/org/apache/hadoop/hbase/exceptions/MergeRegionException.html
index 732208c..b17005d 100644
--- a/devapidocs/org/apache/hadoop/hbase/exceptions/MergeRegionException.html
+++ b/devapidocs/org/apache/hadoop/hbase/exceptions/MergeRegionException.html
@@ -342,6 +342,6 @@
-Copyright © 2007–2017 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
+Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All …

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
index d4e32ed..84aeff9 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
@@ -117,7 +117,7 @@

 static class TestAsyncProcess.MyAsyncProcess
 extends Object

@@ -381,7 +381,7 @@

 nbMultiResponse
 final AtomicInteger nbMultiResponse

@@ -390,7 +390,7 @@

 nbActions
 final AtomicInteger nbActions

@@ -399,7 +399,7 @@

 allReqs
 public List<org.apache.hadoop.hbase.client.AsyncRequestFuture> allReqs

@@ -408,7 +408,7 @@

 callsCt
 public AtomicInteger callsCt

@@ -417,7 +417,7 @@

 previousTimeout
 private long previousTimeout

@@ -426,7 +426,7 @@

 service
 final ExecutorService service

@@ -616,7 +616,7 @@

 MyAsyncProcess
 public MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
    org.apache.hadoop.conf.Configuration conf)

@@ -626,7 +626,7 @@

 MyAsyncProcess
 public MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
    org.apache.hadoop.conf.Configuration conf,
    AtomicInteger nbThreads)

@@ -637,7 +637,7 @@

 MyAsyncProcess
 public MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
    org.apache.hadoop.conf.Configuration conf,
    boolean useGlobalErrors)

@@ -656,7 +656,7 @@

 createAsyncRequestFuture

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.html
index 3d32aa0..f495ae6 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/DelegatingHBaseRpcController.html
@@ -109,7 +109,7 @@

 All Implemented Interfaces:
-CellScannable, HBaseRpcController, org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController
+CellScannable, HBaseRpcController, org.apache.hbase.thirdparty.com.google.protobuf.RpcController

@@ -224,7 +224,7 @@
 void
-notifyOnCancel(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<Object> callback)
+notifyOnCancel(org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback<Object> callback)
 A little different from the basic RpcController:
 You can register multiple callbacks to an HBaseRpcController.
@@ -235,7 +235,7 @@
 void
-notifyOnCancel(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<Object> callback,
+notifyOnCancel(org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback<Object> callback,
    HBaseRpcController.CancellationCallback action)
 If not cancelled, add the callback to cancellation callback list.

@@ -349,7 +349,7 @@
 reset()
 Specified by:
-reset in interface org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController
+reset in interface org.apache.hbase.thirdparty.com.google.protobuf.RpcController

@@ -362,7 +362,7 @@
 failed()
 Specified by:
-failed in interface org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController
+failed in interface org.apache.hbase.thirdparty.com.google.protobuf.RpcController

@@ -375,7 +375,7 @@
 String errorText()
 Specified by:
-errorText in interface org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController
+errorText in interface org.apache.hbase.thirdparty.com.google.protobuf.RpcController

@@ -388,7 +388,7 @@
 startCancel()
 Specified by:
-startCancel in interface org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController
+startCancel in interface org.apache.hbase.thirdparty.com.google.protobuf.RpcController

@@ -401,7 +401,7 @@
 setFailed(String reason)
 Specified by:
-setFailed in interface org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController
+setFailed in interface org.apache.hbase.thirdparty.com.google.protobuf.RpcController

@@ -414,18 +414,18 @@
 isCanceled()
 Specified by:
-isCanceled in interface org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController
+isCanceled in interface org.apache.hbase.thirdparty.com.google.protobuf.RpcController

 notifyOnCancel
-public void notifyOnCancel(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<Object> callback)
+public void notifyOnCancel(org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback<Object> callback)
 Description copied from interface: HBaseRpcController
 A little different from the basic RpcController:
 You can register multiple callbacks to an HBaseRpcController.
@@ -434,9 +434,9 @@
 Specified by:
 notifyOnCancel in interface HBaseRpcController
 Specified by:
-notifyOnCancel in interface org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController
+notifyOnCancel in interface org.apache.hbase.thirdparty.com.google.protobuf.RpcController

@@ -604,22 +604,22 @@

 notifyOnCancel
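Per the javadoc above, an HBaseRpcController differs from the basic RpcController in that several cancellation callbacks may be registered and all of them fire if the call is cancelled. A minimal sketch of registering two, assuming an hbase-client dependency; HBaseRpcController is a LimitedPrivate interface, so this is illustrative only:

import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;

public final class CancelWatch {
  private CancelWatch() {}

  // Both callbacks run on cancellation; registration order is an
  // implementation detail.
  public static void watch(HBaseRpcController controller) {
    controller.notifyOnCancel(new RpcCallback<Object>() {
      @Override
      public void run(Object unused) {
        System.out.println("RPC cancelled: first callback");
      }
    });
    controller.notifyOnCancel(new RpcCallback<Object>() {
      @Override
      public void run(Object unused) {
        System.out.println("RPC cancelled: second callback");
      }
    });
  }
}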

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
index 57edb86..34dd1aa 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
@@ -113,7 +113,7 @@

 static class ConnectionImplementation.ServerErrorTracker
 extends Object
 The record of errors for servers.

@@ -257,7 +257,7 @@

 errorsByServer
 private final ConcurrentMap<ServerName,ConnectionImplementation.ServerErrorTracker.ServerErrors> errorsByServer

@@ -266,7 +266,7 @@

 canRetryUntil
 private final long canRetryUntil

@@ -275,7 +275,7 @@

 maxTries
 private final int maxTries

@@ -284,7 +284,7 @@

 startTrackingTime
 private final long startTrackingTime

@@ -301,7 +301,7 @@

 ServerErrorTracker
 public ServerErrorTracker(long timeout,
    int maxTries)
 Constructor

@@ -325,7 +325,7 @@

 canTryMore
 boolean canTryMore(int numAttempt)
 We stop to retry when we have exhausted BOTH the number of tries and the time allocated.

 Parameters:
@@ -339,7 +339,7 @@

 calculateBackoffTime
 long calculateBackoffTime(ServerName server,
    long basePause)
 Calculates the back-off time for a retrying request to a particular server.

@@ -357,7 +357,7 @@

 reportServerError
 void reportServerError(ServerName server)
 Reports that there was an error on the server to do whatever bean-counting necessary.

 Parameters:
@@ -371,7 +371,7 @@

 getStartTrackingTime
 long getStartTrackingTime()
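The methods above encode two rules: retries stop only once BOTH the attempt budget and the time budget are spent, and back-off grows per server with its error count. A minimal standalone sketch of that logic, with assumed names and an assumed exponential growth factor (the real class keys on ServerName and richer ServerErrors records):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class SimpleErrorTracker {
  private final ConcurrentMap<String, Integer> errorsByServer = new ConcurrentHashMap<>();
  private final long canRetryUntil;
  private final int maxTries;

  public SimpleErrorTracker(long timeoutMs, int maxTries) {
    this.canRetryUntil = System.currentTimeMillis() + timeoutMs;
    this.maxTries = maxTries;
  }

  // Keep retrying while EITHER budget remains; stop once both are spent.
  public boolean canTryMore(int numAttempt) {
    return numAttempt < maxTries || System.currentTimeMillis() < canRetryUntil;
  }

  public void reportServerError(String server) {
    errorsByServer.merge(server, 1, Integer::sum);
  }

  // Exponential back-off in the number of errors seen for this server,
  // capped so the shift cannot overflow.
  public long calculateBackoffTime(String server, long basePauseMs) {
    int errors = errorsByServer.getOrDefault(server, 0);
    return basePauseMs * (1L << Math.min(errors, 10));
  }
}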
 
 
 



[19/51] [partial] hbase-site git commit: Published site at .

2017-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
index 242f82e..d6a8ea3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/IndividualBytesFieldCell.html
@@ -26,319 +26,286 @@
 018
 019package org.apache.hadoop.hbase;
 020
-021import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
-022
-023import java.util.ArrayList;
-024import java.util.Iterator;
-025import java.util.List;
-026import java.util.Optional;
-027
-028import 
org.apache.commons.lang3.ArrayUtils;
-029import 
org.apache.hadoop.hbase.util.Bytes;
-030import 
org.apache.hadoop.hbase.util.ClassSize;
-031import 
org.apache.yetus.audience.InterfaceAudience;
-032
-033@InterfaceAudience.Private
-034public class IndividualBytesFieldCell 
implements ExtendedCell {
-035
-036  private static final long 
FIXED_OVERHEAD = ClassSize.align(  // do alignment(padding gap)
-037ClassSize.OBJECT  // 
object header
-038  + KeyValue.TIMESTAMP_TYPE_SIZE  // 
timestamp and type
-039  + Bytes.SIZEOF_LONG // 
sequence id
-040  + 5 * ClassSize.REFERENCE); // 
references to all byte arrays: row, family, qualifier, value, tags
-041
-042  // The following fields are backed by 
individual byte arrays
-043  private final byte[] row;
-044  private final int rOffset;
-045  private final int rLength;
-046  private final byte[] family;
-047  private final int fOffset;
-048  private final int fLength;
-049  private final byte[] qualifier;
-050  private final int qOffset;
-051  private final int qLength;
-052  private final byte[] value;
-053  private final int vOffset;
-054  private final int vLength;
-055  private final byte[] tags;  // A byte 
array, rather than an array of org.apache.hadoop.hbase.Tag
-056  private final int tagsOffset;
-057  private final int tagsLength;
-058
-059  // Other fields
-060  private long timestamp;
-061  private final byte type;  // A byte, 
rather than org.apache.hadoop.hbase.KeyValue.Type
-062  private long seqId;
-063
-064  public IndividualBytesFieldCell(byte[] 
row, byte[] family, byte[] qualifier,
-065  long 
timestamp, KeyValue.Type type,  byte[] value) {
-066this(row, family, qualifier, 
timestamp, type, 0L /* sequence id */, value, null /* tags */);
-067  }
-068
-069  public IndividualBytesFieldCell(byte[] 
row, byte[] family, byte[] qualifier,
-070  long 
timestamp, KeyValue.Type type, long seqId, byte[] value, byte[] tags) {
-071this(row, 0, 
ArrayUtils.getLength(row),
-072family, 0, 
ArrayUtils.getLength(family),
-073qualifier, 0, 
ArrayUtils.getLength(qualifier),
-074timestamp, type, seqId,
-075value, 0, 
ArrayUtils.getLength(value),
-076tags, 0, 
ArrayUtils.getLength(tags));
-077  }
+021import 
org.apache.commons.lang3.ArrayUtils;
+022import 
org.apache.hadoop.hbase.util.Bytes;
+023import 
org.apache.hadoop.hbase.util.ClassSize;
+024import 
org.apache.yetus.audience.InterfaceAudience;
+025
+026@InterfaceAudience.Private
+027public class IndividualBytesFieldCell 
implements ExtendedCell {
+028
+029  private static final long 
FIXED_OVERHEAD = ClassSize.align(  // do alignment(padding gap)
+030ClassSize.OBJECT  // 
object header
+031  + KeyValue.TIMESTAMP_TYPE_SIZE  // 
timestamp and type
+032  + Bytes.SIZEOF_LONG // 
sequence id
+033  + 5 * ClassSize.REFERENCE); // 
references to all byte arrays: row, family, qualifier, value, tags
+034
+035  // The following fields are backed by 
individual byte arrays
+036  private final byte[] row;
+037  private final int rOffset;
+038  private final int rLength;
+039  private final byte[] family;
+040  private final int fOffset;
+041  private final int fLength;
+042  private final byte[] qualifier;
+043  private final int qOffset;
+044  private final int qLength;
+045  private final byte[] value;
+046  private final int vOffset;
+047  private final int vLength;
+048  private final byte[] tags;  // A byte 
array, rather than an array of org.apache.hadoop.hbase.Tag
+049  private final int tagsOffset;
+050  private final int tagsLength;
+051
+052  // Other fields
+053  private long timestamp;
+054  private final byte type;  // A byte, 
rather than org.apache.hadoop.hbase.KeyValue.Type
+055  private long seqId;
+056
+057  public IndividualBytesFieldCell(byte[] 
row, byte[] family, byte[] qualifier,
+058  long 
timestamp, KeyValue.Type type,  byte[] value) {
+059this(row, family, qualifier, 
timestamp, type, 0L /* sequence id */, value, 
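A short usage sketch of the six-argument constructor shown in the listing above, which keeps row, family, qualifier, and value each in its own byte array. IndividualBytesFieldCell is @InterfaceAudience.Private, so this is illustrative rather than supported public API:

import org.apache.hadoop.hbase.IndividualBytesFieldCell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public final class CellExample {
  public static void main(String[] args) {
    // Sequence id defaults to 0L and tags to null via the shorter constructor.
    IndividualBytesFieldCell cell = new IndividualBytesFieldCell(
        Bytes.toBytes("row1"),
        Bytes.toBytes("cf"),
        Bytes.toBytes("q"),
        System.currentTimeMillis(),
        KeyValue.Type.Put,
        Bytes.toBytes("value"));
    System.out.println(cell.getTimestamp());
  }
}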

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
index 6aab17f..172f50c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.UnsafeComparer.html
@@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab";
 
 
 
 static enum Bytes.LexicographicalComparerHolder.UnsafeComparer
 extends Enum<Bytes.LexicographicalComparerHolder.UnsafeComparer>
 implements Bytes.Comparer<byte[]>
 
@@ -238,7 +238,7 @@ the order they are declared.
 
 
 INSTANCE
 public static final Bytes.LexicographicalComparerHolder.UnsafeComparer INSTANCE
 
 
 
@@ -255,7 +255,7 @@ the order they are declared.
 
 
 theUnsafe
 static final sun.misc.Unsafe theUnsafe
 
 
 
@@ -272,7 +272,7 @@ the order they are declared.
 
 
 values
 public static Bytes.LexicographicalComparerHolder.UnsafeComparer[] values()
 Returns an array containing the constants of this enum 
type, in
 the order they are declared.  This method may be used to iterate
 over the constants as follows:
@@ -292,7 +292,7 @@ for (Bytes.LexicographicalComparerHolder.UnsafeComparer c : 
Bytes.Lexicographica
 
 
 valueOf
 public static Bytes.LexicographicalComparerHolder.UnsafeComparer valueOf(String name)
 Returns the enum constant of this type with the specified 
name.
 The string must match exactly an identifier used to declare an
 enum constant in this type.  (Extraneous whitespace characters are 
@@ -314,7 +314,7 @@ not permitted.)
 
 
 compareTo
 public int compareTo(byte[] buffer1,
    int offset1,
    int length1,
    byte[] buffer2,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
index bafa964..81432ff 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/util/Bytes.LexicographicalComparerHolder.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
 static class Bytes.LexicographicalComparerHolder
 extends Object
 Provides a lexicographical comparer implementation; either a Java
 implementation or a faster implementation based on Unsafe.
@@ -236,7 +236,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 UNSAFE_COMPARER_NAME
 static final String UNSAFE_COMPARER_NAME
 
 
 
@@ -245,7 +245,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 BEST_COMPARER
 static final Bytes.Comparer<byte[]> BEST_COMPARER
 
 
 
@@ -262,7 +262,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LexicographicalComparerHolder
 LexicographicalComparerHolder()
 
 
 
@@ -279,7 +279,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getBestComparer
 static Bytes.Comparer<byte[]> getBestComparer()
 Returns the Unsafe-using Comparer, or falls back to the 
pure-Java
  implementation if unable to do 
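The holder above is internal plumbing that picks between the Unsafe-based and pure-Java comparers; the public entry point that benefits is Bytes.compareTo, sketched here (unsigned lexicographic order, the order HBase uses for keys):

import org.apache.hadoop.hbase.util.Bytes;

public final class CompareExample {
  public static void main(String[] args) {
    byte[] a = Bytes.toBytes("apple");
    byte[] b = Bytes.toBytes("banana");
    // Negative result: "apple" sorts before "banana" in HBase key order.
    System.out.println(Bytes.compareTo(a, b));
  }
}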

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
index 6fecbc9..2accda0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.EnableTableFuture.html
@@ -34,4140 +34,4141 @@
 026import 
java.nio.charset.StandardCharsets;
 027import java.util.ArrayList;
 028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.EnumSet;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.LinkedList;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Set;
-037import java.util.concurrent.Callable;
-038import 
java.util.concurrent.ExecutionException;
-039import java.util.concurrent.Future;
-040import java.util.concurrent.TimeUnit;
-041import 
java.util.concurrent.TimeoutException;
-042import 
java.util.concurrent.atomic.AtomicInteger;
-043import 
java.util.concurrent.atomic.AtomicReference;
-044import java.util.regex.Pattern;
-045import java.util.stream.Collectors;
-046import java.util.stream.Stream;
-047import 
org.apache.hadoop.conf.Configuration;
-048import 
org.apache.hadoop.hbase.Abortable;
-049import 
org.apache.hadoop.hbase.CacheEvictionStats;
-050import 
org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import 
org.apache.hadoop.hbase.ClusterStatus;
-053import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-054import 
org.apache.hadoop.hbase.HBaseConfiguration;
-055import 
org.apache.hadoop.hbase.HConstants;
-056import 
org.apache.hadoop.hbase.HRegionInfo;
-057import 
org.apache.hadoop.hbase.HRegionLocation;
-058import 
org.apache.hadoop.hbase.HTableDescriptor;
-059import 
org.apache.hadoop.hbase.MasterNotRunningException;
-060import 
org.apache.hadoop.hbase.MetaTableAccessor;
-061import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-062import 
org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import 
org.apache.hadoop.hbase.NotServingRegionException;
-064import 
org.apache.hadoop.hbase.RegionLoad;
-065import 
org.apache.hadoop.hbase.RegionLocations;
-066import 
org.apache.hadoop.hbase.ServerName;
-067import 
org.apache.hadoop.hbase.TableExistsException;
-068import 
org.apache.hadoop.hbase.TableName;
-069import 
org.apache.hadoop.hbase.TableNotDisabledException;
-070import 
org.apache.hadoop.hbase.TableNotFoundException;
-071import 
org.apache.hadoop.hbase.UnknownRegionException;
-072import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-073import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-074import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-075import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-076import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-077import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-078import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-079import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-080import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-081import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-082import 
org.apache.hadoop.hbase.quotas.QuotaRetriever;
-083import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-084import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-085import 
org.apache.hadoop.hbase.replication.ReplicationException;
-086import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-087import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-088import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-090import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-091import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-092import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-093import 
org.apache.hadoop.hbase.util.Addressing;
-094import 
org.apache.hadoop.hbase.util.Bytes;
-095import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-096import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-097import 
org.apache.hadoop.hbase.util.Pair;
-098import 
org.apache.hadoop.ipc.RemoteException;
-099import 
org.apache.hadoop.util.StringUtils;
-100import 
org.apache.yetus.audience.InterfaceAudience;
-101import 
org.apache.yetus.audience.InterfaceStability;
-102import org.slf4j.Logger;
-103import org.slf4j.LoggerFactory;
-104
-105import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-106import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-107import 

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/apidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationLoadSource.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationLoadSource.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationLoadSource.html
new file mode 100644
index 000..44c13a8
--- /dev/null
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/replication/ReplicationLoadSource.html
@@ -0,0 +1,127 @@
+Source code
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one or more contributor license
+003 * agreements. See the NOTICE file 
distributed with this work for additional information regarding
+004 * copyright ownership. The ASF licenses 
this file to you under the Apache License, Version 2.0 (the
+005 * "License"); you may not use this file 
except in compliance with the License. You may obtain a
+006 * copy of the License at 
http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+007 * law or agreed to in writing, software 
distributed under the License is distributed on an "AS IS"
+008 * BASIS, WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied. See the License
+009 * for the specific language governing 
permissions and limitations under the License.
+010 */
+011package 
org.apache.hadoop.hbase.replication;
+012
+013import 
org.apache.yetus.audience.InterfaceAudience;
+014
+015/**
+016 * A HBase ReplicationLoad to present 
MetricsSource information
+017 */
+018@InterfaceAudience.Public
+019public class ReplicationLoadSource {
+020  private final String peerID;
+021  private final long 
ageOfLastShippedOp;
+022  private final int sizeOfLogQueue;
+023  private final long 
timeStampOfLastShippedOp;
+024  private final long replicationLag;
+025
+026  // TODO: add the builder for this 
class
+027  @InterfaceAudience.Private
+028  public ReplicationLoadSource(String id, 
long age, int size, long timeStamp, long lag) {
+029this.peerID = id;
+030this.ageOfLastShippedOp = age;
+031this.sizeOfLogQueue = size;
+032this.timeStampOfLastShippedOp = 
timeStamp;
+033this.replicationLag = lag;
+034  }
+035
+036  public String getPeerID() {
+037return this.peerID;
+038  }
+039
+040  public long getAgeOfLastShippedOp() {
+041return this.ageOfLastShippedOp;
+042  }
+043
+044  public long getSizeOfLogQueue() {
+045return this.sizeOfLogQueue;
+046  }
+047
+048  public long 
getTimeStampOfLastShippedOp() {
+049return 
this.timeStampOfLastShippedOp;
+050  }
+051
+052  public long getReplicationLag() {
+053return this.replicationLag;
+054  }
+055}
+
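A brief usage sketch of the value class added above: build one snapshot and read it back through the getters. The constructor is @InterfaceAudience.Private (the listing notes a builder is still TODO), so this is illustrative only:

import org.apache.hadoop.hbase.replication.ReplicationLoadSource;

public final class ReplicationLoadExample {
  public static void main(String[] args) {
    ReplicationLoadSource source = new ReplicationLoadSource(
        "peer-1",                    // peer id
        1200L,                       // age of last shipped op (ms)
        3,                           // size of log queue
        System.currentTimeMillis(),  // timestamp of last shipped op
        1200L);                      // replication lag (ms)
    System.out.println(source.getPeerID() + " lag=" + source.getReplicationLag());
  }
}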



[19/51] [partial] hbase-site git commit: Published site at .

2017-12-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 2e1d4e8..d5a2465 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -286,10 +286,10 @@
 Warnings
 Errors
 
-3457  0  0  19536
+3458  0  0  19378
 
 Files
 
@@ -379,16 +379,6 @@
 0
 4
 
-org/apache/hadoop/hbase/CompatibilityFactory.java
-0
-0
-1
-
-org/apache/hadoop/hbase/CompatibilitySingletonFactory.java
-0
-0
-1
-
 org/apache/hadoop/hbase/CompoundConfiguration.java
 0
 0
@@ -432,7 +422,7 @@
 org/apache/hadoop/hbase/GenericTestUtils.java
 0
 0
-3
+2
 
 org/apache/hadoop/hbase/HBaseCluster.java
 0
@@ -442,7 +432,7 @@
 org/apache/hadoop/hbase/HBaseClusterManager.java
 0
 0
-3
+2
 
 org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
 0
@@ -462,7 +452,7 @@
 org/apache/hadoop/hbase/HBaseTestingUtility.java
 0
 0
-266
+267
 
 org/apache/hadoop/hbase/HColumnDescriptor.java
 0
@@ -852,7 +842,7 @@
 org/apache/hadoop/hbase/TestKeyValue.java
 0
 0
-2
+3
 
 org/apache/hadoop/hbase/TestLocalHBaseCluster.java
 0
@@ -967,7 +957,7 @@
 org/apache/hadoop/hbase/ZKNamespaceManager.java
 0
 0
-5
+1
 
 org/apache/hadoop/hbase/ZNodeClearer.java
 0
@@ -984,942 +974,932 @@
 0
 1
 
-org/apache/hadoop/hbase/backup/BackupDriver.java
-0
-0
-1
-
 org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/backup/BackupInfo.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/BackupMergeJob.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/BackupTableInfo.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/FailedArchiveException.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/HBackupFileSystem.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/HFileArchiver.java
 0
 0
 19
-
+
 org/apache/hadoop/hbase/backup/LogUtils.java
 0
 0
-2
-
+1
+
 org/apache/hadoop/hbase/backup/RestoreDriver.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/backup/RestoreJob.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/RestoreRequest.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/TestBackupBase.java
 0
 0
 13
-
+
 org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/backup/TestBackupDelete.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java
 0
 0
 16
-
+
 org/apache/hadoop/hbase/backup/TestBackupDescribe.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/backup/TestFullBackupSet.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/TestFullRestore.java
 0
 0
 11
-
+
 org/apache/hadoop/hbase/backup/TestHFileArchiving.java
 0
 0
 13
-
+
 org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/backup/TestRemoteBackup.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/backup/TestRemoteRestore.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java
 0
 0
 1
-
+
 org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
 0
 0
 4
-
+
 org/apache/hadoop/hbase/backup/example/LongTermArchivingHFileCleaner.java
 0
 0
 5
-
+
 org/apache/hadoop/hbase/backup/example/TableHFileArchiveTracker.java
 0
 0
 7
-
+
 org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
 0
 0
 9
-
+
 org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
 0
 0
 15
-
+
 org/apache/hadoop/hbase/backup/impl/BackupCommands.java
 0
 0
 57
-
+
 org/apache/hadoop/hbase/backup/impl/BackupManager.java
 0
 0
-4
-
+3
+
 org/apache/hadoop/hbase/backup/impl/BackupManifest.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
 0
 0
-22
-
+21
+
 org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
 0
 0
 3
-
+
 org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
 0
 0
-5
-
+4
+
 org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
 0
 0
 10
-
+
 org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
 0
 0
 2
-
+
 org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
 0
 0
 13
-
+
 

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
index 40d4a2d..50a85d6 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
@@ -30,881 +30,882 @@
 022import java.lang.reflect.Method;
 023import java.nio.ByteBuffer;
 024import 
java.nio.charset.CharacterCodingException;
-025import java.util.ArrayList;
-026import java.util.Collections;
-027import java.util.EmptyStackException;
-028import java.util.HashMap;
-029import java.util.Map;
-030import java.util.Set;
-031import java.util.Stack;
-032
-033import org.apache.commons.logging.Log;
-034import 
org.apache.commons.logging.LogFactory;
-035import 
org.apache.hadoop.hbase.CompareOperator;
-036import 
org.apache.yetus.audience.InterfaceAudience;
-037import 
org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
-038import 
org.apache.hadoop.hbase.util.Bytes;
-039
-040/**
-041 * This class allows a user to specify a filter via a string
-042 * The string is parsed using the methods of this class and
-043 * a filter object is constructed. This filter object is then wrapped
-044 * in a scanner object which is then returned
-045 * <p>
-046 * This class addresses the HBASE-4168 JIRA. More documentation on this
-047 * Filter Language can be found at: https://issues.apache.org/jira/browse/HBASE-4176
-048 */
-049@InterfaceAudience.Public
-050public class ParseFilter {
-051  private static final Log LOG = LogFactory.getLog(ParseFilter.class);
-052
-053  private static HashMap<ByteBuffer, Integer> operatorPrecedenceHashMap;
-054  private static HashMap<String, String> filterHashMap;
-055
-056  static {
-057// Registers all the filter supported 
by the Filter Language
-058filterHashMap = new 
HashMap();
-059filterHashMap.put("KeyOnlyFilter", 
ParseConstants.FILTER_PACKAGE + "." +
-060  "KeyOnlyFilter");
-061
filterHashMap.put("FirstKeyOnlyFilter", ParseConstants.FILTER_PACKAGE + "." +
-062  
"FirstKeyOnlyFilter");
-063filterHashMap.put("PrefixFilter", 
ParseConstants.FILTER_PACKAGE + "." +
-064  "PrefixFilter");
-065
filterHashMap.put("ColumnPrefixFilter", ParseConstants.FILTER_PACKAGE + "." +
-066  
"ColumnPrefixFilter");
-067
filterHashMap.put("MultipleColumnPrefixFilter", ParseConstants.FILTER_PACKAGE + 
"." +
-068  
"MultipleColumnPrefixFilter");
-069
filterHashMap.put("ColumnCountGetFilter", ParseConstants.FILTER_PACKAGE + "." 
+
-070  
"ColumnCountGetFilter");
-071filterHashMap.put("PageFilter", 
ParseConstants.FILTER_PACKAGE + "." +
-072  "PageFilter");
-073
filterHashMap.put("ColumnPaginationFilter", ParseConstants.FILTER_PACKAGE + "." 
+
-074  
"ColumnPaginationFilter");
-075
filterHashMap.put("InclusiveStopFilter", ParseConstants.FILTER_PACKAGE + "." 
+
-076  
"InclusiveStopFilter");
-077filterHashMap.put("TimestampsFilter", 
ParseConstants.FILTER_PACKAGE + "." +
-078  
"TimestampsFilter");
-079filterHashMap.put("RowFilter", 
ParseConstants.FILTER_PACKAGE + "." +
-080  "RowFilter");
-081filterHashMap.put("FamilyFilter", 
ParseConstants.FILTER_PACKAGE + "." +
-082  "FamilyFilter");
-083filterHashMap.put("QualifierFilter", 
ParseConstants.FILTER_PACKAGE + "." +
-084  
"QualifierFilter");
-085filterHashMap.put("ValueFilter", 
ParseConstants.FILTER_PACKAGE + "." +
-086  "ValueFilter");
-087
filterHashMap.put("ColumnRangeFilter", ParseConstants.FILTER_PACKAGE + "." +
-088  
"ColumnRangeFilter");
-089
filterHashMap.put("SingleColumnValueFilter", ParseConstants.FILTER_PACKAGE + 
"." +
-090  
"SingleColumnValueFilter");
-091
filterHashMap.put("SingleColumnValueExcludeFilter", 
ParseConstants.FILTER_PACKAGE + "." +
-092  
"SingleColumnValueExcludeFilter");
-093
filterHashMap.put("DependentColumnFilter", ParseConstants.FILTER_PACKAGE + "." 
+
-094  
"DependentColumnFilter");
-095
-096// Creates the 
operatorPrecedenceHashMap
-097operatorPrecedenceHashMap = new 
HashMap();
-098
operatorPrecedenceHashMap.put(ParseConstants.SKIP_BUFFER, 1);
-099
operatorPrecedenceHashMap.put(ParseConstants.WHILE_BUFFER, 1);
-100
operatorPrecedenceHashMap.put(ParseConstants.AND_BUFFER, 2);
-101
operatorPrecedenceHashMap.put(ParseConstants.OR_BUFFER, 3);
-102  }
-103
-104  /**
-105   * Parses the filterString and 
constructs a filter using it
-106   * 

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
index f1a2443..a469e93 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.TableVisitorBase.html
@@ -1350,415 +1350,415 @@
 1342    return delete;
 1343  }
 1344
-1345  public static Put makeBarrierPut(byte[] encodedRegionName, long seq, byte[] tableName) {
-1346    byte[] seqBytes = Bytes.toBytes(seq);
-1347    return new Put(encodedRegionName)
-1348        .addImmutable(HConstants.REPLICATION_BARRIER_FAMILY, seqBytes, seqBytes)
-1349        .addImmutable(HConstants.REPLICATION_META_FAMILY, tableNameCq, tableName);
-1350  }
-1351
-1352
-1353  public static Put makeDaughterPut(byte[] encodedRegionName, byte[] value) {
-1354    return new Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1355        daughterNameCq, value);
-1356  }
-1357
-1358  public static Put makeParentPut(byte[] encodedRegionName, byte[] value) {
-1359    return new Put(encodedRegionName).addImmutable(HConstants.REPLICATION_META_FAMILY,
-1360        parentNameCq, value);
-1361  }
-1362
-1363  /**
-1364   * Adds split daughters to the Put
-1365   */
-1366  public static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB) {
-1367    if (splitA != null) {
-1368      put.addImmutable(
-1369        HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, RegionInfo.toByteArray(splitA));
-1370    }
-1371    if (splitB != null) {
-1372      put.addImmutable(
-1373        HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, RegionInfo.toByteArray(splitB));
-1374    }
-1375    return put;
-1376  }
-1377
-1378  /**
-1379   * Put the passed <code>puts</code> to the <code>hbase:meta</code> table.
-1380   * Non-atomic for multi puts.
-1381   * @param connection connection we're using
-1382   * @param puts Put to add to hbase:meta
-1383   * @throws IOException
-1384   */
-1385  public static void putToMetaTable(final Connection connection, final Put... puts)
-1386    throws IOException {
-1387    put(getMetaHTable(connection), Arrays.asList(puts));
-1388  }
-1389
-1390  /**
-1391   * @param t Table to use (will be closed when done).
-1392   * @param puts puts to make
-1393   * @throws IOException
-1394   */
-1395  private static void put(final Table t, final List<Put> puts) throws IOException {
-1396    try {
-1397      if (METALOG.isDebugEnabled()) {
-1398        METALOG.debug(mutationsToString(puts));
-1399      }
-1400      t.put(puts);
-1401    } finally {
-1402      t.close();
-1403    }
-1404  }
-1405
-1406  /**
-1407   * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
-1408   * @param connection connection we're using
-1409   * @param ps Put to add to hbase:meta
-1410   * @throws IOException
-1411   */
-1412  public static void putsToMetaTable(final Connection connection, final List<Put> ps)
-1413    throws IOException {
-1414    Table t = getMetaHTable(connection);
-1415    try {
-1416      if (METALOG.isDebugEnabled()) {
-1417        METALOG.debug(mutationsToString(ps));
-1418      }
-1419      t.put(ps);
-1420    } finally {
-1421      t.close();
-1422    }
-1423  }
-1424
-1425  /**
-1426   * Delete the passed 
coded/code from the codehbase:meta/code 
table.
-1427   * @param connection connection we're 
using
-1428   * @param d Delete to add to 
hbase:meta
-1429   * @throws IOException
-1430   */
-1431  static void deleteFromMetaTable(final 
Connection connection, final Delete d)
-1432throws IOException {
-1433ListDelete dels = new 
ArrayList(1);
-1434dels.add(d);
-1435deleteFromMetaTable(connection, 
dels);
-1436  }
-1437
-1438  /**
-1439   * Delete the passed 
codedeletes/code from the codehbase:meta/code 
table.
-1440   * @param connection connection we're 
using
-1441   * @param deletes Deletes to add to 
hbase:meta  This list should support #remove.
-1442   * @throws IOException
-1443   */
-1444  public static void 
deleteFromMetaTable(final Connection connection, final ListDelete 
deletes)
-1445throws IOException {
-1446Table t = 
getMetaHTable(connection);
-1447try {
-1448  if (METALOG.isDebugEnabled()) {
-1449
METALOG.debug(mutationsToString(deletes));
-1450  }
-1451  t.delete(deletes);
-1452} finally {
-1453  t.close();
-1454}
-1455  }
-1456
-1457  /**
-1458   * Deletes some replica columns 
corresponding to replicas for the passed rows
-1459   * @param metaRows rows in 
hbase:meta
-1460   * @param replicaIndexToDeleteFrom the 
replica ID we would start deleting from
-1461   * @param 
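
The removed helpers above all follow one pattern: build Put/Delete mutations against hbase:meta, then apply them through a Table obtained from getMetaHTable and closed in a finally block. A minimal caller-side sketch of how this API was used, assuming only the signatures visible in the hunk (the class name, connection, and values below are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaPutSketch {
      // Writes a replication barrier and a daughter pointer for one region.
      // putToMetaTable is non-atomic across the two Puts, as its javadoc notes.
      static void recordBarrier(Connection conn, byte[] encodedRegionName) throws IOException {
        Put barrier = MetaTableAccessor.makeBarrierPut(encodedRegionName, 42L,
            Bytes.toBytes("demoTable"));
        Put daughter = MetaTableAccessor.makeDaughterPut(encodedRegionName,
            Bytes.toBytes("demoDaughterRegion"));
        MetaTableAccessor.putToMetaTable(conn, barrier, daughter);
      }
    }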

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
index 7c59e27..c904c56 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.WaitForStateCallable.html
@@ -119,4048 +119,4054 @@
 111import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 112import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 113import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
-114import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-115import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-116import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-117import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-118import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-119import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-120import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-121import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-122import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-123import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-124import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-125import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-126import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-127import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-128import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-146import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-155import 

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
index b0d2fef..84e7c56 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Delete.html
@@ -408,8 +408,9 @@ service.
   byte[] qualifier,
   byte[] value,
   Delete delete)
-Atomically checks if a row/family/qualifier value matches the expected
- value.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+
 
 
 
@@ -419,8 +420,7 @@ service.
   byte[] qualifier,
   byte[] value,
   Delete delete)
-Atomically checks if a row/family/qualifier value matches the expected
- value.
+Deprecated.
 
 
 
@@ -432,8 +432,7 @@ service.
   byte[] value,
   Delete delete)
 Deprecated.
-Since 2.0.0. Will be removed in 3.0.0. Use
-  Table.checkAndDelete(byte[], byte[], byte[], byte[], Delete)
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
 
 
 
@@ -456,8 +455,9 @@ service.
   CompareOperator op,
   byte[] value,
   Delete delete)
-Atomically checks if a row/family/qualifier value matches the expected
- value.
+Deprecated.
+Since 2.0.0. Will be removed in 3.0.0. Use Table.checkAndMutate(byte[], byte[])
+
 
 
 
@@ -468,8 +468,7 @@ service.
   CompareOperator op,
   byte[] value,
   Delete delete)
-Atomically checks if a row/family/qualifier value matches the expected
- value.
+Deprecated.
 
 
 
@@ -494,9 +493,7 @@ service.
 
 
 void
-HTable.delete(Delete delete)
-Deletes the specified cells/row.
-
+HTable.delete(Delete delete)
 
 
 private boolean
@@ -512,9 +509,17 @@ service.
 AsyncTable.CheckAndMutateBuilder.thenDelete(Delete delete)
 
 
+boolean
+Table.CheckAndMutateBuilder.thenDelete(Delete delete)
+
+
 CompletableFuture<Boolean>
 RawAsyncTableImpl.CheckAndMutateBuilderImpl.thenDelete(Delete delete)
 
+
+boolean
+HTable.CheckAndMutateBuilderImpl.thenDelete(Delete delete)
+
 
 
 
@@ -546,9 +551,7 @@ service.
 
 
 void
-HTable.delete(List<Delete> deletes)
-Batch Deletes the specified cells/rows from the table.
-
+HTable.delete(List<Delete> deletes)
 
 
 default CompletableFuture<Void>
@@ -835,12 +838,26 @@ service.
   byte[] qualifier,
   CompareOperator compareOp,
   byte[] value,
-  Delete delete)
+  Delete delete)
+Deprecated.
+
 
 
 void
 RemoteHTable.delete(Delete delete)
 
+
+private boolean
+RemoteHTable.doCheckAndDelete(byte[] row,
+    byte[] family,
+    byte[] qualifier,
+    byte[] value,
+    Delete delete)
+
+
+boolean
+RemoteHTable.CheckAndMutateBuilderImpl.thenDelete(Delete delete)
+
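
The deprecation notices above all point the same way: the positional checkAndDelete overloads give way to the builder reachable from Table.checkAndMutate(byte[], byte[]), whose thenDelete(Delete) implementations are the new rows in this table. A short migration sketch (the family, qualifier, and expected value are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndMutateMigration {
      static boolean deleteIfMatches(Table table, byte[] row) throws IOException {
        // Old form, deprecated since 2.0.0 and removed in 3.0.0:
        //   table.checkAndDelete(row, family, qualifier, expected, delete);
        // New builder form, per the thenDelete(Delete) rows listed above:
        return table.checkAndMutate(row, Bytes.toBytes("cf"))
            .qualifier(Bytes.toBytes("q"))
            .ifEquals(Bytes.toBytes("expected"))
            .thenDelete(new Delete(row));
      }
    }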
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/Durability.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
index 1490972..4e12c68 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/Durability.html
@@ -273,9 +273,7 @@ the order they are declared.
   byte[] family,
   byte[] qualifier,
   long amount,
-  Durability durability)
-Atomically increments a column value.
-
+  Durability durability)
 
 
 TableDescriptorBuilder
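
For reference, the five-argument increment shown in that hunk is still a plain Table call; only its javadoc summary was dropped from this page. A small usage sketch (column names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IncrementSketch {
      // Atomically bumps a counter cell; SKIP_WAL trades durability for latency.
      static long bump(Table table, byte[] row) throws IOException {
        return table.incrementColumnValue(row, Bytes.toBytes("cf"),
            Bytes.toBytes("hits"), 1L, Durability.SKIP_WAL);
      }
    }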

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/Get.html 

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html 
b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
index 9392488..83099f8 100644
--- a/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
+++ b/devapidocs/org/apache/hadoop/hbase/PrivateCellUtil.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public final class PrivateCellUtil
+public final class PrivateCellUtil
 extends Object
 Utility methods helpful slinging Cell instances. It has more powerful and
  rich set of APIs than those in CellUtil for internal usage.
@@ -710,9 +710,8 @@ extends Object
 
 
 static void
-setTimestamp(Cell cell,
-    byte[] ts,
-    int tsOffset)
+setTimestamp(Cell cell,
+    byte[] ts)
 Sets the given timestamp to the cell.
 
 
@@ -737,9 +736,8 @@ extends Object
 
 
 static boolean
-updateLatestStamp(Cell cell,
-    byte[] ts,
-    int tsOffset)
+updateLatestStamp(Cell cell,
+    byte[] ts)
 Sets the given timestamp to the cell iff current timestamp is
  HConstants.LATEST_TIMESTAMP.
 
@@ -875,7 +873,7 @@ extends Object
 
 
 PrivateCellUtil
-private PrivateCellUtil()
+private PrivateCellUtil()
 Private constructor to keep this class from being instantiated.
 
 
@@ -893,7 +891,7 @@ extends Object
 
 
 fillRowRange
-public static ByteRange fillRowRange(Cell cell,
+public static ByteRange fillRowRange(Cell cell,
     ByteRange range)
 ByteRange
 
@@ -904,7 +902,7 @@ extends Object
 
 
 fillFamilyRange
-public static ByteRange fillFamilyRange(Cell cell,
+public static ByteRange fillFamilyRange(Cell cell,
     ByteRange range)
 
 
@@ -914,7 +912,7 @@ extends Object
 
 
 fillQualifierRange
-public static ByteRange fillQualifierRange(Cell cell,
+public static ByteRange fillQualifierRange(Cell cell,
     ByteRange range)
 
 
@@ -924,7 +922,7 @@ extends Object
 
 
 fillValueRange
-public static ByteRange fillValueRange(Cell cell,
+public static ByteRange fillValueRange(Cell cell,
     ByteRange range)
 
 
@@ -934,7 +932,7 @@ extends Object
 
 
 fillTagRange
-public static ByteRange fillTagRange(Cell cell,
+public static ByteRange fillTagRange(Cell cell,
     ByteRange range)
 
 
@@ -944,7 +942,7 @@ extends Object
 
 
 getRowByte
-public static byte getRowByte(Cell cell,
+public static byte getRowByte(Cell cell,
     int index)
 misc
 
@@ -955,7 +953,7 @@ extends Object
 
 
 getQualifierByte
-public static byte getQualifierByte(Cell cell,
+public static byte getQualifierByte(Cell cell,
     int index)
 
 
@@ -965,7 +963,7 @@ extends Object
 
 
 getValueBufferShallowCopy
-public static ByteBuffer getValueBufferShallowCopy(Cell cell)
+public static ByteBuffer getValueBufferShallowCopy(Cell cell)
 
 
 
@@ -974,7 +972,7 @@ extends Object
 
 
 createCell
-public static Cell createCell(Cell cell,
+public static Cell createCell(Cell cell,
     List<Tag> tags)
 
 Returns:
@@ -988,7 +986,7 @@ extends Object
 
 
 createCell
-public static Cell createCell(Cell cell,
+public static Cell createCell(Cell cell,
     byte[] tags)
 
 Returns:
@@ -1002,7 +1000,7 @@ extends Object
 
 
 createCell
-public static Cell createCell(Cell cell,
+public static Cell createCell(Cell cell,
     byte[] value,
     byte[] tags)
 
@@ 
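
The signature change above drops the tsOffset parameter: setTimestamp and updateLatestStamp now expect the byte[] to be exactly the encoded timestamp. A minimal sketch against the signatures shown (PrivateCellUtil is @InterfaceAudience.Private, so this is internal-only usage):

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.PrivateCellUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TimestampSketch {
      // With tsOffset gone, the array itself must be the 8-byte timestamp,
      // e.g. produced by Bytes.toBytes(long).
      static boolean stampIfLatest(Cell cell, long now) throws IOException {
        byte[] ts = Bytes.toBytes(now);
        // Per the javadoc above: only applies if the cell still carries
        // HConstants.LATEST_TIMESTAMP.
        return PrivateCellUtil.updateLatestStamp(cell, ts);
      }
    }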

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
index 62bc799..5c004ce 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.html
@@ -250,7 +250,7 @@
 242    Cell kv = cell;
 243    // null input == user explicitly wants to flush
 244    if (row == null && kv == null) {
-245      rollWriters();
+245      rollWriters(null);
 246      return;
 247    }
 248
@@ -284,636 +284,642 @@
 276      configureStoragePolicy(conf, fs, tableAndFamily, writerPath);
 277    }
 278
-279    // If any of the HFiles for the column families has reached
-280    // maxsize, we need to roll all the writers
-281    if (wl != null && wl.written + length >= maxsize) {
-282      this.rollRequested = true;
-283    }
-284
-285    // This can only happen once a row is finished though
-286    if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
-287      rollWriters();
-288    }
-289
-290    // create a new WAL writer, if necessary
-291    if (wl == null || wl.writer == null) {
-292      if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) {
-293        HRegionLocation loc = null;
-294
-295        String tableName = Bytes.toString(tableNameBytes);
-296        if (tableName != null) {
-297          try (Connection connection = ConnectionFactory.createConnection(conf);
-298              RegionLocator locator =
-299                connection.getRegionLocator(TableName.valueOf(tableName))) {
-300            loc = locator.getRegionLocation(rowKey);
-301          } catch (Throwable e) {
-302            LOG.warn("There's something wrong when locating rowkey: " +
-303              Bytes.toString(rowKey) + " for tablename: " + tableName, e);
-304            loc = null;
-305          } }
-306
-307        if (null == loc) {
-308          if (LOG.isTraceEnabled()) {
-309            LOG.trace("failed to get region location, so use default writer for rowkey: " +
-310              Bytes.toString(rowKey));
-311          }
-312          wl = getNewWriter(tableNameBytes, family, conf, null);
-313        } else {
-314          if (LOG.isDebugEnabled()) {
-315            LOG.debug("first rowkey: [" + Bytes.toString(rowKey) + "]");
-316          }
-317          InetSocketAddress initialIsa =
-318              new InetSocketAddress(loc.getHostname(), loc.getPort());
-319          if (initialIsa.isUnresolved()) {
-320            if (LOG.isTraceEnabled()) {
-321              LOG.trace("failed to resolve bind address: " + loc.getHostname() + ":"
-322                  + loc.getPort() + ", so use default writer");
-323            }
-324            wl = getNewWriter(tableNameBytes, family, conf, null);
-325          } else {
-326            if (LOG.isDebugEnabled()) {
-327              LOG.debug("use favored nodes writer: " + initialIsa.getHostString());
-328            }
-329            wl = getNewWriter(tableNameBytes, family, conf, new InetSocketAddress[] { initialIsa
-330            });
-331          }
-332        }
-333      } else {
-334        wl = getNewWriter(tableNameBytes, family, conf, null);
-335      }
-336    }
-337
-338    // we now have the proper WAL writer. full steam ahead
-339    // TODO : Currently in SettableTimeStamp but this will also move to ExtendedCell
-340    PrivateCellUtil.updateLatestStamp(cell, this.now);
-341    wl.writer.append(kv);
-342    wl.written += length;
-343
-344    // Copy the row so we know when a row transition.
-345    this.previousRow = rowKey;
-346  }
-347
-348  private void rollWriters() throws IOException {
-349    for (WriterLength wl : this.writers.values()) {
-350      if (wl.writer != null) {
-351        LOG.info(
-352            "Writer=" + wl.writer.getPath() + ((wl.written == 0)? "": ", wrote=" + wl.written));
-353        close(wl.writer);
-354      }
-355      wl.writer = null;
-356      wl.written = 0;
-357    }
-358    this.rollRequested = false;
-359  }
-360
-361  /*
-362   * Create a new StoreFile.Writer.
-363   * @param family
-364   * @return A WriterLength, containing a new StoreFile.Writer.
-365   * @throws IOException
-366   */
-367  
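
The +245 line changes the explicit-flush path to rollWriters(null), which suggests the parameterless rollWriters shown in the removed block gained an argument so a single writer can be rolled when only it crosses maxsize. A self-contained sketch of that shape; Writer here merely stands in for the StoreFileWriter held in WriterLength, and none of this is the actual HBase implementation:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    public class RollWritersSketch {
      static class Writer { long written; boolean open = true; }

      private final Map<String, Writer> writers = new HashMap<>();

      void rollWriters(Writer writerToRoll) throws IOException {
        if (writerToRoll != null) {
          closeWriter(writerToRoll); // roll only the writer that hit maxsize
          return;
        }
        for (Writer w : writers.values()) {
          closeWriter(w);            // explicit flush: roll them all
        }
      }

      private void closeWriter(Writer w) throws IOException {
        if (w.open) {
          w.open = false;            // stands in for close(wl.writer)
        }
        w.written = 0;
      }
    }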

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 3edfbef..9707b2c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -2459,5936 +2459,5935 @@
 2451  }
 2452
 2453  for (HStore s : storesToFlush) {
-2454    MemStoreSize flushableSize = s.getFlushableSize();
-2455    totalSizeOfFlushableStores.incMemStoreSize(flushableSize);
-2456    storeFlushCtxs.put(s.getColumnFamilyDescriptor().getName(),
-2457      s.createFlushContext(flushOpSeqId, tracker));
-2458    // for writing stores to WAL
-2459    committedFiles.put(s.getColumnFamilyDescriptor().getName(), null);
-2460    storeFlushableSize.put(s.getColumnFamilyDescriptor().getName(), flushableSize);
-2461  }
-2462
-2463  // write the snapshot start to WAL
-2464  if (wal != null && !writestate.readOnly) {
-2465    FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
-2466      getRegionInfo(), flushOpSeqId, committedFiles);
-2467    // No sync. Sync is below where no updates lock and we do FlushAction.COMMIT_FLUSH
-2468    WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false,
-2469      mvcc);
-2470  }
-2471
-2472  // Prepare flush (take a snapshot)
-2473  for (StoreFlushContext flush : storeFlushCtxs.values()) {
-2474    flush.prepare();
-2475  }
-2476} catch (IOException ex) {
-2477  doAbortFlushToWAL(wal, flushOpSeqId, committedFiles);
-2478  throw ex;
-2479} finally {
-2480  this.updatesLock.writeLock().unlock();
-2481}
-2482String s = "Finished memstore snapshotting " + this + ", syncing WAL and waiting on mvcc, " +
-2483    "flushsize=" + totalSizeOfFlushableStores;
-2484status.setStatus(s);
-2485doSyncOfUnflushedWALChanges(wal, getRegionInfo());
-2486return new PrepareFlushResult(storeFlushCtxs, committedFiles, storeFlushableSize, startTime,
-2487    flushOpSeqId, flushedSeqId, totalSizeOfFlushableStores);
-2488  }
-2489
-2490  /**
-2491   * Utility method broken out of internalPrepareFlushCache so that method is smaller.
-2492   */
-2493  private void logFatLineOnFlush(Collection<HStore> storesToFlush, long sequenceId) {
-2494    if (!LOG.isInfoEnabled()) {
-2495      return;
-2496    }
-2497    // Log a fat line detailing what is being flushed.
-2498    StringBuilder perCfExtras = null;
-2499    if (!isAllFamilies(storesToFlush)) {
-2500      perCfExtras = new StringBuilder();
-2501      for (HStore store: storesToFlush) {
-2502        perCfExtras.append("; ").append(store.getColumnFamilyName());
-2503        perCfExtras.append("=")
-2504            .append(StringUtils.byteDesc(store.getFlushableSize().getDataSize()));
-2505      }
-2506    }
-2507    LOG.info("Flushing " + + storesToFlush.size() + "/" + stores.size() +
-2508        " column families, memstore=" + StringUtils.byteDesc(this.memstoreDataSize.get()) +
-2509        ((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") +
-2510        ((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + sequenceId));
-2511  }
-2512
-2513  private void doAbortFlushToWAL(final WAL wal, final long flushOpSeqId,
-2514      final Map<byte[], List<Path>> committedFiles) {
-2515    if (wal == null) return;
-2516    try {
-2517      FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
-2518          getRegionInfo(), flushOpSeqId, committedFiles);
-2519      WALUtil.writeFlushMarker(wal, this.getReplicationScope(), getRegionInfo(), desc, false,
-2520          mvcc);
-2521    } catch (Throwable t) {
-2522      LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" +
-2523          StringUtils.stringifyException(t));
-2524      // ignore this since we will be aborting the RS with DSE.
-2525    }
-2526    // we have called wal.startCacheFlush(), now we have to abort it
-2527    wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
-2528  }
-2529
-2530  /**
-2531   * Sync unflushed WAL changes. See HBASE-8208 for details
-2532   */
-2533  private static void doSyncOfUnflushedWALChanges(final WAL wal, final RegionInfo hri)
-2534      throws IOException {
-2535    if (wal == null) {
-2536      return;
-2537    }
-2538    try {
-2539      wal.sync(); // ensure that flush marker is sync'ed
-2540    } catch (IOException ioe) {
-2541      wal.abortCacheFlush(hri.getEncodedNameAsBytes());
-2542      throw 
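
Condensed from the block above, the prepare-flush ordering is worth restating. Method and field names are the ones visible in the diff; everything else is elided:

    // 1. Under updatesLock.writeLock(), register a flush context per store:
    //      storeFlushCtxs.put(family, s.createFlushContext(flushOpSeqId, tracker));
    // 2. Write a START_FLUSH marker to the WAL without syncing it yet:
    //      WALUtil.writeFlushMarker(wal, getReplicationScope(), getRegionInfo(),
    //          desc, false, mvcc);
    // 3. Snapshot the memstores:
    //      for (StoreFlushContext flush : storeFlushCtxs.values()) flush.prepare();
    // 4. Release the write lock, then sync the marker:
    //      doSyncOfUnflushedWALChanges(wal, getRegionInfo());  // wal.sync()
    // On IOException, doAbortFlushToWAL writes an ABORT_FLUSH marker and
    // wal.abortCacheFlush(...) cancels the cache flush that was started.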

[19/51] [partial] hbase-site git commit: Published site at .

2017-11-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
index c6e457f..9b7087a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.html
@@ -25,166 +25,161 @@
 017 */
 018package org.apache.hadoop.hbase.io.asyncfs;
 019
-020import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-021import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
-022
-023import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-024import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-025
-026import java.io.IOException;
-027import java.io.InterruptedIOException;
-028import java.nio.ByteBuffer;
-029import java.util.concurrent.CompletableFuture;
-030import java.util.concurrent.ExecutionException;
-031import java.util.concurrent.ExecutorService;
-032import java.util.concurrent.Executors;
-033
-034import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-035import org.apache.hadoop.fs.FSDataOutputStream;
-036import org.apache.hadoop.fs.FileSystem;
-037import org.apache.hadoop.fs.Path;
-038import org.apache.yetus.audience.InterfaceAudience;
-039import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
-040import org.apache.hadoop.hbase.util.CancelableProgressable;
-041import org.apache.hadoop.hbase.util.CommonFSUtils;
-042import org.apache.hadoop.hdfs.DistributedFileSystem;
-043import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-044
-045/**
-046 * Helper class for creating AsyncFSOutput.
-047 */
-048@InterfaceAudience.Private
-049public final class AsyncFSOutputHelper {
-050
-051  private AsyncFSOutputHelper() {
-052  }
-053
-054  /**
-055   * Create {@link FanOutOneBlockAsyncDFSOutput} for {@link DistributedFileSystem}, and a simple
-056   * implementation for other {@link FileSystem} which wraps around a {@link FSDataOutputStream}.
-057   */
-058  public static AsyncFSOutput createOutput(FileSystem fs, Path f, boolean overwrite,
-059      boolean createParent, short replication, long blockSize, EventLoop eventLoop,
-060      Class<? extends Channel> channelClass)
-061      throws IOException, CommonFSUtils.StreamLacksCapabilityException {
-062    if (fs instanceof DistributedFileSystem) {
-063      return FanOutOneBlockAsyncDFSOutputHelper.createOutput((DistributedFileSystem) fs, f,
-064        overwrite, createParent, replication, blockSize, eventLoop, channelClass);
-065    }
-066    final FSDataOutputStream fsOut;
-067    int bufferSize = fs.getConf().getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
-068      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
-069    if (createParent) {
-070      fsOut = fs.create(f, overwrite, bufferSize, replication, blockSize, null);
-071    } else {
-072      fsOut = fs.createNonRecursive(f, overwrite, bufferSize, replication, blockSize, null);
-073    }
-074    // After we create the stream but before we attempt to use it at all
-075    // ensure that we can provide the level of data safety we're configured
-076    // to provide.
-077    if (!(CommonFSUtils.hasCapability(fsOut, "hflush") &&
-078        CommonFSUtils.hasCapability(fsOut, "hsync"))) {
-079      throw new CommonFSUtils.StreamLacksCapabilityException("hflush and hsync");
-080    }
-081    final ExecutorService flushExecutor =
-082        Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setDaemon(true)
-083            .setNameFormat("AsyncFSOutputFlusher-" + f.toString().replace("%", "%%")).build());
-084    return new AsyncFSOutput() {
-085
-086      private final ByteArrayOutputStream out = new ByteArrayOutputStream();
-087
-088      @Override
-089      public void write(final byte[] b, final int off, final int len) {
-090        if (eventLoop.inEventLoop()) {
-091          out.write(b, off, len);
-092        } else {
-093          eventLoop.submit(() -> out.write(b, off, len)).syncUninterruptibly();
-094        }
+020import java.io.IOException;
+021import java.io.InterruptedIOException;
+022import java.nio.ByteBuffer;
+023import java.util.concurrent.CompletableFuture;
+024import java.util.concurrent.ExecutionException;
+025import java.util.concurrent.ExecutorService;
+026import java.util.concurrent.Executors;
+027
+028import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+029import org.apache.hadoop.fs.FSDataOutputStream;
+030import org.apache.hadoop.fs.FileSystem;
+031import org.apache.hadoop.fs.Path;
+032import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
+033import 
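
Usage of the helper is unchanged by this import reshuffle. A sketch against the createOutput signature shown above; the replication factor, block size, and netty channel class below are illustrative placeholders, not values the source prescribes:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;
    import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutputHelper;
    import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
    import org.apache.hadoop.hbase.shaded.io.netty.channel.socket.nio.NioSocketChannel;
    import org.apache.hadoop.hbase.util.CommonFSUtils;

    public class AsyncOutputSketch {
      // On DistributedFileSystem this yields the fan-out DFS output; any other
      // FileSystem gets the FSDataOutputStream wrapper, which fails with
      // StreamLacksCapabilityException when hflush/hsync are unsupported.
      static AsyncFSOutput open(FileSystem fs, Path path, EventLoop loop)
          throws IOException, CommonFSUtils.StreamLacksCapabilityException {
        return AsyncFSOutputHelper.createOutput(fs, path, true /* overwrite */,
            false /* createParent */, (short) 3, 128 * 1024 * 1024L, loop,
            NioSocketChannel.class);
      }
    }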

[19/51] [partial] hbase-site git commit: Published site at .

2017-11-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
index 8ba8dc9..f973938 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.ServerRequest.html
@@ -37,36 +37,36 @@
 029import java.io.IOException;
 030import java.util.ArrayList;
 031import java.util.Collections;
-032import java.util.IdentityHashMap;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.Optional;
-036import java.util.concurrent.CompletableFuture;
-037import java.util.concurrent.ConcurrentHashMap;
-038import java.util.concurrent.ConcurrentLinkedQueue;
-039import java.util.concurrent.ConcurrentMap;
-040import java.util.concurrent.ConcurrentSkipListMap;
-041import java.util.concurrent.TimeUnit;
-042import java.util.function.Supplier;
-043import java.util.stream.Collectors;
-044import java.util.stream.Stream;
-045
-046import org.apache.commons.logging.Log;
-047import org.apache.commons.logging.LogFactory;
-048import org.apache.hadoop.hbase.CellScannable;
-049import org.apache.hadoop.hbase.DoNotRetryIOException;
-050import org.apache.hadoop.hbase.HRegionLocation;
-051import org.apache.hadoop.hbase.ServerName;
-052import org.apache.hadoop.hbase.TableName;
-053import org.apache.yetus.audience.InterfaceAudience;
-054import org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
-055import org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
-056import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-057import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-058import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-059import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-060import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-061import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+032import java.util.HashMap;
+033import java.util.IdentityHashMap;
+034import java.util.List;
+035import java.util.Map;
+036import java.util.Optional;
+037import java.util.concurrent.CompletableFuture;
+038import java.util.concurrent.ConcurrentHashMap;
+039import java.util.concurrent.ConcurrentLinkedQueue;
+040import java.util.concurrent.ConcurrentMap;
+041import java.util.concurrent.ConcurrentSkipListMap;
+042import java.util.concurrent.TimeUnit;
+043import java.util.function.Supplier;
+044import java.util.stream.Collectors;
+045import java.util.stream.Stream;
+046
+047import org.apache.commons.logging.Log;
+048import org.apache.commons.logging.LogFactory;
+049import org.apache.hadoop.hbase.CellScannable;
+050import org.apache.hadoop.hbase.DoNotRetryIOException;
+051import org.apache.hadoop.hbase.HRegionLocation;
+052import org.apache.hadoop.hbase.ServerName;
+053import org.apache.hadoop.hbase.TableName;
+054import org.apache.yetus.audience.InterfaceAudience;
+055import org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
+056import org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
+057import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+058import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+059import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
+060import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+061import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 062import org.apache.hadoop.hbase.util.Bytes;
 063import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 064
@@ -240,212 +240,208 @@
 232  }
 233
 234  private ClientProtos.MultiRequest buildReq(Map<byte[], RegionRequest> actionsByRegion,
-235      List<CellScannable> cells) throws IOException {
+235      List<CellScannable> cells, Map<Integer, Integer> rowMutationsIndexMap) throws IOException {
 236    ClientProtos.MultiRequest.Builder multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
 237    ClientProtos.RegionAction.Builder regionActionBuilder = ClientProtos.RegionAction.newBuilder();
 238    ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
 239    ClientProtos.MutationProto.Builder mutationBuilder = ClientProtos.MutationProto.newBuilder();
 240    for (Map.Entry<byte[], RegionRequest> entry : actionsByRegion.entrySet()) {
-241      // TODO: remove the extra for loop as we will iterate it in mutationBuilder.
-242      if (!multiRequestBuilder.hasNonceGroup()) {
-243        for (Action action : 
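
The diff only shows buildReq gaining a Map<Integer, Integer> rowMutationsIndexMap parameter. The library-free sketch below is one plausible illustration of what such an index map could do (remembering which on-the-wire action positions came from RowMutations so responses can be matched back to caller indexes); it should not be read as the actual implementation:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class BuildReqShape {
      static class Action { int originalIndex; boolean isRowMutations; }

      // Hypothetical helper: walk the per-region action lists in wire order and
      // record, for each RowMutations action, wire index -> original index.
      static Map<Integer, Integer> indexRowMutations(Map<byte[], List<Action>> actionsByRegion) {
        Map<Integer, Integer> rowMutationsIndexMap = new HashMap<>();
        int sent = 0;
        for (List<Action> regionActions : actionsByRegion.values()) {
          for (Action a : regionActions) {
            if (a.isRowMutations) {
              rowMutationsIndexMap.put(sent, a.originalIndex);
            }
            sent++;
          }
        }
        return rowMutationsIndexMap;
      }
    }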

[19/51] [partial] hbase-site git commit: Published site at .

2017-11-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.WALStoreLeaseRecovery.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.WALStoreLeaseRecovery.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.WALStoreLeaseRecovery.html
index 0b9d890..d852fd0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.WALStoreLeaseRecovery.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.WALStoreLeaseRecovery.html
@@ -155,25 +155,25 @@
 147  }
 148
 149  public boolean waitInitialized(Procedure proc) {
-150    return procSched.waitEvent(master.getInitializedEvent(), proc);
+150    return master.getInitializedEvent().suspendIfNotReady(proc);
 151  }
 152
 153  public boolean waitServerCrashProcessingEnabled(Procedure proc) {
 154    if (master instanceof HMaster) {
-155      return procSched.waitEvent(((HMaster)master).getServerCrashProcessingEnabledEvent(), proc);
+155      return ((HMaster)master).getServerCrashProcessingEnabledEvent().suspendIfNotReady(proc);
 156    }
 157    return false;
 158  }
 159
 160  public boolean waitFailoverCleanup(Procedure proc) {
-161    return procSched.waitEvent(master.getAssignmentManager().getFailoverCleanupEvent(), proc);
+161    return master.getAssignmentManager().getFailoverCleanupEvent().suspendIfNotReady(proc);
 162  }
 163
 164  public void setEventReady(ProcedureEvent event, boolean isReady) {
 165    if (isReady) {
-166      procSched.wakeEvent(event);
+166      event.wake(procSched);
 167    } else {
-168      procSched.suspendEvent(event);
+168      event.suspend();
 169    }
 170  }
 171

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
index 0b9d890..d852fd0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.html
@@ -155,25 +155,25 @@
 147  }
 148
 149  public boolean waitInitialized(Procedure proc) {
-150    return procSched.waitEvent(master.getInitializedEvent(), proc);
+150    return master.getInitializedEvent().suspendIfNotReady(proc);
 151  }
 152
 153  public boolean waitServerCrashProcessingEnabled(Procedure proc) {
 154    if (master instanceof HMaster) {
-155      return procSched.waitEvent(((HMaster)master).getServerCrashProcessingEnabledEvent(), proc);
+155      return ((HMaster)master).getServerCrashProcessingEnabledEvent().suspendIfNotReady(proc);
 156    }
 157    return false;
 158  }
 159
 160  public boolean waitFailoverCleanup(Procedure proc) {
-161    return procSched.waitEvent(master.getAssignmentManager().getFailoverCleanupEvent(), proc);
+161    return master.getAssignmentManager().getFailoverCleanupEvent().suspendIfNotReady(proc);
 162  }
 163
 164  public void setEventReady(ProcedureEvent event, boolean isReady) {
 165    if (isReady) {
-166      procSched.wakeEvent(event);
+166      event.wake(procSched);
 167    } else {
-168      procSched.suspendEvent(event);
+168      event.suspend();
 169    }
 170  }
 171
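
Both hunks above show the same mechanical migration: waiting, waking, and suspending move from scheduler methods onto the ProcedureEvent itself. A minimal sketch assuming only the calls visible in the diff; the AbstractProcedureScheduler parameter type is inferred from the diff of that class in this same commit:

    import org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler;
    import org.apache.hadoop.hbase.procedure2.Procedure;
    import org.apache.hadoop.hbase.procedure2.ProcedureEvent;

    public class EventMigrationSketch {
      // before: procSched.waitEvent(event, proc)
      static boolean waitOn(ProcedureEvent event, Procedure proc) {
        return event.suspendIfNotReady(proc); // suspends proc if the event is not ready
      }

      // before: procSched.wakeEvent(event) / procSched.suspendEvent(event)
      static void setReady(ProcedureEvent event, AbstractProcedureScheduler sched,
          boolean ready) {
        if (ready) {
          event.wake(sched);  // requeue procedures parked on the event
        } else {
          event.suspend();    // mark the event not-ready
        }
      }
    }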

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b9722a17/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
index e36d2ac..6175ecc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.html
@@ -26,298 +26,281 @@
 018
 019package org.apache.hadoop.hbase.procedure2;
 020
-021import java.util.concurrent.locks.Condition;
-022import java.util.concurrent.locks.ReentrantLock;
-023import java.util.concurrent.TimeUnit;
-024
-025import org.apache.commons.logging.Log;
-026import org.apache.commons.logging.LogFactory;
-027import org.apache.yetus.audience.InterfaceAudience;
-028
-029@InterfaceAudience.Private
-030public abstract class AbstractProcedureScheduler implements ProcedureScheduler {
-031  private static final Log LOG = LogFactory.getLog(AbstractProcedureScheduler.class);
-032  private final ReentrantLock schedulerLock = new ReentrantLock();
-033  private final 

[19/51] [partial] hbase-site git commit: Published site at .

2017-11-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b1eb7453/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html 
b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
index 2910ed9..db9a1c3 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/AccessControlLists.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class AccessControlLists
+public class AccessControlLists
 extends Object
 Maintains lists of permission grants to users and groups to allow for
  authorization checks by AccessController.
@@ -464,7 +464,7 @@ extends Object
 
 
 ACL_TABLE_NAME
-public static final TableName ACL_TABLE_NAME
+public static final TableName ACL_TABLE_NAME
 Internal storage table for access control lists
 
 
@@ -474,7 +474,7 @@ extends Object
 
 
 ACL_GLOBAL_NAME
-public static final byte[] ACL_GLOBAL_NAME
+public static final byte[] ACL_GLOBAL_NAME
 
 
 
@@ -483,7 +483,7 @@ extends Object
 
 
 ACL_LIST_FAMILY_STR
-public static final String ACL_LIST_FAMILY_STR
+public static final String ACL_LIST_FAMILY_STR
 Column family used to store ACL grants
 
 See Also:
 Constant Field Values
@@ -497,7 +497,7 @@ extends Object
 
 
 ACL_LIST_FAMILY
-public static final byte[] ACL_LIST_FAMILY
+public static final byte[] ACL_LIST_FAMILY
 
 
 
@@ -506,7 +506,7 @@ extends Object
 
 
 ACL_TAG_TYPE
-public static final byte ACL_TAG_TYPE
+public static final byte ACL_TAG_TYPE
 KV tag to store per cell access control lists
 
 See Also:
 Constant Field Values
@@ -520,7 +520,7 @@ extends Object
 
 
 NAMESPACE_PREFIX
-public static final char NAMESPACE_PREFIX
+public static final char NAMESPACE_PREFIX
 
 See Also:
 Constant Field Values
@@ -533,7 +533,7 @@ extends Object
 
 
 ACL_KEY_DELIMITER
-public static final char ACL_KEY_DELIMITER
+public static final char ACL_KEY_DELIMITER
 Delimiter to separate user, column family, and qualifier in
  _acl_ table info: column keys
 
@@ -548,7 +548,7 @@ extends Object
 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -557,7 +557,7 @@ extends Object
 
 
 LIST_CODE
-private static final int LIST_CODE
+private static final int LIST_CODE
 
 See Also:
 Constant Field Values
@@ -570,7 +570,7 @@ extends Object
 
 
 WRITABLE_CODE
-private static final int WRITABLE_CODE
+private static final int WRITABLE_CODE
 
 See Also:
 Constant Field Values
@@ -583,7 +583,7 @@ extends Object
 
 
 WRITABLE_NOT_ENCODED
-private static final int WRITABLE_NOT_ENCODED
+private static final int WRITABLE_NOT_ENCODED
 
 See Also:
 Constant Field Values
@@ -604,7 +604,7 @@ extends Object
 
 
 AccessControlLists
-public AccessControlLists()
+public AccessControlLists()
 
 
 
@@ -621,7 +621,7 @@ extends Object
 
 
 addUserPermission
-static void addUserPermission(org.apache.hadoop.conf.Configuration conf,
+static void addUserPermission(org.apache.hadoop.conf.Configuration conf,
    UserPermission userPerm,
    Table t,
    boolean mergeExistingPermissions)
@@ -643,7 +643,7 @@ extends Object
 
 
 addUserPermission
-static void addUserPermission(org.apache.hadoop.conf.Configuration conf,
+static void addUserPermission(org.apache.hadoop.conf.Configuration conf,
    UserPermission userPerm,
    Table t)
 throws IOException
@@ -659,7 +659,7 @@ extends 

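Finally, the constants documented in the AccessControlLists fragment above locate HBase's internal ACL storage. A small sketch that only reads those documented constants (the printing is illustrative, and the class is @InterfaceAudience.Private, so this is internal-only usage):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.access.AccessControlLists;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AclCoordinatesSketch {
      public static void main(String[] args) {
        TableName aclTable = AccessControlLists.ACL_TABLE_NAME; // internal ACL table
        byte[] family = AccessControlLists.ACL_LIST_FAMILY;     // grant-storage family
        System.out.println("ACLs live in " + aclTable + ", family "
            + Bytes.toString(family));
      }
    }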