[13/51] [partial] hbase-site git commit: Published site at 620d70d6186fb800299bcc62ad7179fccfd1be41.

2019-01-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa3fb87f/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.html
index afde950..ffe2be9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.html
@@ -180,287 +180,290 @@
 172            services.getAssignmentManager().getRegionsOnServer(serverName);
 173          // Where to go next? Depends on whether we should split logs at all or
 174          // if we should do distributed log splitting.
-175          if (!this.shouldSplitWal) {
-176            setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
-177          } else {
-178            setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS);
-179          }
-180          break;
-181        case SERVER_CRASH_SPLIT_LOGS:
-182          if (env.getMasterConfiguration().getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK,
-183            DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) {
-184            splitLogs(env);
-185            setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
-186          } else {
-187            am.getRegionStates().logSplitting(this.serverName);
-188            addChildProcedure(createSplittingWalProcedures(env, false));
-189            setNextState(ServerCrashState.SERVER_CRASH_DELETE_SPLIT_WALS_DIR);
-190          }
-191          break;
-192        case SERVER_CRASH_DELETE_SPLIT_WALS_DIR:
-193          if (isSplittingDone(env, false)) {
-194            cleanupSplitDir(env);
-195            setNextState(ServerCrashState.SERVER_CRASH_ASSIGN);
-196            am.getRegionStates().logSplit(this.serverName);
-197          } else {
-198            setNextState(ServerCrashState.SERVER_CRASH_SPLIT_LOGS);
-199          }
-200          break;
-201        case SERVER_CRASH_ASSIGN:
-202          // If no regions to assign, skip assign and skip to the finish.
-203          // Filter out meta regions. Those are handled elsewhere in this procedure.
-204          // Filter changes this.regionsOnCrashedServer.
-205          if (filterDefaultMetaRegions()) {
-206            if (LOG.isTraceEnabled()) {
-207              LOG
-208                .trace("Assigning regions " + RegionInfo.getShortNameToLog(regionsOnCrashedServer) +
-209                  ", " + this + "; cycles=" + getCycles());
-210            }
-211            assignRegions(env, regionsOnCrashedServer);
-212          }
-213          setNextState(ServerCrashState.SERVER_CRASH_FINISH);
-214          break;
-215        case SERVER_CRASH_HANDLE_RIT2:
-216          // Noop. Left in place because we used to call handleRIT here for a second time
-217          // but no longer necessary since HBASE-20634.
-218          setNextState(ServerCrashState.SERVER_CRASH_FINISH);
-219          break;
-220        case SERVER_CRASH_FINISH:
-221          LOG.info("removed crashed server {} after splitting done", serverName);
-222          services.getAssignmentManager().getRegionStates().removeServer(serverName);
-223          services.getServerManager().getDeadServers().finish(serverName);
-224          return Flow.NO_MORE_STATE;
-225        default:
-226          throw new UnsupportedOperationException("unhandled state=" + state);
-227      }
-228    } catch (IOException e) {
-229      LOG.warn("Failed state=" + state + ", retry " + this + "; cycles=" + getCycles(), e);
-230    }
-231    return Flow.HAS_MORE_STATE;
-232  }
-233
-234  private void cleanupSplitDir(MasterProcedureEnv env) {
-235    SplitWALManager splitWALManager = env.getMasterServices().getSplitWALManager();
-236    try {
-237      splitWALManager.deleteWALDir(serverName);
-238    } catch (IOException e) {
-239      LOG.warn("remove WAL directory of server {} failed, ignore...", serverName, e);
-240    }
-241  }
-242
-243  private boolean isSplittingDone(MasterProcedureEnv env, boolean splitMeta) {
-244    LOG.debug("check if splitting WALs of {} done? isMeta: {}", serverName, splitMeta);
-245    SplitWALManager splitWALManager = env.getMasterServices().getSplitWALManager();
-246    try {
-247      return splitWALManager.getWALsToSplit(serverName, splitMeta).size() == 0;
-248    } catch (IOException e) {
-249      LOG.warn("get filelist of serverName {} failed, retry...", serverName, e);
-250      return false;
-251    }
-252  }
-253
-254  private Procedure[] createSplittingWalProcedures(MasterProcedureEnv env, boolean splitMeta)
-255      throws IOException {
-256    LOG.info("Splitting WALs {}, isMeta: {}", this, splitMeta);
-257    SplitWALManager splitWALManager = env.getMasterServices().getSplitWALManager();
-258    List

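The removed hunk above is the heart of ServerCrashProcedure.executeFromState: each case handles one recovery step and then picks the next state, an IOException only logs and retries the same step, and SERVER_CRASH_FINISH returns NO_MORE_STATE. A minimal sketch of that state-machine pattern follows, using simplified stand-in names (CrashState, splitLogs, assignRegions) rather than the real procedure-v2 API:

    // Minimal sketch of the state-machine pattern in executeFromState above.
    // CrashState, Flow and the handler bodies are illustrative stand-ins,
    // not the actual org.apache.hadoop.hbase.procedure2 types.
    enum CrashState { SPLIT_LOGS, ASSIGN, FINISH }
    enum Flow { HAS_MORE_STATE, NO_MORE_STATE }

    class CrashStateMachine {
      private CrashState next = CrashState.SPLIT_LOGS;

      /** One step per invocation; the framework re-calls until NO_MORE_STATE. */
      Flow executeFromState(CrashState state) {
        try {
          switch (state) {
            case SPLIT_LOGS:
              splitLogs();               // may instead schedule child procedures
              next = CrashState.ASSIGN;
              break;
            case ASSIGN:
              assignRegions();
              next = CrashState.FINISH;
              break;
            case FINISH:
              return Flow.NO_MORE_STATE; // terminal state: procedure completes
            default:
              throw new UnsupportedOperationException("unhandled state=" + state);
          }
        } catch (Exception e) {
          // Swallowing the exception leaves the state unchanged, so the
          // framework simply retries the same step, mirroring how the real
          // procedure treats IOException.
        }
        return Flow.HAS_MORE_STATE;
      }

      private void splitLogs() {}
      private void assignRegions() {}
      CrashState getNextState() { return next; }
    }
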
[13/51] [partial] hbase-site git commit: Published site at 281d6429e55149cc4c05430dcc1d1dc136d8b245.

2019-01-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/901d593a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
index ed1a776..20f8f07 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/procedure/class-use/MasterProcedureEnv.html
@@ -1132,6 +1132,14 @@
+protected boolean
+SwitchRpcThrottleRemoteProcedure.abort(MasterProcedureEnv env)
+
+protected boolean
+SplitWALRemoteProcedure.abort(MasterProcedureEnv env)
+
 boolean
 RestoreSnapshotProcedure.abort(MasterProcedureEnv env)
@@ -1213,47 +1221,51 @@
   List regions)
+protected void
+SplitWALProcedure.afterReplay(MasterProcedureEnv env)
+
 private void
 ServerCrashProcedure.assignRegions(MasterProcedureEnv env, List regions)
 Assign the regions on the crashed RS to other Rses.
 private static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest
 RSProcedureDispatcher.buildOpenRegionRequest(MasterProcedureEnv env, ServerName serverName, List operations)
 org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo
 RSProcedureDispatcher.RegionOpenOperation.buildRegionOpenInfoRequest(MasterProcedureEnv env)
 private boolean
 ReopenTableRegionsProcedure.canSchedule(MasterProcedureEnv env, HRegionLocation loc)
 protected static void
 AbstractStateMachineTableProcedure.checkOnline(MasterProcedureEnv env, RegionInfo ri)
 Check region is online.
 protected void
 AbstractStateMachineRegionProcedure.checkTableModifiable(MasterProcedureEnv env)
 Check whether a table is modifiable - exists and either offline or online with config set
 protected void
 AbstractStateMachineTableProcedure.checkTableModifiable(MasterProcedureEnv env)
 Check whether a table is modifiable - exists and either offline or online with config set
 private static void
 DeleteTableProcedure.cleanAnyRemainingRows(MasterProcedureEnv env, TableName tableName)
@@ -1261,6 +1273,20 @@
  info:regioninfo column was empty because of some write error.
+private void
+ServerCrashProcedure.cleanupSplitDir(MasterProcedureEnv env)
+
+private void
+SwitchRpcThrottleRemoteProcedure.complete(MasterProcedureEnv env, Throwable error)
+
+private void
+SplitWALRemoteProcedure.complete(MasterProcedureEnv env, Throwable error)
+
 protected void
 TruncateTableProcedure.completionCleanup(MasterProcedureEnv env)
@@ -1318,11 +1344,16 @@
  List newRegions)
+private Procedure[]
+ServerCrashProcedure.createSplittingWalProcedures(MasterProcedureEnv env, boolean splitMeta)
+
 protected static void
 DeleteTableProcedure.deleteAssignmentState(MasterProcedureEnv env, TableName tableName)
 static void
 MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(MasterProcedureEnv env, TableName tableName,
@@ -1332,14 +1363,14 @@
 Remove the column family from the file system
 private static void
 DeleteNamespaceProcedure.deleteDirectory(MasterProcedureEnv env, String namespaceName)
 Delete the namespace directories from the file system
 private void
 ModifyTableProcedure.deleteFromFs(MasterProcedureEnv env, TableDescriptor oldTableDescriptor,
@@ -1347,27 +1378,27 @@
 Removes from hdfs the families that are not longer present in the new table descriptor.
 protected static void
 DeleteTableProcedure.deleteFromFs(MasterProcedureEnv env, TableName tableName,

[13/51] [partial] hbase-site git commit: Published site at 466fa920fee572fe20db3b77ebf539dc304d5f31.

2019-01-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2bf59208/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.Callable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.Callable.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.Callable.html
index 6dc4edd..56e66a7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.Callable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.Callable.html
@@ -25,107 +25,110 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
+020import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
 021
 022import java.io.IOException;
 023import java.util.concurrent.CompletableFuture;
-024
-025import org.apache.hadoop.hbase.HRegionLocation;
-026import org.apache.hadoop.hbase.TableName;
-027import org.apache.yetus.audience.InterfaceAudience;
-028import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-029import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-030import org.apache.hadoop.hbase.util.Bytes;
+024import org.apache.hadoop.hbase.HRegionLocation;
+025import org.apache.hadoop.hbase.TableName;
+026import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+027import org.apache.hadoop.hbase.util.Bytes;
+028import org.apache.yetus.audience.InterfaceAudience;
+029
+030import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer;
 031
-032/**
-033 * Retry caller for a single request, such as get, put, delete, etc.
-034 */
-035@InterfaceAudience.Private
-036class AsyncSingleRequestRpcRetryingCaller extends AsyncRpcRetryingCaller {
-037
-038  @FunctionalInterface
-039  public interface Callable {
-040    CompletableFuture call(HBaseRpcController controller, HRegionLocation loc,
-041        ClientService.Interface stub);
-042  }
-043
-044  private final TableName tableName;
+032import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
+033
+034/**
+035 * Retry caller for a single request, such as get, put, delete, etc.
+036 */
+037@InterfaceAudience.Private
+038class AsyncSingleRequestRpcRetryingCaller extends AsyncRpcRetryingCaller {
+039
+040  @FunctionalInterface
+041  public interface Callable {
+042    CompletableFuture call(HBaseRpcController controller, HRegionLocation loc,
+043        ClientService.Interface stub);
+044  }
 045
-046  private final byte[] row;
+046  private final TableName tableName;
 047
-048  private final RegionLocateType locateType;
+048  private final byte[] row;
 049
-050  private final Callable callable;
+050  private final int replicaId;
 051
-052  public AsyncSingleRequestRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn,
-053      TableName tableName, byte[] row, RegionLocateType locateType, Callable callable,
-054      long pauseNs, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs,
-055      int startLogErrorsCnt) {
-056    super(retryTimer, conn, pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs,
-057        startLogErrorsCnt);
-058    this.tableName = tableName;
-059    this.row = row;
-060    this.locateType = locateType;
-061    this.callable = callable;
-062  }
-063
-064  private void call(HRegionLocation loc) {
-065    ClientService.Interface stub;
-066    try {
-067      stub = conn.getRegionServerStub(loc.getServerName());
-068    } catch (IOException e) {
-069      onError(e,
-070        () -> "Get async stub to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row)
-071            + "' in " + loc.getRegion().getEncodedName() + " of " + tableName + " failed",
-072        err -> conn.getLocator().updateCachedLocation(loc, err));
-073      return;
-074    }
-075    resetCallTimeout();
-076    callable.call(controller, loc, stub).whenComplete(
-077      (result, error) -> {
-078        if (error != null) {
-079          onError(error,
-080            () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in "
-081                + loc.getRegion().getEncodedName() + " of " + tableName + " failed",
-082            err -> conn.getLocator().updateCachedLocation(loc, err));
-083          return;
-084        }
-085        future.complete(result);
-086      });
-087  }
-088
-089  @Override
-090  protected void doCall() {
-091    long locateTimeoutNs;
-092    if (operationTimeoutNs > 0) {
-093      locateTimeoutNs = remainingTimeNs();
-094      if (locateTimeoutNs <= 0) {
-095        completeExceptionally();
-096        return;
-097      }
-098    } else {
-099      locateTimeoutNs = -1L;
-100    }
-101    conn.getLocator()
-102        .getRegionLocation(tableName,
[13/51] [partial] hbase-site git commit: Published site at e4b6b4afb933a961f543537875f87a2dc62d3757.
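The removed call(HRegionLocation) above shows the single-request retry shape: obtain a stub for the located region, issue the call, and on any error invalidate the cached location (updateCachedLocation) and let the retry machinery re-run the attempt. A compact, hedged sketch of that shape follows; AsyncRetrySketch, invalidateCache and the attempt budget are illustrative stand-ins, not the real HBase client types:

    import java.util.concurrent.CompletableFuture;
    import java.util.function.Supplier;

    /**
     * Sketch of the async retry pattern visible above: run an attempt, and on
     * failure invalidate cached state and retry until the budget runs out.
     */
    class AsyncRetrySketch<T> {
      private final Supplier<CompletableFuture<T>> call; // one RPC attempt
      private final Runnable invalidateCache;            // e.g. drop cached region location
      private final int maxAttempts;

      AsyncRetrySketch(Supplier<CompletableFuture<T>> call, Runnable invalidateCache,
          int maxAttempts) {
        this.call = call;
        this.invalidateCache = invalidateCache;
        this.maxAttempts = maxAttempts;
      }

      CompletableFuture<T> run() {
        CompletableFuture<T> future = new CompletableFuture<>();
        attempt(future, 1);
        return future;
      }

      private void attempt(CompletableFuture<T> future, int attemptNo) {
        call.get().whenComplete((result, error) -> {
          if (error == null) {
            future.complete(result);          // success: hand the result through
          } else if (attemptNo >= maxAttempts) {
            future.completeExceptionally(error);
          } else {
            invalidateCache.run();            // stale location? refresh before retrying
            attempt(future, attemptNo + 1);   // the real caller also applies backoff here
          }
        });
      }
    }
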

2019-01-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/849d84a8/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.ResultScannerWrapper.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.ResultScannerWrapper.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.ResultScannerWrapper.html
deleted file mode 100644
index e692633..000
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/ThriftServerRunner.ResultScannerWrapper.html
+++ /dev/null
@@ -1,2103 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd";>
-
-
-Source code
-
-
-
-001/*
-002 * Licensed to the Apache Software Foundation (ASF) under one
-003 * or more contributor license agreements.  See the NOTICE file
-004 * distributed with this work for additional information
-005 * regarding copyright ownership.  The ASF licenses this file
-006 * to you under the Apache License, Version 2.0 (the
-007 * "License"); you may not use this file except in compliance
-008 * with the License.  You may obtain a copy of the License at
-009 *
-010 *     http://www.apache.org/licenses/LICENSE-2.0
-011 *
-012 * Unless required by applicable law or agreed to in writing, software
-013 * distributed under the License is distributed on an "AS IS" BASIS,
-014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-015 * See the License for the specific language governing permissions and
-016 * limitations under the License.
-017 */
-018
-019package org.apache.hadoop.hbase.thrift;
-020
-021import static org.apache.hadoop.hbase.util.Bytes.getBytes;
-022
-023import java.io.IOException;
-024import java.net.InetAddress;
-025import java.net.InetSocketAddress;
-026import java.net.UnknownHostException;
-027import java.nio.ByteBuffer;
-028import java.security.PrivilegedAction;
-029import java.util.ArrayList;
-030import java.util.Arrays;
-031import java.util.Collections;
-032import java.util.HashMap;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.TreeMap;
-036import java.util.concurrent.BlockingQueue;
-037import java.util.concurrent.ExecutorService;
-038import java.util.concurrent.LinkedBlockingQueue;
-039import java.util.concurrent.ThreadPoolExecutor;
-040import java.util.concurrent.TimeUnit;
-041
-042import javax.security.auth.callback.Callback;
-043import javax.security.auth.callback.UnsupportedCallbackException;
-044import javax.security.sasl.AuthorizeCallback;
-045import javax.security.sasl.SaslServer;
-046
-047import org.apache.commons.lang3.ArrayUtils;
-048import org.apache.hadoop.conf.Configuration;
-049import org.apache.hadoop.hbase.Cell.Type;
-050import org.apache.hadoop.hbase.CellBuilder;
-051import org.apache.hadoop.hbase.CellBuilderFactory;
-052import org.apache.hadoop.hbase.CellBuilderType;
-053import org.apache.hadoop.hbase.CellUtil;
-054import org.apache.hadoop.hbase.HBaseConfiguration;
-055import org.apache.hadoop.hbase.HColumnDescriptor;
-056import org.apache.hadoop.hbase.HConstants;
-057import org.apache.hadoop.hbase.HRegionLocation;
-058import org.apache.hadoop.hbase.HTableDescriptor;
-059import org.apache.hadoop.hbase.KeyValue;
-060import org.apache.hadoop.hbase.MetaTableAccessor;
-061import org.apache.hadoop.hbase.ServerName;
-062import org.apache.hadoop.hbase.TableName;
-063import org.apache.hadoop.hbase.TableNotFoundException;
-064import org.apache.hadoop.hbase.client.Admin;
-065import org.apache.hadoop.hbase.client.Append;
-066import org.apache.hadoop.hbase.client.Delete;
-067import org.apache.hadoop.hbase.client.Durability;
-068import org.apache.hadoop.hbase.client.Get;
-069import org.apache.hadoop.hbase.client.Increment;
-070import org.apache.hadoop.hbase.client.OperationWithAttributes;
-071import org.apache.hadoop.hbase.client.Put;
-072import org.apache.hadoop.hbase.client.RegionInfo;
-073import org.apache.hadoop.hbase.client.RegionLocator;
-074import org.apache.hadoop.hbase.client.Result;
-075import org.apache.hadoop.hbase.client.ResultScanner;
-076import org.apache.hadoop.hbase.client.Scan;
-077import org.apache.hadoop.hbase.client.Table;
-078import org.apache.hadoop.hbase.filter.Filter;
-079import org.apache.hadoop.hbase.filter.ParseFilter;
-080import org.apache.hadoop.hbase.filter.PrefixFilter;
-081import org.apache.hadoop.hbase.filter.WhileMatchFilter;
-082import org.apache.hadoop.hbase.http.HttpServerUtil;
-083import org.apache.hadoop.hbase.log.HBaseMarkers;
-084import org.apache.hadoop.hbase.security.SaslUtil;
-085import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
-086import org.apache.hadoop.hbase.security.SecurityUtil;
-087import org.apache.hadoop.hbase.security.UserProvider;
-088import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
-089import org.apache.hadoop.hbase.thrift.generated.BatchMutation;
-090import org.apache.hadoop.hbase.thrift.generated.Column

[13/51] [partial] hbase-site git commit: Published site at 3ab895979b643a2980bcdb7fee2078f14b614210.

2019-01-01 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.html
index 1e2490d..cf6fa92 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.html
@@ -550,6 +550,6 @@ So said all these, when we read a block it may be possible that the bytes of tha
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.html
index d69d375..2a8b987 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CacheFullException.html
@@ -398,6 +398,6 @@ extends IOException
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html
index 38c4672..9303d0a 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.html
@@ -413,6 +413,6 @@ extends Object
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
index c2613a1..3914b21 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileAccessor.html
@@ -234,6 +234,6 @@ var activeTableTab = "activeTableTab";
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
index fbff701..7fdfba5 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileReadAccessor.html
@@ -289,6 +289,6 @@ implements
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
+Copyright © 2007–2019 The Apache Software Foundation. All rights reserved.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ef0dd56d/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
index 8d5dd69..8f21c6c 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileWriteAccessor.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.FileW

[13/51] [partial] hbase-site git commit: Published site at 7820ba1dbdba58b1002cdfde08eb21aa7a0bb6da.

2018-12-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/09ea0d5f/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
index 0f5a095..50bf692 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BulkLoadListener.html
@@ -78,8712 +78,8714 @@
 070import java.util.concurrent.locks.ReadWriteLock;
 071import java.util.concurrent.locks.ReentrantReadWriteLock;
 072import java.util.function.Function;
-073import org.apache.hadoop.conf.Configuration;
-074import org.apache.hadoop.fs.FileStatus;
-075import org.apache.hadoop.fs.FileSystem;
-076import org.apache.hadoop.fs.LocatedFileStatus;
-077import org.apache.hadoop.fs.Path;
-078import org.apache.hadoop.hbase.Cell;
-079import org.apache.hadoop.hbase.CellBuilderType;
-080import org.apache.hadoop.hbase.CellComparator;
-081import org.apache.hadoop.hbase.CellComparatorImpl;
-082import org.apache.hadoop.hbase.CellScanner;
-083import org.apache.hadoop.hbase.CellUtil;
-084import org.apache.hadoop.hbase.CompareOperator;
-085import org.apache.hadoop.hbase.CompoundConfiguration;
-086import org.apache.hadoop.hbase.DoNotRetryIOException;
-087import org.apache.hadoop.hbase.DroppedSnapshotException;
-088import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-089import org.apache.hadoop.hbase.HConstants;
-090import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-091import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-092import org.apache.hadoop.hbase.KeyValue;
-093import org.apache.hadoop.hbase.KeyValueUtil;
-094import org.apache.hadoop.hbase.NamespaceDescriptor;
-095import org.apache.hadoop.hbase.NotServingRegionException;
-096import org.apache.hadoop.hbase.PrivateCellUtil;
-097import org.apache.hadoop.hbase.RegionTooBusyException;
-098import org.apache.hadoop.hbase.Tag;
-099import org.apache.hadoop.hbase.TagUtil;
-100import org.apache.hadoop.hbase.UnknownScannerException;
-101import org.apache.hadoop.hbase.client.Append;
-102import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-103import org.apache.hadoop.hbase.client.CompactionState;
-104import org.apache.hadoop.hbase.client.Delete;
-105import org.apache.hadoop.hbase.client.Durability;
-106import org.apache.hadoop.hbase.client.Get;
-107import org.apache.hadoop.hbase.client.Increment;
-108import org.apache.hadoop.hbase.client.IsolationLevel;
-109import org.apache.hadoop.hbase.client.Mutation;
-110import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-111import org.apache.hadoop.hbase.client.Put;
-112import org.apache.hadoop.hbase.client.RegionInfo;
-113import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-114import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-115import org.apache.hadoop.hbase.client.Result;
-116import org.apache.hadoop.hbase.client.RowMutations;
-117import org.apache.hadoop.hbase.client.Scan;
-118import org.apache.hadoop.hbase.client.TableDescriptor;
-119import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-120import org.apache.hadoop.hbase.conf.ConfigurationManager;
-121import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-122import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-123import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-124import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-125import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-126import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-127import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-128import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-129import org.apache.hadoop.hbase.filter.FilterWrapper;
-130import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-131import org.apache.hadoop.hbase.io.HFileLink;
-132import org.apache.hadoop.hbase.io.HeapSize;
-133import org.apache.hadoop.hbase.io.TimeRange;
-134import org.apache.hadoop.hbase.io.hfile.BlockCache;
-135import org.apache.hadoop.hbase.io.hfile.HFile;
-136import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import org.apache.hadoop.hbase.ipc.RpcCall;
-139import org.apache.hadoop.hbase.ipc.RpcServer;
-140import org.apache.hadoop.hbase.mob.MobFileCache;
-141import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-142import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-143import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager;
-144import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
-145import org.apache.hadoop.hbase.regio

[13/51] [partial] hbase-site git commit: Published site at c448604ceb987d113913f0583452b2abce04db0d.

2018-12-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4f8b8424/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.BlockIndexReader.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.BlockIndexReader.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.BlockIndexReader.html
index 1124f8b..0c29054 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.BlockIndexReader.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.BlockIndexReader.html
@@ -,587 +,592 @@
 1103      blockStream.write(midKeyMetadata);
 1104      blockWriter.writeHeaderAndData(out);
 1105      if (cacheConf != null) {
-1106        HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf);
-1107        cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(nameForCaching,
-1108          rootLevelIndexPos, true, blockForCaching.getBlockType()), blockForCaching);
-1109      }
-1110    }
-1111
-1112    // Add root index block size
-1113    totalBlockOnDiskSize += blockWriter.getOnDiskSizeWithoutHeader();
-1114    totalBlockUncompressedSize +=
-1115        blockWriter.getUncompressedSizeWithoutHeader();
-1116
-1117    if (LOG.isTraceEnabled()) {
-1118      LOG.trace("Wrote a " + numLevels + "-level index with root level at pos "
-1119        + rootLevelIndexPos + ", " + rootChunk.getNumEntries()
-1120        + " root-level entries, " + totalNumEntries + " total entries, "
-1121        + StringUtils.humanReadableInt(this.totalBlockOnDiskSize) +
-1122        " on-disk size, "
-1123        + StringUtils.humanReadableInt(totalBlockUncompressedSize) +
-1124        " total uncompressed size.");
-1125    }
-1126    return rootLevelIndexPos;
-1127  }
-1128
-1129  /**
-1130   * Writes the block index data as a single level only. Does not do any
-1131   * block framing.
-1132   *
-1133   * @param out the buffered output stream to write the index to. Typically a
-1134   *          stream writing into an {@link HFile} block.
-1135   * @param description a short description of the index being written. Used
-1136   *          in a log message.
-1137   * @throws IOException
-1138   */
-1139  public void writeSingleLevelIndex(DataOutput out, String description)
-1140      throws IOException {
-1141    expectNumLevels(1);
-1142
-1143    if (!singleLevelOnly)
-1144      throw new IOException("Single-level mode is turned off");
-1145
-1146    if (rootChunk.getNumEntries() > 0)
-1147      throw new IOException("Root-level entries already added in " +
-1148          "single-level mode");
-1149
-1150    rootChunk = curInlineChunk;
-1151    curInlineChunk = new BlockIndexChunk();
-1152
-1153    if (LOG.isTraceEnabled()) {
-1154      LOG.trace("Wrote a single-level " + description + " index with "
-1155        + rootChunk.getNumEntries() + " entries, " + rootChunk.getRootSize()
-1156        + " bytes");
-1157    }
-1158    rootChunk.writeRoot(out);
-1159  }
-1160
-1161  /**
-1162   * Split the current level of the block index into intermediate index
-1163   * blocks of permitted size and write those blocks to disk. Return the next
-1164   * level of the block index referencing those intermediate-level blocks.
-1165   *
-1166   * @param out
-1167   * @param currentLevel the current level of the block index, such as the a
-1168   *          chunk referencing all leaf-level index blocks
-1169   * @return the parent level block index, which becomes the root index after
-1170   *         a few (usually zero) iterations
-1171   * @throws IOException
-1172   */
-1173  private BlockIndexChunk writeIntermediateLevel(FSDataOutputStream out,
-1174      BlockIndexChunk currentLevel) throws IOException {
-1175    // Entries referencing intermediate-level blocks we are about to create.
-1176    BlockIndexChunk parent = new BlockIndexChunk();
-1177
-1178    // The current intermediate-level block index chunk.
-1179    BlockIndexChunk curChunk = new BlockIndexChunk();
-1180
-1181    for (int i = 0; i < currentLevel.getNumEntries(); ++i) {
-1182      curChunk.add(currentLevel.getBlockKey(i),
-1183        currentLevel.getBlockOffset(i), currentLevel.getOnDiskDataSize(i));
-1184
-1185      // HBASE-16288: We have to have at least minIndexNumEntries(16) items in the index so that
-1186      // we won't end up with too-many levels for a index with very large rowKeys. Also, if the
-1187      // first key is larger than maxChunkSize this will cause infinite recursion.
-1188      if (i >= minIndexNumEntries && curChunk.getRootSize() >= maxChunkSize) {
-1189        writeIntermediateBlock(out, parent, curChunk);
-1190      }
-1191    }
-1192
-1193    if (curChunk.ge

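writeIntermediateLevel above packs one level of index entries into parent chunks, flushing a chunk only once it holds at least minIndexNumEntries entries and exceeds maxChunkSize bytes (the HBASE-16288 guard against degenerate one-entry chunks). A hedged sketch of just that chunking rule follows, with illustrative types in place of BlockIndexChunk and byte-size accounting reduced to key lengths:

    import java.util.ArrayList;
    import java.util.List;

    /**
     * Sketch of the chunking idea in writeIntermediateLevel: accumulate index
     * entries into a chunk and flush it to the parent level once it is both
     * large enough and populous enough. Names here are stand-ins, not the
     * HFileBlockIndex API.
     */
    class IndexLevelChunker {
      static List<List<String>> buildParentLevel(List<String> level, int minEntries,
          int maxChunkBytes) {
        List<List<String>> parent = new ArrayList<>();
        List<String> curChunk = new ArrayList<>();
        int curBytes = 0;
        for (String key : level) {
          curChunk.add(key);
          curBytes += key.length();
          // Require a minimum entry count before flushing so an index with very
          // large keys cannot degenerate into one-entry chunks (cf. HBASE-16288).
          if (curChunk.size() >= minEntries && curBytes >= maxChunkBytes) {
            parent.add(curChunk);
            curChunk = new ArrayList<>();
            curBytes = 0;
          }
        }
        if (!curChunk.isEmpty()) {
          parent.add(curChunk); // flush the trailing partial chunk
        }
        return parent;
      }
    }
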
[13/51] [partial] hbase-site git commit: Published site at 8bf966c8e936dec4d83bcbe85c5aab543f14a0df.

2018-12-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/27555316/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index a957d31..62f81b6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -142,5192 +142,5186 @@
 134import org.apache.hadoop.hbase.wal.WAL;
 135import org.apache.hadoop.hbase.wal.WALFactory;
 136import org.apache.hadoop.hbase.wal.WALSplitter;
-137import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-138import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-139import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-140import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-141import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-142import org.apache.hadoop.ipc.RemoteException;
-143import org.apache.hadoop.security.UserGroupInformation;
-144import org.apache.hadoop.util.ReflectionUtils;
-145import org.apache.hadoop.util.Tool;
-146import org.apache.hadoop.util.ToolRunner;
-147import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-148import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-149import org.apache.yetus.audience.InterfaceAudience;
-150import org.apache.yetus.audience.InterfaceStability;
-151import org.apache.zookeeper.KeeperException;
-152import org.slf4j.Logger;
-153import org.slf4j.LoggerFactory;
-154
-155import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
-156import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-157import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
-158import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-159import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-160import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
-161import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
-162
-163import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
-165
-166/**
-167 * HBaseFsck (hbck) is a tool for checking and repairing region consistency and
-168 * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not
-169 * work with hbase-2.x; it can read state but is not allowed to change state; i.e. effect 'repair'.
-170 * See hbck2 (HBASE-19121) for a hbck tool for hbase2.
-171 *
-172 * <p>
-173 * Region consistency checks verify that hbase:meta, region deployment on region
-174 * servers and the state of data in HDFS (.regioninfo files) all are in
-175 * accordance.
-176 * <p>
-177 * Table integrity checks verify that all possible row keys resolve to exactly
-178 * one region of a table. This means there are no individual degenerate
-179 * or backwards regions; no holes between regions; and that there are no
-180 * overlapping regions.
-181 * <p>
-182 * The general repair strategy works in two phases:
-183 * <ol>
-184 * <li>Repair Table Integrity on HDFS. (merge or fabricate regions)
-185 * <li>Repair Region Consistency with hbase:meta and assignments
-186 * </ol>
-187 * <p>
-188 * For table integrity repairs, the tables' region directories are scanned
-189 * for .regioninfo files. Each table's integrity is then verified. If there
-190 * are any orphan regions (regions with no .regioninfo files) or holes, new
-191 * regions are fabricated. Backwards regions are sidelined as well as empty
-192 * degenerate (endkey==startkey) regions. If there are any overlapping regions,
-193 * a new region is created and all data is merged into the new region.
-194 * <p>
-195 * Table integrity repairs deal solely with HDFS and could potentially be done
-196 * offline -- the hbase region servers or master do not need to be running.
-197 * This phase can eventually be used to completely reconstruct the hbase:meta table in
-198 * an offline fashion.
-199 * <p>
-200 * Region consistency requires three conditions -- 1) valid .regioninfo file
-201 * present in an HDFS region dir, 2) valid row with .regioninfo data in META,
-202 * and 3) a region is deployed only at the regionserver that was assigned to
-203 * with proper state in the master.
-204 * <p>
-205 * Region consistency repairs require hbase to be online so that hbck can
-206 * contact the HBase master and region servers. The hbck#connect() method must
-207 * first be called successfully. Much of the region consistency information
-208 * is transient and less risky to repair.
-209 * <p>
-210 * If hbck is run from the command line, there are a handful of arguments that
-211 * can be used

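The javadoc above defines region consistency as three independently checkable conditions. As a reading aid, here is a tiny sketch of that predicate, using hypothetical field names rather than hbck's internal bookkeeping structures:

    /**
     * Sketch of the three region-consistency conditions listed in the HBaseFsck
     * javadoc above. RegionCheck and its fields are illustrative stand-ins.
     */
    class RegionConsistencySketch {
      static class RegionCheck {
        boolean hasRegionInfoInHdfs;    // 1) valid .regioninfo file in the HDFS region dir
        boolean hasMetaRow;             // 2) valid row with .regioninfo data in hbase:meta
        boolean deployedOnlyOnAssigned; // 3) deployed only at the assigned regionserver
      }

      static boolean isConsistent(RegionCheck r) {
        return r.hasRegionInfoInHdfs && r.hasMetaRow && r.deployedOnlyOnAssigned;
      }
    }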

[13/51] [partial] hbase-site git commit: Published site at 1acbd36c903b048141866b143507bfce124a5c5f.

2018-11-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5299e667/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index be5ad01..dd6d5f5 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -805,10 +805,10 @@ implements
  String msg)
-protected ClusterConnection
+private ClusterConnection
 createClusterConnection()
-Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to
- the local server; i.e.
+Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to the
+ local server; i.e.
@@ -2481,7 +2481,7 @@ protected static final
 movedRegions
-protected Map<String,HRegionServer.MovedRegionInfo> movedRegions
+protected Map<String,HRegionServer.MovedRegionInfo> movedRegions
@@ -2490,7 +2490,7 @@
 TIMEOUT_REGION_MOVED
-private static final int TIMEOUT_REGION_MOVED
+private static final int TIMEOUT_REGION_MOVED
 See Also:
 Constant Field Values
@@ -2710,11 +2710,10 @@
 createClusterConnection
-protected ClusterConnection createClusterConnection() throws IOException
-Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to
- the local server; i.e. a short-circuit Connection. Safe to use going to local or remote
- server. Create this instance in a method can be intercepted and mocked in tests.
+private ClusterConnection createClusterConnection() throws IOException
+Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to the
+ local server; i.e. a short-circuit Connection. Safe to use going to local or remote server.
 Throws:
 IOException
@@ -2727,7 +2726,7 @@
 checkCodecs
-private static void checkCodecs(org.apache.hadoop.conf.Configuration c) throws IOException
+private static void checkCodecs(org.apache.hadoop.conf.Configuration c) throws IOException
 Run test on configured codecs to make sure supporting libs are in place.
@@ -2744,7 +2743,7 @@
 getClusterId
-public String getClusterId()
+public String getClusterId()
@@ -2753,7 +2752,7 @@
 setupClusterConnection
-protected void setupClusterConnection() throws IOException
+protected void setupClusterConnection() throws IOException
 Setup our cluster connection if not already initialized.
@@ -2768,7 +2767,7 @@
 preRegistrationInitialization
-private void preRegistrationInitialization()
+private void preRegistrationInitialization()
 All initialization needed before we go register with Master.
 Do bare minimum. Do bulk of initializations AFTER we've connected to the Master.
 In here we just put up the RpcServer, setup Connection, and

[13/51] [partial] hbase-site git commit: Published site at 130057f13774f6b213cdb06952c805a29d59396e.

2018-11-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/68eae623/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.RestoreMetaChanges.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.RestoreMetaChanges.html b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.RestoreMetaChanges.html
index cbae28e..aec5920 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.RestoreMetaChanges.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.RestoreMetaChanges.html
@@ -64,825 +64,827 @@
 056import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 057import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 058import org.apache.hadoop.hbase.security.access.AccessControlClient;
-059import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
-060import org.apache.hadoop.hbase.security.access.TablePermission;
-061import org.apache.hadoop.hbase.util.Bytes;
-062import org.apache.hadoop.hbase.util.FSUtils;
-063import org.apache.hadoop.hbase.util.ModifyRegionUtils;
-064import org.apache.hadoop.hbase.util.Pair;
-065import org.apache.hadoop.io.IOUtils;
-066import org.apache.yetus.audience.InterfaceAudience;
-067import org.slf4j.Logger;
-068import org.slf4j.LoggerFactory;
-069import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
-070import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-071import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
-072import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-073
-074/**
-075 * Helper to Restore/Clone a Snapshot
-076 *
-077 * <p>The helper assumes that a table is already created, and by calling restore()
-078 * the content present in the snapshot will be restored as the new content of the table.
-079 *
-080 * <p>Clone from Snapshot: If the target table is empty, the restore operation
-081 * is just a "clone operation", where the only operations are:
-082 * <ul>
-083 *  <li>for each region in the snapshot create a new region
-084 *    (note that the region will have a different name, since the encoding contains the table name)
-085 *  <li>for each file in the region create a new HFileLink to point to the original file.
-086 *  <li>restore the logs, if any
-087 * </ul>
-088 *
-089 * <p>Restore from Snapshot:
-090 * <ul>
-091 *  <li>for each region in the table verify which are available in the snapshot and which are not
-092 *    <ul>
-093 *    <li>if the region is not present in the snapshot, remove it.
-094 *    <li>if the region is present in the snapshot
-095 *      <ul>
-096 *      <li>for each file in the table region verify which are available in the snapshot
-097 *        <ul>
-098 *          <li>if the hfile is not present in the snapshot, remove it
-099 *          <li>if the hfile is present, keep it (nothing to do)
-100 *        </ul>
-101 *      <li>for each file in the snapshot region but not in the table
-102 *        <ul>
-103 *          <li>create a new HFileLink that point to the original file
-104 *        </ul>
-105 *      </ul>
-106 *    </ul>
-107 *  <li>for each region in the snapshot not present in the current table state
-108 *    <ul>
-109 *    <li>create a new region and for each file in the region create a new HFileLink
-110 *      (This is the same as the clone operation)
-111 *    </ul>
-112 *  <li>restore the logs, if any
-113 * </ul>
-114 */
-115@InterfaceAudience.Private
-116public class RestoreSnapshotHelper {
-117  private static final Logger LOG = LoggerFactory.getLogger(RestoreSnapshotHelper.class);
-118
-119  private final Map regionsMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-120
-121  private final Map > parentsMap = new HashMap<>();
-122
-123  private final ForeignExceptionDispatcher monitor;
-124  private final MonitoredTask status;
-125
-126  private final SnapshotManifest snapshotManifest;
-127  private final SnapshotDescription snapshotDesc;
-128  private final TableName snapshotTable;
-129
-130  private final TableDescriptor tableDesc;
-131  private final Path rootDir;
-132  private final Path tableDir;
-133
-134  private final Configuration conf;
-135  private final FileSystem fs;
-136  private final boolean createBackRefs;
-137
-138  public RestoreSnapshotHelper(final Configuration conf,
-139      final FileSystem fs,
-140      final SnapshotManifest manifest,
-141      final TableDescriptor tableDescriptor,
-142      final Path rootDir,
-143      final ForeignExceptionDispatcher monitor,
-144      final MonitoredTask status) {
-145    this(conf, fs, manifest, tableDescriptor, rootDir, monitor, status, true);
-146  }
-147
-148  public RestoreSnapshotHelper(final Configuration conf,
-149      fina

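The class comment above amounts to a three-way set comparison between the regions in the table and the regions in the snapshot. A small sketch of that decision logic follows; the string sets and method names (reconcileFiles, cloneRegion) are stand-ins, not the helper's real API:

    import java.util.HashSet;
    import java.util.Set;

    /**
     * Sketch of the per-region decision described in the RestoreSnapshotHelper
     * javadoc: regions only in the snapshot are (re)created, regions only in the
     * table are removed, and shared regions are reconciled file by file.
     */
    class RestoreDecisionSketch {
      void restore(Set<String> tableRegions, Set<String> snapshotRegions) {
        for (String region : tableRegions) {
          if (snapshotRegions.contains(region)) {
            reconcileFiles(region);  // drop hfiles absent from the snapshot,
                                     // add HFileLinks for snapshot-only hfiles
          } else {
            removeRegion(region);    // not in the snapshot: remove it
          }
        }
        Set<String> missing = new HashSet<>(snapshotRegions);
        missing.removeAll(tableRegions);
        for (String region : missing) {
          cloneRegion(region);       // same as the clone path: new region + HFileLinks
        }
      }

      void reconcileFiles(String region) {}
      void removeRegion(String region) {}
      void cloneRegion(String region) {}
    }
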
[13/51] [partial] hbase-site git commit: Published site at d5e4faacc354c1bc4d93efa71ca97ee3a056123e.

2018-10-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
index 4c74942..c59a929 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/class-use/HBaseTestingUtility.html
@@ -2521,30 +2521,34 @@
 TestDeleteNamespaceProcedure.UTIL
+private static HBaseTestingUtility
+TestSchedulerQueueDeadLock.UTIL
+
 protected static HBaseTestingUtility
 TestMasterObserverPostCalls.UTIL
 protected static HBaseTestingUtility
 TestWALProcedureStoreOnHDFS.UTIL
 private static HBaseTestingUtility
 TestReopenTableRegionsProcedureInfiniteLoop.UTIL
 protected static HBaseTestingUtility
 TestSafemodeBringsDownMaster.UTIL
 protected static HBaseTestingUtility
 TestMasterFailoverWithProcedures.UTIL
 protected static HBaseTestingUtility
 TestMasterProcedureWalLease.UTIL
 protected static HBaseTestingUtility
 TestProcedureAdmin.UTIL
@@ -3204,61 +3208,65 @@
 private static HBaseTestingUtility
-TestShutdownWhileWALBroken.UTIL
+TestRegionServerAbortTimeout.UTIL
 private static HBaseTestingUtility
-TestHeapMemoryManager.UTIL
+TestShutdownWhileWALBroken.UTIL
 private static HBaseTestingUtility
-TestMajorCompaction.UTIL
+TestHeapMemoryManager.UTIL
 private static HBaseTestingUtility
-TestFlushLifeCycleTracker.UTIL
+TestMajorCompaction.UTIL
 private static HBaseTestingUtility
-TestMinorCompaction.UTIL
+TestFlushLifeCycleTracker.UTIL
 private static HBaseTestingUtility
-TestRegionServerCrashDisableWAL.UTIL
+TestMinorCompaction.UTIL
+private static HBaseTestingUtility
+TestRegionServerCrashDisableWAL.UTIL
+
 protected static HBaseTestingUtility
 TestHRegionServerBulkLoad.UTIL
 private static HBaseTestingUtility
 TestOpenSeqNumUnexpectedIncrease.UTIL
 private static HBaseTestingUtility
 TestMobStoreCompaction.UTIL
 private static HBaseTestingUtility
 TestCompaction.UTIL
 private static HBaseTestingUtility
 TestSwitchToStreamRead.UTIL
 private static HBaseTestingUtility
 TestGetClosestAtOrBefore.UTIL
 private static HBaseTestingUtility
 TestCompactionLifeCycleTracker.UTIL
 private static HBaseTestingUtility
 TestCompactionInDeadRegionServer.UTIL
 private static HBaseTestingUtility
 TestScannerRetriableFailure.UTIL

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.RegionServerWithScanTimeout.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.RegionServerWithScanTimeout.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.RegionServerWithScanTimeout.html
index 458ce8c..0c7de15 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.RegionServerWithScanTimeout.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.RegionServerWithScanTimeout.html
@@ -173,7 +173,7 @@ extends

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b5e107c3/testdevapidocs/org/apache/hadoop/hbase/client/TestMetaCache.RegionServerWithFakeRpcServices.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestMetaCache.RegionServerWithFakeRpcServices.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestMetaCache.RegionServerWithFakeRpcServices.html
index 5d2084e..0bc5274 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestMetaCache.RegionServerWithFakeRpcServices.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestMetaCache.RegionServerWithFakeRpcServices.html
@@ -179,7 +179,7 @@ extends org.apache.hadoop.hbase.regionserver.HRegionServer
 Fields inherited from class org.apache.hadoop.hbase.regionserver.HRegionServer
-cacheConfig, cacheFlusher, clusterConnection, clusterId, clusterStatusTracker, compactSplitThread, conf, configurationManager, csm, executorService, fs, fsOk, fsUtilizationChore, hMemManager, infoServer, leases, lock, MASTER_HOSTNAME_KEY, metaTableLocator, movedRegions, msgInterval, numRegionsToReport, onlineRegions, regionFavoredNodesMap, REGIONSERVER, regionsInTransitionInRS, replicationSinkHandler, replicationSourceHandler, rpcServices, secureBulkLoadManager, serverName, sleeper, startcode, tableDescriptors, TEST_SKIP_REPORTING_TRANSITION, threadWakeFrequency, useThisHostnameInstead, walFactory, walFs, walRoller, zooKeeper
+ABORT_TIMEOUT, ABORT_TIMEOUT_TASK, cacheConfig, cacheFlusher, clusterConnection, clust

[13/51] [partial] hbase-site git commit: Published site at 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.

2018-10-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8f09a71d/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
index c7d99b2..9d1542c 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.html
@@ -382,1357 +382,1365 @@
 374for (int i = 0; i < 
this.curFunctionCosts.length; i++) {
 375  curFunctionCosts[i] = 
tempFunctionCosts[i];
 376}
-377LOG.info("start 
StochasticLoadBalancer.balancer, initCost=" + currentCost + ", functionCost="
-378+ functionCost());
+377double initCost = currentCost;
+378double newCost = currentCost;
 379
-380double initCost = currentCost;
-381double newCost = currentCost;
-382
-383long computedMaxSteps;
-384if (runMaxSteps) {
-385  computedMaxSteps = 
Math.max(this.maxSteps,
-386  ((long)cluster.numRegions * 
(long)this.stepsPerRegion * (long)cluster.numServers));
-387} else {
-388  computedMaxSteps = 
Math.min(this.maxSteps,
-389  ((long)cluster.numRegions * 
(long)this.stepsPerRegion * (long)cluster.numServers));
-390}
-391// Perform a stochastic walk to see 
if we can get a good fit.
-392long step;
-393
-394for (step = 0; step < 
computedMaxSteps; step++) {
-395  Cluster.Action action = 
nextAction(cluster);
-396
-397  if (action.type == Type.NULL) {
-398continue;
-399  }
-400
-401  cluster.doAction(action);
-402  updateCostsWithAction(cluster, 
action);
-403
-404  newCost = computeCost(cluster, 
currentCost);
-405
-406  // Should this be kept?
-407  if (newCost < currentCost) {
-408currentCost = newCost;
-409
-410// save for JMX
-411curOverallCost = currentCost;
-412for (int i = 0; i < 
this.curFunctionCosts.length; i++) {
-413  curFunctionCosts[i] = 
tempFunctionCosts[i];
-414}
-415  } else {
-416// Put things back the way they 
were before.
-417// TODO: undo by remembering old 
values
-418Action undoAction = 
action.undoAction();
-419cluster.doAction(undoAction);
-420updateCostsWithAction(cluster, 
undoAction);
-421  }
-422
-423  if 
(EnvironmentEdgeManager.currentTime() - startTime >
-424  maxRunningTime) {
-425break;
-426  }
-427}
-428long endTime = 
EnvironmentEdgeManager.currentTime();
-429
-430
metricsBalancer.balanceCluster(endTime - startTime);
-431
-432// update costs metrics
-433updateStochasticCosts(tableName, 
curOverallCost, curFunctionCosts);
-434if (initCost > currentCost) {
-435  plans = 
createRegionPlans(cluster);
-436  LOG.info("Finished computing new 
load balance plan. Computation took {}" +
-437" to try {} different iterations. 
 Found a solution that moves " +
-438"{} regions; Going from a 
computed cost of {}" +
-439" to a new cost of {}", 
java.time.Duration.ofMillis(endTime - startTime),
-440step, plans.size(), initCost, 
currentCost);
-441  return plans;
-442}
-443LOG.info("Could not find a better 
load balance plan.  Tried {} different configurations in " +
-444  "{}, and did not find anything with 
a computed cost less than {}", step,
-445  java.time.Duration.ofMillis(endTime 
- startTime), initCost);
-446return null;
-447  }
-448
-449  /**
-450   * update costs to JMX
-451   */
-452  private void 
updateStochasticCosts(TableName tableName, Double overall, Double[] subCosts) 
{
-453if (tableName == null) return;
-454
-455// check if the metricsBalancer is 
MetricsStochasticBalancer before casting
-456if (metricsBalancer instanceof 
MetricsStochasticBalancer) {
-457  MetricsStochasticBalancer balancer 
= (MetricsStochasticBalancer) metricsBalancer;
-458  // overall cost
-459  
balancer.updateStochasticCost(tableName.getNameAsString(),
-460"Overall", "Overall cost", 
overall);
-461
-462  // each cost function
-463  for (int i = 0; i < 
costFunctions.length; i++) {
-464CostFunction costFunction = 
costFunctions[i];
-465String costFunctionName = 
costFunction.getClass().getSimpleName();
-466Double costPercent = (overall == 
0) ? 0 : (subCosts[i] / overall);
-467// TODO: cost function may need a 
specific description
-468
balancer.updateStochasticCost(tableName.getNameAsString(), costFunctionName,
-469  "The percent of " + 
costFunctionName, costPercent);
-470  }
-471}
-472  }
-473
-474  private String functionCost() {
-475StringBuilder builder = new 
StringBuilder();
-476for (CostFu

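Editor's sketch: the hunk above relocates the balancer's main loop, whose shape is: propose a random action, apply it, recompute the cost, keep the move only if the cost drops, otherwise undo it, and stop after computedMaxSteps or maxRunningTime. A minimal self-contained Java sketch of that accept-or-undo walk follows; the toy cost function and array-of-counts cluster model are illustrative stand-ins, not the HBase API.

import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;

/** Minimal sketch of the accept-or-undo stochastic walk in the hunk above. */
public class StochasticWalkSketch {

  // Toy cost: total deviation of per-server region counts from the mean.
  static double cost(int[] regionsPerServer) {
    double mean = 0;
    for (int n : regionsPerServer) {
      mean += n;
    }
    mean /= regionsPerServer.length;
    double dev = 0;
    for (int n : regionsPerServer) {
      dev += Math.abs(n - mean);
    }
    return dev;
  }

  public static void main(String[] args) {
    Random rnd = ThreadLocalRandom.current();
    int[] servers = {12, 3, 9, 0};                      // regions per server (toy cluster)
    double currentCost = cost(servers);
    long startTime = System.currentTimeMillis();
    long computedMaxSteps = 10_000, maxRunningTime = 1_000;
    for (long step = 0; step < computedMaxSteps; step++) {
      int from = rnd.nextInt(servers.length);
      int to = rnd.nextInt(servers.length);
      if (from == to || servers[from] == 0) {
        continue;                                       // the Type.NULL case: nothing to do
      }
      servers[from]--; servers[to]++;                   // doAction
      double newCost = cost(servers);
      if (newCost < currentCost) {
        currentCost = newCost;                          // keep the improvement
      } else {
        servers[to]--; servers[from]++;                 // undoAction: put things back
      }
      if (System.currentTimeMillis() - startTime > maxRunningTime) {
        break;                                          // time budget exhausted
      }
    }
    System.out.println("final cost=" + currentCost);
  }
}
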
[13/51] [partial] hbase-site git commit: Published site at 7adf590106826b9e4432cfeee06acdc0ccff8c6e.

2018-10-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/425db230/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.html
index eeaae59..cfef377 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.html
@@ -26,139 +26,137 @@
 018package 
org.apache.hadoop.hbase.procedure2.store.wal;
 019
 020import static 
org.junit.Assert.assertEquals;
-021import static 
org.junit.Assert.assertFalse;
-022import static 
org.junit.Assert.assertTrue;
-023import static org.junit.Assert.fail;
-024
-025import java.io.IOException;
-026import java.util.Random;
-027import 
java.util.concurrent.atomic.AtomicLong;
-028import 
org.apache.hadoop.conf.Configuration;
-029import org.apache.hadoop.fs.FileSystem;
-030import org.apache.hadoop.fs.Path;
-031import 
org.apache.hadoop.hbase.HBaseClassTestRule;
-032import 
org.apache.hadoop.hbase.HBaseCommonTestingUtility;
-033import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-034import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.LoadCounter;
-035import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
-036import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
-037import 
org.apache.hadoop.hbase.testclassification.LargeTests;
-038import 
org.apache.hadoop.hbase.testclassification.MasterTests;
-039import org.junit.After;
-040import org.junit.Before;
-041import org.junit.ClassRule;
-042import org.junit.Ignore;
-043import org.junit.Test;
-044import 
org.junit.experimental.categories.Category;
-045import org.slf4j.Logger;
-046import org.slf4j.LoggerFactory;
-047
-048@Category({MasterTests.class, 
LargeTests.class})
-049public class TestStressWALProcedureStore 
{
-050
-051  @ClassRule
-052  public static final HBaseClassTestRule 
CLASS_RULE =
-053  
HBaseClassTestRule.forClass(TestStressWALProcedureStore.class);
+021import static 
org.junit.Assert.assertTrue;
+022
+023import java.io.IOException;
+024import java.util.Random;
+025import 
java.util.concurrent.atomic.AtomicLong;
+026import 
org.apache.hadoop.conf.Configuration;
+027import org.apache.hadoop.fs.FileSystem;
+028import org.apache.hadoop.fs.Path;
+029import 
org.apache.hadoop.hbase.HBaseClassTestRule;
+030import 
org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+031import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+032import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.LoadCounter;
+033import 
org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
+034import 
org.apache.hadoop.hbase.procedure2.util.StringUtils;
+035import 
org.apache.hadoop.hbase.testclassification.LargeTests;
+036import 
org.apache.hadoop.hbase.testclassification.MasterTests;
+037import org.junit.After;
+038import org.junit.Before;
+039import org.junit.ClassRule;
+040import org.junit.Ignore;
+041import org.junit.Test;
+042import 
org.junit.experimental.categories.Category;
+043import org.slf4j.Logger;
+044import org.slf4j.LoggerFactory;
+045
+046@Category({MasterTests.class, 
LargeTests.class})
+047public class TestStressWALProcedureStore 
{
+048
+049  @ClassRule
+050  public static final HBaseClassTestRule 
CLASS_RULE =
+051  
HBaseClassTestRule.forClass(TestStressWALProcedureStore.class);
+052
+053  private static final Logger LOG = 
LoggerFactory.getLogger(TestWALProcedureStore.class);
 054
-055  private static final Logger LOG = 
LoggerFactory.getLogger(TestWALProcedureStore.class);
+055  private static final int 
PROCEDURE_STORE_SLOTS = 8;
 056
-057  private static final int 
PROCEDURE_STORE_SLOTS = 8;
+057  private WALProcedureStore procStore;
 058
-059  private WALProcedureStore procStore;
-060
-061  private HBaseCommonTestingUtility 
htu;
-062  private FileSystem fs;
-063  private Path testDir;
-064  private Path logDir;
-065
-066  private void 
setupConfiguration(Configuration conf) {
-067
conf.setBoolean(WALProcedureStore.USE_HSYNC_CONF_KEY, false);
-068
conf.setInt(WALProcedureStore.PERIODIC_ROLL_CONF_KEY, 5000);
-069
conf.setInt(WALProcedureStore.ROLL_THRESHOLD_CONF_KEY, 128 * 1024);
-070  }
-071
-072  @Before
-073  public void setUp() throws IOException 
{
-074htu = new 
HBaseCommonTestingUtility();
-075
setupConfiguration(htu.getConfiguration());
-076
-077testDir = htu.getDataTestDir();
-078fs = 
testDir.getFileSystem(htu.getConfiguration());
-079assertTrue(testDir.depth() > 1);
-080
-081logDir = new Path(testDir, 
"proc-logs");
-082procStore = 
ProcedureTestingUtility.createWalStore(htu.getConfiguratio

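Editor's sketch: the setupConfiguration() visible in this hunk tunes WALProcedureStore for the stress run: hsync disabled, a periodic roll every 5 seconds, and a small 128 KB roll threshold so the store rolls logs constantly. A hedged sketch of the same tuning on its own, using only the constants visible in the diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;

public class StressStoreConf {
  public static Configuration stressTuned() {
    Configuration conf = new Configuration();
    // Trade durability for speed and force frequent log rolls, as the test does.
    conf.setBoolean(WALProcedureStore.USE_HSYNC_CONF_KEY, false);
    conf.setInt(WALProcedureStore.PERIODIC_ROLL_CONF_KEY, 5000);        // roll every 5 s
    conf.setInt(WALProcedureStore.ROLL_THRESHOLD_CONF_KEY, 128 * 1024); // ...or at 128 KB
    return conf;
  }
}
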
[13/51] [partial] hbase-site git commit: Published site at 5fbb227deb365fe812d433fe39b85ac4b0ddee20.

2018-10-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c9ebe686/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
index 94b8afc..bcc9006 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -299,13 +299,13 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 private https://docs.oracle.com/javase/8/docs/api/java/lang/ThreadGroup.html?is-external=true";
 title="class or interface in java.lang">ThreadGroup
 threadGroup
-Created in the #start(int, boolean) 
method.
+Created in the init(int,
 boolean) method.
 
 
 
 private TimeoutExecutorThread
 timeoutExecutor
-Created in the #start(int, boolean) 
method.
+Created in the init(int,
 boolean) method.
 
 
 
@@ -319,7 +319,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CopyOnWriteArrayList.html?is-external=true";
 title="class or interface in 
java.util.concurrent">CopyOnWriteArrayList
 workerThreads
-Created in the #start(int, boolean) 
method.
+Created in the init(int,
 boolean)  method.
 
 
 
@@ -387,17 +387,19 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List<Boolean>
-bypassProcedure(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List<Long> pids,
+bypassProcedure(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List<Long> pids,
long lockWait,
-   boolean force)
+   boolean force,
+   boolean recursive)
 Bypass a procedure.
 
 
 
 (package private) boolean
-bypassProcedure(long pid,
+bypassProcedure(long pid,
long lockWait,
-   boolean force) 
+   boolean override,
+   boolean recursive) 
 
 
 private void
@@ -559,106 +561,110 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
+private boolean
+isRootFinished(Procedure proc) 
+
+
 boolean
 isRunning() 
 
-
+
 boolean
 isStarted(long procId)
 Return true if the procedure is started.
 
 
-
+
 void
 join() 
 
-
+
 private void
 kill(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String msg) 
 
-
+
 private void
 load(boolean abortOnCorruption) 
 
-
+
 private void
 loadProcedures(ProcedureStore.ProcedureIterator procIter,
   boolean abortOnCorruption) 
 
-
+
 private long
 nextProcId() 
 
-
+
 private Procedure

[13/51] [partial] hbase-site git commit: Published site at 821e4d7de2d576189f4288d1c2acf9e9a9471f5c.

2018-10-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/323b17d9/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.TestSequentialProcedure.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.TestSequentialProcedure.html b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.TestSequentialProcedure.html
index 3f34fc3..17b6926 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.TestSequentialProcedure.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.TestSequentialProcedure.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class TestProcedureExecution.TestSequentialProcedure
+public static class TestProcedureExecution.TestSequentialProcedure
extends org.apache.hadoop.hbase.procedure2.SequentialProcedure<Void>
 
 
@@ -175,7 +175,7 @@ extends 
org.apache.hadoop.hbase.procedure2.SequentialProcedureVoid>[]
 subProcs 
 
 
@@ -232,7 +232,7 @@ extends 
org.apache.hadoop.hbase.procedure2.SequentialProcedureVoid env) 
 
 
-protected org.apache.hadoop.hbase.procedure2.Procedure[]
+protected org.apache.hadoop.hbase.procedure2.Procedure<Void>[]
 execute(https://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true";
 title="class or interface in java.lang">Void env) 
 
 
@@ -281,7 +281,7 @@ extends 
org.apache.hadoop.hbase.procedure2.SequentialProcedure<
 
 subProcs
-private final org.apache.hadoop.hbase.procedure2.Procedure[] subProcs
+private final org.apache.hadoop.hbase.procedure2.Procedure<Void>[] subProcs
 
 
 
@@ -290,7 +290,7 @@ extends 
org.apache.hadoop.hbase.procedure2.SequentialProcedure<
 
 state
-private final https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List<String> state
+private final https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List<String> state
 
 
 
@@ -299,7 +299,7 @@ extends 
org.apache.hadoop.hbase.procedure2.SequentialProcedure<
 
 failure
-private final https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception failure
+private final https://docs.oracle.com/javase/8/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception failure
 
 
 
@@ -308,7 +308,7 @@ extends 
org.apache.hadoop.hbase.procedure2.SequentialProcedure<
 
 name
-private final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name
+private final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name
 
 
 
@@ -325,7 +325,7 @@ extends 
org.apache.hadoop.hbase.procedure2.SequentialProcedure<
 
 TestSequentialProcedure
-public TestSequentialProcedure()
+public TestSequentialProcedure()
 
 
 
@@ -334,7 +334,7 @@ extends 
org.apache.hadoop.hbase.procedure2.SequentialProcedure<
 
 TestSequentialProcedure
-public TestSequentialProcedure(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name,
+public TestSequentialProcedure(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name,
https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListString> state,

org.apache.hadoop.hbase.procedure2.Procedure... subProcs)
 
@@ -345,7 +345,7 @@ extends 
org.apache.h

[13/51] [partial] hbase-site git commit: Published site at fa5fa6ecdd071b72b58971058ff3ab9d28c3e709.

2018-10-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d1341859/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.WalProps.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.WalProps.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.WalProps.html
index f1a6dfa..e0f7285 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.WalProps.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.WalProps.html
@@ -416,735 +416,730 @@
 408this.blocksize = 
WALUtil.getWALBlockSize(this.conf, this.fs, this.walDir);
 409float multiplier = 
conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
 410this.logrollsize = 
(long)(this.blocksize * multiplier);
-411
-412boolean maxLogsDefined = 
conf.get("hbase.regionserver.maxlogs") != null;
-413if (maxLogsDefined) {
-414  
LOG.warn("'hbase.regionserver.maxlogs' was deprecated.");
-415}
-416this.maxLogs = 
conf.getInt("hbase.regionserver.maxlogs",
-417  Math.max(32, 
calculateMaxLogFiles(conf, logrollsize)));
-418
-419LOG.info("WAL configuration: 
blocksize=" + StringUtils.byteDesc(blocksize) + ", rollsize=" +
-420  
StringUtils.byteDesc(this.logrollsize) + ", prefix=" + this.walFilePrefix + ", 
suffix=" +
-421  walFileSuffix + ", logDir=" + 
this.walDir + ", archiveDir=" + this.walArchiveDir);
-422this.slowSyncNs = 
TimeUnit.MILLISECONDS
-423
.toNanos(conf.getInt("hbase.regionserver.hlog.slowsync.ms", 
DEFAULT_SLOW_SYNC_TIME_MS));
-424this.walSyncTimeoutNs = 
TimeUnit.MILLISECONDS
-425
.toNanos(conf.getLong("hbase.regionserver.hlog.sync.timeout", 
DEFAULT_WAL_SYNC_TIMEOUT_MS));
-426this.cachedSyncFutures = new ThreadLocal<SyncFuture>() {
-427  @Override
-428  protected SyncFuture initialValue() 
{
-429return new SyncFuture();
-430  }
-431};
-432this.implClassName = 
getClass().getSimpleName();
-433  }
-434
-435  /**
-436   * Used to initialize the WAL. Usually 
just call rollWriter to create the first log writer.
-437   */
-438  public void init() throws IOException 
{
-439rollWriter();
+411this.maxLogs = 
conf.getInt("hbase.regionserver.maxlogs",
+412  Math.max(32, 
calculateMaxLogFiles(conf, logrollsize)));
+413
+414LOG.info("WAL configuration: 
blocksize=" + StringUtils.byteDesc(blocksize) + ", rollsize=" +
+415  
StringUtils.byteDesc(this.logrollsize) + ", prefix=" + this.walFilePrefix + ", 
suffix=" +
+416  walFileSuffix + ", logDir=" + 
this.walDir + ", archiveDir=" + this.walArchiveDir);
+417this.slowSyncNs = 
TimeUnit.MILLISECONDS
+418
.toNanos(conf.getInt("hbase.regionserver.hlog.slowsync.ms", 
DEFAULT_SLOW_SYNC_TIME_MS));
+419this.walSyncTimeoutNs = 
TimeUnit.MILLISECONDS
+420
.toNanos(conf.getLong("hbase.regionserver.hlog.sync.timeout", 
DEFAULT_WAL_SYNC_TIMEOUT_MS));
+421this.cachedSyncFutures = new ThreadLocal<SyncFuture>() {
+422  @Override
+423  protected SyncFuture initialValue() 
{
+424return new SyncFuture();
+425  }
+426};
+427this.implClassName = 
getClass().getSimpleName();
+428  }
+429
+430  /**
+431   * Used to initialize the WAL. Usually 
just call rollWriter to create the first log writer.
+432   */
+433  public void init() throws IOException 
{
+434rollWriter();
+435  }
+436
+437  @Override
+438  public void 
registerWALActionsListener(WALActionsListener listener) {
+439this.listeners.add(listener);
 440  }
 441
 442  @Override
-443  public void 
registerWALActionsListener(WALActionsListener listener) {
-444this.listeners.add(listener);
+443  public boolean 
unregisterWALActionsListener(WALActionsListener listener) {
+444return 
this.listeners.remove(listener);
 445  }
 446
 447  @Override
-448  public boolean 
unregisterWALActionsListener(WALActionsListener listener) {
-449return 
this.listeners.remove(listener);
+448  public WALCoprocessorHost 
getCoprocessorHost() {
+449return coprocessorHost;
 450  }
 451
 452  @Override
-453  public WALCoprocessorHost 
getCoprocessorHost() {
-454return coprocessorHost;
+453  public Long startCacheFlush(byte[] encodedRegionName, Set<byte[]> families) {
+454return 
this.sequenceIdAccounting.startCacheFlush(encodedRegionName, families);
 455  }
 456
 457  @Override
-458  public Long startCacheFlush(byte[] encodedRegionName, Set<byte[]> families) {
-459return 
this.sequenceIdAccounting.startCacheFlush(encodedRegionName, families);
+458  public Long startCacheFlush(byte[] encodedRegionName, Map<byte[], Long> familyToSeq) {
+459return 
this.sequenceIdAccounting.startCacheFlush(encodedRegionName, familyToSeq);
 460  }
 461
 462  @Override
-463  public Long startCacheFlush(byte[] encodedRegionName, Map<byte[], Long> familyToSeq) {
-464return 
this.se

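Editor's sketch: the hunk above drops the deprecation warning for hbase.regionserver.maxlogs and keeps the sizing math: the roll size is the WAL block size scaled by hbase.regionserver.logroll.multiplier (default 0.5), and maxlogs falls back to a computed value but never below 32. A toy reproduction of that arithmetic; the block size and the fallback heuristic are assumed values for the sketch, not taken from WALUtil or calculateMaxLogFiles:

/** Toy reproduction of the roll-size arithmetic in the hunk above. */
public class WalRollSizeSketch {
  public static void main(String[] args) {
    long blocksize = 256L * 1024 * 1024;   // stand-in for WALUtil.getWALBlockSize(...)
    float multiplier = 0.5f;               // hbase.regionserver.logroll.multiplier default
    long logrollsize = (long) (blocksize * multiplier);
    // hbase.regionserver.maxlogs falls back to a computed value, floored at 32.
    long assumedGlobalMemstore = 2L * 1024 * 1024 * 1024;  // assumption for the sketch
    int maxLogs = Math.max(32, (int) (assumedGlobalMemstore / logrollsize));
    System.out.println("rollsize=" + logrollsize + " maxlogs=" + maxLogs);
  }
}
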
[13/51] [partial] hbase-site git commit: Published site at 6bc7089f9e0793efc9bdd46a84f5ccd9bc4579ad.

2018-09-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/419d0338/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 83c6e82..e43d3af 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -158,6 +158,8 @@
  
 abort(TestProcedureReplayOrder.TestProcedureEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureReplayOrder.TestProcedure
  
+abort(TestProcedureSkipPersistence.ProcEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureSkipPersistence.TestProcedure
+ 
 abort(TestProcedureSuspended.TestProcEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureSuspended.TestLockProcedure
  
 abort(TestProcedureToString.BasicProcedureEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureToString.BasicProcedure
@@ -540,6 +542,8 @@
  
 addBarrier(RegionInfo,
 long...) - Method in class 
org.apache.hadoop.hbase.master.cleaner.TestReplicationBarrierCleaner
  
+addChildProcedure(T...)
 - Method in class org.apache.hadoop.hbase.master.replication.TestModifyPeerProcedureRetryBackoff.TestModifyPeerProcedure
+ 
 addColumn(TableName,
 ColumnFamilyDescriptor, long, long) - Method in class 
org.apache.hadoop.hbase.master.MockNoopMasterServices
  
 AddColumnAction - Class in org.apache.hadoop.hbase.chaos.actions
@@ -1475,6 +1479,8 @@
  
 assertAuthMethodWrite(DataOutputBuffer,
 AuthMethod) - Method in class org.apache.hadoop.hbase.security.TestHBaseSaslRpcClient
  
+assertBackoffIncrease()
 - Method in class org.apache.hadoop.hbase.master.replication.TestModifyPeerProcedureRetryBackoff
+ 
 assertBuffersEqual(ByteBuff,
 ByteBuff, Compression.Algorithm, DataBlockEncoding, boolean) - 
Static method in class org.apache.hadoop.hbase.io.hfile.TestHFileBlock
  
 assertByteEquals(byte[],
 byte[]) - Static method in class org.apache.hadoop.hbase.HBaseTestCase
@@ -5205,6 +5211,8 @@
  
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.master.procedure.TestWALProcedureStoreOnHDFS
  
+CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.master.replication.TestModifyPeerProcedureRetryBackoff
+ 
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.master.snapshot.TestSnapshotFileCache
  
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.master.snapshot.TestSnapshotHFileCleaner
@@ -5415,6 +5423,8 @@
  
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureSchedulerConcurrency
  
+CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureSkipPersistence
+ 
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureSuspended
  
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.procedure2.TestProcedureToString
@@ -11989,6 +11999,8 @@
  
 deserializeStateData(ProcedureStateSerializer)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureReplayOrder.TestProcedure
  
+deserializeStateData(ProcedureStateSerializer)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureSkipPersistence.TestProcedure
+ 
 deserializeStateData(ProcedureStateSerializer)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureSuspended.TestLockProcedure
  
 deserializeStateData(ProcedureStateSerializer)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureToString.BasicProcedure
@@ -12889,10 +12901,14 @@
 
 enabledTables
 - Variable in class org.apache.hadoop.hbase.IntegrationTestDDLMasterFailover
  
+enablePeer(MasterProcedureEnv)
 - Method in class org.apache.hadoop.hbase.master.replication.TestModifyPeerProcedureRetryBackoff.TestModifyPeerProcedure
+ 
 enablePeer(String,
 int) - Method in class org.apache.hadoop.hbase.replication.TestMasterReplication
  
 enablePeerAndWaitUntilReplicationDone(int)
 - Method in class org.apache.hadoop.hbase.replication.SerialReplicationTestBase
  
+enablePeerBeforeFinish()
 - Method in class org.apache.hadoop.hbase.master.replication.TestModifyPeerProcedureRetryBackoff.TestModifyPeerProcedure
+ 
 enablePeerCalled
 - Variable in class org.apache.hadoop.hbase.replication.TestReplicationProcedureRetry.MockHMaster
  
 enableReplicationByModification(TableName,
 boolean, int, int, int) - Static method in class 
org.apache.hadoop.hbase.regionserver.TestRegionReplicasWithModifyTable
@@ -13410,6 +13426,8 @@
  
 execute(TestProcedureReplayOrder.TestProcedureEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureReplayOrder.TestTwoStepProcedure
  
+execute(TestProcedureSkipPersistence.ProcEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureSkipPersistence.TestProcedure
+ 
 execute(TestProcedureSuspended.TestProcEnv)
 - Method in class org.apache.hadoop.hbase.procedure2.TestProcedureSuspended.TestLockProcedure
  
 execute(TestProcedureToString.BasicProcedureEnv)
 - Method in class org.apac

[13/51] [partial] hbase-site git commit: Published site at d7e08317d2f214e4cca7b67578aba0ed7a567d54.

2018-09-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/37cf49a6/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
index c9ef545..85e5dcc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.html
@@ -42,160 +42,163 @@
 034import 
org.apache.hadoop.hbase.master.assignment.RegionStateNode;
 035import 
org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 036import 
org.apache.hadoop.hbase.security.User;
-037import 
org.apache.yetus.audience.InterfaceAudience;
-038
-039/**
-040 * Base class for all the Table 
procedures that want to use a StateMachineProcedure.
-041 * It provides helpers like basic 
locking, sync latch, and toStringClassDetails().
-042 */
-043@InterfaceAudience.Private
-044public abstract class AbstractStateMachineTableProcedure<TState>
-045extends StateMachineProcedure<MasterProcedureEnv, TState>
-046implements TableProcedureInterface {
-047
-048  // used for compatibility with old 
clients
-049  private final ProcedurePrepareLatch 
syncLatch;
-050
-051  private User user;
-052
-053  protected 
AbstractStateMachineTableProcedure() {
-054// Required by the Procedure 
framework to create the procedure on replay
-055syncLatch = null;
-056  }
-057
-058  protected 
AbstractStateMachineTableProcedure(final MasterProcedureEnv env) {
-059this(env, null);
-060  }
-061
-062  /**
-063   * @param env Uses this to set 
Procedure Owner at least.
-064   */
-065  protected 
AbstractStateMachineTableProcedure(final MasterProcedureEnv env,
-066  final ProcedurePrepareLatch latch) 
{
-067if (env != null) {
-068  this.user = env.getRequestUser();
-069  this.setOwner(user);
-070}
-071// used for compatibility with 
clients without procedures
-072// they need a sync 
TableExistsException, TableNotFoundException, TableNotDisabledException, ...
-073this.syncLatch = latch;
-074  }
-075
-076  @Override
-077  public abstract TableName 
getTableName();
-078
-079  @Override
-080  public abstract TableOperationType 
getTableOperationType();
-081
-082  @Override
-083  public void toStringClassDetails(final 
StringBuilder sb) {
-084
sb.append(getClass().getSimpleName());
-085sb.append(" table=");
-086sb.append(getTableName());
-087  }
-088
-089  @Override
-090  protected boolean 
waitInitialized(MasterProcedureEnv env) {
-091return env.waitInitialized(this);
-092  }
-093
-094  @Override
-095  protected LockState acquireLock(final 
MasterProcedureEnv env) {
-096if 
(env.getProcedureScheduler().waitTableExclusiveLock(this, getTableName())) {
-097  return LockState.LOCK_EVENT_WAIT;
-098}
-099return LockState.LOCK_ACQUIRED;
-100  }
-101
-102  @Override
-103  protected void releaseLock(final 
MasterProcedureEnv env) {
-104
env.getProcedureScheduler().wakeTableExclusiveLock(this, getTableName());
-105  }
-106
-107  protected User getUser() {
-108return user;
-109  }
-110
-111  protected void setUser(final User user) 
{
-112this.user = user;
-113  }
-114
-115  protected void releaseSyncLatch() {
-116
ProcedurePrepareLatch.releaseLatch(syncLatch, this);
-117  }
-118
-119  /**
-120   * Check whether a table is modifiable 
- exists and either offline or online with config set
-121   * @param env MasterProcedureEnv
-122   * @throws IOException
-123   */
-124  protected void 
checkTableModifiable(final MasterProcedureEnv env) throws IOException {
-125// Checks whether the table exists
-126if 
(!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), 
getTableName())) {
-127  throw new 
TableNotFoundException(getTableName());
-128}
-129  }
-130
-131  protected final Path 
getRegionDir(MasterProcedureEnv env, RegionInfo region) throws IOException {
-132return 
env.getMasterServices().getMasterFileSystem().getRegionDir(region);
-133  }
-134
-135  /**
-136   * Check that cluster is up and master 
is running. Check table is modifiable.
-137   * If enabled, 
check table is enabled else check it is disabled.
-138   * Call in Procedure constructor so can 
pass any exception to caller.
-139   * @param enabled If true, check table 
is enabled and throw exception if not. If false, do the
-140   *inverse. If null, do 
no table checks.
-141   */
-142  protected void 
preflightChecks(MasterProcedureEnv env, Boolean enabled) throws 
HBaseIOException {
-143MasterServices master = 
env.getMasterServices();
-144if (!master.isClusterUp()) {
-145  throw new HBaseIOException("Cl

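Editor's sketch: the lines moved in this hunk spell out the contract of AbstractStateMachineTableProcedure: subclasses supply getTableName() and getTableOperationType(), while the base class handles the table-exclusive lock and the sync latch. A self-contained mini-model of just the lock helpers; LockState, Scheduler and the anonymous procedures are illustrative stand-ins, not the HBase classes:

/** Mini-model of the acquireLock/releaseLock helpers in the hunk above. */
public class TableLockSketch {
  enum LockState { LOCK_ACQUIRED, LOCK_EVENT_WAIT }

  static class Scheduler {
    private final java.util.Set<String> lockedTables = new java.util.HashSet<>();
    // Returns true when someone else already holds the lock, i.e. the caller must wait.
    synchronized boolean waitTableExclusiveLock(String table) {
      return !lockedTables.add(table);
    }
    synchronized void wakeTableExclusiveLock(String table) { lockedTables.remove(table); }
  }

  static abstract class AbstractTableProc {
    abstract String getTableName();
    LockState acquireLock(Scheduler s) {
      return s.waitTableExclusiveLock(getTableName())
          ? LockState.LOCK_EVENT_WAIT : LockState.LOCK_ACQUIRED;
    }
    void releaseLock(Scheduler s) { s.wakeTableExclusiveLock(getTableName()); }
  }

  public static void main(String[] args) {
    Scheduler sched = new Scheduler();
    AbstractTableProc p1 = new AbstractTableProc() { String getTableName() { return "t1"; } };
    AbstractTableProc p2 = new AbstractTableProc() { String getTableName() { return "t1"; } };
    System.out.println(p1.acquireLock(sched)); // LOCK_ACQUIRED
    System.out.println(p2.acquireLock(sched)); // LOCK_EVENT_WAIT, same table
    p1.releaseLock(sched);
  }
}
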
[13/51] [partial] hbase-site git commit: Published site at 8eaaa63114a64bcaeaf0ed9bdd88615ee22255c1.

2018-09-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f6f9d4f3/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
index 25f458d..20e3eaa 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
@@ -28,3711 +28,3756 @@
 020import java.io.FileNotFoundException;
 021import java.io.IOException;
 022import java.io.InterruptedIOException;
-023import 
java.lang.reflect.InvocationTargetException;
-024import java.net.BindException;
-025import java.net.InetSocketAddress;
-026import java.net.UnknownHostException;
-027import java.nio.ByteBuffer;
-028import java.util.ArrayList;
-029import java.util.Arrays;
-030import java.util.Collections;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.List;
-034import java.util.Map;
-035import java.util.Map.Entry;
-036import java.util.NavigableMap;
-037import java.util.Set;
-038import java.util.TreeSet;
-039import 
java.util.concurrent.ConcurrentHashMap;
-040import 
java.util.concurrent.ConcurrentMap;
-041import java.util.concurrent.TimeUnit;
-042import 
java.util.concurrent.atomic.AtomicBoolean;
-043import 
java.util.concurrent.atomic.AtomicLong;
-044import 
java.util.concurrent.atomic.LongAdder;
-045import 
org.apache.commons.lang3.mutable.MutableObject;
-046import 
org.apache.hadoop.conf.Configuration;
-047import org.apache.hadoop.fs.Path;
-048import 
org.apache.hadoop.hbase.ByteBufferExtendedCell;
-049import 
org.apache.hadoop.hbase.CacheEvictionStats;
-050import 
org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import org.apache.hadoop.hbase.Cell;
-052import 
org.apache.hadoop.hbase.CellScannable;
-053import 
org.apache.hadoop.hbase.CellScanner;
-054import 
org.apache.hadoop.hbase.CellUtil;
-055import 
org.apache.hadoop.hbase.CompareOperator;
-056import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-057import 
org.apache.hadoop.hbase.DroppedSnapshotException;
-058import 
org.apache.hadoop.hbase.HBaseIOException;
-059import 
org.apache.hadoop.hbase.HConstants;
-060import 
org.apache.hadoop.hbase.MultiActionResultTooLarge;
-061import 
org.apache.hadoop.hbase.NotServingRegionException;
-062import 
org.apache.hadoop.hbase.PrivateCellUtil;
-063import 
org.apache.hadoop.hbase.RegionTooBusyException;
-064import org.apache.hadoop.hbase.Server;
-065import 
org.apache.hadoop.hbase.ServerName;
-066import 
org.apache.hadoop.hbase.TableName;
-067import 
org.apache.hadoop.hbase.UnknownScannerException;
-068import 
org.apache.hadoop.hbase.client.Append;
-069import 
org.apache.hadoop.hbase.client.ConnectionUtils;
-070import 
org.apache.hadoop.hbase.client.Delete;
-071import 
org.apache.hadoop.hbase.client.Durability;
-072import 
org.apache.hadoop.hbase.client.Get;
-073import 
org.apache.hadoop.hbase.client.Increment;
-074import 
org.apache.hadoop.hbase.client.Mutation;
-075import 
org.apache.hadoop.hbase.client.Put;
-076import 
org.apache.hadoop.hbase.client.RegionInfo;
-077import 
org.apache.hadoop.hbase.client.RegionReplicaUtil;
-078import 
org.apache.hadoop.hbase.client.Result;
-079import 
org.apache.hadoop.hbase.client.Row;
-080import 
org.apache.hadoop.hbase.client.RowMutations;
-081import 
org.apache.hadoop.hbase.client.Scan;
-082import 
org.apache.hadoop.hbase.client.TableDescriptor;
-083import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-084import 
org.apache.hadoop.hbase.conf.ConfigurationObserver;
-085import 
org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-086import 
org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
-087import 
org.apache.hadoop.hbase.exceptions.ScannerResetException;
-088import 
org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-089import 
org.apache.hadoop.hbase.filter.ByteArrayComparable;
-090import 
org.apache.hadoop.hbase.io.TimeRange;
-091import 
org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
-092import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-093import 
org.apache.hadoop.hbase.ipc.PriorityFunction;
-094import 
org.apache.hadoop.hbase.ipc.QosPriority;
-095import 
org.apache.hadoop.hbase.ipc.RpcCallContext;
-096import 
org.apache.hadoop.hbase.ipc.RpcCallback;
-097import 
org.apache.hadoop.hbase.ipc.RpcScheduler;
-098import 
org.apache.hadoop.hbase.ipc.RpcServer;
-099import 
org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
-100import 
org.apache.hadoop.hbase.ipc.RpcServerFactory;
-101import 
org.apache.hadoop.hbase.ipc.RpcServerInterface;
-102import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-103import 
org.apache.hadoop.

[13/51] [partial] hbase-site git commit: Published site at cd161d976ef47b84e904f2d54bac65d2f3417c2a.

2018-09-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fa1bebf8/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
index df4d2d2..20442d4 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.html
@@ -552,1331 +552,1334 @@
 544}
 545  }
 546
-547  public void assign(RegionInfo 
regionInfo, ServerName sn) throws IOException {
-548// TODO: should we use 
getRegionStateNode?
-549RegionStateNode regionNode = 
regionStates.getOrCreateRegionStateNode(regionInfo);
-550TransitRegionStateProcedure proc;
-551regionNode.lock();
-552try {
-553  preTransitCheck(regionNode, 
STATES_EXPECTED_ON_ASSIGN);
-554  proc = 
TransitRegionStateProcedure.assign(getProcedureEnvironment(), regionInfo, 
sn);
-555  regionNode.setProcedure(proc);
-556} finally {
-557  regionNode.unlock();
-558}
-559
ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), 
proc);
-560  }
-561
-562  public void assign(RegionInfo 
regionInfo) throws IOException {
-563assign(regionInfo, null);
-564  }
-565
-566  public void unassign(RegionInfo 
regionInfo) throws IOException {
-567RegionStateNode regionNode = 
regionStates.getRegionStateNode(regionInfo);
-568if (regionNode == null) {
-569  throw new 
UnknownRegionException("No RegionState found for " + 
regionInfo.getEncodedName());
-570}
-571TransitRegionStateProcedure proc;
-572regionNode.lock();
-573try {
-574  preTransitCheck(regionNode, 
STATES_EXPECTED_ON_UNASSIGN_OR_MOVE);
-575  proc = 
TransitRegionStateProcedure.unassign(getProcedureEnvironment(), regionInfo);
-576  regionNode.setProcedure(proc);
-577} finally {
-578  regionNode.unlock();
-579}
-580
ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), 
proc);
-581  }
-582
-583  private TransitRegionStateProcedure 
createMoveRegionProcedure(RegionInfo regionInfo,
-584  ServerName targetServer) throws 
HBaseIOException {
-585RegionStateNode regionNode = 
this.regionStates.getRegionStateNode(regionInfo);
-586if (regionNode == null) {
-587  throw new 
UnknownRegionException("No RegionState found for " + 
regionInfo.getEncodedName());
-588}
-589TransitRegionStateProcedure proc;
-590regionNode.lock();
-591try {
-592  preTransitCheck(regionNode, 
STATES_EXPECTED_ON_UNASSIGN_OR_MOVE);
-593  regionNode.checkOnline();
-594  proc = 
TransitRegionStateProcedure.move(getProcedureEnvironment(), regionInfo, 
targetServer);
-595  regionNode.setProcedure(proc);
-596} finally {
-597  regionNode.unlock();
-598}
-599return proc;
-600  }
-601
-602  public void move(RegionInfo regionInfo) 
throws IOException {
-603TransitRegionStateProcedure proc = 
createMoveRegionProcedure(regionInfo, null);
-604
ProcedureSyncWait.submitAndWaitProcedure(master.getMasterProcedureExecutor(), 
proc);
-605  }
-606
-607  public Future 
moveAsync(RegionPlan regionPlan) throws HBaseIOException {
-608TransitRegionStateProcedure proc =
-609  
createMoveRegionProcedure(regionPlan.getRegionInfo(), 
regionPlan.getDestination());
-610return 
ProcedureSyncWait.submitProcedure(master.getMasterProcedureExecutor(), proc);
-611  }
-612
-613  // 

-614  //  RegionTransition procedures 
helpers
-615  // 

-616
-617  /**
-618   * Create round-robin assigns. Use on 
table creation to distribute out regions across cluster.
-619   * @return AssignProcedures made out of 
the passed in hris and a call to the balancer
-620   * to populate the assigns with 
targets chosen using round-robin (default balancer
-621   * scheme). If at assign-time, 
the target chosen is no longer up, thats fine, the
-622   * AssignProcedure will ask the 
balancer for a new target, and so on.
-623   */
-624  public TransitRegionStateProcedure[] 
createRoundRobinAssignProcedures(List hris,
-625  List 
serversToExclude) {
-626if (hris.isEmpty()) {
-627  return new 
TransitRegionStateProcedure[0];
-628}
-629
-630if (serversToExclude != null
-631&& 
this.master.getServerManager().getOnlineServersList().size() == 1) {
-632  LOG.debug("Only one region server 
found and hence going ahead with the assignment");
-633  serversToExclude = null;
-634}
-635try {
-636  // Ask the balancer to assig

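Editor's sketch: assign, unassign and createMoveRegionProcedure in this hunk all follow one shape: lock the RegionStateNode, run preTransitCheck, attach a TransitRegionStateProcedure to the node, unlock, then submit and wait. A minimal sketch of that lock-check-attach-submit pattern; RegionNode and the procedure string are stand-ins, not the HBase types:

import java.util.concurrent.locks.ReentrantLock;

/** Mini-model of the lock -> check -> attach-procedure -> submit pattern above. */
public class TransitSketch {
  static class RegionNode {
    final ReentrantLock lock = new ReentrantLock();
    String attachedProc;                       // stand-in for setProcedure(...)
  }

  static void unassign(RegionNode node) {
    String proc;
    node.lock.lock();
    try {
      if (node.attachedProc != null) {         // preTransitCheck: no concurrent transition
        throw new IllegalStateException("region already in transition");
      }
      proc = "UnassignProcedure";              // TransitRegionStateProcedure.unassign(...)
      node.attachedProc = proc;
    } finally {
      node.lock.unlock();                      // never submit while holding the node lock
    }
    System.out.println("submitted and waiting on " + proc); // ProcedureSyncWait stand-in
  }

  public static void main(String[] args) {
    unassign(new RegionNode());
  }
}
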
[13/51] [partial] hbase-site git commit: Published site at c6a65ba63fce85ac7c4b62b96ef2bbe6c35d2f00.

2018-09-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/293abb17/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
index c372545..af3b364 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.Cluster.Action.Type.html
@@ -1279,322 +1279,339 @@
 1271List 
lastFewRegions = new ArrayList<>();
 1272// assign the remaining by going 
through the list and try to assign to servers one-by-one
 1273int serverIdx = 
RANDOM.nextInt(numServers);
-1274for (RegionInfo region : 
unassignedRegions) {
+1274OUTER : for (RegionInfo region : 
unassignedRegions) {
 1275  boolean assigned = false;
-1276  for (int j = 0; j < numServers; 
j++) { // try all servers one by one
+1276  INNER : for (int j = 0; j < 
numServers; j++) { // try all servers one by one
 1277ServerName serverName = 
servers.get((j + serverIdx) % numServers);
 1278if 
(!cluster.wouldLowerAvailability(region, serverName)) {
 1279  List 
serverRegions =
 1280  
assignments.computeIfAbsent(serverName, k -> new ArrayList<>());
-1281  serverRegions.add(region);
-1282  cluster.doAssignRegion(region, 
serverName);
-1283  serverIdx = (j + serverIdx + 
1) % numServers; //remain from next server
-1284  assigned = true;
-1285  break;
-1286}
-1287  }
-1288  if (!assigned) {
-1289lastFewRegions.add(region);
-1290  }
-1291}
-1292// just sprinkle the rest of the 
regions on random regionservers. The balanceCluster will
-1293// make it optimal later. we can end 
up with this if numReplicas > numServers.
-1294for (RegionInfo region : 
lastFewRegions) {
-1295  int i = 
RANDOM.nextInt(numServers);
-1296  ServerName server = 
servers.get(i);
-1297  List 
serverRegions = assignments.computeIfAbsent(server, k -> new 
ArrayList<>());
-1298  serverRegions.add(region);
-1299  cluster.doAssignRegion(region, 
server);
-1300}
-1301return assignments;
-1302  }
-1303
-1304  protected Cluster 
createCluster(List servers, Collection 
regions) {
-1305// Get the snapshot of the current 
assignments for the regions in question, and then create
-1306// a cluster out of it. Note that we 
might have replicas already assigned to some servers
-1307// earlier. So we want to get the 
snapshot to see those assignments, but this will only contain
-1308// replicas of the regions that are 
passed (for performance).
-1309Map> clusterState = 
getRegionAssignmentsByServer(regions);
-1310
-1311for (ServerName server : servers) 
{
-1312  if 
(!clusterState.containsKey(server)) {
-1313clusterState.put(server, 
EMPTY_REGION_LIST);
-1314  }
-1315}
-1316return new Cluster(regions, 
clusterState, null, this.regionFinder,
-1317rackManager);
-1318  }
-1319
-1320  private List 
findIdleServers(List servers) {
-1321return 
this.services.getServerManager()
-1322
.getOnlineServersListWithPredicator(servers, IDLE_SERVER_PREDICATOR);
-1323  }
-1324
-1325  /**
-1326   * Used to assign a single region to a 
random server.
-1327   */
-1328  @Override
-1329  public ServerName 
randomAssignment(RegionInfo regionInfo, List servers)
-1330  throws HBaseIOException {
-1331
metricsBalancer.incrMiscInvocations();
-1332if (servers != null && 
servers.contains(masterServerName)) {
-1333  if (shouldBeOnMaster(regionInfo)) 
{
-1334return masterServerName;
-1335  }
-1336  if 
(!LoadBalancer.isTablesOnMaster(getConf())) {
-1337// Guarantee we do not put any 
regions on master
-1338servers = new 
ArrayList<>(servers);
-1339
servers.remove(masterServerName);
-1340  }
-1341}
-1342
-1343int numServers = servers == null ? 0 
: servers.size();
-1344if (numServers == 0) {
-1345  LOG.warn("Wanted to retain 
assignment but no servers to assign to");
-1346  return null;
-1347}
-1348if (numServers == 1) { // Only one 
server, nothing fancy we can do here
-1349  return servers.get(0);
-1350}
-1351List idleServers = 
findIdleServers(servers);
-1352if (idleServers.size() == 1) {
-1353  return idleServers.get(0);
-1354}
-1355final List 
finalServers = idleServers.isEmpty() ?
-1356servers : idleServers;
-1357List regions = 
Lists.newArrayLi

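Editor's sketch: the OUTER/INNER labels added in this hunk belong to the pass that places leftover regions: start from a random server index, probe servers one by one, remember the next offset so consecutive regions spread out, and randomly sprinkle whatever still cannot be placed for the balancer to fix later. A toy version of that pass; plain strings stand in for RegionInfo/ServerName, and the unavailable set stands in for wouldLowerAvailability:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;

/** Toy version of the round-robin remainder assignment in the hunk above. */
public class RoundRobinSketch {
  public static Map<String, List<String>> assign(List<String> regions, List<String> servers,
      Set<String> unavailable) {
    Map<String, List<String>> assignments = new HashMap<>();
    List<String> lastFewRegions = new ArrayList<>();
    Random rnd = new Random();
    int serverIdx = rnd.nextInt(servers.size());
    for (String region : regions) {
      boolean assigned = false;
      for (int j = 0; j < servers.size(); j++) {         // try all servers one by one
        String server = servers.get((j + serverIdx) % servers.size());
        if (!unavailable.contains(server)) {             // wouldLowerAvailability stand-in
          assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
          serverIdx = (j + serverIdx + 1) % servers.size(); // remain from next server
          assigned = true;
          break;
        }
      }
      if (!assigned) lastFewRegions.add(region);
    }
    // Sprinkle the rest on random servers; balanceCluster optimizes them later.
    for (String region : lastFewRegions) {
      String server = servers.get(rnd.nextInt(servers.size()));
      assignments.computeIfAbsent(server, k -> new ArrayList<>()).add(region);
    }
    return assignments;
  }

  public static void main(String[] args) {
    System.out.println(assign(List.of("r1", "r2", "r3"), List.of("s1", "s2"), Set.of()));
  }
}
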
[13/51] [partial] hbase-site git commit: Published site at 7c1fad4992a169a35b4457e6f4afcb30d04406e9.

2018-08-31 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/74f60271/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
index d11176a..2c14c50 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.CompletedProcedureCleaner.html
@@ -982,1050 +982,1168 @@
 974  }
 975
 976  /**
-977   * Add a new root-procedure to the 
executor.
-978   * @param proc the new procedure to 
execute.
-979   * @param nonceKey the registered 
unique identifier for this operation from the client or process.
-980   * @return the procedure id, that can 
be used to monitor the operation
-981   */
-982  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
-983  justification = "FindBugs is blind 
to the check-for-null")
-984  public long 
submitProcedure(Procedure proc, NonceKey nonceKey) {
-985
Preconditions.checkArgument(lastProcId.get() >= 0);
-986
-987prepareProcedure(proc);
-988
-989final Long currentProcId;
-990if (nonceKey != null) {
-991  currentProcId = 
nonceKeysToProcIdsMap.get(nonceKey);
-992  
Preconditions.checkArgument(currentProcId != null,
-993"Expected nonceKey=" + nonceKey + 
" to be reserved, use registerNonce(); proc=" + proc);
-994} else {
-995  currentProcId = nextProcId();
-996}
-997
-998// Initialize the procedure
-999proc.setNonceKey(nonceKey);
-1000
proc.setProcId(currentProcId.longValue());
-1001
-1002// Commit the transaction
-1003store.insert(proc, null);
-1004LOG.debug("Stored {}", proc);
-1005
-1006// Add the procedure to the 
executor
-1007return pushProcedure(proc);
-1008  }
-1009
-1010  /**
-1011   * Add a set of new root-procedure to 
the executor.
-1012   * @param procs the new procedures to 
execute.
-1013   */
-1014  // TODO: Do we need to take nonces 
here?
-1015  public void 
submitProcedures(Procedure[] procs) {
-1016
Preconditions.checkArgument(lastProcId.get() >= 0);
-1017if (procs == null || procs.length 
<= 0) {
-1018  return;
-1019}
-1020
-1021// Prepare procedure
-1022for (int i = 0; i < procs.length; 
++i) {
-1023  
prepareProcedure(procs[i]).setProcId(nextProcId());
-1024}
-1025
-1026// Commit the transaction
-1027store.insert(procs);
-1028if (LOG.isDebugEnabled()) {
-1029  LOG.debug("Stored " + 
Arrays.toString(procs));
-1030}
-1031
-1032// Add the procedure to the 
executor
-1033for (int i = 0; i < procs.length; 
++i) {
-1034  pushProcedure(procs[i]);
-1035}
-1036  }
-1037
-1038  private Procedure 
prepareProcedure(Procedure proc) {
-1039
Preconditions.checkArgument(proc.getState() == ProcedureState.INITIALIZING);
-1040
Preconditions.checkArgument(!proc.hasParent(), "unexpected parent", proc);
-1041if (this.checkOwnerSet) {
-1042  
Preconditions.checkArgument(proc.hasOwner(), "missing owner");
-1043}
-1044return proc;
-1045  }
-1046
-1047  private long 
pushProcedure(Procedure proc) {
-1048final long currentProcId = 
proc.getProcId();
+977   * Bypass a procedure. If the procedure is set to bypass, all the logic in
+978   * execute/rollback will be ignored and it will return success, whatever.
+979   * It is used to recover buggy stuck procedures, releasing the lock resources
+980   * and letting other procedures to run. Bypassing one procedure (and its ancestors will
+981   * be bypassed automatically) may leave the cluster in a middle state, e.g. region
+982   * not assigned, or some hdfs files left behind. After getting rid of those stuck procedures,
+983   * the operators may have to do some clean up on hdfs or schedule some assign procedures
+984   * to let region online. DO AT YOUR OWN RISK.
+985   *
+986   * A procedure can be bypassed only if
+987   * 1. The procedure is in state of RUNNABLE, WAITING, WAITING_TIMEOUT
+988   * or it is a root procedure without any child.
+989   * 2. No other worker thread is executing it
+990   * 3. No child procedure has been submitted
+991   *
+992   *
+993   * If all the requirements are meet, the procedure and its ancestors will be
+994   * bypassed and persisted to WAL.
+995   *
+996   *
+997   * If the procedure is in WAITING state, will set it to RUNNABLE add it to run queue.
+998   * TODO: What about WAITING_TIMEOUT?
+999   * @param id the procedure id
+1000  * @param lockWait time to wait lock
+1001  * @param force if force set to true, we will bypass the pro

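Editor's sketch: the new javadoc above lists three preconditions before a procedure may be bypassed. A compact model of that gatekeeping; ProcState and the fields are illustrative, the real checks live in ProcedureExecutor:

/** Mini-model of the bypass preconditions listed in the javadoc above. */
public class BypassSketch {
  enum ProcState { INITIALIZING, RUNNABLE, WAITING, WAITING_TIMEOUT, ROLLEDBACK }

  static class Proc {
    ProcState state = ProcState.RUNNABLE;
    boolean executedByWorker = false;   // 2. no worker thread is executing it
    int childrenSubmitted = 0;          // 3. no child procedure has been submitted
    boolean hasParent = false;
  }

  static boolean canBypass(Proc p) {
    boolean stateOk = p.state == ProcState.RUNNABLE
        || p.state == ProcState.WAITING
        || p.state == ProcState.WAITING_TIMEOUT
        || (!p.hasParent && p.childrenSubmitted == 0);   // root without any child
    return stateOk && !p.executedByWorker && p.childrenSubmitted == 0;
  }

  public static void main(String[] args) {
    System.out.println(canBypass(new Proc()));           // true
  }
}
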
[13/51] [partial] hbase-site git commit: Published site at 3afe9fb7e6ebfa71187cbe131558a83fae61cecd.

2018-08-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/424d7e41/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 9d66e82..310f524 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":9,"i6":10,"i7":10,"i8":10,"i9":9,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":42,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":41,"i29":41,"i30":10,"i31":10,"i32":10,"i33":42,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":9,"i40":9,"i41":9,"i42":9,"i43":9,"i44":9,"i45":9,"i46":10,"i47":9,"i48":9,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":42,"i71":42,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":9,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":9,"i100":9,"i101":10,"i102":9,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,"i110":10,
 
"i111":10,"i112":10,"i113":9,"i114":9,"i115":9,"i116":42,"i117":10,"i118":10,"i119":9,"i120":42,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":9,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":9,"i148":9,"i149":9,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":9,"i156":9,"i157":10,"i158":9,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":9,"i166":9,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":10,"i177":10,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":10,"i198":10,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i209":9,"i210":10,"i211":10,"
 
i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10};
+var methods = 
{"i0":9,"i1":10,"i2":10,"i3":10,"i4":9,"i5":9,"i6":10,"i7":10,"i8":10,"i9":9,"i10":9,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":42,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":41,"i29":41,"i30":10,"i31":10,"i32":10,"i33":42,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":9,"i40":9,"i41":9,"i42":9,"i43":9,"i44":9,"i45":9,"i46":10,"i47":9,"i48":9,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":42,"i71":42,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":9,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":9,"i100":9,"i101":10,"i102":9,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109":10,"i110":10,
 
"i111":10,"i112":10,"i113":9,"i114":9,"i115":9,"i116":42,"i117":10,"i118":10,"i119":10,"i120":9,"i121":42,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":9,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":9,"i149":9,"i150":9,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":9,"i157":9,"i158":10,"i159":9,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":9,"i167":9,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":10,"i174":10,"i175":10,"i176":42,"i177":10,"i178":42,"i179":42,"i180":42,"i181":42,"i182":42,"i183":42,"i184":42,"i185":42,"i186":42,"i187":42,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":42,"i196":42,"i197":42,"i198":10,"i199":10,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i209":10,"i210":10,"i211":10,
 
"i212":10,"i213":9,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10,"i228":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t

[13/51] [partial] hbase-site git commit: Published site at a452487a9b82bfd33bc10683c3f8b8ae74d58883.

2018-08-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cf79db0/devapidocs/org/apache/hadoop/hbase/filter/PageFilter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/PageFilter.html b/devapidocs/org/apache/hadoop/hbase/filter/PageFilter.html
index fdd803c..f10b3da 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/PageFilter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/PageFilter.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":10,"i3":10,"i4":42,"i5":10,"i6":10,"i7":10,"i8":10,"i9":9,"i10":10,"i11":10};
+var methods = 
{"i0":10,"i1":9,"i2":10,"i3":10,"i4":10,"i5":42,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":10,"i13":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -120,7 +120,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class PageFilter
+public class PageFilter
 extends FilterBase
 Implementation of Filter interface that limits results to a 
specific page
  size. It terminates scanning once the number of filter-passed rows is >
@@ -225,24 +225,28 @@ extends 
 
 boolean
+equals(https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object obj) 
+
+
+boolean
 filterAllRemaining()
 Filters that never filter all remaining can inherit this 
implementation that
  never stops the filter early.
 
 
-
+
 Filter.ReturnCode
 filterCell(Cell ignored)
 A way to filter based on the column family, column 
qualifier and/or the column value.
 
 
-
+
 Filter.ReturnCode
 filterKeyValue(Cell c)
 Deprecated. 
 
 
-
+
 boolean
 filterRow()
 Filters that never filter by rows based on previously 
gathered state from
@@ -250,34 +254,38 @@ extends 
 
 
-
+
 boolean
 filterRowKey(Cell cell)
 Filters a row based on the row key.
 
 
-
+
 long
 getPageSize() 
 
-
+
 boolean
 hasFilterRow()
 Fitlers that never filter by modifying the returned List of 
Cells can
  inherit this implementation that does nothing.
 
 
-
+
+int
+hashCode() 
+
+
 static PageFilter
 parseFrom(byte[] pbBytes) 
 
-
+
 byte[]
 toByteArray()
 Return length 0 byte array for Filters that don't require 
special serialization
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 toString()
 Return filter's info for debugging and logging 
purpose.
@@ -303,7 +311,7 @@ extends 
 
 Methods inherited from class java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
-https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--";
 title="class or interface in java.lang">hashCode, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--";
 title="class or interface in java.lang">notify, https://docs.oracle.com/javase/8/docs/api/ja
 va/lang/Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--";
 title="class or interface in java.lang">wait, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-";
 title="class or interface in java.lang">wait, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-";
 title="class or interface in java.lang">wait
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--";
 title="class or interface in java.lang">notify, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notifyAll--";
 title="class or interface in java.lang">notifyAl

[13/51] [partial] hbase-site git commit: Published site at 6a5b4f2a5c188f8eef4f2250b8b7db7dd1e750e4.

2018-08-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1ff05a18/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MutableSegment.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MutableSegment.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MutableSegment.html
index 598cf81..26b0752 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MutableSegment.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MutableSegment.html
@@ -55,88 +55,92 @@
 047  + ClassSize.REFERENCE
 048  + ClassSize.ATOMIC_BOOLEAN);
 049
-050  protected MutableSegment(CellSet 
cellSet, CellComparator comparator, MemStoreLAB memStoreLAB) {
-051super(cellSet, comparator, 
memStoreLAB, TimeRangeTracker.create(TimeRangeTracker.Type.SYNC));
-052incMemStoreSize(0, DEEP_OVERHEAD, 0); 
// update the mutable segment metadata
-053  }
-054
-055  /**
-056   * Adds the given cell into the 
segment
-057   * @param cell the cell to add
-058   * @param mslabUsed whether using 
MSLAB
-059   */
-060  public void add(Cell cell, boolean 
mslabUsed, MemStoreSizing memStoreSizing,
-061  boolean sizeAddedPreOperation) {
-062internalAdd(cell, mslabUsed, 
memStoreSizing, sizeAddedPreOperation);
-063  }
-064
-065  public void upsert(Cell cell, long 
readpoint, MemStoreSizing memStoreSizing,
-066  boolean sizeAddedPreOperation) {
-067internalAdd(cell, false, 
memStoreSizing, sizeAddedPreOperation);
+050  protected MutableSegment(CellSet 
cellSet, CellComparator comparator,
+051  MemStoreLAB memStoreLAB, 
MemStoreSizing memstoreSizing) {
+052super(cellSet, comparator, 
memStoreLAB, TimeRangeTracker.create(TimeRangeTracker.Type.SYNC));
+053incMemStoreSize(0, DEEP_OVERHEAD, 0); 
// update the mutable segment metadata
+054if (memstoreSizing != null) {
+055  memstoreSizing.incMemStoreSize(0, 
DEEP_OVERHEAD, 0);
+056}
+057  }
+058
+059  /**
+060   * Adds the given cell into the 
segment
+061   * @param cell the cell to add
+062   * @param mslabUsed whether using 
MSLAB
+063   */
+064  public void add(Cell cell, boolean 
mslabUsed, MemStoreSizing memStoreSizing,
+065  boolean sizeAddedPreOperation) {
+066internalAdd(cell, mslabUsed, 
memStoreSizing, sizeAddedPreOperation);
+067  }
 068
-069// Get the Cells for the 
row/family/qualifier regardless of timestamp.
-070// For this case we want to clean up 
any other puts
-071Cell firstCell = 
PrivateCellUtil.createFirstOnRowColTS(cell, HConstants.LATEST_TIMESTAMP);
-072SortedSet ss = 
this.tailSet(firstCell);
-073Iterator it = 
ss.iterator();
-074// versions visible to oldest 
scanner
-075int versionsVisible = 0;
-076while (it.hasNext()) {
-077  Cell cur = it.next();
-078
-079  if (cell == cur) {
-080// ignore the one just put in
-081continue;
-082  }
-083  // check that this is the row and 
column we are interested in, otherwise bail
-084  if (CellUtil.matchingRows(cell, 
cur) && CellUtil.matchingQualifier(cell, cur)) {
-085// only remove Puts that 
concurrent scanners cannot possibly see
-086if (cur.getTypeByte() == 
KeyValue.Type.Put.getCode() && cur.getSequenceId() <= readpoint) {
-087  if (versionsVisible >= 1) 
{
-088// if we get here we have 
seen at least one version visible to the oldest scanner,
-089// which means we can prove 
that no scanner will see this version
-090
-091// false means there was a 
change, so give us the size.
-092// TODO when the removed cell 
ie.'cur' having its data in MSLAB, we can not release that
-093// area. Only the Cell object 
as such going way. We need to consider cellLen to be
-094// decreased there as 0 only. 
Just keeping it as existing code now. We need to know the
-095// removed cell is from MSLAB 
or not. Will do once HBASE-16438 is in
-096int cellLen = 
getCellLength(cur);
-097long heapSize = 
heapSizeChange(cur, true);
-098long offHeapSize = 
offHeapSizeChange(cur, true);
-099incMemStoreSize(-cellLen, 
-heapSize, -offHeapSize);
-100if (memStoreSizing != null) 
{
-101  
memStoreSizing.decMemStoreSize(cellLen, heapSize, offHeapSize);
-102}
-103it.remove();
-104  } else {
-105versionsVisible++;
-106  }
-107}
-108  } else {
-109// past the row or column, done
-110break;
-111  }
-112}
-113  }
-114
-115  public boolean setInMemoryFlushed() {
-116return flushed.compareAndSet(false, 
true);
+069  public void upsert(Cell cell, long 
readpoint, MemStoreSizing memStoreSizing,
+070  boolean sizeAddedPreOperation) {
+071internalAdd(cell, false, 
memStoreSizing, sizeAddedPreOperation);
+072
+073// Get the Cells for th
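
The removed upsert() body above deletes older Put versions once at least one version is provably visible to the oldest scanner (sequenceId <= readpoint). A self-contained sketch of that rule, with a hypothetical Version class standing in for Cell and assuming newest-to-oldest iteration order, as in HBase's CellSet:

import java.util.Iterator;
import java.util.SortedSet;

static final class Version {
  final long sequenceId;
  final boolean isPut;
  Version(long sequenceId, boolean isPut) {
    this.sequenceId = sequenceId;
    this.isPut = isPut;
  }
}

static void pruneOldVersions(SortedSet<Version> versions, long readpoint) {
  int versionsVisible = 0;
  for (Iterator<Version> it = versions.iterator(); it.hasNext();) {
    Version v = it.next();
    if (v.isPut && v.sequenceId <= readpoint) {   // visible to the oldest scanner
      if (versionsVisible >= 1) {
        it.remove();       // an older visible version: no scanner can reach it
      } else {
        versionsVisible++; // the newest visible version must stay
      }
    }
  }
}
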

[13/51] [partial] hbase-site git commit: Published site at 63f2d3cbdc8151f5f61f33e0a078c51b9ac076a5.

2018-08-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7ae6a80c/devapidocs/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.html
 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.html
index 19df400..25c9b9d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/procedure/ReopenTableRegionsProcedure.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -129,11 +129,9 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class ReopenTableRegionsProcedure
+public class ReopenTableRegionsProcedure
 extends AbstractStateMachineTableProcedure
-Used for reopening the regions for a table.
- 
- Currently we use MoveRegionProcedure 
to reopen regions.
+Used for reopening the regions for a table.
 
 
 
@@ -243,60 +241,55 @@ extends Method and Description
 
 
-private MoveRegionProcedure
-createReopenProcedure(MasterProcedureEnv env,
- HRegionLocation loc) 
-
-
 protected void
 deserializeStateData(ProcedureStateSerializer serializer)
 Called on store load to allow the user to decode the 
previously serialized
  state.
 
 
-
+
 protected StateMachineProcedure.Flow
 executeFromState(MasterProcedureEnv env,
 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ReopenTableRegionsState state)
 called to perform a single step of the specified 'state' of 
the procedure
 
 
-
+
 protected 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ReopenTableRegionsState
 getInitialState()
 Return the initial state object that will be used for the 
first call to executeFromState().
 
 
-
+
 protected 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ReopenTableRegionsState
 getState(int stateId)
 Convert an ordinal (or state id) to an Enum (or more 
descriptive) state object.
 
 
-
+
 protected int
 getStateId(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ReopenTableRegionsState state)
 Convert the Enum (or more descriptive) state object to an 
ordinal (or state id).
 
 
-
+
 TableName
 getTableName() 
 
-
+
 TableProcedureInterface.TableOperationType
 getTableOperationType()
 Given an operation type we can take decisions about what to 
do with pending operations.
 
 
-
+
 protected void
 rollbackState(MasterProcedureEnv env,
  
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ReopenTableRegionsState state)
 called to perform the rollback of the specified state
 
 
-
+
 protected void
 serializeStateData(ProcedureStateSerializer serializer)
 The user-level code of the procedure may have some state to
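
These overrides implement the generic StateMachineProcedure contract: the framework starts from getInitialState() and keeps calling executeFromState() until it returns Flow.NO_MORE_STATE. A minimal sketch of that shape, using a hypothetical two-state enum rather than the generated protobuf states used here:

// hypothetical states; the real procedure uses generated protobuf enums
enum ReopenState { REOPEN_REGIONS, CONFIRM_REOPENED }

@Override
protected Flow executeFromState(MasterProcedureEnv env, ReopenState state) {
  switch (state) {
    case REOPEN_REGIONS:
      // schedule child procedures that reopen each region ...
      setNextState(ReopenState.CONFIRM_REOPENED);
      return Flow.HAS_MORE_STATE;
    case CONFIRM_REOPENED:
      // verify every region came back online, retrying otherwise ...
      return Flow.NO_MORE_STATE;
    default:
      throw new UnsupportedOperationException("unhandled state=" + state);
  }
}
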
@@ -352,7 +345,7 @@ extends 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -361,7 +354,7 @@ extends 
 
 tableName
-private TableName tableName
+private TableName tableName
 
 
 
@@ -370,7 +363,7 @@ extends 
 
 regions
-private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List regions
+private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List regions
 
 
 
@@ -387,7 +380,7 @@ extends 
 
 ReopenTableRegionsProcedure
-public ReopenTableRegionsProcedure()
+public ReopenTableRegionsProcedure()
 
 
 
@@ -396,7 +389,7 @@ extends 
 
 ReopenTableRegionsProcedure
-public ReopenTableRegionsProcedure(TableName tableName)
+public ReopenTableRegionsProcedure(TableName tableName)
 
 
 
@@ -413,7 +406,7 @@ extends 
 
 getTableName
-public TableName getTableName()
+public TableName getTableName()
 
 Specified by:
 getTableName in
 interface TableProcedureInterface
@@ -430,7 +423,7 @@ extends 
 
 getTableOperationType
-public TableProcedureInterface.TableOperationType getTableOperationType()
+public TableProcedureInterface.TableOperationType getTableOperationType()
 Description copied from 
interface: TableProcedureInterface
 Given an operation type we can take decisions about what to 
do with pending operations.
  e.g. if we get a delete and we have some table operation pending (e.g. add 
column)
@@ -445,23 +438,13 @@ extends 
-
-
-
-
-createReopenProcedure
-private MoveRegionProc

[13/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName of the directory 
used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir) && 
fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
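
The hunk above replaces the hard-coded "/tmp" with the caller-supplied tmpDirName. In isolation, the sideline step looks roughly like this (a sketch using standard Hadoop FileSystem calls; the sidelined file name and the thrown message are illustrative):

// if the expected recovered.edits directory exists as a *file*, move the
// stale file into the tmp dir so a real directory can be created
Path tmp = new Path(tmpDirName);   // was new Path("/tmp")
if (!fs.exists(tmp)) {
  fs.mkdirs(tmp);
}
Path sidelined = new Path(tmp, dir.getName() + "." + System.currentTimeMillis());
if (!fs.rename(dir, sidelined)) {
  throw new IOException("Failed to sideline old edits file " + dir);
}
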
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for 
that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Map 
maxSeqIdInStores =
-1536  
regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayList keptCells = 
new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || 
maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
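
The comment block above records the key design choice: rather than deleting entries from the live list (which degrades to O(n^2) for an ArrayList), filterCellByStore builds a new list of survivors in one pass and swaps it in. The pattern in isolation, with a hypothetical shouldKeep predicate:

// O(n) filter-by-copy instead of O(n^2) in-place removal;
// sized for the common case where most cells are kept
ArrayList<Cell> keptCells = new ArrayList<>(cells.size());
for (Cell cell : cells) {
  if (shouldKeep(cell)) {   // hypothetical predicate
    keptCells.add(cell);
  }
}
logEntry.getEdit().setCells(keptCells);
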
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List entries = 
buffer.entryBuffer;
-1570  if (entries.isEmpty()) {

[13/51] [partial] hbase-site git commit: Published site at 613d831429960348dc42c3bdb6ea5d31be15c81c.

2018-08-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7cf6034b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.html
new file mode 100644
index 000..80852ec
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketProtoUtils.html
@@ -0,0 +1,263 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/*
+002 * Copyright The Apache Software 
Foundation
+003 *
+004 * Licensed to the Apache Software 
Foundation (ASF) under one
+005 * or more contributor license 
agreements.  See the NOTICE file
+006 * distributed with this work for 
additional information
+007 * regarding copyright ownership.  The 
ASF licenses this file
+008 * to you under the Apache License, 
Version 2.0 (the
+009 * "License"); you may not use this file 
except in compliance
+010 * with the License.  You may obtain a 
copy of the License at
+011 *
+012 * 
http://www.apache.org/licenses/LICENSE-2.0
+013 *
+014 * Unless required by applicable law or 
agreed to in writing, software
+015
+016 * distributed under the License is 
distributed on an "AS IS" BASIS,
+017 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+018 * See the License for the specific 
language governing permissions and
+019 * limitations under the License.
+020 */
+021package 
org.apache.hadoop.hbase.io.hfile.bucket;
+022
+023import java.io.IOException;
+024import java.util.Map;
+025import 
java.util.concurrent.ConcurrentHashMap;
+026
+027import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+028import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
+029import 
org.apache.hadoop.hbase.io.hfile.BlockType;
+030import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
+031import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
+032import 
org.apache.yetus.audience.InterfaceAudience;
+033
+034import 
org.apache.hadoop.hbase.shaded.protobuf.generated.BucketCacheProtos;
+035
+036@InterfaceAudience.Private
+037final class BucketProtoUtils {
+038  private BucketProtoUtils() {
+039
+040  }
+041
+042  static 
BucketCacheProtos.BucketCacheEntry toPB(BucketCache cache) {
+043return 
BucketCacheProtos.BucketCacheEntry.newBuilder()
+044
.setCacheCapacity(cache.getMaxSize())
+045
.setIoClass(cache.ioEngine.getClass().getName())
+046
.setMapClass(cache.backingMap.getClass().getName())
+047
.putAllDeserializers(CacheableDeserializerIdManager.save())
+048
.setBackingMap(BucketProtoUtils.toPB(cache.backingMap))
+049.build();
+050  }
+051
+052  private static 
BucketCacheProtos.BackingMap toPB(
+053  Map backingMap) {
+054BucketCacheProtos.BackingMap.Builder 
builder = BucketCacheProtos.BackingMap.newBuilder();
+055for (Map.Entry entry : backingMap.entrySet()) {
+056  
builder.addEntry(BucketCacheProtos.BackingMapEntry.newBuilder()
+057  .setKey(toPB(entry.getKey()))
+058  
.setValue(toPB(entry.getValue()))
+059  .build());
+060}
+061return builder.build();
+062  }
+063
+064  private static 
BucketCacheProtos.BlockCacheKey toPB(BlockCacheKey key) {
+065return 
BucketCacheProtos.BlockCacheKey.newBuilder()
+066
.setHfilename(key.getHfileName())
+067.setOffset(key.getOffset())
+068
.setPrimaryReplicaBlock(key.isPrimary())
+069
.setBlockType(toPB(key.getBlockType()))
+070.build();
+071  }
+072
+073  private static 
BucketCacheProtos.BlockType toPB(BlockType blockType) {
+074switch(blockType) {
+075  case DATA:
+076return 
BucketCacheProtos.BlockType.data;
+077  case META:
+078return 
BucketCacheProtos.BlockType.meta;
+079  case TRAILER:
+080return 
BucketCacheProtos.BlockType.trailer;
+081  case INDEX_V1:
+082return 
BucketCacheProtos.BlockType.index_v1;
+083  case FILE_INFO:
+084return 
BucketCacheProtos.BlockType.file_info;
+085  case LEAF_INDEX:
+086return 
BucketCacheProtos.BlockType.leaf_index;
+087  case ROOT_INDEX:
+088return 
BucketCacheProtos.BlockType.root_index;
+089  case BLOOM_CHUNK:
+090return 
BucketCacheProtos.BlockType.bloom_chunk;
+091  case ENCODED_DATA:
+092return 
BucketCacheProtos.BlockType.encoded_data;
+093  case GENERAL_BLOOM_META:
+094return 
BucketCacheProtos.BlockType.general_bloom_meta;
+095  case INTERMEDIATE_INDEX:
+096return 
BucketCacheProtos.BlockType.intermediate_index;
+097  case DELETE_FAMILY_BLOOM_META:
+098return 
BucketCacheProtos.BlockType.delete_family_bloom_meta;
+099  default:
+100throw new Error("Unrecogni
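
Taken together, these toPB overloads flatten the cache's backing map into protobuf so the bucket cache index can survive a restart. A hedged usage sketch (the stream handling and persistencePath are illustrative; only toPB(BucketCache) appears in this file):

BucketCacheProtos.BucketCacheEntry entry = BucketProtoUtils.toPB(cache);
try (java.io.FileOutputStream out = new java.io.FileOutputStream(persistencePath)) {
  entry.writeTo(out);   // standard protobuf serialization of the cache index
}
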

[13/51] [partial] hbase-site git commit: Published site at ba5d1c1f28301adc99019d9d6c4a04fac98ae511.

2018-07-25 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/804782f0/devapidocs/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.html
 
b/devapidocs/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.html
index b650cff..d48e675 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.html
@@ -129,7 +129,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class NettyHBaseSaslRpcClientHandler
+public class NettyHBaseSaslRpcClientHandler
 extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 Implement SASL logic for netty rpc client.
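
The saslPromise field documented below follows a common netty idiom: an inbound handler drives an asynchronous handshake and completes a Promise when it finishes. A minimal sketch of that idiom with unshaded netty types (isHandshakeComplete is a hypothetical check, not part of this class):

class HandshakeHandler extends SimpleChannelInboundHandler<ByteBuf> {
  private final Promise<Boolean> promise;

  HandshakeHandler(Promise<Boolean> promise) {
    this.promise = promise;
  }

  @Override
  protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) {
    if (isHandshakeComplete(msg)) {   // hypothetical check
      ctx.pipeline().remove(this);    // handshake done; drop out of the pipeline
      promise.trySuccess(true);
    }
  }

  @Override
  public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
    promise.tryFailure(cause);   // surface handshake failures to the caller
  }
}
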
 
@@ -322,7 +322,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -331,7 +331,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 saslPromise
-private 
final org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseBoolean> saslPromise
+private 
final org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseBoolean> saslPromise
 
 
 
@@ -340,7 +340,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 ugi
-private final org.apache.hadoop.security.UserGroupInformation ugi
+private final org.apache.hadoop.security.UserGroupInformation ugi
 
 
 
@@ -349,7 +349,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 saslRpcClient
-private final NettyHBaseSaslRpcClient 
saslRpcClient
+private final NettyHBaseSaslRpcClient 
saslRpcClient
 
 
 
@@ -358,7 +358,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 conf
-private final org.apache.hadoop.conf.Configuration conf
+private final org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -367,7 +367,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 needProcessConnectionHeader
-private boolean needProcessConnectionHeader
+private boolean needProcessConnectionHeader
 
 
 
@@ -384,7 +384,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 NettyHBaseSaslRpcClientHandler
-public NettyHBaseSaslRpcClientHandler(org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseBoolean> saslPromise,
+public NettyHBaseSaslRpcClientHandler(org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseBoolean> saslPromise,
   
org.apache.hadoop.security.UserGroupInformation ugi,
   AuthMethod method,
   
org.apache.hadoop.security.token.Token token,
@@ -415,7 +415,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 writeResponse
-private void writeResponse(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext ctx,
+private void writeResponse(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext ctx,
byte[] response)
 
 
@@ -425,7 +425,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 tryComplete
-private void tryComplete(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext ctx)
+private void tryComplete(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext ctx)
 
 
 
@@ -434,7 +434,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 setCryptoAESOption
-private void setCryptoAESOption()
+private void setCryptoAESOption()
 
 
 
@@ -443,7 +443,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 isNeedProcessConnectionHeader
-public boolean isNeedProcessConnectionHeader()
+public boolean isNeedProcessConnectionHeader()
 
 
 
@@ -452,7 +452,7 @@ extends 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler
 
 
 handlerAdded
-public void handlerAdded(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext ctx)
+public void handlerAdded(org.apache

[13/51] [partial] hbase-site git commit: Published site at b4759ce6e72f50ccd9d410bd5917dc5a515414f1.

2018-07-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/df8fd1d3/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
index 233dba3..91b9055 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaHostCostFunction.html
@@ -540,1205 +540,1204 @@
 532  
sm.getRegionMetrics().forEach((byte[] regionName, RegionMetrics rm) -> {
 533Deque 
rLoads = oldLoads.get(Bytes.toString(regionName));
 534if (rLoads == null) {
-535  // There was nothing there
-536  rLoads = new 
ArrayDeque<>();
-537} else if (rLoads.size() >= 
numRegionLoadsToRemember) {
-538  rLoads.remove();
-539}
-540rLoads.add(new 
BalancerRegionLoad(rm));
-541
loads.put(Bytes.toString(regionName), rLoads);
-542  });
-543});
-544
-545for(CostFromRegionLoadFunction cost : 
regionLoadFunctions) {
-546  cost.setLoads(loads);
-547}
-548  }
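
The removed block keeps a bounded history of load samples per region: before appending a new BalancerRegionLoad it evicts the oldest entry once the deque holds numRegionLoadsToRemember samples. The rolling-window idiom in isolation (regionKey is illustrative):

Deque<BalancerRegionLoad> rLoads =
    loads.computeIfAbsent(regionKey, k -> new ArrayDeque<>());
if (rLoads.size() >= numRegionLoadsToRemember) {
  rLoads.remove();   // Deque.remove() drops the oldest (head) sample
}
rLoads.add(new BalancerRegionLoad(rm));
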
-549
-550  protected void initCosts(Cluster 
cluster) {
-551for (CostFunction c:costFunctions) 
{
-552  c.init(cluster);
-553}
-554  }
-555
-556  protected void 
updateCostsWithAction(Cluster cluster, Action action) {
-557for (CostFunction c : costFunctions) 
{
-558  c.postAction(action);
-559}
-560  }
-561
-562  /**
-563   * Get the names of the cost 
functions
-564   */
-565  public String[] getCostFunctionNames() 
{
-566if (costFunctions == null) return 
null;
-567String[] ret = new 
String[costFunctions.length];
-568for (int i = 0; i < 
costFunctions.length; i++) {
-569  CostFunction c = 
costFunctions[i];
-570  ret[i] = 
c.getClass().getSimpleName();
-571}
-572
-573return ret;
-574  }
-575
-576  /**
-577   * This is the main cost function.  It 
will compute a cost associated with a proposed cluster
-578   * state.  All different costs will be 
combined with their multipliers to produce a double cost.
-579   *
-580   * @param cluster The state of the 
cluster
-581   * @param previousCost the previous 
cost. This is used as an early out.
-582   * @return a double of a cost 
associated with the proposed cluster state.  This cost is an
-583   * aggregate of all individual 
cost functions.
-584   */
-585  protected double computeCost(Cluster 
cluster, double previousCost) {
-586double total = 0;
-587
-588for (int i = 0; i < 
costFunctions.length; i++) {
-589  CostFunction c = 
costFunctions[i];
-590  this.tempFunctionCosts[i] = 0.0;
-591
-592  if (c.getMultiplier() <= 0) {
-593continue;
-594  }
-595
-596  Float multiplier = 
c.getMultiplier();
-597  Double cost = c.cost();
-598
-599  this.tempFunctionCosts[i] = 
multiplier*cost;
-600  total += 
this.tempFunctionCosts[i];
-601
-602  if (total > previousCost) {
-603break;
-604  }
-605}
-606
-607return total;
-608  }
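
Reduced to its essentials, computeCost is a weighted sum with an early exit: once the running total exceeds previousCost the candidate state cannot win, so the remaining cost functions are skipped. A standalone sketch of that loop:

static double computeCost(CostFunction[] costFunctions, double previousCost) {
  double total = 0;
  for (CostFunction c : costFunctions) {
    if (c.getMultiplier() <= 0) {
      continue;   // disabled cost function contributes nothing
    }
    total += c.getMultiplier() * c.cost();
    if (total > previousCost) {
      break;      // already worse than the current state: early out
    }
  }
  return total;
}
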
-609
-610  /** Generates a candidate action to be 
applied to the cluster for cost function search */
-611  abstract static class 
CandidateGenerator {
-612abstract Cluster.Action 
generate(Cluster cluster);
-613
-614/**
-615 * From a list of regions pick a 
random one. Null can be returned which
-616 * {@link 
StochasticLoadBalancer#balanceCluster(Map)} recognize as signal to try a region 
move
-617 * rather than swap.
-618 *
-619 * @param clusterThe state of 
the cluster
-620 * @param server index of the 
server
-621 * @param chanceOfNoSwap Chance that 
this will decide to try a move rather
-622 *   than a 
swap.
-623 * @return a random {@link 
RegionInfo} or null if an asymmetrical move is
-624 * suggested.
-625 */
-626protected int 
pickRandomRegion(Cluster cluster, int server, double chanceOfNoSwap) {
-627  // Check to see if this is just a 
move.
-628  if 
(cluster.regionsPerServer[server].length == 0 || RANDOM.nextFloat() < 
chanceOfNoSwap) {
-629// signal a move only.
-630return -1;
-631  }
-632  int rand = 
RANDOM.nextInt(cluster.regionsPerServer[server].length);
-633  return 
cluster.regionsPerServer[server][rand];
-634
-635}
-636protected int 
pickRandomServer(Cluster cluster) {
-637  if (cluster.numServers < 1) {
-638return -1;
-639  }
-640
-641  return 
RANDOM.nextInt(cluster.numServers);
-642}
-643
-644protected int pickRandomRack(Cluster 
cluster) {
-645  if (cluster.numRack

[13/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0c6f447e/apidocs/org/apache/hadoop/hbase/TableNotDisabledException.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/TableNotDisabledException.html 
b/apidocs/org/apache/hadoop/hbase/TableNotDisabledException.html
index 0a51c6a..4c5a7f0 100644
--- a/apidocs/org/apache/hadoop/hbase/TableNotDisabledException.html
+++ b/apidocs/org/apache/hadoop/hbase/TableNotDisabledException.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd";>
 
-
+
 
 
 
@@ -20,38 +20,38 @@
 //-->
 
 
-您的浏览器已禁用 JavaScript。
+JavaScript is disabled on your browser.
 
 
 
 
 
-跳过导航链接
+Skip navigation links
 
 
 
-
-概览
-程序包
-类
-使用
-树
-已过时
-索引
-帮助
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
 
 
 
 
-上一个类
-下一个类
+Prev Class
+Next Class
 
 
-框架
-无框架
+Frames
+No Frames
 
 
-所有类
+All Classes
 
 
 
 
org.apache.hadoop.hbase
-

类 TableNotDisabledException

+

Class TableNotDisabledException


[13/51] [partial] hbase-site git commit: Published site at e66a6603e36ecd67237ca16acd5e2de03f0d372d.

2018-07-19 Thread zhangduo
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/5427a45e/apidocs/org/apache/hadoop/hbase/TableName.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/TableName.html 
b/apidocs/org/apache/hadoop/hbase/TableName.html
index 79df019..9efaa27 100644
--- a/apidocs/org/apache/hadoop/hbase/TableName.html
+++ b/apidocs/org/apache/hadoop/hbase/TableName.html
@@ -1,6 +1,6 @@
 http://www.w3.org/TR/html4/loose.dtd";>
 
-
+
 
 
 
@@ -19,45 +19,45 @@
 }
 //-->
 var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":10,"i19":10,"i20":10,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9};
-var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
+var tabs = 
{65535:["t0","所有方法"],1:["t1","静态方法"],2:["t2","实例方法"],8:["t4","å
…·ä½“方法"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
 var tableTab = "tableTab";
 var activeTableTab = "activeTableTab";
 
 
-JavaScript is disabled on your browser.
+您的浏览器已禁用 JavaScript。
 
 
 
 
 
-Skip navigation links
+跳过导航链接
 
 
 
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
+
+概览
+程序包
+类
+使用
+树
+已过时
+索引
+帮助
 
 
 
 
-Prev Class
-Next Class
+上一个类
+下一个类
 
 
-Frames
-No Frames
+框架
+无框架
 
 
-All Classes
+所有类
 
 
 

[13/51] [partial] hbase-site git commit: Published site at 0f23784182ab88649de340d75804e0ff20dcd0fc.

2018-07-03 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bcb555af/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslClientCallbackHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslClientCallbackHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslClientCallbackHandler.html
index 05e032c..40ef9f4 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslClientCallbackHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.SaslClientCallbackHandler.html
@@ -25,767 +25,805 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
-021import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+020import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+021import static 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE;
 022
-023import 
org.apache.hbase.thirdparty.com.google.common.base.Charsets;
-024import 
org.apache.hbase.thirdparty.com.google.common.base.Throwables;
-025import 
org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
-026import 
org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-027import 
com.google.protobuf.CodedOutputStream;
-028
-029import 
org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;
-030import 
org.apache.hbase.thirdparty.io.netty.buffer.ByteBufOutputStream;
-031import 
org.apache.hbase.thirdparty.io.netty.buffer.CompositeByteBuf;
-032import 
org.apache.hbase.thirdparty.io.netty.buffer.Unpooled;
-033import 
org.apache.hbase.thirdparty.io.netty.channel.Channel;
-034import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelDuplexHandler;
-035import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext;
-036import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelOutboundHandlerAdapter;
-037import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline;
-038import 
org.apache.hbase.thirdparty.io.netty.channel.ChannelPromise;
-039import 
org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler;
-040import 
org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder;
-041import 
org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToByteEncoder;
-042import 
org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufDecoder;
-043import 
org.apache.hbase.thirdparty.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-044import 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateEvent;
-045import 
org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler;
-046import 
org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise;
-047
-048import java.io.IOException;
-049import java.lang.reflect.Field;
-050import 
java.lang.reflect.InvocationTargetException;
-051import java.lang.reflect.Method;
-052import java.net.InetAddress;
-053import java.net.InetSocketAddress;
-054import java.nio.ByteBuffer;
-055import 
java.security.GeneralSecurityException;
-056import java.util.Arrays;
-057import java.util.Collections;
-058import java.util.List;
-059import java.util.Map;
-060import java.util.Set;
-061import java.util.concurrent.TimeUnit;
-062import 
java.util.concurrent.atomic.AtomicBoolean;
-063
-064import 
javax.security.auth.callback.Callback;
-065import 
javax.security.auth.callback.CallbackHandler;
-066import 
javax.security.auth.callback.NameCallback;
-067import 
javax.security.auth.callback.PasswordCallback;
-068import 
javax.security.auth.callback.UnsupportedCallbackException;
-069import 
javax.security.sasl.RealmCallback;
-070import 
javax.security.sasl.RealmChoiceCallback;
-071import javax.security.sasl.Sasl;
-072import javax.security.sasl.SaslClient;
-073import 
javax.security.sasl.SaslException;
-074
-075import 
org.apache.commons.codec.binary.Base64;
-076import 
org.apache.commons.lang3.StringUtils;
-077import 
org.apache.hadoop.conf.Configuration;
-078import 
org.apache.hadoop.crypto.CipherOption;
-079import 
org.apache.hadoop.crypto.CipherSuite;
-080import 
org.apache.hadoop.crypto.CryptoCodec;
-081import 
org.apache.hadoop.crypto.Decryptor;
-082import 
org.apache.hadoop.crypto.Encryptor;
-083import 
org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
-084import 
org.apache.hadoop.fs.FileEncryptionInfo;
-085import 
org.apache.yetus.audience.InterfaceAudience;
-086import org.slf4j.Logger;
-087import org.slf4j.LoggerFactory;
-088
-089import com.google.protobuf.ByteString;
-090import 
org.apache.hadoop.hdfs.DFSClient;
-091import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-092import 
org.apach

[13/51] [partial] hbase-site git commit: Published site at 85b41f36e01214b6485c9352875c84ebf877dab3.

2018-06-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a5c66de0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
index c10cfbf..a3e2f4a 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteTableFuture.html
@@ -3371,7 +3371,7 @@
 3363private V result = null;
 3364
 3365private final HBaseAdmin admin;
-3366private final Long procId;
+3366protected final Long procId;
 3367
 3368public ProcedureFuture(final 
HBaseAdmin admin, final Long procId) {
 3369  this.admin = admin;
@@ -3653,653 +3653,651 @@
 3645 * @return a description of the 
operation
 3646 */
 3647protected String getDescription() 
{
-3648  return "Operation: " + 
getOperationType() + ", "
-3649  + "Table Name: " + 
tableName.getNameWithNamespaceInclAsString();
-3650
-3651}
-3652
-3653protected abstract class 
TableWaitForStateCallable implements WaitForStateCallable {
-3654  @Override
-3655  public void 
throwInterruptedException() throws InterruptedIOException {
-3656throw new 
InterruptedIOException("Interrupted while waiting for operation: "
-3657+ getOperationType() + " on 
table: " + tableName.getNameWithNamespaceInclAsString());
-3658  }
-3659
-3660  @Override
-3661  public void 
throwTimeoutException(long elapsedTime) throws TimeoutException {
-3662throw new TimeoutException("The 
operation: " + getOperationType() + " on table: " +
-3663tableName.getNameAsString() 
+ " has not completed after " + elapsedTime + "ms");
-3664  }
-3665}
-3666
-3667@Override
-3668protected V 
postOperationResult(final V result, final long deadlineTs)
-3669throws IOException, 
TimeoutException {
-3670  LOG.info(getDescription() + " 
completed");
-3671  return 
super.postOperationResult(result, deadlineTs);
-3672}
-3673
-3674@Override
-3675protected V 
postOperationFailure(final IOException exception, final long deadlineTs)
-3676throws IOException, 
TimeoutException {
-3677  LOG.info(getDescription() + " 
failed with " + exception.getMessage());
-3678  return 
super.postOperationFailure(exception, deadlineTs);
-3679}
-3680
-3681protected void 
waitForTableEnabled(final long deadlineTs)
-3682throws IOException, 
TimeoutException {
-3683  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3684@Override
-3685public boolean checkState(int 
tries) throws IOException {
-3686  try {
-3687if 
(getAdmin().isTableAvailable(tableName)) {
-3688  return true;
-3689}
-3690  } catch 
(TableNotFoundException tnfe) {
-3691LOG.debug("Table " + 
tableName.getNameWithNamespaceInclAsString()
-3692+ " was not enabled, 
sleeping. tries=" + tries);
-3693  }
-3694  return false;
-3695}
-3696  });
-3697}
-3698
-3699protected void 
waitForTableDisabled(final long deadlineTs)
-3700throws IOException, 
TimeoutException {
-3701  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3702@Override
-3703public boolean checkState(int 
tries) throws IOException {
-3704  return 
getAdmin().isTableDisabled(tableName);
-3705}
-3706  });
-3707}
-3708
-3709protected void 
waitTableNotFound(final long deadlineTs)
-3710throws IOException, 
TimeoutException {
-3711  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3712@Override
-3713public boolean checkState(int 
tries) throws IOException {
-3714  return 
!getAdmin().tableExists(tableName);
-3715}
-3716  });
-3717}
-3718
-3719protected void 
waitForSchemaUpdate(final long deadlineTs)
-3720throws IOException, 
TimeoutException {
-3721  waitForState(deadlineTs, new 
TableWaitForStateCallable() {
-3722@Override
-3723public boolean checkState(int 
tries) throws IOException {
-3724  return 
getAdmin().getAlterStatus(tableName).getFirst() == 0;
-3725}
-3726  });
-3727}
-3728
-3729protected void 
waitForAllRegionsOnline(final long deadlineTs, final byte[][] splitKeys)
-3730throws IOException, 
TimeoutException {
-3731  final TableDescriptor desc = 
getTableDescriptor();
-3732  final AtomicInteger actualRegCount 
= new AtomicInteger(0);
-3733  final MetaTableAccessor.Visitor 
visitor = new MetaTableAccessor.Visitor() {
-3734@Override
-3735public boolean visit(Result 
rowResult) throws IOException {
-3736  
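
Each helper above hands waitForState a TableWaitForStateCallable whose checkState polls the master until the condition holds or the deadline passes. A generic sketch of that polling loop (the real implementation lives in ProcedureFuture and uses retry pauses and operation-specific messages; the fixed 100 ms sleep here is an assumption):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.TimeoutException;

interface WaitForStateCallable {
  boolean checkState(int tries) throws IOException;
  void throwInterruptedException() throws InterruptedIOException;
  void throwTimeoutException(long elapsedTime) throws TimeoutException;
}

static void waitForState(long deadlineTs, WaitForStateCallable callable)
    throws IOException, TimeoutException {
  long start = System.currentTimeMillis();
  for (int tries = 0; ; tries++) {
    if (callable.checkState(tries)) {
      return;   // condition reached
    }
    long now = System.currentTimeMillis();
    if (now > deadlineTs) {
      callable.throwTimeoutException(now - start);
    }
    try {
      Thread.sleep(100);
    } catch (InterruptedException e) {
      callable.throwInterruptedException();
    }
  }
}
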

[13/51] [partial] hbase-site git commit: Published site at 6198e1fc7dfa85c3bc6b2855f9a5fb5f4b2354ff.

2018-06-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb5d2c62/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html 
b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
index 5974c8b..4f26d3d 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/MasterCoprocessorHost.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 
109":10,"i110":10,"i111":10,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class MasterCoprocessorHost
+public class MasterCoprocessorHost
 extends CoprocessorHost
 Provides the coprocessor framework and environment for 
master oriented
  operations.  HMaster interacts with the 
loaded coprocessors
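
For illustration, a coprocessor this host would load and invoke — a hedged sketch assuming the HBase 2.x observer interfaces, overriding only postTruncateTable from the method table below:

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.slf4j.LoggerFactory;

public class AuditMasterObserver implements MasterCoprocessor, MasterObserver {
  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);   // expose this class as the observer
  }

  @Override
  public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
      TableName tableName) throws IOException {
    // invoked by MasterCoprocessorHost after a table is truncated
    LoggerFactory.getLogger(AuditMasterObserver.class)
        .info("table {} truncated", tableName);
  }
}
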
@@ -583,204 +583,210 @@ extends 
 void
-postTruncateTable(TableName tableName) 
+postTransitReplicationPeerSyncReplicationState(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String peerId,
+  SyncReplicationState from,
+  SyncReplicationState to) 
 
 
 void
+postTruncateTable(TableName tableName) 
+
+
+void
 postUnassign(RegionInfo regionInfo,
 boolean force) 
 
-
+
 void
 postUpdateReplicationPeerConfig(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String peerId,
ReplicationPeerConfig peerConfig) 
 
-
+
 void
 preAbortProcedure(ProcedureExecutor procEnv,
  long procId) 
 
-
+
 void
 preAddReplicationPeer(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang

[13/51] [partial] hbase-site git commit: Published site at 14087cc919da9f2e0b1a68f701f6365ad9d1d71f.

2018-06-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/55ce8d97/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
index 5335c61..08bc2fb 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
@@ -332,7 +332,7 @@ implements MetricsRegionServerSource
-APPEND_KEY,
 AVERAGE_REGION_SIZE,
 AVERAGE_REGION_SIZE_DESC,
 AVG_STORE_FILE_AGE,
 AVG_STORE_FILE_AGE_DESC,
 BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT,
 BLOCK_CAC
 HE_BLOOM_CHUNK_MISS_COUNT, BLOCK_CACHE_COUNT,
 BLOCK_CACHE_COUNT_DESC,
 BLOCK_CACHE_DATA_HIT_COUNT,
 BLOCK_CACHE_DATA_MISS_COUNT,
 BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT,
 BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT,
 BLOCK_CACHE_ENCODED_DATA_HIT_COUNT,
 BLOCK_CACHE_ENCODED_DATA_MISS_COUNT,
 BLOCK_CACHE_EVICTION_COUNT,
 BLOCK_CACHE_EVICTION_COUNT_DESC,
 BLOCK_CACHE_EXPRESS_HIT_PERCENT,
 BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC,
 BLOCK_CACHE_FAILED_INSERTION_COUNT,
 BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC,
 BLOCK_CACHE_FILE_INFO_HIT_COUNT,
 BLOCK_CACHE_FILE_INFO_MISS_COUNT,
 BLOCK_CACHE_FREE_DESC,
 BLOCK_CACHE_FREE_SIZE,
 BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT,
 BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT,
 BLOCK_CACHE_HIT_COUNT,
 BLOCK_CACHE_HIT_COUNT_DESC,
 BLOCK_CACHE_HIT_PERCENT,
 BLOCK_CACHE_HIT_PERCENT_DESC,
 BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT,
 BLOCK_CACHE_LEAF_INDEX_HIT_COUNT,
 BLOCK_CACHE_LEAF_INDEX_MISS_COUNT,
 BLOCK_CACHE_META_HIT_COUNT,
 BLOCK_CACHE_META_MISS_COUNT,
 BLOCK_CACHE_MISS_COUNT, BLOCK_CACHE_PRIMARY_EVICTION_COUNT,
 BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC,
 BLOCK_CACHE_PRIMARY_HIT_COUNT,
 BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC,
 BLOCK_CACHE_PRIMARY_MISS_COUNT,
 B
 LOCK_CACHE_ROOT_INDEX_HIT_COUNT, BLOCK_CACHE_ROOT_INDEX_MISS_COUNT,
 BLOCK_CACHE_SIZE,
 BLOCK_CACHE_SIZE_DESC,
 BLOCK_CACHE_TRAILER_HIT_COUNT,
 BLOCK_CACHE_TRAILER_MISS_COUNT,
 BLOCK_COUNT_MISS_COUNT_DESC,
 BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC,
 BLOCKED_REQUESTS_COUNT,
 BLOCKED_REQUESTS_COUNT_DESC,
 CELLS_COUNT_COMPACTED_FROM_MOB,
 CELLS_COUNT_COMPACTED_FROM_MOB_DESC,
 CELLS_COUNT_COMPACTED_TO_MOB,
 CELLS_COUNT_COMPACTED_TO_MOB_DESC, CELLS_SIZE_COMPACTED_FROM_MOB,
 CELLS_SIZE_COMPACTED_FROM_MOB_DESC,
 CELLS_SIZE_COMPACTED_TO_MOB,
 CELLS_SIZE_COMPACTED_TO_MOB_DESC,
 CHECK_AND_DELETE_KEY,
 CHECK_AND_PUT_KEY,
 CHECK_MUTATE_FAILED_COUNT,
 CHECK_MUTATE_FAILED_COUNT_DESC,
 CHECK_MUTATE_PASSED_COUNT,
 CHECK_MUTATE_PASSED_COUNT_DESC,
 CLUSTER_ID_DESC,
 CLUSTER_ID_NAME,
 COMPACTED_CE
 LLS, COMPACTED_CELLS_DESC,
 COMPACTED_CELLS_SIZE,
 COMPACTED_CELLS_SIZE_DESC,
 COMPACTED_INPUT_BYTES,
 COMPACTED_INPUT_BYTES_DESC,
 COMPACTED_OUTPUT_BYTES,
 COMPACTED_OUTPUT_BYTES_DESC, COMPACTION_INPUT_FILE_COUNT,
 COMPACTION_INPUT_FILE_COUNT_DESC,
 COMPACTION_INPUT_SIZE,
 COMPACTION_INPUT_SIZE_DESC,
 COMPACTION_OUTPUT_FILE_COUNT,
 COMPACTION_OUTPUT_FILE_COUNT_DESC,
 COMPACTION_OUTPUT_SIZE,
 COMPACTION_OUTPUT_SIZE_DESC,
 COMPACTION_QUEUE_LENGTH,
 COMPACTION_QUEUE_LENGTH_DESC,
 COMPACTION_TIME,
 COMPACTION_TIME_DESC,
 DATA_SIZE_WITHOUT_WAL,
 DATA_SIZE_WITHOUT_WAL_DESC,
 DELETE_BATCH_KEY,
 DELETE_KEY,
 FILTERED_READ_REQUEST_COUNT,
 FILTERED_READ_REQUEST_COUNT_DESC,
 FLUSH_MEMSTORE_SIZE,
 FLUSH_MEMSTORE_SIZE_DESC,
 FLUSH_OUTPUT_SIZE,
 FLUSH_OUTPUT_SIZE_DESC,
 FLUSH_QUEUE_LENGTH,
 FLUSH_QUEUE_LENGTH_DESC,
 FLUSH_TIME,
 FLUSH_TIME_DESC,
 FLUSHED_CELLS,
 FLUSHED_CELLS_DESC, FLUSHED_CELLS_SIZE,
 FLUSHED_CELLS_SIZE_DESC,
 FLUSHED_MEMSTORE_BYTES,
 FLUSHED_MEMSTORE_BYTES_DESC,
 FLUSHED_OUTPUT_BYTES,
 FLUSHED_OUTPUT_BYTES_DESC,
 GET_KEY, GET_SIZE_KEY,
 HEDGED_READ_WINS,
 HEDGED_READ_WINS_DESC,
 HEDGED_READS,
 HEDGED_READS_DESC,
 INCREMENT_KEY,
 L1_CACHE_HIT_COUNT,
 L1_CACHE_HIT_COUNT_DESC,
 L1_CACHE_HIT_RATIO,
 L1_CACHE_HIT_RATIO_DESC,
 L1_CACHE_MISS_COUNT,
 L1_CACHE_MISS_COUNT_DESC,
 L1_CACHE_MISS_RATIO,
 L1_CACHE_MISS_RATIO_DESC,
 L2_CACHE_HIT_COUNT,
 L2_CACHE_HIT_COUNT_DESC,
 L2_CACHE_HIT_RATIO,
 L

[13/51] [partial] hbase-site git commit: Published site at 72784c2d836a4b977667449d3adec5e8d15453f5.

2018-06-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2b11656f/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
index b6e7636..592c2cc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.TruncateTableFuture.html
@@ -356,3901 +356,3924 @@
 348  public Future 
modifyTableAsync(TableDescriptor td) throws IOException {
 349ModifyTableResponse response = 
executeCallable(
 350  new 
MasterCallable(getConnection(), 
getRpcControllerFactory()) {
-351@Override
-352protected ModifyTableResponse 
rpcCall() throws Exception {
-353  
setPriority(td.getTableName());
-354  ModifyTableRequest request = 
RequestConverter.buildModifyTableRequest(
-355td.getTableName(), td, 
ng.getNonceGroup(), ng.newNonce());
-356  return 
master.modifyTable(getRpcController(), request);
-357}
-358  });
-359return new ModifyTableFuture(this, 
td.getTableName(), response);
-360  }
-361
-362  @Override
-363  public List 
listTableDescriptorsByNamespace(byte[] name) throws IOException {
-364return executeCallable(new 
MasterCallable>(getConnection(),
-365getRpcControllerFactory()) {
-366  @Override
-367  protected 
List rpcCall() throws Exception {
-368return 
master.listTableDescriptorsByNamespace(getRpcController(),
-369
ListTableDescriptorsByNamespaceRequest.newBuilder()
-370  
.setNamespaceName(Bytes.toString(name)).build())
-371.getTableSchemaList()
-372.stream()
-373
.map(ProtobufUtil::toTableDescriptor)
-374
.collect(Collectors.toList());
-375  }
-376});
-377  }
-378
-379  @Override
-380  public List 
listTableDescriptors(List tableNames) throws IOException {
-381return executeCallable(new 
MasterCallable>(getConnection(),
-382getRpcControllerFactory()) {
-383  @Override
-384  protected 
List rpcCall() throws Exception {
-385GetTableDescriptorsRequest req 
=
-386
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-387  return 
ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
-388  req));
-389  }
-390});
-391  }
-392
-393  @Override
-394  public List 
getRegions(final ServerName sn) throws IOException {
-395AdminService.BlockingInterface admin 
= this.connection.getAdmin(sn);
-396// TODO: There is no timeout on this 
controller. Set one!
-397HBaseRpcController controller = 
rpcControllerFactory.newController();
-398return 
ProtobufUtil.getOnlineRegions(controller, admin);
-399  }
-400
-401  @Override
-402  public List 
getRegions(TableName tableName) throws IOException {
-403if 
(TableName.isMetaTableName(tableName)) {
-404  return 
Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO);
-405} else {
-406  return 
MetaTableAccessor.getTableRegions(connection, tableName, true);
-407}
-408  }
-409
-410  private static class 
AbortProcedureFuture extends ProcedureFuture {
-411private boolean isAbortInProgress;
-412
-413public AbortProcedureFuture(
-414final HBaseAdmin admin,
-415final Long procId,
-416final Boolean abortProcResponse) 
{
-417  super(admin, procId);
-418  this.isAbortInProgress = 
abortProcResponse;
-419}
-420
-421@Override
-422public Boolean get(long timeout, 
TimeUnit unit)
-423throws InterruptedException, 
ExecutionException, TimeoutException {
-424  if (!this.isAbortInProgress) {
-425return false;
-426  }
-427  super.get(timeout, unit);
-428  return true;
-429}
-430  }
-431
-432  /** @return Connection used by this 
object. */
-433  @Override
-434  public Connection getConnection() {
-435return connection;
-436  }
-437
-438  @Override
-439  public boolean tableExists(final 
TableName tableName) throws IOException {
-440return executeCallable(new 
RpcRetryingCallable() {
-441  @Override
-442  protected Boolean rpcCall(int 
callTimeout) throws Exception {
-443return 
MetaTableAccessor.tableExists(connection, tableName);
-444  }
-445});
-446  }
-447
-448  @Override
-449  public HTableDescriptor[] listTables() 
throws IOException {
-450return listTables((Pattern)null, 
false);
-451  }
-452
-453  @Override
-454  public HTableDescriptor[] 
listTables(Patte

[13/51] [partial] hbase-site git commit: Published site at 9101fc246f86445006bfbcdfda5cc495016dc280.

2018-06-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/65565d77/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
index eb16038..74bacd8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/HMaster.InitializationMonitor.html
@@ -6,7 +6,7 @@
 
 
 
-001/*
+001/**
 002 * Licensed to the Apache Software 
Foundation (ASF) under one
 003 * or more contributor license 
agreements.  See the NOTICE file
 004 * distributed with this work for 
additional information
@@ -48,692 +48,692 @@
 040import java.util.Map;
 041import java.util.Map.Entry;
 042import java.util.Objects;
-043import java.util.Set;
-044import 
java.util.concurrent.ExecutionException;
-045import java.util.concurrent.Future;
-046import java.util.concurrent.TimeUnit;
-047import 
java.util.concurrent.TimeoutException;
-048import 
java.util.concurrent.atomic.AtomicInteger;
-049import 
java.util.concurrent.atomic.AtomicReference;
-050import java.util.function.Function;
-051import java.util.regex.Pattern;
-052import java.util.stream.Collectors;
-053import javax.servlet.ServletException;
-054import javax.servlet.http.HttpServlet;
-055import 
javax.servlet.http.HttpServletRequest;
-056import 
javax.servlet.http.HttpServletResponse;
-057import 
org.apache.commons.lang3.StringUtils;
-058import 
org.apache.hadoop.conf.Configuration;
-059import org.apache.hadoop.fs.Path;
-060import 
org.apache.hadoop.hbase.ClusterId;
-061import 
org.apache.hadoop.hbase.ClusterMetrics;
-062import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-063import 
org.apache.hadoop.hbase.ClusterMetricsBuilder;
-064import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-065import 
org.apache.hadoop.hbase.HBaseIOException;
-066import 
org.apache.hadoop.hbase.HBaseInterfaceAudience;
-067import 
org.apache.hadoop.hbase.HConstants;
-068import 
org.apache.hadoop.hbase.InvalidFamilyOperationException;
-069import 
org.apache.hadoop.hbase.MasterNotRunningException;
-070import 
org.apache.hadoop.hbase.MetaTableAccessor;
-071import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-072import 
org.apache.hadoop.hbase.PleaseHoldException;
-073import 
org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
-074import 
org.apache.hadoop.hbase.ScheduledChore;
-075import 
org.apache.hadoop.hbase.ServerName;
-076import 
org.apache.hadoop.hbase.TableDescriptors;
-077import 
org.apache.hadoop.hbase.TableName;
-078import 
org.apache.hadoop.hbase.TableNotDisabledException;
-079import 
org.apache.hadoop.hbase.TableNotFoundException;
-080import 
org.apache.hadoop.hbase.UnknownRegionException;
-081import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-082import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-083import 
org.apache.hadoop.hbase.client.MasterSwitchType;
-084import 
org.apache.hadoop.hbase.client.RegionInfo;
-085import 
org.apache.hadoop.hbase.client.Result;
-086import 
org.apache.hadoop.hbase.client.TableDescriptor;
-087import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-088import 
org.apache.hadoop.hbase.client.TableState;
-089import 
org.apache.hadoop.hbase.client.VersionInfoUtil;
-090import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-091import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-092import 
org.apache.hadoop.hbase.exceptions.MergeRegionException;
-093import 
org.apache.hadoop.hbase.executor.ExecutorType;
-094import 
org.apache.hadoop.hbase.favored.FavoredNodesManager;
-095import 
org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
-096import 
org.apache.hadoop.hbase.http.InfoServer;
-097import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-098import 
org.apache.hadoop.hbase.ipc.RpcServer;
-099import 
org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
-100import 
org.apache.hadoop.hbase.log.HBaseMarkers;
-101import 
org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
-102import 
org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-103import 
org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
-104import 
org.apache.hadoop.hbase.master.assignment.RegionStates;
-105import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-106import 
org.apache.hadoop.hbase.master.balancer.BalancerChore;
-107import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
-108import 
org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
-109import 
org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-110import 
org.apache.hadoop.hbase.master.cleaner.CleanerChore;
-111import 
org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-112import 
org.apache.hadoop.hb

[13/51] [partial] hbase-site git commit: Published site at 0b28155d274910b4e667b949d51f78809a1eff0b.

2018-06-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e11cf2cb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.ByteStringCompressor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.ByteStringCompressor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.ByteStringCompressor.html
index 83c17c0..9df0225 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.ByteStringCompressor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.ByteStringCompressor.html
@@ -54,323 +54,362 @@
 046import org.apache.hadoop.io.IOUtils;
 047
 048import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
-049
+049import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;
 050
-051/**
-052 * Compression in this class is lifted off Compressor/KeyValueCompression.
-053 * This is a pure coincidence... they are independent and don't have to be compatible.
-054 *
-055 * This codec is used at server side for writing cells to WAL as well as for sending edits
-056 * as part of the distributed splitting process.
-057 */
-058@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC,
-059  HBaseInterfaceAudience.PHOENIX, HBaseInterfaceAudience.CONFIG})
-060public class WALCellCodec implements Codec {
-061  /** Configuration key for the class to use when encoding cells in the WAL */
-062  public static final String WAL_CELL_CODEC_CLASS_KEY = "hbase.regionserver.wal.codec";
-063
-064  protected final CompressionContext compression;
-065  protected final ByteStringUncompressor statelessUncompressor = new ByteStringUncompressor() {
-066    @Override
-067    public byte[] uncompress(ByteString data, Dictionary dict) throws IOException {
-068      return WALCellCodec.uncompressByteString(data, dict);
-069    }
-070  };
-071
-072  /**
-073   * All subclasses must implement a no argument constructor
-074   */
-075  public WALCellCodec() {
-076    this.compression = null;
-077  }
-078
-079  /**
-080   * Default constructor - all subclasses must implement a constructor with this signature
-081   * if they are to be dynamically loaded from the {@link Configuration}.
-082   * @param conf configuration to configure this
-083   * @param compression compression the codec should support, can be null to indicate no
-084   *          compression
-085   */
-086  public WALCellCodec(Configuration conf, CompressionContext compression) {
-087    this.compression = compression;
-088  }
-089
-090  public static String getWALCellCodecClass(Configuration conf) {
-091    return conf.get(WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
-092  }
-093
-094  /**
-095   * Create and setup a {@link WALCellCodec} from the {@code cellCodecClsName} and
-096   * CompressionContext, if {@code cellCodecClsName} is specified.
-097   * Otherwise Cell Codec classname is read from {@link Configuration}.
-098   * Fully prepares the codec for use.
-099   * @param conf {@link Configuration} to read for the user-specified codec. If none is specified,
-100   *          uses a {@link WALCellCodec}.
-101   * @param cellCodecClsName name of codec
-102   * @param compression compression the codec should use
-103   * @return a {@link WALCellCodec} ready for use.
-104   * @throws UnsupportedOperationException if the codec cannot be instantiated
-105   */
-106
-107  public static WALCellCodec create(Configuration conf, String cellCodecClsName,
-108      CompressionContext compression) throws UnsupportedOperationException {
-109    if (cellCodecClsName == null) {
-110      cellCodecClsName = getWALCellCodecClass(conf);
-111    }
-112    return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]
-113        { Configuration.class, CompressionContext.class }, new Object[] { conf, compression });
-114  }
-115
-116  /**
-117   * Create and setup a {@link WALCellCodec} from the
-118   * CompressionContext.
-119   * Cell Codec classname is read from {@link Configuration}.
-120   * Fully prepares the codec for use.
-121   * @param conf {@link Configuration} to read for the user-specified codec. If none is specified,
-122   *          uses a {@link WALCellCodec}.
-123   * @param compression compression the codec should use
-124   * @return a {@link WALCellCodec} ready for use.
-125   * @throws UnsupportedOperationException if the codec cannot be instantiated
-126   */
-127  public static WALCellCodec create(Configuration conf,
-128      CompressionContext compression) throws UnsupportedOperationException {
-129    String cellCodecClsName = getWALCellCodecClass(conf);
-130    return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]
-131        { Configuration.class, CompressionContext.class }, new Object[] { co
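
The create() factories above resolve the codec class name from the hbase.regionserver.wal.codec key and instantiate it reflectively through a (Configuration, CompressionContext) constructor. A sketch of wiring in a custom codec this way; com.example.MyWALCellCodec is hypothetical and would have to provide that constructor:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CustomWalCodecConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Point the WAL at a custom codec subclass (hypothetical class name).
    conf.set("hbase.regionserver.wal.codec", "com.example.MyWALCellCodec");
    // WALCellCodec.create(conf, compressionContext) would now reflectively
    // instantiate com.example.MyWALCellCodec; with the key unset it falls
    // back to WALCellCodec itself.
  }
}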

[13/51] [partial] hbase-site git commit: Published site at 7d3750bd9fc9747623549c242cc4171e224b3eaf.

2018-06-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/3469cbc0/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
index 5420d82..6ea3672 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/RegionStates.ServerStateNode.html
@@ -316,687 +316,728 @@
 308    }
 309  }
 310
-311  public enum ServerState { ONLINE, SPLITTING, OFFLINE }
-312  public static class ServerStateNode implements Comparable<ServerStateNode> {
-313    private final ServerReportEvent reportEvent;
-314
-315    private final Set<RegionStateNode> regions;
-316    private final ServerName serverName;
-317
-318    private volatile ServerState state = ServerState.ONLINE;
-319    private volatile int versionNumber = 0;
-320
-321    public ServerStateNode(final ServerName serverName) {
-322      this.serverName = serverName;
-323      this.regions = ConcurrentHashMap.newKeySet();
-324      this.reportEvent = new ServerReportEvent(serverName);
-325    }
-326
-327    public ServerName getServerName() {
-328      return serverName;
-329    }
+311  /**
+312   * Server State.
+313   */
+314  public enum ServerState {
+315    /**
+316     * Initial state. Available.
+317     */
+318    ONLINE,
+319
+320    /**
+321     * Server expired/crashed. Currently undergoing WAL splitting.
+322     */
+323    SPLITTING,
+324
+325    /**
+326     * WAL splitting done.
+327     */
+328    OFFLINE
+329  }
 330
-331    public ServerState getState() {
-332      return state;
-333    }
-334
-335    public int getVersionNumber() {
-336      return versionNumber;
-337    }
-338
-339    public ProcedureEvent<?> getReportEvent() {
-340      return reportEvent;
-341    }
+331  /**
+332   * State of Server; list of hosted regions, etc.
+333   */
+334  public static class ServerStateNode implements Comparable<ServerStateNode> {
+335    private final ServerReportEvent reportEvent;
+336
+337    private final Set<RegionStateNode> regions;
+338    private final ServerName serverName;
+339
+340    private volatile ServerState state = ServerState.ONLINE;
+341    private volatile int versionNumber = 0;
 342
-343    public boolean isInState(final ServerState... expected) {
-344      boolean expectedState = false;
-345      if (expected != null) {
-346        for (int i = 0; i < expected.length; ++i) {
-347          expectedState |= (state == expected[i]);
-348        }
-349      }
-350      return expectedState;
+343    public ServerStateNode(final ServerName serverName) {
+344      this.serverName = serverName;
+345      this.regions = ConcurrentHashMap.newKeySet();
+346      this.reportEvent = new ServerReportEvent(serverName);
+347    }
+348
+349    public ServerName getServerName() {
+350      return serverName;
 351    }
 352
-353    public void setState(final ServerState state) {
-354      this.state = state;
+353    public ServerState getState() {
+354      return state;
 355    }
 356
-357    public void setVersionNumber(final int versionNumber) {
-358      this.versionNumber = versionNumber;
+357    public int getVersionNumber() {
+358      return versionNumber;
 359    }
 360
-361    public Set<RegionStateNode> getRegions() {
-362      return regions;
+361    public ProcedureEvent<?> getReportEvent() {
+362      return reportEvent;
 363    }
 364
-365    public int getRegionCount() {
-366      return regions.size();
+365    public boolean isOffline() {
+366      return this.state.equals(ServerState.OFFLINE);
 367    }
 368
-369    public ArrayList<RegionInfo> getRegionInfoList() {
-370      ArrayList<RegionInfo> hris = new ArrayList<RegionInfo>(regions.size());
-371      for (RegionStateNode region: regions) {
-372        hris.add(region.getRegionInfo());
-373      }
-374      return hris;
-375    }
-376
-377    public void addRegion(final RegionStateNode regionNode) {
-378      this.regions.add(regionNode);
-379    }
-380
-381    public void removeRegion(final RegionStateNode regionNode) {
-382      this.regions.remove(regionNode);
-383    }
-384
-385    @Override
-386    public int compareTo(final ServerStateNode other) {
-387      return getServerName().compareTo(other.getServerName());
-388    }
-389
-390    @Override
-391    public int hashCode() {
-392      return getServerName().hashCode();
+369    public boolean isInState(final ServerState... expected) {
+370      boolean expectedState = false;
+371      if (expected != null) {
+372        for (int i = 0; i < expected.length; ++i) {
+373          expectedState |= (state == expected[i]);
+374        }
+375      }
+376      return 
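
The rewritten enum above documents the crashed-server lifecycle: ONLINE until the server expires, SPLITTING while its WALs are split, OFFLINE once splitting is done. isInState() is just a varargs membership test over that enum; a standalone sketch of both (illustrative, not the HBase classes themselves):

enum ServerState { ONLINE, SPLITTING, OFFLINE }

final class ServerStateCheck {
  // Equivalent to OR-ing (state == expected[i]) over the array, as above.
  static boolean isInState(ServerState state, ServerState... expected) {
    if (expected == null) {
      return false;
    }
    for (ServerState s : expected) {
      if (state == s) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    ServerState state = ServerState.SPLITTING;
    // A crashed server counts as dead for assignment in either late state:
    System.out.println(isInState(state, ServerState.SPLITTING, ServerState.OFFLINE));
  }
}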

[13/51] [partial] hbase-site git commit: Published site at 997747076d8ec0b4346d7cb99c4b0667a7c14905.

2018-05-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4df09ed9/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.RSGroupAdminServiceImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.RSGroupAdminServiceImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.RSGroupAdminServiceImpl.html
index 168462e..67da347 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.RSGroupAdminServiceImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.RSGroupAdminServiceImpl.html
@@ -213,330 +213,337 @@
 205      if (master.getMasterCoprocessorHost() != null) {
 206        master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
 207      }
-208      groupAdminServer.moveServers(hostPorts, request.getTargetGroup());
-209      if (master.getMasterCoprocessorHost() != null) {
-210        master.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup());
-211      }
-212    } catch (IOException e) {
-213      CoprocessorRpcUtils.setControllerException(controller, e);
-214    }
-215    done.run(builder.build());
-216  }
-217
-218    @Override
-219    public void moveTables(RpcController controller, MoveTablesRequest request,
-220        RpcCallback<MoveTablesResponse> done) {
-221      MoveTablesResponse.Builder builder = MoveTablesResponse.newBuilder();
-222      Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
-223      for (HBaseProtos.TableName tableName : request.getTableNameList()) {
-224        tables.add(ProtobufUtil.toTableName(tableName));
-225      }
-226      LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables + " to rsgroup "
-227          + request.getTargetGroup());
-228      try {
-229        if (master.getMasterCoprocessorHost() != null) {
-230          master.getMasterCoprocessorHost().preMoveTables(tables, request.getTargetGroup());
-231        }
-232        groupAdminServer.moveTables(tables, request.getTargetGroup());
-233        if (master.getMasterCoprocessorHost() != null) {
-234          master.getMasterCoprocessorHost().postMoveTables(tables, request.getTargetGroup());
-235        }
-236      } catch (IOException e) {
-237        CoprocessorRpcUtils.setControllerException(controller, e);
-238      }
-239      done.run(builder.build());
-240    }
-241
-242    @Override
-243    public void addRSGroup(RpcController controller, AddRSGroupRequest request,
-244        RpcCallback<AddRSGroupResponse> done) {
-245      AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
-246      LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
-247      try {
-248        if (master.getMasterCoprocessorHost() != null) {
-249          master.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName());
-250        }
-251        groupAdminServer.addRSGroup(request.getRSGroupName());
-252        if (master.getMasterCoprocessorHost() != null) {
-253          master.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName());
-254        }
-255      } catch (IOException e) {
-256        CoprocessorRpcUtils.setControllerException(controller, e);
-257      }
-258      done.run(builder.build());
-259    }
-260
-261    @Override
-262    public void removeRSGroup(RpcController controller,
-263        RemoveRSGroupRequest request, RpcCallback<RemoveRSGroupResponse> done) {
-264      RemoveRSGroupResponse.Builder builder =
-265          RemoveRSGroupResponse.newBuilder();
-266      LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
-267      try {
-268        if (master.getMasterCoprocessorHost() != null) {
-269          master.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName());
-270        }
-271        groupAdminServer.removeRSGroup(request.getRSGroupName());
-272        if (master.getMasterCoprocessorHost() != null) {
-273          master.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName());
-274        }
-275      } catch (IOException e) {
-276        CoprocessorRpcUtils.setControllerException(controller, e);
-277      }
-278      done.run(builder.build());
-279    }
-280
-281    @Override
-282    public void balanceRSGroup(RpcController controller,
-283        BalanceRSGroupRequest request, RpcCallback<BalanceRSGroupResponse> done) {
-284      BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder();
-285      LOG.info(master.getClientIdAuditPrefix() + " balance rsgroup, group=" +
-286          request.getRSGroupName());
-287      try {
-288        if (master.getMasterCoprocessorHost() != null) {
-289          master.getMasterCoprocessorHost().preBalanceRSGroup(re
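
Each RPC method in this service follows one shape: coprocessor pre-hook (which can veto by throwing), the group mutation, coprocessor post-hook, any IOException routed into the RPC controller, and the done callback always invoked. A generic sketch of that pattern with stand-in interfaces for the coprocessor host and the protobuf callback plumbing:

import java.io.IOException;

final class HookedCall {
  interface Hooks { void pre() throws IOException; void post() throws IOException; }
  interface Action { void run() throws IOException; }
  interface Responder { void fail(IOException e); void done(); }

  static void invoke(Hooks hooks, Action action, Responder responder) {
    try {
      if (hooks != null) {
        hooks.pre();       // observers may veto by throwing
      }
      action.run();        // the actual rsgroup mutation
      if (hooks != null) {
        hooks.post();      // observers see the completed change
      }
    } catch (IOException e) {
      responder.fail(e);   // mirrors CoprocessorRpcUtils.setControllerException
    }
    responder.done();      // the callback runs whether or not the call failed
  }
}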

[13/51] [partial] hbase-site git commit: Published site at f3d1c021de2264301f68eadb9ef126ff83d7ef53.

2018-05-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/883dde2f/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.FailedOpenUpdaterThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.FailedOpenUpdaterThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.FailedOpenUpdaterThread.html
index 54b1f96..ed95cbf 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.FailedOpenUpdaterThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.FailedOpenUpdaterThread.html
@@ -31,922 +31,906 @@
 023import java.io.ByteArrayInputStream;
 024import java.io.IOException;
 025import java.util.ArrayList;
-026import java.util.Arrays;
-027import java.util.Collections;
-028import java.util.HashMap;
-029import java.util.HashSet;
-030import java.util.LinkedList;
-031import java.util.List;
-032import java.util.Map;
-033import java.util.NavigableSet;
-034import java.util.Set;
-035import java.util.SortedSet;
-036import java.util.TreeSet;
-037import 
java.util.concurrent.atomic.AtomicBoolean;
-038
-039import 
org.apache.hadoop.conf.Configuration;
-040import org.apache.hadoop.hbase.Cell;
-041import 
org.apache.hadoop.hbase.CellUtil;
-042import 
org.apache.hadoop.hbase.Coprocessor;
-043import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-044import 
org.apache.hadoop.hbase.HColumnDescriptor;
-045import 
org.apache.hadoop.hbase.HConstants;
-046import 
org.apache.hadoop.hbase.HTableDescriptor;
-047import 
org.apache.hadoop.hbase.MetaTableAccessor;
-048import 
org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase;
-049import 
org.apache.hadoop.hbase.ServerName;
-050import 
org.apache.hadoop.hbase.TableName;
-051import 
org.apache.hadoop.hbase.client.ClusterConnection;
-052import 
org.apache.hadoop.hbase.client.Delete;
-053import 
org.apache.hadoop.hbase.client.Get;
-054import 
org.apache.hadoop.hbase.client.Mutation;
-055import 
org.apache.hadoop.hbase.client.Put;
-056import 
org.apache.hadoop.hbase.client.RegionInfo;
-057import 
org.apache.hadoop.hbase.client.Result;
-058import 
org.apache.hadoop.hbase.client.Scan;
-059import 
org.apache.hadoop.hbase.client.Table;
-060import 
org.apache.hadoop.hbase.client.TableState;
-061import 
org.apache.hadoop.hbase.constraint.ConstraintException;
-062import 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
-063import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
-064import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-065import 
org.apache.hadoop.hbase.master.MasterServices;
-066import 
org.apache.hadoop.hbase.master.ServerListener;
-067import 
org.apache.hadoop.hbase.master.TableStateManager;
-068import 
org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
-069import 
org.apache.hadoop.hbase.net.Address;
-070import 
org.apache.hadoop.hbase.procedure2.Procedure;
-071import 
org.apache.hadoop.hbase.protobuf.ProtobufMagic;
-072import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-073import 
org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
-074import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
-075import 
org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-076import 
org.apache.hadoop.hbase.quotas.QuotaUtil;
-077import 
org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
-078import 
org.apache.hadoop.hbase.security.access.AccessControlLists;
-079import 
org.apache.hadoop.hbase.util.Bytes;
-080import 
org.apache.hadoop.hbase.zookeeper.ZKUtil;
-081import 
org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-082import 
org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-083import 
org.apache.yetus.audience.InterfaceAudience;
-084import 
org.apache.zookeeper.KeeperException;
-085import org.slf4j.Logger;
-086import org.slf4j.LoggerFactory;
-087
-088import 
org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-089import 
org.apache.hbase.thirdparty.com.google.common.collect.Maps;
-090import 
org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-091import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-092import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-093
-094/**
-095 * This is an implementation of {@link RSGroupInfoManager} which makes
-096 * use of an HBase table as the persistence store for the group information.
-097 * It also makes use of zookeeper to store group information needed
-098 * for bootstrapping during offline mode.
-099 *
-100 * <h3>Concurrency</h3>
-101 * RSGroup state is kept locally in Maps. There is a rsgroup name to cached
-102 * RSGroupInfo Map at {@link #rsGroupMap} and a Map of tables to the name of the
-103 * rsgroup they belong too (in {@link #tableMap}). These Maps are persisted to the
-104 * hbase:rsgroup table (and cached in zk) on each modification.
-105 *
-106 * <h3>Mutations on
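
Per the javadoc above, each modification rewrites the local maps, persists them to the hbase:rsgroup table, and refreshes the copy cached in ZooKeeper for offline bootstrap. A compact sketch of that dual-write flush step; the store interfaces are stand-ins, not the manager's real API:

import java.io.IOException;
import java.util.Map;

final class GroupStoreSketch {
  interface TableStore { void persist(Map<String, byte[]> state) throws IOException; }
  interface ZkCache { void refresh(Map<String, byte[]> state) throws IOException; }

  private final TableStore table; // durable copy in hbase:rsgroup
  private final ZkCache zk;       // bootstrap copy for offline mode

  GroupStoreSketch(TableStore table, ZkCache zk) {
    this.table = table;
    this.zk = zk;
  }

  synchronized void flush(Map<String, byte[]> groupState) throws IOException {
    table.persist(groupState);
    zk.refresh(groupState);
  }
}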


[13/51] [partial] hbase-site git commit: Published site at cf529f18a9959589fa635f78df4840472526ea2c.

2018-05-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7bcc960d/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
index 3f8844b..cdb9398 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AsyncRandomReadTest.html
@@ -140,2712 +140,2713 @@
 132public class PerformanceEvaluation extends Configured implements Tool {
 133  static final String RANDOM_SEEK_SCAN = "randomSeekScan";
 134  static final String RANDOM_READ = "randomRead";
-135  private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName());
-136  private static final ObjectMapper MAPPER = new ObjectMapper();
-137  static {
-138    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-139  }
-140
-141  public static final String TABLE_NAME = "TestTable";
-142  public static final String FAMILY_NAME_BASE = "info";
-143  public static final byte[] FAMILY_ZERO = Bytes.toBytes("info0");
-144  public static final byte[] COLUMN_ZERO = Bytes.toBytes("" + 0);
-145  public static final int DEFAULT_VALUE_LENGTH = 1000;
-146  public static final int ROW_LENGTH = 26;
-147
-148  private static final int ONE_GB = 1024 * 1024 * 1000;
-149  private static final int DEFAULT_ROWS_PER_GB = ONE_GB / DEFAULT_VALUE_LENGTH;
-150  // TODO : should we make this configurable
-151  private static final int TAG_LENGTH = 256;
-152  private static final DecimalFormat FMT = new DecimalFormat("0.##");
-153  private static final MathContext CXT = MathContext.DECIMAL64;
-154  private static final BigDecimal MS_PER_SEC = BigDecimal.valueOf(1000);
-155  private static final BigDecimal BYTES_PER_MB = BigDecimal.valueOf(1024 * 1024);
-156  private static final TestOptions DEFAULT_OPTS = new TestOptions();
-157
-158  private static Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();
-159  private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
-160
-161  static {
-162    addCommandDescriptor(AsyncRandomReadTest.class, "asyncRandomRead",
-163      "Run async random read test");
-164    addCommandDescriptor(AsyncRandomWriteTest.class, "asyncRandomWrite",
-165      "Run async random write test");
-166    addCommandDescriptor(AsyncSequentialReadTest.class, "asyncSequentialRead",
-167      "Run async sequential read test");
-168    addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite",
-169      "Run async sequential write test");
-170    addCommandDescriptor(AsyncScanTest.class, "asyncScan",
-171      "Run async scan test (read every row)");
-172    addCommandDescriptor(RandomReadTest.class, RANDOM_READ,
-173      "Run random read test");
-174    addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN,
-175      "Run random seek and scan 100 test");
-176    addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10",
-177      "Run random seek scan with both start and stop row (max 10 rows)");
-178    addCommandDescriptor(RandomScanWithRange100Test.class, "scanRange100",
-179      "Run random seek scan with both start and stop row (max 100 rows)");
-180    addCommandDescriptor(RandomScanWithRange1000Test.class, "scanRange1000",
-181      "Run random seek scan with both start and stop row (max 1000 rows)");
-182    addCommandDescriptor(RandomScanWithRange1Test.class, "scanRange1",
-183      "Run random seek scan with both start and stop row (max 1 rows)");
-184    addCommandDescriptor(RandomWriteTest.class, "randomWrite",
-185      "Run random write test");
-186    addCommandDescriptor(SequentialReadTest.class, "sequentialRead",
-187      "Run sequential read test");
-188    addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite",
-189      "Run sequential write test");
-190    addCommandDescriptor(ScanTest.class, "scan",
-191      "Run scan test (read every row)");
-192    addCommandDescriptor(FilteredScanTest.class, "filterScan",
-193      "Run scan test using a filter to find a specific row based on it's value " +
-194        "(make sure to use --rows=20)");
-195    addCommandDescriptor(IncrementTest.class, "increment",
-196      "Increment on each row; clients overlap on keyspace so some concurrent operations");
-197    addCommandDescriptor(AppendTest.class, "append",
-198      "Append on each row; clients overlap on keyspace so some concurrent operations");
-199    addCommandDescriptor(CheckAndMutateTest.class, "checkAndMutate",
-200      "CheckAndMutate on each row; clients overlap on keyspace so some concurrent operations");
-201    addComma
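
The static block above (truncated here) registers every test class under a command name in a TreeMap, so lookup is by name and help output lists commands alphabetically. A simplified sketch of that registry; CmdDescriptor below is a stand-in for the tool's internal descriptor class:

import java.util.Map;
import java.util.TreeMap;

final class CommandRegistry {
  static final class CmdDescriptor {
    final Class<?> cmdClass;
    final String name;
    final String description;
    CmdDescriptor(Class<?> cmdClass, String name, String description) {
      this.cmdClass = cmdClass;
      this.name = name;
      this.description = description;
    }
  }

  // TreeMap keeps names sorted, which is what makes --help output ordered.
  private static final Map<String, CmdDescriptor> COMMANDS = new TreeMap<>();

  static void addCommandDescriptor(Class<?> cmdClass, String name, String description) {
    COMMANDS.put(name, new CmdDescriptor(cmdClass, name, description));
  }

  static CmdDescriptor lookup(String name) {
    return COMMANDS.get(name); // null means unknown command: print usage and exit
  }
}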

[13/51] [partial] hbase-site git commit: Published site at 021f66d11d2cbb7308308093e29e69d6e7661ee9.

2018-05-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/92a26cfb/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 4a879bb..7d27402 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -300,7 +300,7 @@
 292  private Map<String, com.google.protobuf.Service> coprocessorServiceHandlers = Maps.newHashMap();
 293
 294  // Track data size in all memstores
-295  private final MemStoreSizing memStoreSize = new MemStoreSizing();
+295  private final MemStoreSizing memStoreSizing = new ThreadSafeMemStoreSizing();
 296  private final RegionServicesForStores regionServicesForStores = new RegionServicesForStores(this);
 297
 298  // Debug possible data loss due to WAL off
@@ -1218,7389 +1218,7399 @@
 1210   * Increase the size of mem store in this region and the size of global mem
 1211   * store
 1212   */
-1213  public void incMemStoreSize(MemStoreSize memStoreSize) {
-1214    if (this.rsAccounting != null) {
-1215      rsAccounting.incGlobalMemStoreSize(memStoreSize);
-1216    }
-1217    long dataSize;
-1218    synchronized (this.memStoreSize) {
-1219      this.memStoreSize.incMemStoreSize(memStoreSize);
-1220      dataSize = this.memStoreSize.getDataSize();
-1221    }
-1222    checkNegativeMemStoreDataSize(dataSize, memStoreSize.getDataSize());
-1223  }
-1224
-1225  public void decrMemStoreSize(MemStoreSize memStoreSize) {
-1226    if (this.rsAccounting != null) {
-1227      rsAccounting.decGlobalMemStoreSize(memStoreSize);
-1228    }
-1229    long size;
-1230    synchronized (this.memStoreSize) {
-1231      this.memStoreSize.decMemStoreSize(memStoreSize);
-1232      size = this.memStoreSize.getDataSize();
+1213  void incMemStoreSize(MemStoreSize mss) {
+1214    incMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1215  }
+1216
+1217  void incMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1218    if (this.rsAccounting != null) {
+1219      rsAccounting.incGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1220    }
+1221    long dataSize =
+1222        this.memStoreSizing.incMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
+1223    checkNegativeMemStoreDataSize(dataSize, dataSizeDelta);
+1224  }
+1225
+1226  void decrMemStoreSize(MemStoreSize mss) {
+1227    decrMemStoreSize(mss.getDataSize(), mss.getHeapSize(), mss.getOffHeapSize());
+1228  }
+1229
+1230  void decrMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) {
+1231    if (this.rsAccounting != null) {
+1232      rsAccounting.decGlobalMemStoreSize(dataSizeDelta, heapSizeDelta, offHeapSizeDelta);
 1233    }
-1234    checkNegativeMemStoreDataSize(size, -memStoreSize.getDataSize());
-1235  }
-1236
-1237  private void checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) {
-1238    // This is extremely bad if we make memStoreSize negative. Log as much info on the offending
-1239    // caller as possible. (memStoreSize might be a negative value already -- freeing memory)
-1240    if (memStoreDataSize < 0) {
-1241      LOG.error("Asked to modify this region's (" + this.toString()
-1242          + ") memStoreSize to a negative value which is incorrect. Current memStoreSize="
-1243          + (memStoreDataSize - delta) + ", delta=" + delta, new Exception());
-1244    }
-1245  }
-1246
-1247  @Override
-1248  public RegionInfo getRegionInfo() {
-1249    return this.fs.getRegionInfo();
-1250  }
-1251
-1252  /**
-1253   * @return Instance of {@link RegionServerServices} used by this HRegion.
-1254   * Can be null.
-1255   */
-1256  RegionServerServices getRegionServerServices() {
-1257    return this.rsServices;
-1258  }
-1259
-1260  @Override
-1261  public long getReadRequestsCount() {
-1262    return readRequestsCount.sum();
-1263  }
-1264
-1265  @Override
-1266  public long getFilteredReadRequestsCount() {
-1267    return filteredReadRequestsCount.sum();
-1268  }
-1269
-1270  @Override
-1271  public long getWriteRequestsCount() {
-1272    return writeRequestsCount.sum();
-1273  }
-1274
-1275  @Override
-1276  public long getMemStoreDataSize() {
-1277    return memStoreSize.getDataSize();
-1278  }
-1279
-1280  @Override
-1281  public long getMemStoreHeapSize() {
-1282    return memStoreSize.getHeapSize();
-1283  }
-1284
-1285  @Override
-1286  public long getMemStoreOffHeapSize() {
-1287    return memStoreSize.getOffHeapSize();
-1288  }
-1289
-1290  /** @return store services for this region, to access services required by store level needs */
-1291  public RegionServicesForStores getRegionServicesForStores() {
-1292    return regionServicesForStore
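
The hunk above swaps a synchronized block around a plain MemStoreSizing for a ThreadSafeMemStoreSizing whose increment returns the updated total, so the negative-size check no longer needs a lock around the read-modify-write. A sketch of that accounting style using AtomicLong (field and method names are illustrative):

import java.util.concurrent.atomic.AtomicLong;

final class AtomicSizing {
  private final AtomicLong dataSize = new AtomicLong();

  long incMemStoreSize(long dataSizeDelta) {
    long updated = dataSize.addAndGet(dataSizeDelta); // atomic; returns new total
    if (updated < 0) {
      // Mirrors checkNegativeMemStoreDataSize: a negative total means a caller
      // miscounted; log loudly rather than throw.
      System.err.println("memstore size went negative: " + updated
          + " (delta=" + dataSizeDelta + ")");
    }
    return updated;
  }

  long decrMemStoreSize(long dataSizeDelta) {
    return incMemStoreSize(-dataSizeDelta); // a decrement is a negative delta
  }
}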

[13/51] [partial] hbase-site git commit: Published site at acd0d1e446c164d9c54bfb461b2d449c8d717c07.

2018-05-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f2065178/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
index 2510283..418c60c 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomScanWithRange1Test.html
@@ -77,77 +77,77 @@
 069import 
org.apache.hadoop.hbase.client.RowMutations;
 070import 
org.apache.hadoop.hbase.client.Scan;
 071import 
org.apache.hadoop.hbase.client.Table;
-072import 
org.apache.hadoop.hbase.filter.BinaryComparator;
-073import 
org.apache.hadoop.hbase.filter.Filter;
-074import 
org.apache.hadoop.hbase.filter.FilterAllFilter;
-075import 
org.apache.hadoop.hbase.filter.FilterList;
-076import 
org.apache.hadoop.hbase.filter.PageFilter;
-077import 
org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-078import 
org.apache.hadoop.hbase.filter.WhileMatchFilter;
-079import 
org.apache.hadoop.hbase.io.compress.Compression;
-080import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-081import 
org.apache.hadoop.hbase.io.hfile.RandomDistribution;
-082import 
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-083import 
org.apache.hadoop.hbase.regionserver.BloomType;
-084import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-085import 
org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
-086import 
org.apache.hadoop.hbase.trace.SpanReceiverHost;
-087import 
org.apache.hadoop.hbase.trace.TraceUtil;
-088import 
org.apache.hadoop.hbase.util.ByteArrayHashKey;
-089import 
org.apache.hadoop.hbase.util.Bytes;
-090import 
org.apache.hadoop.hbase.util.Hash;
-091import 
org.apache.hadoop.hbase.util.MurmurHash;
-092import 
org.apache.hadoop.hbase.util.Pair;
-093import 
org.apache.hadoop.hbase.util.YammerHistogramUtils;
-094import 
org.apache.hadoop.io.LongWritable;
-095import org.apache.hadoop.io.Text;
-096import org.apache.hadoop.mapreduce.Job;
-097import 
org.apache.hadoop.mapreduce.Mapper;
-098import 
org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-099import 
org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
-100import 
org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
-101import org.apache.hadoop.util.Tool;
-102import 
org.apache.hadoop.util.ToolRunner;
-103import 
org.apache.htrace.core.ProbabilitySampler;
-104import org.apache.htrace.core.Sampler;
-105import 
org.apache.htrace.core.TraceScope;
-106import 
org.apache.yetus.audience.InterfaceAudience;
-107import org.slf4j.Logger;
-108import org.slf4j.LoggerFactory;
-109import 
org.apache.hbase.thirdparty.com.google.common.base.MoreObjects;
-110import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-111
-112/**
-113 * Script used evaluating HBase 
performance and scalability.  Runs a HBase
-114 * client that steps through one of a set 
of hardcoded tests or 'experiments'
-115 * (e.g. a random reads test, a random 
writes test, etc.). Pass on the
-116 * command-line which test to run and how 
many clients are participating in
-117 * this experiment. Run {@code 
PerformanceEvaluation --help} to obtain usage.
-118 *
-119 * 

This class sets up and runs the evaluation programs described in -120 * Section 7, Performance Evaluation, of the Bigtable; -122 * paper, pages 8-10. -123 * -124 *

By default, runs as a mapreduce job where each mapper runs a single test -125 * client. Can also run as a non-mapreduce, multithreaded application by -126 * specifying {@code --nomapred}. Each client does about 1GB of data, unless -127 * specified otherwise. -128 */ -129@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) -130public class PerformanceEvaluation extends Configured implements Tool { -131 static final String RANDOM_SEEK_SCAN = "randomSeekScan"; -132 static final String RANDOM_READ = "randomRead"; -133 private static final Logger LOG = LoggerFactory.getLogger(PerformanceEvaluation.class.getName()); -134 private static final ObjectMapper MAPPER = new ObjectMapper(); -135 static { -136 MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true); -137 } -138 -139 public static final String TABLE_NAME = "TestTable"; -140 public static final byte[] FAMILY_NAME = Bytes.toBytes("info"); -141 public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0); -142 public static final byte [] QUALIFIER_NAME = COLUMN_ZERO; +072import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +073import org.apache.hadoop.hbase.filter.BinaryComparator; +074import org.ap


[13/51] [partial] hbase-site git commit: Published site at 87f5b5f3411d96c31b4cb61b9a57ced22be91d1f.

2018-05-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/de18d468/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 8c4060d..b1e54cc 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -835,6 +835,8 @@
  
 addStateAndBarrier(RegionInfo,
 RegionState.State, long...) - Method in class 
org.apache.hadoop.hbase.replication.regionserver.TestSerialReplicationChecker
  
+addStateAndBarrier(RegionInfo,
 RegionState.State, long...) - Method in class 
org.apache.hadoop.hbase.util.TestHBaseFsckCleanReplicationBarriers
+ 
 addStoreFile()
 - Method in class org.apache.hadoop.hbase.regionserver.TestHStore
  
 addThread(MultithreadedTestUtil.TestThread)
 - Method in class org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext
@@ -3134,10 +3136,6 @@
  
 call()
 - Method in class org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALPerformanceEvaluation.Worker
  
-call()
 - Method in class org.apache.hadoop.hbase.replication.TestReplicationEndpoint.InterClusterReplicationEndpointForTest.DummyReplicator
- 
-call()
 - Method in class org.apache.hadoop.hbase.replication.TestReplicationEndpoint.InterClusterReplicationEndpointForTest.FailingDummyReplicator
- 
 call()
 - Method in class org.apache.hadoop.hbase.TestCompatibilitySingletonFactory.TestCompatibilitySingletonFactoryCallable
  
 call()
 - Method in class org.apache.hadoop.hbase.util.TestIdLock.IdLockTestThread
@@ -3204,6 +3202,8 @@
  
 canonicalizeMetricName(String)
 - Method in class org.apache.hadoop.hbase.test.MetricsAssertHelperImpl
  
+canReplicateToSameCluster()
 - Method in class org.apache.hadoop.hbase.replication.regionserver.TestSerialReplicationEndpoint.TestEndpoint
+ 
 canRollback
 - Variable in class org.apache.hadoop.hbase.client.TestGetProcedureResult.DummyProcedure
  
 CAPACITY
 - Static variable in class org.apache.hadoop.hbase.util.TestBoundedPriorityBlockingQueue
@@ -5743,6 +5743,8 @@
  
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestSerialReplicationChecker
  
+CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestSerialReplicationEndpoint
+ 
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestWALEntrySinkFilter
  
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.replication.regionserver.TestWALEntryStream
@@ -6251,6 +6253,8 @@
  
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.util.TestFSVisitor
  
+CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.util.TestHBaseFsckCleanReplicationBarriers
+ 
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.util.TestHBaseFsckComparator
  
 CLASS_RULE
 - Static variable in class org.apache.hadoop.hbase.util.TestHBaseFsckEncryption
@@ -6449,6 +6453,8 @@
  
 cleanRegionRootDir(FileSystem,
 Path) - Method in class org.apache.hadoop.hbase.wal.WALPerformanceEvaluation
  
+cleanReplicationBarrier(Configuration,
 TableName) - Static method in class 
org.apache.hadoop.hbase.util.hbck.HbckTestingUtil
+ 
 cleanup()
 - Method in class org.apache.hadoop.hbase.backup.TestBackupHFileCleaner
  
 cleanup()
 - Method in class org.apache.hadoop.hbase.client.TestAsyncSnapshotAdminApi
@@ -6643,8 +6649,6 @@
  
 clearCompactionQueues(RpcController,
 AdminProtos.ClearCompactionQueuesRequest) - Method in class 
org.apache.hadoop.hbase.master.MockRegionServer
  
-clearCompactionQueues(RpcController,
 AdminProtos.ClearCompactionQueuesRequest) - Method in class 
org.apache.hadoop.hbase.replication.regionserver.TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingBlockingInterface
- 
 clearMapping()
 - Static method in class org.apache.hadoop.hbase.http.TestHttpServer.MyGroupsProvider
  
 clearOutput(Path)
 - Static method in class org.apache.hadoop.hbase.coprocessor.TestSecureExport
@@ -6657,8 +6661,6 @@
  
 clearRegionBlockCache(HRegionServer)
 - Method in class org.apache.hadoop.hbase.regionserver.TestClearRegionBlockCache
  
-clearRegionBlockCache(RpcController,
 AdminProtos.ClearRegionBlockCacheRequest) - Method in class 
org.apache.hadoop.hbase.replication.regionserver.TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingBlockingInterface
- 
 clearSnapshots()
 - Method in class org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierForTest
  
 clearTable()
 - Method in class org.apache.hadoop.hbase.coprocessor.TestPassCustomCellViaRegionObserver
@@ -6864,8 +6866,6 @@
  
 closeRegion(HBaseTestingUtility,
 HRegionServer, HRegionInfo) - Static method in class 
org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster
  
-closeRegion(RpcController,
 AdminProtos.CloseRegionRequest) - Method in class 
org.apache.hadoop.hbase.replication.regionserver.TestReplicator.FailureInjectingReplicationEndpointForTest.FailureInjectingBlockingInterface
- 
 closeRe

[13/51] [partial] hbase-site git commit: Published site at 2912c953551bedbfbf30c32c156ed7bb187d54c3.

2018-04-12 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d220bc5e/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
index 8302e28..c370eb9 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HdfsEntry.html
@@ -2113,3031 +2113,3033 @@
 2105            errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
 2106                tableName + " unable to delete dangling table state " + tableState);
 2107          }
-2108        } else {
-2109          errors.reportError(ERROR_CODE.ORPHAN_TABLE_STATE,
-2110              tableName + " has dangling table state " + tableState);
-2111        }
-2112      }
-2113    }
-2114    // check that all tables have states
-2115    for (TableName tableName : tablesInfo.keySet()) {
-2116      if (isTableIncluded(tableName) && !tableStates.containsKey(tableName)) {
-2117        if (fixMeta) {
-2118          MetaTableAccessor.updateTableState(connection, tableName, TableState.State.ENABLED);
-2119          TableState newState = MetaTableAccessor.getTableState(connection, tableName);
-2120          if (newState == null) {
-2121            errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2122                "Unable to change state for table " + tableName + " in meta ");
-2123          }
-2124        } else {
-2125          errors.reportError(ERROR_CODE.NO_TABLE_STATE,
-2126              tableName + " has no state in meta ");
-2127        }
-2128      }
-2129    }
-2130  }
-2131
-2132  private void preCheckPermission() throws IOException, AccessDeniedException {
-2133    if (shouldIgnorePreCheckPermission()) {
-2134      return;
-2135    }
-2136
-2137    Path hbaseDir = FSUtils.getRootDir(getConf());
-2138    FileSystem fs = hbaseDir.getFileSystem(getConf());
-2139    UserProvider userProvider = UserProvider.instantiate(getConf());
-2140    UserGroupInformation ugi = userProvider.getCurrent().getUGI();
-2141    FileStatus[] files = fs.listStatus(hbaseDir);
-2142    for (FileStatus file : files) {
-2143      try {
-2144        FSUtils.checkAccess(ugi, file, FsAction.WRITE);
-2145      } catch (AccessDeniedException ace) {
-2146        LOG.warn("Got AccessDeniedException when preCheckPermission ", ace);
-2147        errors.reportError(ERROR_CODE.WRONG_USAGE, "Current user " + ugi.getUserName()
-2148          + " does not have write perms to " + file.getPath()
-2149          + ". Please rerun hbck as hdfs user " + file.getOwner());
-2150        throw ace;
-2151      }
-2152    }
-2153  }
-2154
-2155  /**
-2156   * Deletes region from meta table
-2157   */
-2158  private void deleteMetaRegion(HbckInfo hi) throws IOException {
-2159    deleteMetaRegion(hi.metaEntry.getRegionName());
-2160  }
-2161
-2162  /**
-2163   * Deletes region from meta table
-2164   */
-2165  private void deleteMetaRegion(byte[] metaKey) throws IOException {
-2166    Delete d = new Delete(metaKey);
-2167    meta.delete(d);
-2168    LOG.info("Deleted " + Bytes.toString(metaKey) + " from META" );
-2169  }
-2170
-2171  /**
-2172   * Reset the split parent region info in meta table
-2173   */
-2174  private void resetSplitParent(HbckInfo hi) throws IOException {
-2175    RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
-2176    Delete d = new Delete(hi.metaEntry.getRegionName());
-2177    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
-2178    d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
-2179    mutations.add(d);
-2180
-2181    RegionInfo hri = RegionInfoBuilder.newBuilder(hi.metaEntry)
-2182        .setOffline(false)
-2183        .setSplit(false)
-2184        .build();
-2185    Put p = MetaTableAccessor.makePutFromRegionInfo(hri, EnvironmentEdgeManager.currentTime());
-2186    mutations.add(p);
-2187
-2188    meta.mutateRow(mutations);
-2189    LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
-2190  }
-2191
-2192  /**
-2193   * This backwards-compatibility wrapper for permanently offlining a region
-2194   * that should not be alive.  If the region server does not support the
-2195   * "offline" method, it will use the closest unassign method instead.  This
-2196   * will basically work until one attempts to disable or delete the affected
-2197   * table.  The problem has to do with in-memory only master state, so
-2198   * restarting the HMaster or failing over to another should fix this.
-2199   */
-2200  private void offline(byte[] regionName) throws IOException {
-2201    String regionString = Bytes.toStringBinary(regionName);
-2202    if (!rsSupportsOffline) {
-2203      LOG.warn("Using unassign region " + region
[13/51] [partial] hbase-site git commit: Published site at 2a2258656b2fcd92b967131b6c1f037363553bc4.

2018-03-27 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e0fb1fde/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.SimpleReporter.Builder.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.SimpleReporter.Builder.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.SimpleReporter.Builder.html
index 50caf18..61bf913 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.SimpleReporter.Builder.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.SimpleReporter.Builder.html
@@ -45,773 +45,774 @@
 037import java.util.TimeZone;
 038import java.util.concurrent.TimeUnit;
 039
-040import org.apache.commons.cli.CommandLine;
-041import org.apache.commons.cli.CommandLineParser;
-042import org.apache.commons.cli.HelpFormatter;
-043import org.apache.commons.cli.Option;
-044import org.apache.commons.cli.OptionGroup;
-045import org.apache.commons.cli.Options;
-046import org.apache.commons.cli.ParseException;
-047import org.apache.commons.cli.PosixParser;
-048import org.apache.commons.lang3.StringUtils;
-049import org.apache.hadoop.conf.Configuration;
-050import org.apache.hadoop.conf.Configured;
-051import org.apache.hadoop.fs.FileSystem;
-052import org.apache.hadoop.fs.Path;
-053import org.apache.hadoop.hbase.Cell;
-054import org.apache.hadoop.hbase.CellComparator;
-055import org.apache.hadoop.hbase.CellUtil;
-056import org.apache.hadoop.hbase.HBaseConfiguration;
-057import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-058import org.apache.hadoop.hbase.HConstants;
-059import org.apache.hadoop.hbase.HRegionInfo;
-060import org.apache.hadoop.hbase.KeyValue;
-061import org.apache.hadoop.hbase.KeyValueUtil;
-062import org.apache.hadoop.hbase.PrivateCellUtil;
-063import org.apache.hadoop.hbase.TableName;
-064import org.apache.hadoop.hbase.Tag;
-065import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-066import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-067import org.apache.hadoop.hbase.mob.MobUtils;
-068import org.apache.hadoop.hbase.regionserver.HStoreFile;
-069import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
-070import org.apache.hadoop.hbase.util.BloomFilter;
-071import org.apache.hadoop.hbase.util.BloomFilterFactory;
-072import org.apache.hadoop.hbase.util.BloomFilterUtil;
-073import org.apache.hadoop.hbase.util.Bytes;
-074import org.apache.hadoop.hbase.util.FSUtils;
-075import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-076import org.apache.hadoop.util.Tool;
-077import org.apache.hadoop.util.ToolRunner;
-078import org.apache.yetus.audience.InterfaceAudience;
-079import org.apache.yetus.audience.InterfaceStability;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083import com.codahale.metrics.ConsoleReporter;
-084import com.codahale.metrics.Counter;
-085import com.codahale.metrics.Gauge;
-086import com.codahale.metrics.Histogram;
-087import com.codahale.metrics.Meter;
-088import com.codahale.metrics.MetricFilter;
-089import com.codahale.metrics.MetricRegistry;
-090import com.codahale.metrics.ScheduledReporter;
-091import com.codahale.metrics.Snapshot;
-092import com.codahale.metrics.Timer;
-093
-094/**
-095 * Implements pretty-printing functionality for {@link HFile}s.
-096 */
-097@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-098@InterfaceStability.Evolving
-099public class HFilePrettyPrinter extends Configured implements Tool {
-100
-101  private static final Logger LOG = LoggerFactory.getLogger(HFilePrettyPrinter.class);
-102
-103  private Options options = new Options();
-104
-105  private boolean verbose;
-106  private boolean printValue;
-107  private boolean printKey;
-108  private boolean shouldPrintMeta;
-109  private boolean printBlockIndex;
-110  private boolean printBlockHeaders;
-111  private boolean printStats;
-112  private boolean checkRow;
-113  private boolean checkFamily;
-114  private boolean isSeekToRow = false;
-115  private boolean checkMobIntegrity = false;
-116  private Map<String, List<Path>> mobFileLocations;
-117  private static final int FOUND_MOB_FILES_CACHE_CAPACITY = 50;
-118  private static final int MISSING_MOB_FILES_CACHE_CAPACITY = 20;
-119  private PrintStream out = System.out;
-120  private PrintStream err = System.err;
-121
-122  /**
-123   * The row which the user wants to specify and print all the KeyValues for.
-124   */
-125  private byte[] row = null;
-126
-127  private List<Path> files = new ArrayList<>();
-128  private int count;
-129
-130  private static final String FOUR_SPACES = "    ";
-131
-132  public HFilePrettyPrinter() {
-133    super();
-134    init();
-135  }
-136
-137  public HFilePrettyPrinter(Configuration conf) {
-138    super(conf);
-139    init();
-140  }
-141
-142  private void init() {
-143    options.addOption("v", "v
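
init(), truncated above, registers the tool's flags on a commons-cli Options object; the run() method later parses argv and branches on what was set. A sketch of that setup and parse step; only the "-v" flag is taken from the visible source, the second flag and the descriptions are illustrative:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;

public class CliSketch {
  public static void main(String[] args) throws ParseException {
    Options options = new Options();
    // addOption(shortName, longName, hasArg, description)
    options.addOption("v", "verbose", false, "verbose output; emit per-cell details");
    options.addOption("p", "printkv", false, "print individual key/value pairs");

    CommandLineParser parser = new PosixParser(); // the parser this era of the code imports
    CommandLine cmd = parser.parse(options, args);

    boolean verbose = cmd.hasOption("v");
    boolean printKv = cmd.hasOption("p");
    System.out.println("verbose=" + verbose + ", printkv=" + printKv);
  }
}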

[13/51] [partial] hbase-site git commit: Published site at e468b4022f76688851b3e0c34722f01a56bd624f.

2018-03-24 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/Size.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
index 3f1b032..837378b 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/Size.html
@@ -189,17 +189,25 @@
 
 
 Size
+RegionMetrics.getBloomFilterSize() 
+
+
+Size
+RegionMetricsBuilder.RegionMetricsImpl.getBloomFilterSize() 
+
+
+Size
 RegionLoad.getBloomFilterSize()
 Deprecated. 
  
 
 
 Size
-RegionMetrics.getBloomFilterSize() 
+ServerMetricsBuilder.ServerMetricsImpl.getMaxHeapSize() 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getBloomFilterSize() 
+ServerMetrics.getMaxHeapSize() 
 
 
 Size
@@ -209,11 +217,11 @@
 
 
 Size
-ServerMetrics.getMaxHeapSize() 
+RegionMetrics.getMemStoreSize() 
 
 
 Size
-ServerMetricsBuilder.ServerMetricsImpl.getMaxHeapSize() 
+RegionMetricsBuilder.RegionMetricsImpl.getMemStoreSize() 
 
 
 Size
@@ -223,11 +231,15 @@
 
 
 Size
-RegionMetrics.getMemStoreSize() 
+RegionMetrics.getStoreFileIndexSize()
+TODO: why we pass the same value to different counters? 
Currently, the value from
+ getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize()
+ see HRegionServer#createRegionLoad.
+
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getMemStoreSize() 
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileIndexSize() 
 
 
 Size
@@ -237,15 +249,11 @@
 
 
 Size
-RegionMetrics.getStoreFileIndexSize()
-TODO: why we pass the same value to different counters? 
Currently, the value from
- getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize()
- see HRegionServer#createRegionLoad.
-
+RegionMetrics.getStoreFileRootLevelIndexSize() 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileIndexSize() 
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileRootLevelIndexSize() 
 
 
 Size
@@ -255,11 +263,11 @@
 
 
 Size
-RegionMetrics.getStoreFileRootLevelIndexSize() 
+RegionMetrics.getStoreFileSize() 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileRootLevelIndexSize() 
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileSize() 
 
 
 Size
@@ -269,11 +277,11 @@
 
 
 Size
-RegionMetrics.getStoreFileSize() 
+RegionMetrics.getStoreFileUncompressedDataIndexSize() 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileSize() 
+RegionMetricsBuilder.RegionMetricsImpl.getStoreFileUncompressedDataIndexSize() 
 
 
 Size
@@ -283,11 +291,11 @@
 
 
 Size
-RegionMetrics.getStoreFileUncompressedDataIndexSize() 
+RegionMetrics.getUncompressedStoreFileSize() 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getStoreFileUncompressedDataIndexSize() 
+RegionMetricsBuilder.RegionMetricsImpl.getUncompressedStoreFileSize() 
 
 
 Size
@@ -297,11 +305,11 @@
 
 
 Size
-RegionMetrics.getUncompressedStoreFileSize() 
+ServerMetricsBuilder.ServerMetricsImpl.getUsedHeapSize() 
 
 
 Size
-RegionMetricsBuilder.RegionMetricsImpl.getUncompressedStoreFileSize() 
+ServerMetrics.getUsedHeapSize() 
 
 
 Size
@@ -309,14 +317,6 @@
 Deprecated. 
  
 
-
-Size
-ServerMetrics.getUsedHeapSize() 
-
-
-Size
-ServerMetricsBuilder.ServerMetricsImpl.getUsedHeapSize() 
-
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/16541468/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html 
b/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
index 63833f7..72d579d 100644
--- a/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
+++ b/devapidocs/org/apache/hadoop/hbase/class-use/TableDescriptors.html
@@ -122,11 +122,11 @@
 
 
 TableDescriptors
-HMaster.getTableDescriptors() 
+MasterServices.getTableDescriptors() 
 
 
 TableDescriptors
-MasterServices.getTableDescriptors() 
+HMaster.getTableDescriptors() 
 
 
 



[13/51] [partial] hbase-site git commit: Published site at 64061f896fe21512504e3886a400759e88b519da.

2018-03-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f1ebf5b6/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
index 65d4b29..a6c6bcc 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
@@ -113,17 +113,17 @@
 
 
 
+private Batch.Callback
+AsyncRequestFutureImpl.callback 
+
+
 private Batch.Callback
 AsyncProcessTask.callback 
 
-
+
 private Batch.Callback
 AsyncProcessTask.Builder.callback 
 
-
-private Batch.Callback
-AsyncRequestFutureImpl.callback 
-
 
 
 
@@ -148,50 +148,42 @@
 
 
  void
-HTable.batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List actions,
- https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object[] results,
- Batch.Callback callback) 
-
-
- void
 Table.batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List actions,
  https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object[] results,
  Batch.Callback callback)
 Same as Table.batch(List,
 Object[]), but with a callback.
 
 
+
+ void
+HTable.batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List actions,
+ https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object[] results,
+ Batch.Callback callback) 
+
 
 void
-HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
+Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype,
-   Batch.Callback callback) 
+   Batch.Callback callback)
+Creates an instance of the given Service 
subclass for each table
+ region spanning the range from the startKey row to 
endKey row (inclusive), all
+ the invocations to the same region server will be batched into one call.
+
 
 
 void
-Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
+HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype,
-   Batch.Callback callback)
-Creates an instance of the given Service 
subclass for each table
- region spanning the range from the startKey row to 
endKey row (inclusive), all
- the invocations to the same region server will be batched into one call.
-
+   Batch.Callback callback) 
 
 
 void
-HTable.coprocessorService(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class service,
-  byte[] startKey,
-  byte[] endKey,
-  Batch.Call callable,
-  Batch.Callback callback) 
-
-
-void
 Table.coprocessorService(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class service,
   byte[] startKey,
   byte[] endKey,
@@ -203,6 +195,14 @@
  with each Service instance.
 
 
+
+void
+HTable.coprocessorService(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class service,
+  byte[] startKey,
+  byte[] endKey,
+  Batch.Call callable,
+  Batch.Callback callback) 
+
 
 static  void
 HTable.doBatchWithCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List actions,

htt
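The batchCallback(...) rows above describe the callback-per-action variant of Table.batch; a minimal sketch, assuming a table "t1" with family "f" (both illustrative):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchCallbackExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("t1"))) {
      List<Row> actions = new ArrayList<>();
      actions.add(new Get(Bytes.toBytes("row1")));
      actions.add(new Put(Bytes.toBytes("row2"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      Object[] results = new Object[actions.size()];
      // Same as batch(List, Object[]), but the callback fires once per completed action.
      table.batchCallback(actions, results,
          (region, row, result) -> System.out.println("completed " + Bytes.toStringBinary(row)));
    }
  }
}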

[13/51] [partial] hbase-site git commit: Published site at 4cb40e6d846ce1f28ffb40d388c9efb753197813.

2018-03-22 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4dc2a2e8/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
index 17b9d95..d3eae32 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html
@@ -443,13 +443,13 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 TableDescriptor
-Table.getDescriptor()
-Gets the table 
descriptor for this table.
-
+HTable.getDescriptor() 
 
 
 TableDescriptor
-HTable.getDescriptor() 
+Table.getDescriptor()
+Gets the table 
descriptor for this table.
+
 
 
 TableDescriptor
@@ -503,52 +503,52 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
-AsyncHBaseAdmin.getDescriptor(TableName tableName) 
-
-
-https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
 AsyncAdmin.getDescriptor(TableName tableName)
 Method for getting the tableDescriptor
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
 RawAsyncHBaseAdmin.getDescriptor(TableName tableName) 
 
+
+https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture
+AsyncHBaseAdmin.getDescriptor(TableName tableName) 
+
 
 private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
 RawAsyncHBaseAdmin.getTableDescriptors(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest request) 
 
 
-https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-Admin.listTableDescriptors()
+default https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
+AsyncAdmin.listTableDescriptors()
 List all the userspace tables.
 
 
 
 https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
-HBaseAdmin.listTableDescriptors() 
-
-
-default https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
-AsyncAdmin.listTableDescriptors()
+Admin.listTableDescriptors()
 List all the userspace tables.
 
 
-
-https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
-AsyncHBaseAdmin.listTableDescriptors(boolean includeSysTables) 
-
 
+https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
+HBaseAdmin.listTableDescriptors() 
+
+
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFutureList>
 AsyncAdmin.listTableDescriptors(boolean includeSysTables)
 List all the tables.
 
 
-
+
 https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true";
 title="class or interface in java.util.concurrent">CompletableFuture

[13/51] [partial] hbase-site git commit: Published site at 8ab7b20f48951d77945181024f5e15842bc253c4.

2018-03-21 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6eb695c8/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.html
index 9e3d786..d217fed 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.html
@@ -96,13 +96,13 @@
 088  @Override
 089  public void process() {
 090    try {
-091      String name = regionInfo.getRegionNameAsString();
-092      LOG.debug("Processing close of " + name);
+091      String name = regionInfo.getEncodedName();
+092      LOG.trace("Processing close of {}", name);
 093      String encodedRegionName = regionInfo.getEncodedName();
 094      // Check that this region is being served here
 095      HRegion region = (HRegion)rsServices.getRegion(encodedRegionName);
 096      if (region == null) {
-097        LOG.warn("Received CLOSE for region " + name + " but currently not serving - ignoring");
+097        LOG.warn("Received CLOSE for region {} but currently not serving - ignoring", name);
 098        // TODO: do better than a simple warning
 099        return;
 100      }
@@ -112,31 +112,30 @@
 104        if (region.close(abort) == null) {
 105          // This region got closed.  Most likely due to a split.
 106          // The split message will clean up the master state.
-107          LOG.warn("Can't close region: was already closed during close(): " +
-108            name);
-109          return;
-110        }
-111      } catch (IOException ioe) {
-112        // An IOException here indicates that we couldn't successfully flush the
-113        // memstore before closing. So, we need to abort the server and allow
-114        // the master to split our logs in order to recover the data.
-115        server.abort("Unrecoverable exception while closing region " +
-116          regionInfo.getRegionNameAsString() + ", still finishing close", ioe);
-117        throw new RuntimeException(ioe);
-118      }
-119
-120      this.rsServices.removeRegion(region, destination);
-121      rsServices.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.CLOSED,
-122          HConstants.NO_SEQNUM, -1, regionInfo));
-123
-124      // Done!  Region is closed on this RS
-125      LOG.debug("Closed " + region.getRegionInfo().getRegionNameAsString());
-126    } finally {
-127      this.rsServices.getRegionsInTransitionInRS().
-128        remove(this.regionInfo.getEncodedNameAsBytes(), Boolean.FALSE);
-129    }
-130  }
-131}
+107          LOG.warn("Can't close region {}, was already closed during close()", name);
+108          return;
+109        }
+110      } catch (IOException ioe) {
+111        // An IOException here indicates that we couldn't successfully flush the
+112        // memstore before closing. So, we need to abort the server and allow
+113        // the master to split our logs in order to recover the data.
+114        server.abort("Unrecoverable exception while closing region " +
+115          regionInfo.getRegionNameAsString() + ", still finishing close", ioe);
+116        throw new RuntimeException(ioe);
+117      }
+118
+119      this.rsServices.removeRegion(region, destination);
+120      rsServices.reportRegionStateTransition(new RegionStateTransitionContext(TransitionCode.CLOSED,
+121          HConstants.NO_SEQNUM, -1, regionInfo));
+122
+123      // Done!  Region is closed on this RS
+124      LOG.debug("Closed " + region.getRegionInfo().getRegionNameAsString());
+125    } finally {
+126      this.rsServices.getRegionsInTransitionInRS().
+127        remove(this.regionInfo.getEncodedNameAsBytes(), Boolean.FALSE);
+128    }
+129  }
+130}
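The rewrite above swaps string concatenation for slf4j's parameterized logging; the idiom it adopts, in isolation (class name illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingIdiom {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingIdiom.class);

  void example(String name) {
    // Concatenation builds the message string even when the level is disabled:
    //   LOG.trace("Processing close of " + name);
    // The parameterized form defers formatting until the level is known to be enabled:
    LOG.trace("Processing close of {}", name);
    LOG.warn("Received CLOSE for region {} but currently not serving - ignoring", name);
  }
}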
 
 
 



[13/51] [partial] hbase-site git commit: Published site at 00095a2ef9442e3fd86c04876c9d91f2f8b23ad8.

2018-03-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bd675fa3/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
index d984f0e..c1f4d85 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.html
@@ -115,753 +115,756 @@
 107      final RegionInfo regionToSplit, final byte[] splitRow) throws IOException {
 108    super(env, regionToSplit);
 109    preflightChecks(env, true);
-110    this.bestSplitRow = splitRow;
-111    checkSplittable(env, regionToSplit, bestSplitRow);
-112    final TableName table = regionToSplit.getTable();
-113    final long rid = getDaughterRegionIdTimestamp(regionToSplit);
-114    this.daughter_1_RI = RegionInfoBuilder.newBuilder(table)
-115        .setStartKey(regionToSplit.getStartKey())
-116        .setEndKey(bestSplitRow)
-117        .setSplit(false)
-118        .setRegionId(rid)
-119        .build();
-120    this.daughter_2_RI = RegionInfoBuilder.newBuilder(table)
-121        .setStartKey(bestSplitRow)
-122        .setEndKey(regionToSplit.getEndKey())
-123        .setSplit(false)
-124        .setRegionId(rid)
-125        .build();
-126    TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
-127    if(htd.getRegionSplitPolicyClassName() != null) {
-128      // Since we don't have region reference here, creating the split policy instance without it.
-129      // This can be used to invoke methods which don't require Region reference. This instantiation
-130      // of a class on Master-side though it only makes sense on the RegionServer-side is
-131      // for Phoenix Local Indexing. Refer HBASE-12583 for more information.
-132      Class<? extends RegionSplitPolicy> clazz =
-133          RegionSplitPolicy.getSplitPolicyClass(htd, env.getMasterConfiguration());
-134      this.splitPolicy = ReflectionUtils.newInstance(clazz, env.getMasterConfiguration());
-135    }
-136  }
-137
-138  /**
-139   * Check whether the region is splittable
-140   * @param env MasterProcedureEnv
-141   * @param regionToSplit parent Region to be split
-142   * @param splitRow if splitRow is not specified, will first try to get bestSplitRow from RS
-143   * @throws IOException
-144   */
-145  private void checkSplittable(final MasterProcedureEnv env,
-146      final RegionInfo regionToSplit, final byte[] splitRow) throws IOException {
-147    // Ask the remote RS if this region is splittable.
-148    // If we get an IOE, report it along w/ the failure so can see why we are not splittable at this time.
-149    if(regionToSplit.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) {
-150      throw new IllegalArgumentException ("Can't invoke split on non-default regions directly");
-151    }
-152    RegionStateNode node =
-153        env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion());
-154    IOException splittableCheckIOE = null;
-155    boolean splittable = false;
-156    if (node != null) {
-157      try {
-158        if (bestSplitRow == null || bestSplitRow.length == 0) {
-159          LOG.info("splitKey isn't explicitly specified, " + " will try to find a best split key from RS");
-160        }
-161        // Always set bestSplitRow request as true here,
-162        // need to call Region#checkSplit to check it splittable or not
-163        GetRegionInfoResponse response =
-164            Util.getRegionInfoResponse(env, node.getRegionLocation(), node.getRegionInfo(), true);
-165        if(bestSplitRow == null || bestSplitRow.length == 0) {
-166          bestSplitRow = response.hasBestSplitRow() ? response.getBestSplitRow().toByteArray() : null;
-167        }
-168        splittable = response.hasSplittable() && response.getSplittable();
-169
-170        if (LOG.isDebugEnabled()) {
-171          LOG.debug("Splittable=" + splittable + " " + node.toShortString());
-172        }
-173      } catch (IOException e) {
-174        splittableCheckIOE = e;
-175      }
-176    }
-177
-178    if (!splittable) {
-179      IOException e = new IOException(regionToSplit.getShortNameToLog() + " NOT splittable");
-180      if (splittableCheckIOE != null) e.initCause(splittableCheckIOE);
-181      throw e;
-182    }
-183
-184    if(bestSplitRow == null || bestSplitRow.length == 0) {
-185      throw new DoNotRetryIOException("Region not splittable because bestSplitPoint = null, "
-186          + "maybe table is too small for auto split. For force split, try specifying split row");
-187    }
-188
-189    if (Bytes.equals(regionToSplit.getStartKey(), bestSplitRow
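checkSplittable(...) above refuses to split when no bestSplitRow can be found and suggests supplying an explicit split row; the corresponding client-side calls, as a sketch (table and row values are assumptions):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("t1");
      admin.split(tn);                            // let the RS pick bestSplitRow
      admin.split(tn, Bytes.toBytes("row5000"));  // force a split at an explicit row
    }
  }
}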

[13/51] [partial] hbase-site git commit: Published site at 22f4def942f8a3367d0ca6598317e9b9a7d0cfcd.

2018-03-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8b1eaec1/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionObserverOperationWithoutResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionObserverOperationWithoutResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionObserverOperationWithoutResult.html
index b99f924..2bb6cea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionObserverOperationWithoutResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.RegionObserverOperationWithoutResult.html
@@ -37,1779 +37,1734 @@
 029import java.util.UUID;
 030import java.util.concurrent.ConcurrentHashMap;
 031import java.util.concurrent.ConcurrentMap;
-032import java.util.regex.Matcher;
-033
-034import org.apache.commons.collections4.map.AbstractReferenceMap;
-035import org.apache.commons.collections4.map.ReferenceMap;
-036import org.apache.hadoop.conf.Configuration;
-037import org.apache.hadoop.fs.FileSystem;
-038import org.apache.hadoop.fs.Path;
-039import org.apache.hadoop.hbase.Cell;
-040import org.apache.hadoop.hbase.CompareOperator;
-041import org.apache.hadoop.hbase.Coprocessor;
-042import org.apache.hadoop.hbase.HBaseConfiguration;
-043import org.apache.hadoop.hbase.HConstants;
-044import org.apache.hadoop.hbase.RawCellBuilder;
-045import org.apache.hadoop.hbase.RawCellBuilderFactory;
-046import org.apache.hadoop.hbase.ServerName;
-047import org.apache.hadoop.hbase.SharedConnection;
-048import org.apache.hadoop.hbase.client.Append;
-049import org.apache.hadoop.hbase.client.Connection;
-050import org.apache.hadoop.hbase.client.Delete;
-051import org.apache.hadoop.hbase.client.Durability;
-052import org.apache.hadoop.hbase.client.Get;
-053import org.apache.hadoop.hbase.client.Increment;
-054import org.apache.hadoop.hbase.client.Mutation;
-055import org.apache.hadoop.hbase.client.Put;
-056import org.apache.hadoop.hbase.client.RegionInfo;
-057import org.apache.hadoop.hbase.client.Result;
-058import org.apache.hadoop.hbase.client.Scan;
-059import org.apache.hadoop.hbase.client.TableDescriptor;
-060import org.apache.hadoop.hbase.coprocessor.BaseEnvironment;
-061import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
-062import org.apache.hadoop.hbase.coprocessor.CoprocessorException;
-063import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-064import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
-065import org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity;
-066import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
-067import org.apache.hadoop.hbase.coprocessor.EndpointObserver;
-068import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
-069import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor;
-070import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-071import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
-072import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-073import org.apache.hadoop.hbase.coprocessor.RegionObserver;
-074import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-075import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-076import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-077import org.apache.hadoop.hbase.io.Reference;
-078import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-079import org.apache.hadoop.hbase.metrics.MetricRegistry;
-080import org.apache.hadoop.hbase.regionserver.Region.Operation;
-081import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
-082import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
-083import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
-084import org.apache.hadoop.hbase.security.User;
-085import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-086import org.apache.hadoop.hbase.util.Bytes;
-087import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
-088import org.apache.hadoop.hbase.util.Pair;
-089import org.apache.hadoop.hbase.wal.WALEdit;
-090import org.apache.hadoop.hbase.wal.WALKey;
-091import org.apache.yetus.audience.InterfaceAudience;
-092import org.slf4j.Logger;
-093import org.slf4j.LoggerFactory;
-094
-095/**
-096 * Implements the coprocessor environment and runtime support for coprocessors
-097 * loaded within a {@link Region}.
-098 */
-099@InterfaceAudience.Private
-100public class RegionCoprocessorHost
-101    extends CoprocessorHost {
-102
-103  private static final Logger LOG = LoggerFactory.getLogger(RegionCoprocessorHost.class);
-104  // The shared data map
-105  private static final ReferenceMap
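RegionCoprocessorHost, per the class javadoc in the listing above, hosts and drives region observers; a minimal sketch of a coprocessor it would load (class name illustrative, signature per the 2.x observer API):

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.wal.WALEdit;

public class NoopPutObserver implements RegionCoprocessor, RegionObserver {
  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this); // the host asks the coprocessor for its observer facet
  }

  @Override
  public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put,
      WALEdit edit, Durability durability) throws IOException {
    // Invoked by RegionCoprocessorHost before every Put on the hosting region.
  }
}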

[13/51] [partial] hbase-site git commit: Published site at 31da4d0bce69b3a47066a5df675756087ce4dc60.

2018-03-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a754d895/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index 21ec0e6..b022e4a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class HRegion
+public class HRegion
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements HeapSize, PropagatingConfigurationObserver, Region
 Regions store data for a certain region of a table.  It 
stores all columns
@@ -597,36 +597,40 @@ implements splitRequest 
 
 
+private StoreHotnessProtector
+storeHotnessProtector 
+
+
 protected https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map
 stores 
 
-
+
 static int
 SYSTEM_CACHE_FLUSH_INTERVAL
 Default interval for System tables memstore flush
 
 
-
+
 private https://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true";
 title="class or interface in java.lang">Long
 timeoutForWriteLock 
 
-
+
 (package private) long
 timestampSlop 
 
-
+
 private https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html?is-external=true";
 title="class or interface in 
java.util.concurrent.locks">ReentrantReadWriteLock
 updatesLock 
 
-
+
 private WAL
 wal 
 
-
+
 (package private) https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">LongAdder
 writeRequestsCount 
 
-
+
 (package private) HRegion.WriteState
 writestate 
 
@@ -2294,7 +2298,7 @@ implements 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -2303,7 +2307,7 @@ implements 
 
 LOAD_CFS_ON_DEMAND_CONFIG_KEY
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String LOAD_CFS_ON_DEMAND_CONFIG_KEY
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String LOAD_CFS_ON_DEMAND_CONFIG_KEY
 
 See Also:
 Constant
 Field Values
@@ -2316,7 +2320,7 @@ implements 
 
 HBASE_MAX_CELL_SIZE_KEY
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String HBASE_MAX_CELL_SIZE_KEY
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String HBASE_MAX_CELL_SIZE_KEY
 
 See Also:
 Constant
 Field Values
@@ -2329,7 +2333,7 @@ implements 
 
 DEFAULT_MAX_CELL_SIZE
-public static final int DEFAULT_MAX_CELL_SIZE
+public static final int DEFAULT_MAX_CELL_SIZE
 
 See Also:
 Constant
 Field Values
@@ -2342,7 +2346,7 @@ implements 
 
 HBASE_REGIONSERVER_MINIBATCH_SIZE
-public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String HBASE_REGIONSERVER_MINIBATCH_SIZE
+public static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String HBASE_REGIONSERVER_MINIBATCH_SIZE
 
 See Also:
 Constant
 Field Values
@@ -2355,7 +2359,7 @@ implements 
 
 DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE
-public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE
+public static final int DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE
 
 See Also:
 Constant
 Field Values
@@ -2368,7 +2372,7 @@ implements 
 
 DEFAULT_DURABILITY
-private static final Durability DEFAULT_DURABILITY
+private static final Durability DEFAULT_DURABILITY
 This is the global default value for durability. All 
tables/mutations not
  defining a durability or using USE_DEFAULT will default to this value.
 
@@ -2379,7 +2383,7 @@ implements 
 
 closed
-final https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicBoolean closed
+final https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicBoolean closed
 
 
 
@@ -2388,7 +2392,7 @@ implements 
 
 closing
-final https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicBoolean.html?is-external=true";
 title="class or interface in java.util.concurrent.atomic">AtomicBoolean closing
+final https
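The constants re-anchored in this page (HBASE_MAX_CELL_SIZE_KEY and friends) are plain configuration keys; a hedged sketch of setting one programmatically (HRegion is audience-private, so referencing its constant from user code is itself an assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.HRegion;

public class MaxCellSizeExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong(HRegion.HBASE_MAX_CELL_SIZE_KEY, 10L * 1024 * 1024); // 10 MB cell cap
    System.out.println(conf.get(HRegion.HBASE_MAX_CELL_SIZE_KEY));
  }
}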

[13/51] [partial] hbase-site git commit: Published site at 6b77786dfc46d25ac5bb5f1c8a4a9eb47b52a604.

2018-03-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/81cde4ce/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
index dc4b7bd..41b2105 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
@@ -495,15 +495,15 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static Filter
-ColumnPrefixFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+SingleColumnValueExcludeFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ColumnCountGetFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ValueFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-RowFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+FamilyFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
@@ -513,69 +513,69 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static Filter
-FirstKeyOnlyFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnPrefixFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-TimestampsFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+PageFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ValueFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+RowFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-KeyOnlyFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnRangeFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-FamilyFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnCountGetFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-QualifierFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+MultipleColumnPrefixFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ColumnRangeFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnPaginationFilter.createFilterFromArguments(https://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true
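The createFilterFromArguments(...) factories reshuffled above are the entry points the filter-language parser dispatches to; a sketch of exercising them through ParseFilter (the filter string is illustrative):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.ParseFilter;

public class FilterLanguageExample {
  public static void main(String[] args) throws Exception {
    // ParseFilter reflects on each filter's static createFilterFromArguments(ArrayList)
    // to build the filter tree from the string form used by the shell and Thrift.
    Filter f = new ParseFilter().parseFilterString(
        "ColumnPrefixFilter('col') AND PageFilter(10)");
    Scan scan = new Scan().setFilter(f);
    System.out.println(scan);
  }
}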

[13/51] [partial] hbase-site git commit: Published site at 1384da71375427b522b09f06862bb5d629cef52f.

2018-03-06 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d347bde8/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
index a6c6bcc..65d4b29 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/coprocessor/class-use/Batch.Callback.html
@@ -113,17 +113,17 @@
 
 
 
-private Batch.Callback
-AsyncRequestFutureImpl.callback 
-
-
 private Batch.Callback
 AsyncProcessTask.callback 
 
-
+
 private Batch.Callback
 AsyncProcessTask.Builder.callback 
 
+
+private Batch.Callback
+AsyncRequestFutureImpl.callback 
+
 
 
 
@@ -148,42 +148,50 @@
 
 
  void
-Table.batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List actions,
+HTable.batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List actions,
  https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object[] results,
- Batch.Callback callback)
-Same as Table.batch(List,
 Object[]), but with a callback.
-
+ Batch.Callback callback) 
 
 
  void
-HTable.batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List actions,
+Table.batchCallback(https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List actions,
  https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object[] results,
- Batch.Callback callback) 
+ Batch.Callback callback)
+Same as Table.batch(List,
 Object[]), but with a callback.
+
 
 
 void
-Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
+HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype,
-   Batch.Callback callback)
-Creates an instance of the given Service 
subclass for each table
- region spanning the range from the startKey row to 
endKey row (inclusive), all
- the invocations to the same region server will be batched into one call.
-
+   Batch.Callback callback) 
 
 
 void
-HTable.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
+Table.batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor,
com.google.protobuf.Message request,
byte[] startKey,
byte[] endKey,
R responsePrototype,
-   Batch.Callback callback) 
+   Batch.Callback callback)
+Creates an instance of the given Service 
subclass for each table
+ region spanning the range from the startKey row to 
endKey row (inclusive), all
+ the invocations to the same region server will be batched into one call.
+
 
 
 void
+HTable.coprocessorService(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class service,
+  byte[] startKey,
+  byte[] endKey,
+  Batch.Call callable,
+  Batch.Callback callback) 
+
+
+void
 Table.coprocessorService(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class service,
   byte[] startKey,
   byte[] endKey,
@@ -195,14 +203,6 @@
  with each Service instance.
 
 
-
-void
-HTable.coprocessorService(https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class service,
-  byte[] startKey,
-  byte[] endKey,
-  Batch.Call callable,
-  Batch.Callback callback) 
-
 
 static  void
 HTable.doBatchWithCallback(https://docs.oracle.com/javase/8/docs/api/java/ut

[13/51] [partial] hbase-site git commit: Published site at b7b86839250bf9b295ebc1948826f43a88736d6c.

2018-03-05 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6b94a2f2/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.ReplicationBarrierResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.ReplicationBarrierResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.ReplicationBarrierResult.html
new file mode 100644
index 000..8fffb89
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/MetaTableAccessor.ReplicationBarrierResult.html
@@ -0,0 +1,2162 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software Foundation (ASF) under one
+003 * or more contributor license agreements.  See the NOTICE file
+004 * distributed with this work for additional information
+005 * regarding copyright ownership.  The ASF licenses this file
+006 * to you under the Apache License, Version 2.0 (the
+007 * "License"); you may not use this file except in compliance
+008 * with the License.  You may obtain a copy of the License at
+009 *
+010 * http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or agreed to in writing, software
+013 * distributed under the License is distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+015 * See the License for the specific language governing permissions and
+016 * limitations under the License.
+017 */
+018package org.apache.hadoop.hbase;
+019
+020import edu.umd.cs.findbugs.annotations.NonNull;
+021import edu.umd.cs.findbugs.annotations.Nullable;
+022import java.io.Closeable;
+023import java.io.IOException;
+024import java.io.InterruptedIOException;
+025import java.util.ArrayList;
+026import java.util.Arrays;
+027import java.util.Collections;
+028import java.util.LinkedHashMap;
+029import java.util.List;
+030import java.util.Map;
+031import java.util.NavigableMap;
+032import java.util.Set;
+033import java.util.SortedMap;
+034import java.util.TreeMap;
+035import java.util.regex.Matcher;
+036import java.util.regex.Pattern;
+037import java.util.stream.Collectors;
+038import java.util.stream.Stream;
+039import org.apache.hadoop.conf.Configuration;
+040import org.apache.hadoop.hbase.Cell.Type;
+041import org.apache.hadoop.hbase.client.Connection;
+042import org.apache.hadoop.hbase.client.ConnectionFactory;
+043import org.apache.hadoop.hbase.client.Consistency;
+044import org.apache.hadoop.hbase.client.Delete;
+045import org.apache.hadoop.hbase.client.Get;
+046import org.apache.hadoop.hbase.client.Mutation;
+047import org.apache.hadoop.hbase.client.Put;
+048import org.apache.hadoop.hbase.client.RegionInfo;
+049import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+050import org.apache.hadoop.hbase.client.RegionLocator;
+051import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+052import org.apache.hadoop.hbase.client.RegionServerCallable;
+053import org.apache.hadoop.hbase.client.Result;
+054import org.apache.hadoop.hbase.client.ResultScanner;
+055import org.apache.hadoop.hbase.client.Scan;
+056import org.apache.hadoop.hbase.client.Table;
+057import org.apache.hadoop.hbase.client.TableState;
+058import org.apache.hadoop.hbase.exceptions.DeserializationException;
+059import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+060import org.apache.hadoop.hbase.master.RegionState;
+061import org.apache.hadoop.hbase.master.RegionState.State;
+062import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+063import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+064import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
+065import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+066import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
+067import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
+068import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
+069import org.apache.hadoop.hbase.util.Bytes;
+070import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+071import org.apache.hadoop.hbase.util.ExceptionUtil;
+072import org.apache.hadoop.hbase.util.Pair;
+073import org.apache.hadoop.hbase.util.PairOfSameType;
+074import org.apache.yetus.audience.InterfaceAudience;
+075import org.slf4j.Logger;
+076import org.slf4j.LoggerFactory;
+077
+078import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+079
+080/**
+081 * <p>
+082 * Read/write operations on region and assignment information store in hbase:meta.
+083 * </p>
+084 *
+085 * Some of the methods of this class take ZooKeeperWatcher as a param. The only reason for this is
+086 * because when used on client-side (like from HBaseAdmin), we want to use short-living connection
+087 * (
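As the javadoc above says, this class fronts reads and writes of hbase:meta; for a hedged illustration, the same catalog table can be scanned with the ordinary client API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaScanExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table meta = conn.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(new Scan())) {
      for (Result r : scanner) {
        System.out.println(Bytes.toStringBinary(r.getRow())); // one row per region
      }
    }
  }
}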


[13/51] [partial] hbase-site git commit: Published site at 1d25b60831b8cc8f7ad5fd366f1867de5c20d2f3.

2018-03-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/eb05e3e3/apidocs/org/apache/hadoop/hbase/client/Operation.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/Operation.html 
b/apidocs/org/apache/hadoop/hbase/client/Operation.html
index af6c1b9..9ef2891 100644
--- a/apidocs/org/apache/hadoop/hbase/client/Operation.html
+++ b/apidocs/org/apache/hadoop/hbase/client/Operation.html
@@ -97,7 +97,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
 
 
 org.apache.hadoop.hbase.client.Operation
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Public
 public abstract class Operation
-extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
+extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 Superclass for any type that maps to a potentially 
application-level query.
  (e.g. Put, Get, Delete, Scan, Next, etc.)
  Contains methods for exposure to logging and debugging tools.
@@ -155,47 +155,47 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Method and Description
 
 
-abstract http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object>
+abstract https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object>
 getFingerprint()
 Produces a Map containing a fingerprint which identifies 
the type and 
  the static schema components of a query (i.e.
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
+https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 toJSON()
 Produces a JSON object sufficient for description of a query
  in a debugging or logging context.
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
+https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 toJSON(int maxCols)
 Produces a JSON object for fingerprint and details exposure 
in a
  parseable format.
 
 
 
-http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object>
+https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object>
 toMap()
 Produces a Map containing a full summary of a query.
 
 
 
-abstract http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object>
+abstract https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object>
 toMap(int maxCols)
 Produces a Map containing a summary of the details of a 
query 
  beyond the scope of
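The getFingerprint()/toJSON()/toMap() rows above are Operation's debugging surface; a minimal sketch of what they yield for a concrete query (row and column names illustrative):

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class OperationDebugExample {
  public static void main(String[] args) throws Exception {
    Get get = new Get(Bytes.toBytes("row1"));
    get.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"));
    System.out.println(get.toJSON());          // full JSON description
    System.out.println(get.toJSON(1));         // details capped at one column
    System.out.println(get.getFingerprint());  // static schema components only
    System.out.println(get.toMap());           // Map form of the full summary
  }
}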

[13/51] [partial] hbase-site git commit: Published site at .

2018-02-20 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ec8bf761/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index 93f650f..d7aa8b1 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -546,1472 +546,1464 @@
 538    return this.conf;
 539  }
 540
-541  /**
-542   * @return true if the master is running, throws an exception otherwise
-543   * @throws org.apache.hadoop.hbase.MasterNotRunningException - if the master is not running
-544   * @deprecated this has been deprecated without a replacement
-545   */
-546  @Deprecated
-547  @Override
-548  public boolean isMasterRunning()
-549      throws MasterNotRunningException, ZooKeeperConnectionException {
-550    // When getting the master connection, we check it's running,
-551    // so if there is no exception, it means we've been able to get a
-552    // connection on a running master
-553    MasterKeepAliveConnection m = getKeepAliveMasterService();
-554    m.close();
-555    return true;
-556  }
-557
-558  @Override
-559  public HRegionLocation getRegionLocation(final TableName tableName,
-560      final byte [] row, boolean reload)
-561      throws IOException {
-562    return reload? relocateRegion(tableName, row): locateRegion(tableName, row);
-563  }
-564
-565
-566  @Override
-567  public boolean isTableEnabled(TableName tableName) throws IOException {
-568    return getTableState(tableName).inStates(TableState.State.ENABLED);
-569  }
-570
-571  @Override
-572  public boolean isTableDisabled(TableName tableName) throws IOException {
-573    return getTableState(tableName).inStates(TableState.State.DISABLED);
-574  }
-575
-576  @Override
-577  public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys)
-578      throws IOException {
-579    if (this.closed) {
-580      throw new IOException(toString() + " closed");
-581    }
-582    try {
-583      if (!isTableEnabled(tableName)) {
-584        LOG.debug("Table " + tableName + " not enabled");
-585        return false;
-586      }
-587      List<Pair<RegionInfo, ServerName>> locations =
-588          MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true);
-589
-590      int notDeployed = 0;
-591      int regionCount = 0;
-592      for (Pair<RegionInfo, ServerName> pair : locations) {
-593        RegionInfo info = pair.getFirst();
-594        if (pair.getSecond() == null) {
-595          if (LOG.isDebugEnabled()) {
-596            LOG.debug("Table " + tableName + " has not deployed region " + pair.getFirst()
-597                .getEncodedName());
-598          }
-599          notDeployed++;
-600        } else if (splitKeys != null
-601            && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
-602          for (byte[] splitKey : splitKeys) {
-603            // Just check if the splitkey is available
-604            if (Bytes.equals(info.getStartKey(), splitKey)) {
-605              regionCount++;
-606              break;
-607            }
-608          }
-609        } else {
-610          // Always empty start row should be counted
-611          regionCount++;
-612        }
-613      }
-614      if (notDeployed > 0) {
-615        if (LOG.isDebugEnabled()) {
-616          LOG.debug("Table " + tableName + " has " + notDeployed + " regions");
-617        }
-618        return false;
-619      } else if (splitKeys != null && regionCount != splitKeys.length + 1) {
-620        if (LOG.isDebugEnabled()) {
-621          LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1)
-622              + " regions, but only " + regionCount + " available");
-623        }
-624        return false;
-625      } else {
-626        if (LOG.isDebugEnabled()) {
-627          LOG.debug("Table " + tableName + " should be available");
-628        }
-629        return true;
-630      }
-631    } catch (TableNotFoundException tnfe) {
-632      LOG.warn("Table " + tableName + " not enabled, it is not exists");
-633      return false;
-634    }
-635  }
-636
-637  @Override
-638  public HRegionLocation locateRegion(final byte[] regionName) throws IOException {
-639    RegionLocations locations = locateRegion(RegionInfo.getTable(regionName),
-640      RegionInfo.getStartKey(regionName), false, true);
-641    return locations == null ? null : locations.getRegionLocation();
+541  private void checkClosed() throws DoNotRetryIOException {
+542    if (this.closed) {
+543      throw new DoNotRetryIOException(toString() + " closed");
+544    }
+545  }
+546
+547  /**
+548   * @return true if the master is runnin
[13/51] [partial] hbase-site git commit: Published site at .

2018-02-18 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/991224b9/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
index 802b925..a3e80ab 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResult.Result.html
@@ -73,229 +73,229 @@
 065import java.util.concurrent.TimeoutException;
 066import java.util.concurrent.atomic.AtomicBoolean;
 067import java.util.concurrent.atomic.AtomicInteger;
-068import java.util.concurrent.atomic.AtomicLong;
-069import java.util.concurrent.atomic.LongAdder;
-070import java.util.concurrent.locks.Lock;
-071import java.util.concurrent.locks.ReadWriteLock;
-072import java.util.concurrent.locks.ReentrantReadWriteLock;
-073import java.util.function.Function;
-074import org.apache.hadoop.conf.Configuration;
-075import org.apache.hadoop.fs.FileStatus;
-076import org.apache.hadoop.fs.FileSystem;
-077import org.apache.hadoop.fs.LocatedFileStatus;
-078import org.apache.hadoop.fs.Path;
-079import org.apache.hadoop.hbase.Cell;
-080import org.apache.hadoop.hbase.CellBuilderType;
-081import org.apache.hadoop.hbase.CellComparator;
-082import org.apache.hadoop.hbase.CellComparatorImpl;
-083import org.apache.hadoop.hbase.CellScanner;
-084import org.apache.hadoop.hbase.CellUtil;
-085import org.apache.hadoop.hbase.CompareOperator;
-086import org.apache.hadoop.hbase.CompoundConfiguration;
-087import org.apache.hadoop.hbase.DoNotRetryIOException;
-088import org.apache.hadoop.hbase.DroppedSnapshotException;
-089import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
-090import org.apache.hadoop.hbase.HConstants;
-091import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
-092import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-093import org.apache.hadoop.hbase.HRegionInfo;
-094import org.apache.hadoop.hbase.KeyValue;
-095import org.apache.hadoop.hbase.KeyValueUtil;
-096import org.apache.hadoop.hbase.NamespaceDescriptor;
-097import org.apache.hadoop.hbase.NotServingRegionException;
-098import org.apache.hadoop.hbase.PrivateCellUtil;
-099import org.apache.hadoop.hbase.RegionTooBusyException;
-100import org.apache.hadoop.hbase.TableName;
-101import org.apache.hadoop.hbase.Tag;
-102import org.apache.hadoop.hbase.TagUtil;
-103import org.apache.hadoop.hbase.UnknownScannerException;
-104import org.apache.hadoop.hbase.client.Append;
-105import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-106import org.apache.hadoop.hbase.client.CompactionState;
-107import org.apache.hadoop.hbase.client.Delete;
-108import org.apache.hadoop.hbase.client.Durability;
-109import org.apache.hadoop.hbase.client.Get;
-110import org.apache.hadoop.hbase.client.Increment;
-111import org.apache.hadoop.hbase.client.IsolationLevel;
-112import org.apache.hadoop.hbase.client.Mutation;
-113import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
-114import org.apache.hadoop.hbase.client.Put;
-115import org.apache.hadoop.hbase.client.RegionInfo;
-116import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-117import org.apache.hadoop.hbase.client.Result;
-118import org.apache.hadoop.hbase.client.RowMutations;
-119import org.apache.hadoop.hbase.client.Scan;
-120import org.apache.hadoop.hbase.client.TableDescriptor;
-121import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-122import org.apache.hadoop.hbase.conf.ConfigurationManager;
-123import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-124import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
-125import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-126import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-127import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-128import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
-129import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-130import org.apache.hadoop.hbase.filter.FilterWrapper;
-131import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
-132import org.apache.hadoop.hbase.io.HFileLink;
-133import org.apache.hadoop.hbase.io.HeapSize;
-134import org.apache.hadoop.hbase.io.TimeRange;
-135import org.apache.hadoop.hbase.io.hfile.HFile;
-136import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
-137import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-138import org.apache.hadoop.hbase.ipc.RpcCall;
-139import org.apache.hadoop.hbase.ipc.RpcServer;
-140import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-141import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-142import org.apache.hadoop.hba

[13/51] [partial] hbase-site git commit: Published site at .

2018-02-17 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/193b4259/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index bd13b53..802b925 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -900,7600 +900,7598 @@
 892if 
(this.getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) {
 893  status.setStatus("Writing region 
info on filesystem");
 894  fs.checkRegionInfoOnFilesystem();
-895} else {
-896  if (LOG.isDebugEnabled()) {
-897LOG.debug("Skipping creation of 
.regioninfo file for " + this.getRegionInfo());
-898  }
-899}
-900
-901// Initialize all the HStores
-902status.setStatus("Initializing all 
the Stores");
-903long maxSeqId = 
initializeStores(reporter, status);
-904this.mvcc.advanceTo(maxSeqId);
-905if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
-906  Collection stores = 
this.stores.values();
-907  try {
-908// update the stores that we are 
replaying
-909
stores.forEach(HStore::startReplayingFromWAL);
-910// Recover any edits if 
available.
-911maxSeqId = Math.max(maxSeqId,
-912  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
-913// Make sure mvcc is up to max.
-914this.mvcc.advanceTo(maxSeqId);
-915  } finally {
-916// update the stores that we are 
done replaying
-917
stores.forEach(HStore::stopReplayingFromWAL);
-918  }
-919}
-920this.lastReplayedOpenRegionSeqId = 
maxSeqId;
+895}
+896
+897// Initialize all the HStores
+898status.setStatus("Initializing all 
the Stores");
+899long maxSeqId = 
initializeStores(reporter, status);
+900this.mvcc.advanceTo(maxSeqId);
+901if 
(ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) {
+902  Collection stores = 
this.stores.values();
+903  try {
+904// update the stores that we are 
replaying
+905
stores.forEach(HStore::startReplayingFromWAL);
+906// Recover any edits if 
available.
+907maxSeqId = Math.max(maxSeqId,
+908  
replayRecoveredEditsIfAny(this.fs.getRegionDir(), maxSeqIdInStores, reporter, 
status));
+909// Make sure mvcc is up to max.
+910this.mvcc.advanceTo(maxSeqId);
+911  } finally {
+912// update the stores that we are 
done replaying
+913
stores.forEach(HStore::stopReplayingFromWAL);
+914  }
+915}
+916this.lastReplayedOpenRegionSeqId = 
maxSeqId;
+917
+918
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
+919this.writestate.flushRequested = 
false;
+920this.writestate.compacting.set(0);
 921
-922
this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this));
-923this.writestate.flushRequested = 
false;
-924this.writestate.compacting.set(0);
-925
-926if (this.writestate.writesEnabled) 
{
-927  // Remove temporary data left over 
from old regions
-928  status.setStatus("Cleaning up 
temporary data from old regions");
-929  fs.cleanupTempDir();
-930}
-931
-932if (this.writestate.writesEnabled) 
{
-933  status.setStatus("Cleaning up 
detritus from prior splits");
-934  // Get rid of any splits or merges 
that were lost in-progress.  Clean out
-935  // these directories here on open.  
We may be opening a region that was
-936  // being split but we crashed in 
the middle of it all.
-937  fs.cleanupAnySplitDetritus();
-938  fs.cleanupMergesDir();
-939}
-940
-941// Initialize split policy
-942this.splitPolicy = 
RegionSplitPolicy.create(this, conf);
-943
-944// Initialize flush policy
-945this.flushPolicy = 
FlushPolicyFactory.create(this, conf);
-946
-947long lastFlushTime = 
EnvironmentEdgeManager.currentTime();
-948for (HStore store: stores.values()) 
{
-949  
this.lastStoreFlushTimeMap.put(store, lastFlushTime);
-950}
-951
-952// Use maximum of log sequenceid or 
that which was found in stores
-953// (particularly if no recovered 
edits, seqid will be -1).
-954long nextSeqid = maxSeqId;
-955if (this.writestate.writesEnabled) 
{
-956  nextSeqid = 
WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(),
-957  this.fs.getRegionDir(), 
nextSeqid, 1);
-958} else {
-959  nextSeqid++;
-960}
-961
-962LOG.info("Onlined " + 
this.getRegionInfo().getShortNameToLog() +
-963  "; next sequenceid=" + 
nextSeqid);
+922if (this.writestate.writesEnabled) 
{
+
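
The truncated HRegion hunk above centers on one pattern: put every store into replay mode, replay its recovered edits while tracking the highest sequence id seen anywhere, then advance the region's MVCC clock to that maximum. A self-contained sketch of that pattern (the Store interface here is a hypothetical stand-in for HStore, not the HBase API):

import java.util.List;

class RegionOpenSketch {
  // Stand-in for HStore; names are illustrative only.
  interface Store {
    void startReplayingFromWAL();
    long replayRecoveredEdits();   // returns the highest seqid it replayed
    void stopReplayingFromWAL();
  }

  static long openStores(List<Store> stores, long maxSeqIdFromStoreFiles) {
    long maxSeqId = maxSeqIdFromStoreFiles;
    try {
      for (Store s : stores) {
        s.startReplayingFromWAL();          // mark stores as replaying
      }
      for (Store s : stores) {
        // recovered edits may carry a newer seqid than the store files
        maxSeqId = Math.max(maxSeqId, s.replayRecoveredEdits());
      }
    } finally {
      for (Store s : stores) {
        s.stopReplayingFromWAL();           // always leave replay mode
      }
    }
    return maxSeqId;  // caller advances the MVCC clock to this value
  }
}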

[13/51] [partial] hbase-site git commit: Published site at .

2018-02-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
index 5f7ce59..7244ce2 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/PriorityFunction.html
@@ -114,15 +114,15 @@
 
 
 private PriorityFunction
-RpcExecutor.priority 
+SimpleRpcScheduler.priority 
 
 
 private PriorityFunction
-RpcExecutor.CallPriorityComparator.priority 
+RpcExecutor.priority 
 
 
 private PriorityFunction
-SimpleRpcScheduler.priority 
+RpcExecutor.CallPriorityComparator.priority 
 
 
 
@@ -319,7 +319,7 @@
 
 
 RpcScheduler
-FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority)
 Deprecated. 
 
@@ -333,16 +333,18 @@
 
 
 RpcScheduler
-RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority)
 Deprecated. 
 
 
 
 RpcScheduler
-FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority,
-  Abortable server) 
+  Abortable server)
+Constructs a RpcScheduler.
+
 
 
 RpcScheduler
@@ -352,11 +354,9 @@
 
 
 RpcScheduler
-RpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
+FifoRpcSchedulerFactory.create(org.apache.hadoop.conf.Configuration conf,
   PriorityFunction priority,
-  Abortable server)
-Constructs a RpcScheduler.
-
+  Abortable server) 
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
index 6d59fb7..4a25f5c 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcCallback.html
@@ -123,13 +123,13 @@
 
 
 void
-RpcCallContext.setCallBack(RpcCallback callback)
-Sets a callback which has to be executed at the end of this 
RPC call.
-
+ServerCall.setCallBack(RpcCallback callback) 
 
 
 void
-ServerCall.setCallBack(RpcCallback callback) 
+RpcCallContext.setCallBack(RpcCallback callback)
+Sets a callback which has to be executed at the end of this 
RPC call.
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
index baa4e5e..fab4d7a 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcControllerFactory.html
@@ -131,24 +131,32 @@
 
 
 
-protected RpcControllerFactory
-RegionAdminServiceCallable.rpcControllerFactory 
-
-
 private RpcControllerFactory
 ConnectionImplementation.rpcControllerFactory 
 
+
+protected RpcControllerFactory
+ClientScanner.rpcControllerFactory 
+
 
+protected RpcControllerFactory
+RegionAdminServiceCallable.rpcControllerFactory 
+
+
 (package private) RpcControllerFactory
 AsyncConnectionImpl.rpcControllerFactory 
 
-
+
 private RpcControllerFactory
 HTable.rpcControllerFactory 
 
+
+private RpcControllerFactory
+HBaseAdmin.rpcControllerFactory 
+
 
 private RpcControllerFactory
-RpcRetryingCallerWithReadReplicas.rpcControllerFactory 
+SecureBulkLoadClient.rpcControllerFactory 
 
 
 protected RpcControllerFactory
@@ -156,15 +164,7 @@
 
 
 private RpcControllerFactory
-HBaseAdmin.rpcControllerFactory 
-
-
-private RpcControllerFactory
-SecureBulkLoadClient.rpcControllerFactory 
-
-
-protected RpcControllerFactory
-ClientScanner.rpcControllerFactory 
+RpcRetryingCallerWithReadReplicas.rpcControllerFactory 
 
 
 (package private) RpcControllerFactory
@@ -181,11 +181,11 @@
 
 
 RpcControllerFactory
-ClusterConnection.getRpcControllerFactory() 
+ConnectionImplementation.getRpcControllerFactory() 
 
 
 RpcControllerFactory
-ConnectionImplementation.getRpcControllerFactory() 
+ClusterConnection.getRpcControllerFactory() 
 
 
 private RpcControllerFactory

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/94208cfe/devapidocs/org/apache/hadoop/hbase/ipc/class-use/RpcExecutor.Handler.html

[13/51] [partial] hbase-site git commit: Published site at .

2018-02-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0cd17dc5/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html 
b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
index 463f4fa..65795ae 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/class-use/Filter.html
@@ -488,15 +488,15 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static Filter
-SingleColumnValueExcludeFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnPrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ValueFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnCountGetFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-FamilyFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+RowFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
@@ -506,63 +506,63 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 
 
 static Filter
-ColumnPrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+FirstKeyOnlyFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-PageFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+TimestampsFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-RowFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ValueFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ColumnRangeFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+KeyOnlyFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ColumnCountGetFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+FamilyFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-MultipleColumnPrefixFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+QualifierFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
 
 
 static Filter
-ColumnPaginationFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in 
java.util">ArrayList filterArguments) 
+ColumnRangeFilter.createFilterFromArguments(http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or 

[13/51] [partial] hbase-site git commit: Published site at .

2018-02-14 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
index 570fb68..b8ce496 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockCacheKey.html
@@ -168,39 +168,27 @@
 
 
 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
-  Cacheable buf) 
-
-
-void
 BlockCache.cacheBlock(BlockCacheKey cacheKey,
   Cacheable buf)
 Add block to cache (defaults to not in-memory).
 
 
-
+
 void
 LruBlockCache.cacheBlock(BlockCacheKey cacheKey,
   Cacheable buf)
 Cache the block with the specified name and buffer.
 
 
-
-void
-MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey,
-  Cacheable buf) 
-
 
 void
-CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
-  Cacheable buf,
-  boolean inMemory) 
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+  Cacheable buf) 
 
 
 void
-InclusiveCombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
-  Cacheable buf,
-  boolean inMemory) 
+MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+  Cacheable buf) 
 
 
 void
@@ -220,6 +208,18 @@
 
 
 void
+CombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+  Cacheable buf,
+  boolean inMemory) 
+
+
+void
+InclusiveCombinedBlockCache.cacheBlock(BlockCacheKey cacheKey,
+  Cacheable buf,
+  boolean inMemory) 
+
+
+void
 MemcachedBlockCache.cacheBlock(BlockCacheKey cacheKey,
   Cacheable buf,
   boolean inMemory) 
@@ -232,21 +232,21 @@
 
 
 boolean
-CombinedBlockCache.evictBlock(BlockCacheKey cacheKey) 
+BlockCache.evictBlock(BlockCacheKey cacheKey)
+Evict block from cache.
+
 
 
 boolean
-InclusiveCombinedBlockCache.evictBlock(BlockCacheKey cacheKey) 
+LruBlockCache.evictBlock(BlockCacheKey cacheKey) 
 
 
 boolean
-BlockCache.evictBlock(BlockCacheKey cacheKey)
-Evict block from cache.
-
+CombinedBlockCache.evictBlock(BlockCacheKey cacheKey) 
 
 
 boolean
-LruBlockCache.evictBlock(BlockCacheKey cacheKey) 
+InclusiveCombinedBlockCache.evictBlock(BlockCacheKey cacheKey) 
 
 
 boolean
@@ -254,35 +254,35 @@
 
 
 Cacheable
-CombinedBlockCache.getBlock(BlockCacheKey cacheKey,
+BlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics) 
+boolean updateCacheMetrics)
+Fetch block from cache.
+
 
 
 Cacheable
-InclusiveCombinedBlockCache.getBlock(BlockCacheKey cacheKey,
+LruBlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics) 
+boolean updateCacheMetrics)
+Get the buffer of the block with the specified name.
+
 
 
 Cacheable
-BlockCache.getBlock(BlockCacheKey cacheKey,
+CombinedBlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics)
-Fetch block from cache.
-
+boolean updateCacheMetrics) 
 
 
 Cacheable
-LruBlockCache.getBlock(BlockCacheKey cacheKey,
+InclusiveCombinedBlockCache.getBlock(BlockCacheKey cacheKey,
 boolean caching,
 boolean repeat,
-boolean updateCacheMetrics)
-Get the buffer of the block with the specified name.
-
+boolean updateCacheMetrics) 
 
 
 Cacheable
@@ -308,11 +308,6 @@
 CombinedBlockCache.getRefCount(BlockCacheKey cacheKey) 
 
 
-void
-CombinedBlockCache.returnBlock(BlockCacheKey cacheKey,
-   Cacheable block) 
-
-
 default void
 BlockCache.returnBlock(BlockCacheKey cacheKey,
Cacheable block)
@@ -320,6 +315,11 @@
  is over.
 
 
+
+void
+CombinedBlockCache.returnBlock(BlockCacheKey cacheKey,
+   Cacheable block) 
+
 
 
 
@@ -497,14 +497,14 @@
 
 
 void
-BucketCache.BucketEntryGroup.add(http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true";
 title="class or interface in java.util">Map.Entry block) 
-
-
-void
 CachedEntryQueue.add(http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true";
 title="class or interface in java.util">Map.Entry entry)
 Attempt to add the specified entry to this queue.
 
 
+
+void
+BucketCache.BucketEntryGroup.add(http://docs.oracle.com/javase/8/docs/api/java/util/Map.Entry.html?is-external=true";
 title="class or interface in java.util">Map.Entry block) 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/828486ae/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/BlockType.html
--
diff --git 
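
The class-use rows above pair BlockCache's cacheBlock/getBlock/evictBlock contract with its implementations (LruBlockCache, CombinedBlockCache, MemcachedBlockCache, and so on). A simplified model of the CombinedBlockCache idea, not the HBase implementation: in-memory blocks land in an L1 tier, everything else in L2, and reads consult the tiers in order.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class CombinedCacheSketch<K, V> {
  private final Map<K, V> l1 = new ConcurrentHashMap<>(); // e.g. on-heap LRU tier
  private final Map<K, V> l2 = new ConcurrentHashMap<>(); // e.g. bucket-cache tier

  void cacheBlock(K key, V block, boolean inMemory) {
    (inMemory ? l1 : l2).put(key, block);
  }

  V getBlock(K key) {
    V block = l1.get(key);              // consult L1 first
    return block != null ? block : l2.get(key);
  }

  boolean evictBlock(K key) {
    // evict from both tiers; true if either tier held the block
    boolean inL1 = l1.remove(key) != null;
    boolean inL2 = l2.remove(key) != null;
    return inL1 || inL2;
  }
}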

[13/51] [partial] hbase-site git commit: Published site at .

2018-02-13 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionObserver.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionObserver.html 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionObserver.html
index 0275527..486d927 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionObserver.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/coprocessor/class-use/RegionObserver.html
@@ -263,19 +263,19 @@
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
-WriteHeavyIncrementObserver.getRegionObserver() 
+ScanModifyingObserver.getRegionObserver() 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
-ScanModifyingObserver.getRegionObserver() 
+WriteHeavyIncrementObserver.getRegionObserver() 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
-ValueRewritingObserver.getRegionObserver() 
+ZooKeeperScanPolicyObserver.getRegionObserver() 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional
-ZooKeeperScanPolicyObserver.getRegionObserver() 
+ValueRewritingObserver.getRegionObserver() 
 
 
 http://docs.oracle.com/javase/8/docs/api/java/util/Optional.html?is-external=true";
 title="class or interface in java.util">Optional

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/errorhandling/class-use/ForeignException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/errorhandling/class-use/ForeignException.html
 
b/devapidocs/org/apache/hadoop/hbase/errorhandling/class-use/ForeignException.html
index afcf2a7..730711e 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/errorhandling/class-use/ForeignException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/errorhandling/class-use/ForeignException.html
@@ -339,14 +339,6 @@
 
 
 void
-ProcedureCoordinatorRpcs.sendAbortToMembers(Procedure procName,
-  ForeignException cause)
-Notify the members that the coordinator has aborted the 
procedure and that it should release
- barrier resources.
-
-
-
-void
 ZKProcedureCoordinator.sendAbortToMembers(Procedure proc,
   ForeignException ee)
 This is the abort message being sent by the coordinator to 
member
@@ -355,6 +347,14 @@
  coordinator.
 
 
+
+void
+ProcedureCoordinatorRpcs.sendAbortToMembers(Procedure procName,
+  ForeignException cause)
+Notify the members that the coordinator has aborted the 
procedure and that it should release
+ barrier resources.
+
+
 
 void
 ProcedureMemberRpcs.sendMemberAborted(Subprocedure sub,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/exceptions/HBaseException.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/exceptions/HBaseException.html 
b/devapidocs/org/apache/hadoop/hbase/exceptions/HBaseException.html
index b7cd61a..b8cf044 100644
--- a/devapidocs/org/apache/hadoop/hbase/exceptions/HBaseException.html
+++ b/devapidocs/org/apache/hadoop/hbase/exceptions/HBaseException.html
@@ -117,7 +117,7 @@
 
 
 Direct Known Subclasses:
-CoordinatedStateException, DeserializationException, ProcedureException, ReplicationException
+DeserializationException, ProcedureException, ReplicationException
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f272b0e8/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
index a07a830..80108a2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/exceptions/class-use/DeserializationException.html
@@ -144,14 +144,16 @@
 
 
 
-static HTableDescriptor
-HTableDescriptor.parseFrom(byte[] bytes)
+static HColumnDescriptor
+HColumnDescriptor.parseFrom(byte[] bytes)
 Deprecated. 
  
 
 
-static ClusterId
-ClusterId.parseFrom(byte[] bytes) 
+static HTableDescriptor
+HTableDescriptor.parseFrom(byte[] bytes)
+Deprecated. 
+ 
 
 
 static HRegionInfo
@@ -163,10 +165,8 @@
 
 
 
-static HColumnDescriptor
-HColumnDescriptor.parseFrom(byte[] bytes)
-Deprecated. 
- 
+static ClusterId
+ClusterId.parseFrom(byte[] bytes) 
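
The rows above reorder the static parseFrom(byte[]) factories (HColumnDescriptor, HTableDescriptor, ClusterId) that throw DeserializationException. A sketch of that convention with a made-up type; real HBase implementations parse a protobuf payload rather than a raw string:

import java.nio.charset.StandardCharsets;

class ParseFromSketch {
  static class DeserializationException extends Exception {
    DeserializationException(String msg) { super(msg); }
  }

  // Hypothetical type, not org.apache.hadoop.hbase.ClusterId.
  static final class FakeClusterId {
    final String id;
    private FakeClusterId(String id) { this.id = id; }

    static FakeClusterId parseFrom(byte[] bytes) throws DeserializationException {
      if (bytes == null || bytes.length == 0) {
        throw new DeserializationException("empty serialized ClusterId");
      }
      return new FakeClusterId(new String(bytes, StandardCharsets.UTF_8));
    }
  }
}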

[13/51] [partial] hbase-site git commit: Published site at .

2018-02-09 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
index abcb738..c7d05d1 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFile.Writer.html
@@ -143,17 +143,17 @@
 
 
 void
-NoOpDataBlockEncoder.saveMetadata(HFile.Writer writer) 
+HFileDataBlockEncoderImpl.saveMetadata(HFile.Writer writer) 
 
 
 void
-HFileDataBlockEncoder.saveMetadata(HFile.Writer writer)
-Save metadata in HFile which will be written to disk
-
+NoOpDataBlockEncoder.saveMetadata(HFile.Writer writer) 
 
 
 void
-HFileDataBlockEncoderImpl.saveMetadata(HFile.Writer writer) 
+HFileDataBlockEncoder.saveMetadata(HFile.Writer writer)
+Save metadata in HFile which will be written to disk
+
 
 
 
@@ -203,18 +203,18 @@
 
 
 
-abstract void
-BloomContext.addLastBloomKey(HFile.Writer writer)
-Adds the last bloom key to the HFile Writer as part of 
StorefileWriter close.
-
+void
+RowColBloomContext.addLastBloomKey(HFile.Writer writer) 
 
 
 void
 RowBloomContext.addLastBloomKey(HFile.Writer writer) 
 
 
-void
-RowColBloomContext.addLastBloomKey(HFile.Writer writer) 
+abstract void
+BloomContext.addLastBloomKey(HFile.Writer writer)
+Adds the last bloom key to the HFile Writer as part of 
StorefileWriter close.
+
 
 
 static BloomFilterWriter

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
index 274bfad..479b9d3 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileBlock.Writer.html
@@ -106,15 +106,15 @@
 
 
 
-private HFileBlock.Writer
-HFileBlockIndex.BlockIndexWriter.blockWriter 
-
-
 protected HFileBlock.Writer
 HFileWriterImpl.blockWriter
 block writer
 
 
+
+private HFileBlock.Writer
+HFileBlockIndex.BlockIndexWriter.blockWriter 
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0ab8335e/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
index b293c97..0c892c8 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/class-use/HFileContext.html
@@ -136,15 +136,15 @@
 
 
 HFileContext
-HFileBlockEncodingContext.getHFileContext() 
+HFileBlockDecodingContext.getHFileContext() 
 
 
 HFileContext
-HFileBlockDecodingContext.getHFileContext() 
+HFileBlockDefaultDecodingContext.getHFileContext() 
 
 
 HFileContext
-HFileBlockDefaultDecodingContext.getHFileContext() 
+HFileBlockEncodingContext.getHFileContext() 
 
 
 HFileContext
@@ -224,24 +224,24 @@
 
 
 private HFileContext
-HFile.WriterFactory.fileContext 
-
-
-private HFileContext
 HFileBlock.fileContext
 Meta data that holds meta information on the 
hfileblock.
 
 
-
+
 private HFileContext
 HFileBlock.Writer.fileContext
 Meta data that holds information about the hfileblock
 
 
-
+
 private HFileContext
 HFileBlock.FSReaderImpl.fileContext 
 
+
+private HFileContext
+HFile.WriterFactory.fileContext 
+
 
 private HFileContext
 HFileReaderImpl.hfileContext 
@@ -277,20 +277,20 @@
 
 
 HFileContext
+HFileWriterImpl.getFileContext() 
+
+
+HFileContext
 HFile.Writer.getFileContext()
 Return the file context for the HFile this writer belongs 
to
 
 
-
+
 HFileContext
 HFile.Reader.getFileContext()
 Return the file context of the HFile this reader belongs 
to
 
 
-
-HFileContext
-HFileWriterImpl.getFileContext() 
-
 
 HFileContext
 HFileReaderImpl.getFileContext() 
@@ -323,35 +323,35 @@
 
 
 HFileBlockDecodingContext
-NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta) 
+HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext) 
 
 
 HFileBlockDecodingContext
-HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
-create a encoder specific decoding context for 
reading.
-
+NoOpDataBlockEncoder.newDataBlockDecodingContext(HFileContext meta) 
 
 
 HFileBlockDecodingContext
-HFileDataBlockEncoderImpl.newDataBlockDecodingContext(HFileContext fileContext) 
+HFileDataBlockEncoder.newDataBlockDecodingContext(HFileContext fileContext)
+create a encoder specific decoding context for reading.

[13/51] [partial] hbase-site git commit: Published site at .

2018-02-04 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/6674e3ab/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
index 77fb9b5..c4e8c8b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.ObserverOperationWithoutResult.html
@@ -165,520 +165,519 @@
 157E env = 
checkAndLoadInstance(implClass, priority, conf);
 158if (env != null) {
 159  
this.coprocEnvironments.add(env);
-160  LOG.info(
-161  "System coprocessor " + 
className + " was loaded " + "successfully with priority (" + priority + 
").");
-162  ++priority;
-163}
-164  } catch (Throwable t) {
-165// We always abort if system 
coprocessors cannot be loaded
-166abortServer(className, t);
-167  }
-168}
-169  }
-170
-171  /**
-172   * Load a coprocessor implementation 
into the host
-173   * @param path path to implementation 
jar
-174   * @param className the main class 
name
-175   * @param priority chaining priority
-176   * @param conf configuration for 
coprocessor
-177   * @throws java.io.IOException 
Exception
-178   */
-179  public E load(Path path, String 
className, int priority,
-180  Configuration conf) throws 
IOException {
-181String[] includedClassPrefixes = 
null;
-182if 
(conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY) != null){
-183  String prefixes = 
conf.get(HConstants.CP_HTD_ATTR_INCLUSION_KEY);
-184  includedClassPrefixes = 
prefixes.split(";");
-185}
-186return load(path, className, 
priority, conf, includedClassPrefixes);
-187  }
-188
-189  /**
-190   * Load a coprocessor implementation 
into the host
-191   * @param path path to implementation 
jar
-192   * @param className the main class 
name
-193   * @param priority chaining priority
-194   * @param conf configuration for 
coprocessor
-195   * @param includedClassPrefixes class 
name prefixes to include
-196   * @throws java.io.IOException 
Exception
-197   */
-198  public E load(Path path, String 
className, int priority,
-199  Configuration conf, String[] 
includedClassPrefixes) throws IOException {
-200Class implClass;
-201LOG.debug("Loading coprocessor class 
" + className + " with path " +
-202path + " and priority " + 
priority);
-203
-204ClassLoader cl = null;
-205if (path == null) {
-206  try {
-207implClass = 
getClass().getClassLoader().loadClass(className);
-208  } catch (ClassNotFoundException e) 
{
-209throw new IOException("No jar 
path specified for " + className);
-210  }
-211} else {
-212  cl = 
CoprocessorClassLoader.getClassLoader(
-213path, 
getClass().getClassLoader(), pathPrefix, conf);
-214  try {
-215implClass = 
((CoprocessorClassLoader)cl).loadClass(className, includedClassPrefixes);
-216  } catch (ClassNotFoundException e) 
{
-217throw new IOException("Cannot 
load external coprocessor class " + className, e);
-218  }
-219}
-220
-221//load custom code for coprocessor
-222Thread currentThread = 
Thread.currentThread();
-223ClassLoader hostClassLoader = 
currentThread.getContextClassLoader();
-224try{
-225  // switch temporarily to the thread 
classloader for custom CP
-226  
currentThread.setContextClassLoader(cl);
-227  E cpInstance = 
checkAndLoadInstance(implClass, priority, conf);
-228  return cpInstance;
-229} finally {
-230  // restore the fresh (host) 
classloader
-231  
currentThread.setContextClassLoader(hostClassLoader);
-232}
-233  }
-234
-235  @VisibleForTesting
-236  public void load(Class implClass, int priority, Configuration conf)
-237  throws IOException {
-238E env = 
checkAndLoadInstance(implClass, priority, conf);
-239coprocEnvironments.add(env);
-240  }
-241
-242  /**
-243   * @param implClass Implementation 
class
-244   * @param priority priority
-245   * @param conf configuration
-246   * @throws java.io.IOException 
Exception
-247   */
-248  public E 
checkAndLoadInstance(Class implClass, int priority, Configuration 
conf)
-249  throws IOException {
-250// create the instance
-251C impl;
-252try {
-253  impl = 
checkAndGetInstance(implClass);
-254  if (impl == null) {
-255LOG.error("Cannot load 
coprocessor " + implClass.getSimpleName());
-256return null;
-257  }
-258} catch 
(InstantiationException|IllegalAccessException e) {
-259  throw new IOException(e);
-260}
-261/
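
The load() body above carries the interesting technique in this post: the coprocessor class is loaded through its own CoprocessorClassLoader, the thread context classloader is swapped to it for the duration of instantiation, and the host classloader is restored in a finally block. A generic, self-contained sketch of that swap:

import java.util.concurrent.Callable;

public class ContextClassLoaderSwap {
  // Run body with the plugin's classloader as the thread context loader,
  // restoring the host loader afterwards, even if body throws.
  public static <T> T withClassLoader(ClassLoader pluginLoader, Callable<T> body)
      throws Exception {
    Thread current = Thread.currentThread();
    ClassLoader host = current.getContextClassLoader();
    try {
      current.setContextClassLoader(pluginLoader);
      return body.call();
    } finally {
      current.setContextClassLoader(host);
    }
  }
}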

[13/51] [partial] hbase-site git commit: Published site at .

2018-02-02 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1f2eeb22/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
index b7c24d7..eecd2f9 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
@@ -44,792 +44,792 @@
 036import java.util.List;
 037import java.util.Map;
 038import java.util.NavigableSet;
-039import java.util.Objects;
-040import java.util.PriorityQueue;
-041import java.util.Set;
-042import 
java.util.concurrent.ArrayBlockingQueue;
-043import 
java.util.concurrent.BlockingQueue;
-044import 
java.util.concurrent.ConcurrentHashMap;
-045import 
java.util.concurrent.ConcurrentMap;
-046import 
java.util.concurrent.ConcurrentSkipListSet;
-047import java.util.concurrent.Executors;
-048import 
java.util.concurrent.ScheduledExecutorService;
-049import java.util.concurrent.TimeUnit;
-050import 
java.util.concurrent.atomic.AtomicInteger;
-051import 
java.util.concurrent.atomic.AtomicLong;
-052import 
java.util.concurrent.atomic.LongAdder;
-053import java.util.concurrent.locks.Lock;
-054import 
java.util.concurrent.locks.ReentrantLock;
-055import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-056import 
org.apache.hadoop.conf.Configuration;
-057import 
org.apache.hadoop.hbase.HBaseConfiguration;
-058import 
org.apache.hadoop.hbase.io.HeapSize;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-063import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-064import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-066import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-068import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-069import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-071import 
org.apache.hadoop.hbase.nio.ByteBuff;
-072import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import 
org.apache.hadoop.hbase.util.HasThread;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-075import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-076import 
org.apache.hadoop.util.StringUtils;
-077import 
org.apache.yetus.audience.InterfaceAudience;
-078import org.slf4j.Logger;
-079import org.slf4j.LoggerFactory;
-080
-081import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-082import 
org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-083import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-084
-085/**
-086 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
-087 * BucketCache#ramCache and 
BucketCache#backingMap in order to
-088 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
-089 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
-090 * store/read the block data.
-091 *
-092 * Eviction is via a similar algorithm as used in
-093 * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
-094 *
-095 * BucketCache can be used as mainly a block cache (see
-096 * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
-097 * LruBlockCache to decrease CMS GC and heap fragmentation.
-098 *
-099 * It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
-100 * blocks) to enlarge cache space via
-101 * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache#setVictimCache}
-102 */
-103@InterfaceAudience.Private
-104public class BucketCache implements BlockCache, HeapSize {
-105  private static final Logger LOG = LoggerFactory.getLogger(BucketCache.class);
-106
-107  /** Priority buckets config */
-108  static final String SINGLE_FACTOR_CONFIG_NAME = "hbase.bucketcache.single.factor";
-109  static final String MULTI_FACTOR_CONFIG_NAME = "hbase.bucketcache.multi.factor";
-110  static final String MEMORY_FACTOR_CONFIG_NAME = "hbase.bucketcache.memory.factor";
-111  static final String EXTRA_FREE_FACTOR_CONFIG_NAME = "hbase.bucketcache.extrafreefactor";
-112  static final String ACCEPT_FACTOR_CONFIG_NAME = "hbase.bucketcache.acceptfactor";
-113  static final String MIN_FACTOR_CONFIG_NAME = "hbase.bucketcache.minfactor";
-114
-115  /** Priority buckets */
-116  @VisibleForTesting
-117  static final float DEFAULT_SINGLE_FACTOR = 0.25f;
-118  static final float DEFAULT_MULTI_FAC
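
The factor constants in the source above are read as ordinary Configuration floats, so tuning the bucket split looks like the sketch below. The single-access default of 0.25f comes from the source itself; the multi/memory values here are assumptions for illustration only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BucketCacheFactors {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.bucketcache.single.factor", 0.25f); // single-access bucket share
    conf.setFloat("hbase.bucketcache.multi.factor", 0.50f);  // multi-access share (assumed)
    conf.setFloat("hbase.bucketcache.memory.factor", 0.25f); // in-memory share (assumed)
    System.out.println(conf.getFloat("hbase.bucketcache.single.factor", -1f));
  }
}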


[13/51] [partial] hbase-site git commit: Published site at .

2018-01-29 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/cc6597ec/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncTableBatch.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncTableBatch.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncTableBatch.html
index 803c064..3e641ef 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncTableBatch.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncTableBatch.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestAsyncTableBatch
+public class TestAsyncTableBatch
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 
 
@@ -149,42 +149,46 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 Field and Description
 
 
+static HBaseClassTestRule
+CLASS_RULE 
+
+
 private static 
org.apache.hadoop.hbase.client.AsyncConnection
 CONN 
 
-
+
 private static int
 COUNT 
 
-
+
 private static byte[]
 CQ 
 
-
+
 private static byte[]
 CQ1 
 
-
+
 private static byte[]
 FAMILY 
 
-
+
 private static byte[][]
 SPLIT_KEYS 
 
-
+
 private static 
org.apache.hadoop.hbase.TableName
 TABLE_NAME 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true";
 title="class or interface in 
java.util.function">Function>
 tableGetter 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 tableType 
 
-
+
 private static HBaseTestingUtility
 TEST_UTIL 
 
@@ -290,13 +294,22 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 Field Detail
+
+
+
+
+
+CLASS_RULE
+public static final HBaseClassTestRule CLASS_RULE
+
+
 
 
 
 
 
 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL
 
 
 
@@ -305,7 +318,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TABLE_NAME
-private static org.apache.hadoop.hbase.TableName TABLE_NAME
+private static org.apache.hadoop.hbase.TableName TABLE_NAME
 
 
 
@@ -314,7 +327,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 FAMILY
-private static byte[] FAMILY
+private static byte[] FAMILY
 
 
 
@@ -323,7 +336,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CQ
-private static byte[] CQ
+private static byte[] CQ
 
 
 
@@ -332,7 +345,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CQ1
-private static byte[] CQ1
+private static byte[] CQ1
 
 
 
@@ -341,7 +354,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 COUNT
-private static int COUNT
+private static int COUNT
 
 
 
@@ -350,7 +363,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CONN
-private static org.apache.hadoop.hbase.client.AsyncConnection CONN
+private static org.apache.hadoop.hbase.client.AsyncConnection CONN
 
 
 
@@ -359,7 +372,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 SPLIT_KEYS
-private static byte[][] SPLIT_KEYS
+private static byte[][] SPLIT_KEYS
 
 
 
@@ -368,7 +381,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tableType
-public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String tableType
+public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String tableType
 
 
 
@@ -377,7 +390,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 tableGetter
-public http://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true";
 title="class or interface in 
java.util.function">Function>
 tableGetter
+public http://docs.oracle.com/javase/8/docs/api/java/util/function/Function.html?is-external=true";
 title="class or interface in 
java.util.function">Function>
 tableGetter
 
 
 
@@ -394,7 +407,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TestAsyncTableBatch
-public TestAsyncTableBatch()
+public TestAsyncTableBatch()
 
 
 
@@ -411,7 +424,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getRawTable
-private 
static org.apache.hadoop.hbase.client.AsyncTable getRawTable(org.apache.hadoop.hbase.TableName tableName)
+private 
static org.apache.hadoop.hbase.client.AsyncTable getRawTable(org.apache.hadoop.hbase.TableName tableName)
 
 
 
@@ -420,7 +433,7 @@ 
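
The CLASS_RULE field threaded through the diff above follows the standard HBase test-class pattern; a sketch of how such a field is declared (assumes the HBase test artifacts and JUnit on the classpath):

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.junit.ClassRule;

public class TestSomethingSketch {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestSomethingSketch.class); // class-level timeout enforcement
}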

[13/51] [partial] hbase-site git commit: Published site at .

2018-01-28 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aa7ffc92/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
index 2ac2191..a0a02e0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.html
@@ -47,12 +47,12 @@
 039import 
org.apache.hadoop.hbase.backup.BackupType;
 040import 
org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
 041import 
org.apache.hadoop.hbase.backup.util.BackupUtils;
-042import 
org.apache.yetus.audience.InterfaceAudience;
-043import org.slf4j.Logger;
-044import org.slf4j.LoggerFactory;
-045import 
org.apache.hadoop.hbase.client.Admin;
-046import 
org.apache.hadoop.hbase.client.Connection;
-047import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+042import 
org.apache.hadoop.hbase.client.Admin;
+043import 
org.apache.hadoop.hbase.client.Connection;
+044import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+045import 
org.apache.yetus.audience.InterfaceAudience;
+046import org.slf4j.Logger;
+047import org.slf4j.LoggerFactory;
 048
 049/**
 050 * Full table backup implementation
@@ -94,7 +94,7 @@
 086  // Currently we simply set the sub 
copy tasks by counting the table snapshot number, we can
 087  // calculate the real files' size 
for the percentage in the future.
 088  // 
backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
-089  int res = 0;
+089  int res;
 090  String[] args = new String[4];
 091  args[0] = "-snapshot";
 092  args[1] = 
backupInfo.getSnapshotName(table);
@@ -124,121 +124,119 @@
 116  }
 117
 118  /**
-119   * Backup request execution
-120   * @throws IOException
-121   */
-122  @Override
-123  public void execute() throws 
IOException {
-124try (Admin admin = conn.getAdmin()) 
{
-125  // Begin BACKUP
-126  beginBackup(backupManager, 
backupInfo);
-127  String savedStartCode = null;
-128  boolean firstBackup = false;
-129  // do snapshot for full table 
backup
-130
-131  savedStartCode = 
backupManager.readBackupStartCode();
-132  firstBackup = savedStartCode == 
null || Long.parseLong(savedStartCode) == 0L;
-133  if (firstBackup) {
-134// This is our first backup. 
Let's put some marker to system table so that we can hold the logs
-135// while we do the backup.
-136
backupManager.writeBackupStartCode(0L);
-137  }
-138  // We roll log here before we do 
the snapshot. It is possible there is duplicate data
-139  // in the log that is already in 
the snapshot. But if we do it after the snapshot, we
-140  // could have data loss.
-141  // A better approach is to do the 
roll log on each RS in the same global procedure as
-142  // the snapshot.
-143  LOG.info("Execute roll log 
procedure for full backup ...");
-144
-145  Map props = 
new HashMap();
-146  props.put("backupRoot", 
backupInfo.getBackupRootDir());
-147  
admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
-148
LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
-149
-150  newTimestamps = 
backupManager.readRegionServerLastLogRollResult();
-151  if (firstBackup) {
-152// Updates registered log files
-153// We record ALL old WAL files as 
registered, because
-154// this is a first full backup in 
the system and these
-155// files are not needed for next 
incremental backup
-156List logFiles = 
BackupUtils.getWALFilesOlderThan(conf, newTimestamps);
-157
backupManager.recordWALFiles(logFiles);
-158  }
-159
-160  // SNAPSHOT_TABLES:
-161  
backupInfo.setPhase(BackupPhase.SNAPSHOT);
-162  for (TableName tableName : 
tableList) {
-163String snapshotName =
-164"snapshot_" + 
Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
-165+ 
tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
-166
-167snapshotTable(admin, tableName, 
snapshotName);
-168
backupInfo.setSnapshotName(tableName, snapshotName);
-169  }
-170
-171  // SNAPSHOT_COPY:
-172  // do snapshot copy
-173  LOG.debug("snapshot copy for " + 
backupId);
-174  snapshotCopy(backupInfo);
-175  // Updates incremental backup table 
set
-176  
backupManager.addIncrementalBackupTableSet(backupInfo.getTables());
-177
-178  // BACKUP_COMPLETE:
-179  // set overall backup status: 
complete. Here we make sure to complete the backup.
-180  // After this checkpoint, even if 
entering cancel process, will let the ba

[13/51] [partial] hbase-site git commit: Published site at .

2018-01-26 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
index 9044d25..300f9cb 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-summary.html
@@ -1156,226 +1156,230 @@
 
 
 
-TestScannerHeartbeatMessages.SparseFilter
+TestScannerHeartbeatMessages.SparseCellFilter
  
 
 
-TestScannerRetriableFailure
+TestScannerHeartbeatMessages.SparseRowFilter
  
 
 
-TestScannerRetriableFailure.FaultyScannerObserver
+TestScannerRetriableFailure
  
 
 
-TestScannerWithBulkload
+TestScannerRetriableFailure.FaultyScannerObserver
  
 
 
+TestScannerWithBulkload
+ 
+
+
 TestScannerWithCorruptHFile
 
 Tests a scanner on a corrupt hfile.
 
 
-
+
 TestScannerWithCorruptHFile.CorruptHFileCoprocessor
  
 
-
+
 TestScanWithBloomError
 
 Test a multi-column scanner when there is a Bloom filter 
false-positive.
 
 
-
+
 TestSCVFWithMiniCluster
  
 
-
+
 TestSeekOptimizations
 
 Test various seek optimizations for correctness and check 
if they are
  actually saving I/O operations.
 
 
-
+
 TestServerCustomProtocol
  
 
-
+
 TestServerCustomProtocol.PingHandler
  
 
-
+
 TestServerNonceManager
  
 
-
+
 TestServerNonceManager.TestRunnable
  
 
-
+
 TestSettingTimeoutOnBlockingPoint
  
 
-
+
 TestSettingTimeoutOnBlockingPoint.SleepCoprocessor
  
 
-
+
 TestSimpleTimeRangeTracker
  
 
-
+
 TestSplitLogWorker
  
 
-
+
 TestSplitLogWorker.DummyServer
  
 
-
+
 TestSplitTransactionOnCluster
 
 The below tests are testing split region against a running 
cluster
 
 
-
+
 TestSplitTransactionOnCluster.CustomSplitPolicy
  
 
-
+
 TestSplitTransactionOnCluster.FailingSplitMasterObserver
  
 
-
+
 TestSplitTransactionOnCluster.MyMaster
  
 
-
+
 TestSplitTransactionOnCluster.MyMasterRpcServices
  
 
-
+
 TestSplitWalDataLoss
 
 Testcase for 
https://issues.apache.org/jira/browse/HBASE-13811
 
 
-
+
 TestStoreFileInfo
 
 Test HStoreFile
 
 
-
+
 TestStoreFileRefresherChore
  
 
-
+
 TestStoreFileRefresherChore.FailingHRegionFileSystem
  
 
-
+
 TestStoreFileRefresherChore.StaleStorefileRefresherChore
  
 
-
+
 TestStoreFileScannerWithTagCompression
  
 
-
+
 TestStoreScanner
  
 
-
+
 TestStoreScanner.CellGridStoreScanner
 
 A StoreScanner for our CELL_GRID above.
 
 
-
+
 TestStoreScanner.CellWithVersionsNoOptimizeStoreScanner
  
 
-
+
 TestStoreScanner.CellWithVersionsStoreScanner
  
 
-
+
 TestStoreScanner.KeyValueHeapWithCount
  
 
-
+
 TestStripeStoreEngine
  
 
-
+
 TestStripeStoreEngine.TestStoreEngine
  
 
-
+
 TestStripeStoreFileManager
  
 
-
+
 TestSwitchToStreamRead
  
 
-
+
 TestSyncTimeRangeTracker
  
 
-
+
 TestSyncTimeRangeTracker.RandomTestData
  
 
-
+
 TestSyncTimeRangeTracker.TrtUpdateRunnable
  
 
-
+
 TestTags
 
 Class that test tags
 
 
-
+
 TestTags.TestCoprocessorForTags
  
 
-
+
 TestTimestampFilterSeekHint
  
 
-
+
 TestWalAndCompactingMemStoreFlush
 
 This test verifies the correctness of the Per Column Family 
flushing strategy
  when part of the memstores are compacted memstores
 
 
-
+
 TestWALLockup
 
 Testing for lock up of WAL subsystem.
 
 
-
+
 TestWALLockup.DummyServer
  
 
-
+
 TestWALLockup.DummyWALActionsListener
  
 
-
+
 TestWALMonotonicallyIncreasingSeqId
 
 Test for HBASE-17471.
 
 
-
+
 TestWideScanner
  
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/96e5e102/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index ddfe794..86fbb12 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -186,7 +186,8 @@
 
 org.apache.hadoop.hbase.filter.FilterBase
 
-org.apache.hadoop.hbase.regionserver.TestScannerHeartbeatMessages.SparseFilter
+org.apache.hadoop.hbase.regionserver.TestScannerHeartbeatMessages.SparseCellFilter
+org.apache.hadoop.hbase.regionserver.TestScannerHeartbeatMessages.SparseRowFilter
 
 
 
@@ -632,10 +633,10 @@
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.regionserver.TestAtomicOperation.TestStep
+org.ap

[13/51] [partial] hbase-site git commit: Published site at .

2018-01-23 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8118541f/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
index 0e12e96..e0e432d 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/QuotaObserverChore.html
@@ -872,7 +872,7 @@ extends 
 
 pruneOldRegionReports
-void pruneOldRegionReports()
+void pruneOldRegionReports()
 Removes region reports over a certain age.
 
 
@@ -882,7 +882,7 @@ extends 
 
 fetchAllTablesWithQuotasDefined
-QuotaObserverChore.TablesWithQuotas fetchAllTablesWithQuotasDefined()
+QuotaObserverChore.TablesWithQuotas fetchAllTablesWithQuotasDefined()
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Computes the set of all tables that have quotas defined. 
This includes tables with quotas
  explicitly set on them, in addition to tables that exist namespaces which 
have a quota
@@ -899,7 +899,7 @@ extends 
 
 getTableSnapshotStore
-QuotaSnapshotStore getTableSnapshotStore()
+QuotaSnapshotStore getTableSnapshotStore()
 
 
 
@@ -908,7 +908,7 @@ extends 
 
 getNamespaceSnapshotStore
-QuotaSnapshotStoreString> getNamespaceSnapshotStore()
+QuotaSnapshotStoreString> getNamespaceSnapshotStore()
 
 
 
@@ -917,7 +917,7 @@ extends 
 
 getTableQuotaSnapshots
-public http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map getTableQuotaSnapshots()
+public http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map getTableQuotaSnapshots()
 Returns an unmodifiable view over the current SpaceQuotaSnapshot objects
  for each HBase table with a quota defined.
 
@@ -928,7 +928,7 @@ extends 
 
 getNamespaceQuotaSnapshots
-public http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,SpaceQuotaSnapshot> getNamespaceQuotaSnapshots()
+public http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,SpaceQuotaSnapshot> getNamespaceQuotaSnapshots()
 Returns an unmodifiable view over the current SpaceQuotaSnapshot objects
  for each HBase namespace with a quota defined.
 
@@ -939,7 +939,7 @@ extends 
 
 getTableQuotaSnapshot
-SpaceQuotaSnapshot getTableQuotaSnapshot(TableName table)
+SpaceQuotaSnapshot getTableQuotaSnapshot(TableName table)
 Fetches the SpaceQuotaSnapshot for the 
given table.
 
 
@@ -949,7 +949,7 @@ extends 
 
 setTableQuotaSnapshot
-void setTableQuotaSnapshot(TableName table,
+void setTableQuotaSnapshot(TableName table,
SpaceQuotaSnapshot snapshot)
 Stores the quota state for the given table.
 
@@ -960,7 +960,7 @@ extends 
 
 getNamespaceQuotaSnapshot
-SpaceQuotaSnapshot getNamespaceQuotaSnapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String namespace)
+SpaceQuotaSnapshot getNamespaceQuotaSnapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String namespace)
 Fetches the SpaceQuotaSnapshot for the 
given namespace from this chore.
 
 
@@ -970,7 +970,7 @@ extends 
 
 setNamespaceQuotaSnapshot
-void setNamespaceQuotaSnapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String namespace,
+void setNamespaceQuotaSnapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String namespace,
SpaceQuotaSnapshot snapshot)
 Stores the given snapshot for the given 
namespace in this chore.
 
@@ -981,7 +981,7 @@ extends 
 
 getPeriod
-static int getPeriod(org.apache.hadoop.conf.Configuration conf)
+static int getPeriod(org.apache.hadoop.conf.Configuration conf)
 Extracts the period for the chore from the 
configuration.
 
 Parameters:
@@ -
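
The chore's inputs come from quotas users define. A minimal sketch of defining a table space quota that fetchAllTablesWithQuotasDefined() would then pick up, assuming the HBase 2.x client quota API (the table name and limit below are illustrative, not taken from this diff):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

public class SpaceQuotaExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Limit table t1 to 1 GB; the QuotaObserverChore on the master then
      // periodically compares reported region sizes against this limit.
      admin.setQuota(QuotaSettingsFactory.limitTableSpace(
          TableName.valueOf("t1"), 1024L * 1024 * 1024, SpaceViolationPolicy.NO_INSERTS));
    }
  }
}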

[13/51] [partial] hbase-site git commit: Published site at .

2018-01-19 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/14db89d7/testdevapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
index 1636aa6..f79f186 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/MiniHBaseCluster.html
@@ -86,811 +86,824 @@
 078   */
 079  public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers)
 080      throws IOException, InterruptedException {
-081    this(conf, numMasters, numRegionServers, null, null, null);
+081    this(conf, numMasters, numRegionServers, null, null);
 082  }
 083
 084  /**
-085   * @param rsPorts Ports that RegionServer should use; pass ports if you want to test cluster
-086   *   restart where for sure the regionservers come up on same address+port (but
-087   *   just with different startcode); by default mini hbase clusters choose new
-088   *   arbitrary ports on each cluster start.
-089   * @throws IOException
-090   * @throws InterruptedException
-091   */
-092  public MiniHBaseCluster(Configuration conf, int numMasters, int numRegionServers,
-093      List rsPorts,
-094      Class masterClass,
-095      Class regionserverClass)
-096      throws IOException, InterruptedException {
-097    super(conf);
-098    conf.set(HConstants.MASTER_PORT, "0");
-099    if (conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1) {
-100      conf.set(HConstants.MASTER_INFO_PORT, "0");
-101    }
-102
-103    // Hadoop 2
-104    CompatibilityFactory.getInstance(MetricsAssertHelper.class).init();
-105
-106    init(numMasters, numRegionServers, rsPorts, masterClass, regionserverClass);
-107    this.initialClusterStatus = getClusterStatus();
-108  }
-109
-110  public Configuration getConfiguration() {
-111    return this.conf;
-112  }
-113
-114  /**
-115   * Subclass so can get at protected methods (none at moment).  Also, creates
-116   * a FileSystem instance per instantiation.  Adds a shutdown own FileSystem
-117   * on the way out. Shuts down own Filesystem only, not All filesystems as
-118   * the FileSystem system exit hook does.
-119   */
-120  public static class MiniHBaseClusterRegionServer extends HRegionServer {
-121    private Thread shutdownThread = null;
-122    private User user = null;
-123    /**
-124     * List of RegionServers killed so far. ServerName also comprises startCode of a server,
-125     * so any restarted instances of the same server will have different ServerName and will not
-126     * coincide with past dead ones. So there's no need to cleanup this list.
-127     */
-128    static Set killedServers = new HashSet<>();
-129
-130    public MiniHBaseClusterRegionServer(Configuration conf)
-131        throws IOException, InterruptedException {
-132      super(conf);
-133      this.user = User.getCurrent();
-134    }
-135
-136    /*
-137     * @param c
-138     * @param currentfs We return this if we did not make a new one.
-139     * @param uniqueName Same name used to help identify the created fs.
-140     * @return A new fs instance if we are up on DistributeFileSystem.
-141     * @throws IOException
-142     */
-143
-144    @Override
-145    protected void handleReportForDutyResponse(
-146        final RegionServerStartupResponse c) throws IOException {
-147      super.handleReportForDutyResponse(c);
-148      // Run this thread to shutdown our filesystem on way out.
-149      this.shutdownThread = new SingleFileSystemShutdownThread(getFileSystem());
-150    }
-151
-152    @Override
-153    public void run() {
-154      try {
-155        this.user.runAs(new PrivilegedAction(){
-156          public Object run() {
-157            runRegionServer();
-158            return null;
-159          }
-160        });
-161      } catch (Throwable t) {
-162        LOG.error("Exception in run", t);
-163      } finally {
-164        // Run this on the way out.
-165        if (this.shutdownThread != null) {
-166          this.shutdownThread.start();
-167          Threads.shutdown(this.shutdownThread, 3);
-168        }
-169      }
-170    }
-171
-172    private void runRegionServer() {
-173      super.run();
-174    }
-175
-176    @Override
-177    protected void kill() {
-178      killedServers.add(getServerName());
-179      super.kill();
-180    }
-181
-182    @Override
-183    public void abort(final String reason, final Throwable cause) {
-184      this.user.runAs(new PrivilegedAction() {
-185        public Object run() {
-186          abortRegionServer(reason, cause);
-187          return null;
-188        }
-189      });
-190    }
-191
-192    privat
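
For orientation, the constructor above is normally reached through HBaseTestingUtility rather than invoked directly. A minimal sketch under that assumption (standard hbase-testing-util API; a sketch, not part of this commit):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;

public class MiniClusterExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Starts one master and one region server on arbitrary free ports,
    // matching the default behavior described in the constructor javadoc.
    MiniHBaseCluster cluster = util.startMiniCluster();
    try {
      System.out.println("live region servers: "
          + cluster.getLiveRegionServerThreads().size());
    } finally {
      util.shutdownMiniCluster();
    }
  }
}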

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0b638133/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
index 8c1b22f..a20ffa6 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -110,400 +110,467 @@
  
 
 
-AbstractTestResultScannerCursor
+AbstractTestCIOperationTimeout
+
+Base class for testing operation timeout logic for ConnectionImplementation.
+
+
+
+AbstractTestCIRpcTimeout
+
+Base class for testing rpc timeout logic for ConnectionImplementation.
+
+
+
+AbstractTestCITimeout
+
+Base class for testing timeout logic for ConnectionImplementation.
+
+
+
+AbstractTestCITimeout.SleepAndFailFirstTime
+
+This coprocessor sleeps 20 seconds.
+
+
+
+AbstractTestCITimeout.SleepCoprocessor
  
 
 
-AbstractTestScanCursor
+AbstractTestResultScannerCursor
  
 
 
-AbstractTestScanCursor.SparseFilter
+AbstractTestScanCursor
  
 
 
-AbstractTestShell
+AbstractTestScanCursor.SparseFilter
  
 
 
+AbstractTestShell
+ 
+
+
 BufferingScanResultConsumer
 
 A scan result consumer which buffers all the data in memory; you can call the BufferingScanResultConsumer.take()
 method to get the results one by one (a generic sketch of the pattern follows this list).
 
 
-
+
 ColumnCountOnRowFilter
  
 
-
+
 DoNothingAsyncRegistry
 
 Registry that does nothing.
 
 
-
+
 HConnectionTestingUtility
 
 ClusterConnection testing utility.
 
 
-
+
 HConnectionTestingUtility.SleepAtFirstRpcCall
 
 This coprocessor sleeps 2s at the first increment/append rpc call.
 
 
-
+
 SimpleScanResultConsumer
  
 
-
+
 TestAdmin1
 
 Class to test HBaseAdmin.
 
 
-
+
 TestAdmin2
 
 Class to test HBaseAdmin.
 
 
-
+
 TestAllowPartialScanResultCache
  
 
-
+
 TestAlwaysSetScannerId
 
 Testcase to make sure that we always set scanner id in 
ScanResponse.
 
 
-
+
 TestAppendFromClientSide
 
 Runs Append tests that use the HBase clients.
 
 
-
+
 TestAsyncAdminBase
 
 Class to test AsyncAdmin.
 
 
-
+
 TestAsyncAdminBuilder
  
 
-
+
 TestAsyncAdminBuilder.TestMaxRetriesCoprocessor
  
 
-
+
 TestAsyncAdminBuilder.TestOperationTimeoutCoprocessor
  
 
-
+
 TestAsyncAdminBuilder.TestRpcTimeoutCoprocessor
  
 
-
+
 TestAsyncAggregationClient
  
 
-
+
 TestAsyncBufferMutator
  
 
-
+
 TestAsyncClusterAdminApi
  
 
-
+
 TestAsyncClusterAdminApi2
 
 Only used to test stopMaster/stopRegionServer/shutdown 
methods.
 
 
-
+
 TestAsyncDecommissionAdminApi
  
 
-
+
 TestAsyncMetaRegionLocator
  
 
-
+
 TestAsyncNamespaceAdminApi
 
 Class to test asynchronous namespace admin operations.
 
 
-
+
 TestAsyncNonMetaRegionLocator
  
 
-
+
 TestAsyncNonMetaRegionLocatorConcurrenyLimit
  
 
-
+
 TestAsyncNonMetaRegionLocatorConcurrenyLimit.CountingRegionObserver
  
 
-
+
 TestAsyncProcedureAdminApi
 
 Class to test asynchronous procedure admin operations.
 
 
-
+
 TestAsyncProcess
  
 
-
+
 TestAsyncProcess.AsyncProcessForThrowableCheck
  
 
-
+
 TestAsyncProcess.AsyncProcessWithFailure
  
 
-
+
 TestAsyncProcess.CallerWithFailure
  
 
-
+
 TestAsyncProcess.CountingThreadFactory
  
 
-
+
 TestAsyncProcess.MyAsyncProcess
  
 
-
+
 TestAsyncProcess.MyAsyncProcessWithReplicas
  
 
-
+
 TestAsyncProcess.MyAsyncRequestFutureImpl
  
 
-
+
 TestAsyncProcess.MyClientBackoffPolicy
 
 Make the backoff time always different on each call.
 
 
-
+
 TestAsyncProcess.MyConnectionImpl
 
 Returns our async process.
 
 
-
+
 TestAsyncProcess.MyConnectionImpl.TestRegistry
  
 
-
+
 TestAsyncProcess.MyConnectionImpl2
 
 Returns our async process.
 
 
-
+
 TestAsyncProcess.MyThreadPoolExecutor
  
 
-
+
 TestAsyncQuotaAdminApi
  
 
-
+
 TestAsyncRegionAdminApi
 
 Class to test asynchronous region admin operations.
 
 
-
+
 TestAsyncRegionLocatorTimeout
  
 
-
+
 TestAsyncRegionLocatorTimeout.SleepRegionObserver
  
 
-
+
 TestAsyncReplicationAdminApi
 
 Class to test asynchronous replication admin 
operations.
 
 
-
+
 TestAsyncReplicationAdminApiWithClusters
 
 Class to test asynchronous replication admin operations when more than 1 cluster is involved.
 
 
-
+
 TestAsyncResultScannerCursor
  
 
-
+
 TestAsyncSingleRequestRpcRetryingCaller
  
 
-
+
 TestAsyncSnapshotAdminApi
  
 
-
+
 TestAsyncTable
  
 
-
+
 TestAsyncTableAdminApi
 
 Class to test asynchronous table admin operations.
 
 
-
+
 TestAsyncTableBatch
  
 
-
+
 TestAsyncTableBatch.ErrorInjectObserver
  
 
-
+
 TestAsyncTableGetMultiThreaded
 
 Will split the table and move regions randomly when testing.
 
 
-
+
 TestAsyncTableGetMultiThreadedWithBasicCompaction
  
 
-
+
 TestAsyncTableGetMultiThreadedWithEagerCompaction
  
 
-
+
 TestAsyncTableNoncedRetry
  
 
-
+
 TestAsyncTableScan
  
 
-
+
 TestAsyncTableScanAll
  
 
-
+
 TestAsyncTableScanMetrics
  
 
-
+
 TestAsyncTableScanner
  
 
-
+
 TestAsyncTableScannerCloseWhileSuspending
  
 
-
+
 TestAs
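
The buffering pattern behind BufferingScanResultConsumer, described earlier in this list, is simple enough to sketch generically. A stand-in built on a blocking queue (illustrative only, not the HBase test class itself):

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class BufferingConsumer<R> {
  private final BlockingQueue<R> buffer = new ArrayBlockingQueue<>(1024);

  // Called by the producer as scan results arrive; buffers them in memory.
  public void onNext(R result) throws InterruptedException {
    buffer.put(result);
  }

  // Blocks until the next result is available, handing them back one by one.
  public R take() throws InterruptedException {
    return buffer.take();
  }
}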

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f183e80f/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.html
index af41c2e..d4fb05c 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestCacheOnWriteInSchema
+public class TestCacheOnWriteInSchema
 extends java.lang.Object
 Tests HFile cache-on-write functionality for data blocks, non-root
 index blocks, and Bloom filter blocks, as specified by the column family.
@@ -304,7 +304,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -313,7 +313,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name
 
 
 
@@ -322,7 +322,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-private static final HBaseTestingUtility TEST_UTIL
+private static final HBaseTestingUtility TEST_UTIL
 
 
 
@@ -331,7 +331,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DIR
-private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String DIR
+private static final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String DIR
 
 
 
@@ -340,7 +340,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 table
-private static byte[] table
+private static byte[] table
 
 
 
@@ -349,7 +349,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 family
-private static byte[] family
+private static byte[] family
 
 
 
@@ -358,7 +358,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_KV
-private static final int NUM_KV
+private static final int NUM_KV
 
 See Also:
 Constant
 Field Values
@@ -371,7 +371,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 rand
-private static final http://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true";
 title="class or interface in java.util">Random rand
+private static final http://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true";
 title="class or interface in java.util">Random rand
 
 
 
@@ -380,7 +380,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_VALID_KEY_TYPES
-private static final int NUM_VALID_KEY_TYPES
+private static final int NUM_VALID_KEY_TYPES
 The number of valid key types possible in a store file
 
 
@@ -390,7 +390,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 cowType
-private final TestCacheOnWriteInSchema.CacheOnWriteType
 cowType
+private final TestCacheOnWriteInSchema.CacheOnWriteType
 cowType
 
 
 
@@ -399,7 +399,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 conf
-private org.apache.hadoop.conf.Configuration conf
+private org.apache.hadoop.conf.Configuration conf
 
 
 
@@ -408,7 +408,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testDescription
-private final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String testDescription
+private final http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String testDescription
 
 
 
@@ -417,7 +417,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 region
-private org.apache.hadoop.hbase.regionserver.HRegion region
+private org.apache.hadoop.hbase.regionserver.HRegion region
 
 
 
@@ -426,7 +426,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 store
-private org.apache.hadoop.hbase.regionserver.HStore store
+private org.apache.hadoop.hbase.regionserver.HStore store
 
 
 
@@ -435,7 +435,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 walFactory
-private org.apache.hadoop.hbase.wal.WALFactory walFactory
+private org.apache.hadoop.hbase.wal.WALFactory walFactory
 
 
 
@@ -444,7 +444,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 fs
-private org.apache.hadoop.fs.F
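
The schema switches this test exercises are set per column family. A minimal sketch using the HBase 2.x descriptor builders (table and family names below are illustrative, not taken from the test):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CacheOnWriteExample {
  public static void main(String[] args) {
    // Enable cache-on-write for data, index, and Bloom filter blocks
    // on a single column family.
    TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setCacheDataOnWrite(true)
            .setCacheIndexesOnWrite(true)
            .setCacheBloomsOnWrite(true)
            .build());
    System.out.println(table.build());
  }
}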

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/49431b18/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
 
b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
index cda4db5..c094735 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
@@ -247,14 +247,14 @@ extends 
 
 Methods inherited from class org.apache.hadoop.hbase.master.HMaster
-abort, abortProcedure, addColumn, addReplicationPeer, balance, balance, balanceSwitch, canCreateBaseZNode, canUpdateTableDescriptor, checkIfShouldMoveSystemRegionAsync, checkInitialized, checkServiceStarted, checkTableModifiable, configureInfoServer, constructMaster, createMetaBootstrap, createNamespace, createQuotaSnapshotNotifier, createRpcServices, createServerManager, createSystemTable, createTable, decommissionRegionServers, deleteColumn, deleteNamespace, deleteTable, disableReplicationPeer, disableTable, enableReplicationPeer, enableTable, getAssignmentManager, getAverageLoad, getCatalogJanitor, getClientIdAuditPrefix, getClusterMetrics, getClusterMetrics, getClusterMetricsWithoutCoprocessor, getClusterMetricsWithoutCoprocessor, getClusterSchema, getDumpServlet, getFavoredNodesManager, getHFileCleaner, getInitializedEvent, getLastMajorCompactionTimestamp, getLastMajorCompactionTimestampForRegion, getLoadBalancer, getLoadBalancerClassName, getLoadedCoprocessors, getLockManager, getLocks, getLogCleaner, getMasterActiveTime, getMasterCoprocessorHost, getMasterCoprocessors, getMasterFileSystem, getMasterFinishedInitializationTime, getMasterMetrics, getMasterProcedureExecutor, getMasterProcedureManagerHost, getMasterQuotaManager, getMasterRpcServices, getMasterStartTime, getMasterWalManager, getMergePlanCount, getMetaTableObserver, getMobCompactionState, getNamespace, getNamespaces, getNumWALFiles, getProcedures, getProcessName, getQuotaObserverChore, getRegionNormalizer, getRegionNormalizerTracker, getRegionServerFatalLogBuffer, getRegionServerInfoPort, getRegionServerVersion, getRemoteInetAddress, getReplicationPeerConfig, getServerCrashProcessingEnabledEvent, getServerManager, getServerName, getSnapshotManager, getSpaceQuotaSnapshotNotifier, getSplitOrMergeTracker, getSplitPlanCount, getTableDescriptors, getTableRegionForRow, getTableStateManager, getWalProcedureStore, getZooKeeper, initClusterSchemaService, initializeZKBasedSystemTrackers, initQuotaManager, isActiveMaster, isBalancerOn, isCatalogJanitorEnabled, isCleanerChoreEnabled, isInitialized, isInMaintenanceMode, isNormalizerOn, isServerCrashProcessingEnabled, isSplitOrMergeEnabled, listDecommissionedRegionServers, listReplicationPeers, listTableDescriptors, listTableDescriptorsByNamespace, listTableNames, listTableNamesByNamespace, login, main, mergeRegions, modifyColumn, modifyNamespace, modifyTable, move, normalizeRegions, recommissionRegionServer, recoverMeta, registerService, removeReplicationPeer, reportMobCompactionEnd, reportMobCompactionStart, requestMobCompaction, restoreSnapshot, setCatalogJanitorEnabled, setInitialized, setServerCrashProcessingEnabled, shutdown, splitRegion, stopMaster, stopServiceThreads, truncateTable, updateConfigurationForSpaceQuotaObserver, updateReplicationPeerConfig, waitForMasterActive
+abort, abortProcedure, addColumn, addReplicationPeer, balance, balance, balanceSwitch, canCreateBaseZNode, canUpdateTableDescriptor, checkIfShouldMoveSystemRegionAsync, checkInitialized, checkServiceStarted, checkTableModifiable, configureInfoServer, constructMaster, createMetaBootstrap, createNamespace, createQuotaSnapshotNotifier, createRpcServices, createServerManager, createSystemTable, createTable, decommissionRegionServers, deleteColumn, deleteNamespace, deleteTable, disableReplicationPeer, disableTable, enableReplicationPeer, enableTable, getAssignmentManager, getAverageLoad, getCatalogJanitor, getClientIdAuditPrefix, getClusterMetrics, getClusterMetrics, getClusterMetricsWithoutCoprocessor, getClusterMetricsWithoutCoprocessor, getClusterSchema, getDumpServlet, getFavoredNodesManager, getHFileCleaner, getInitializedEvent, getLastMajorCompactionTimestamp, getLastMajorCompactionTimestampForRegion, getLoadBalancer, getLoadBalancerClassName, getLoadedCoprocessors, getLockManager, getLocks, getLogCleaner, getMasterActiveTime, getMasterCoprocessorHost, getMasterCoprocessors, getMasterFileSystem, getMasterFinishedInitializationTime, getMasterMetrics, getMasterProcedureExecutor, getMasterProcedureManagerHost, getMasterQuotaManager, getMasterRpcServices, getMast

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
index 3e22e30..ac39a7c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CandidateGenerator.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-abstract static class StochasticLoadBalancer.CandidateGenerator
+abstract static class StochasticLoadBalancer.CandidateGenerator
 extends java.lang.Object
 Generates a candidate action to be applied to the cluster for cost function search
 
@@ -232,7 +232,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 CandidateGenerator
-CandidateGenerator()
+CandidateGenerator()
 
 
 
@@ -249,7 +249,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 generate
-abstract BaseLoadBalancer.Cluster.Action generate(BaseLoadBalancer.Cluster cluster)
+abstract BaseLoadBalancer.Cluster.Action generate(BaseLoadBalancer.Cluster cluster)
 
 
 
@@ -258,7 +258,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickRandomRegion
-protected int pickRandomRegion(BaseLoadBalancer.Cluster cluster,
+protected int pickRandomRegion(BaseLoadBalancer.Cluster cluster,
int server,
double chanceOfNoSwap)
 From a list of regions pick a random one. Null can be 
returned which
@@ -282,7 +282,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickRandomServer
-protected int pickRandomServer(BaseLoadBalancer.Cluster cluster)
+protected int pickRandomServer(BaseLoadBalancer.Cluster cluster)
 
 
 
@@ -291,7 +291,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickRandomRack
-protected int pickRandomRack(BaseLoadBalancer.Cluster cluster)
+protected int pickRandomRack(BaseLoadBalancer.Cluster cluster)
 
 
 
@@ -300,7 +300,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickOtherRandomServer
-protected int pickOtherRandomServer(BaseLoadBalancer.Cluster cluster,
+protected int pickOtherRandomServer(BaseLoadBalancer.Cluster cluster,
 int serverIndex)
 
 
@@ -310,7 +310,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickOtherRandomRack
-protected int pickOtherRandomRack(BaseLoadBalancer.Cluster cluster,
+protected int pickOtherRandomRack(BaseLoadBalancer.Cluster cluster,
   int rackIndex)
 
 
@@ -320,7 +320,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 pickRandomRegions
-protected BaseLoadBalancer.Cluster.Action pickRandomRegions(BaseLoadBalancer.Cluster cluster,
+protected BaseLoadBalancer.Cluster.Action pickRandomRegions(BaseLoadBalancer.Cluster cluster,
 
int thisServer,
 
int otherServer)
 
@@ -331,7 +331,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getAction
-protected BaseLoadBalancer.Cluster.Action getAction(int fromServer,
+protected BaseLoadBalancer.Cluster.Action getAction(int fromServer,
 int fromRegion,
 int toServer,
 int toRegion)
@@ -343,7 +343,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 getRandomIterationOrder
-protected http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListInteger> getRandomIterationOrder(int length)
+protected http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListInteger> getRandomIterationOrder(int length)
 Returns a random iteration order of indexes of an array 
with size length
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c7c40c62/devapidocs/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.CostFromRegionLoadAsRateFunction.html

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/bb398572/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
index b8e321a..439a50d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.EnvironmentPriorityComparator.html
@@ -468,274 +468,216 @@
 460  }
 461
 462  /**
-463   * Used to gracefully handle fallback to deprecated methods when we
-464   * evolve coprocessor APIs.
-465   *
-466   * When a particular Coprocessor API is updated to change methods, hosts can support fallback
-467   * to the deprecated API by using this method to determine if an instance implements the new API.
-468   * In the event that said support is partial, then in the face of a runtime issue that prevents
-469   * proper operation {@link #legacyWarning(Class, String)} should be used to let operators know.
-470   *
-471   * For examples of this in action, see the implementation of
-472   * <ul>
-473   *   <li>{@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost}</li>
-474   *   <li>{@link org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost}</li>
-475   * </ul>
-476   *
-477   * @param clazz Coprocessor you wish to evaluate
-478   * @param methodName the name of the non-deprecated method version
-479   * @param parameterTypes the Class of the non-deprecated method's arguments in the order they are
-480   *   declared.
-481   */
-482  @InterfaceAudience.Private
-483  protected static boolean useLegacyMethod(final Class clazz,
-484      final String methodName, final Class... parameterTypes) {
-485    boolean useLegacy;
-486    // Use reflection to see if they implement the non-deprecated version
-487    try {
-488      clazz.getDeclaredMethod(methodName, parameterTypes);
-489      LOG.debug("Found an implementation of '" + methodName + "' that uses updated method " +
-490          "signature. Skipping legacy support for invocations in '" + clazz + "'.");
-491      useLegacy = false;
-492    } catch (NoSuchMethodException exception) {
-493      useLegacy = true;
-494    } catch (SecurityException exception) {
-495      LOG.warn("The Security Manager denied our attempt to detect if the coprocessor '" + clazz +
-496          "' requires legacy support; assuming it does. If you get later errors about legacy " +
-497          "coprocessor use, consider updating your security policy to allow access to the package" +
-498          " and declared members of your implementation.");
-499      LOG.debug("Details of Security Manager rejection.", exception);
-500      useLegacy = true;
+463   * Used to limit legacy handling to once per Coprocessor class per classloader.
+464   */
+465  private static final Set> legacyWarning =
+466      new ConcurrentSkipListSet<>(
+467          new Comparator>() {
+468            @Override
+469            public int compare(Class c1, Class c2) {
+470              if (c1.equals(c2)) {
+471                return 0;
+472              }
+473              return c1.getName().compareTo(c2.getName());
+474            }
+475          });
+476
+477  /**
+478   * Implementations defined function to get an observer of type {@code O} from a coprocessor of
+479   * type {@code C}. Concrete implementations of CoprocessorHost define one getter for each
+480   * observer they can handle. For e.g. RegionCoprocessorHost will use 3 getters, one for
+481   * each of RegionObserver, EndpointObserver and BulkLoadObserver.
+482   * These getters are used by {@code ObserverOperation} to get appropriate observer from the
+483   * coprocessor.
+484   */
+485  @FunctionalInterface
+486  public interface ObserverGetter extends Function> {}
+487
+488  private abstract class ObserverOperation extends ObserverContextImpl {
+489    ObserverGetter observerGetter;
+490
+491    ObserverOperation(ObserverGetter observerGetter) {
+492      this(observerGetter, null);
+493    }
+494
+495    ObserverOperation(ObserverGetter observerGetter, User user) {
+496      this(observerGetter, user, false);
+497    }
+498
+499    ObserverOperation(ObserverGetter observerGetter, boolean bypassable) {
+500      this(observerGetter, null, bypassable);
 501    }
-502    return useLegacy;
-503  }
-504
-505  /**
-506   * Used to limit legacy handling to once per Coprocessor class per classloader.
-507   */
-508  private static fina
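
Condensed, the removed fallback probe works like this (a runnable sketch of the same reflection idea, not the HBase method itself):

public final class LegacyMethodCheck {
  // Probe for the new-style method via reflection and fall back to the
  // deprecated API when it is absent, or when a SecurityManager blocks
  // the probe.
  static boolean useLegacyMethod(Class<?> clazz, String methodName, Class<?>... parameterTypes) {
    try {
      clazz.getDeclaredMethod(methodName, parameterTypes);
      return false; // new API implemented; no legacy fallback needed
    } catch (NoSuchMethodException | SecurityException e) {
      return true;  // conservatively assume legacy support is required
    }
  }

  public static void main(String[] args) {
    // String declares toString() but not toString(int), so the second probe falls back.
    System.out.println(useLegacyMethod(String.class, "toString"));            // false
    System.out.println(useLegacyMethod(String.class, "toString", int.class)); // true
  }
}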

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/EncodingState.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/EncodingState.html 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/EncodingState.html
index ae13a7b..441a393 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/EncodingState.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/EncodingState.html
@@ -242,6 +242,6 @@
 
 
 
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.FastDiffCompressionState.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.FastDiffCompressionState.html
 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.FastDiffCompressionState.html
index 6188452..12052fd 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.FastDiffCompressionState.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.FastDiffCompressionState.html
@@ -162,6 +162,6 @@
 
 
 
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.FastDiffSeekerState.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.FastDiffSeekerState.html
 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.FastDiffSeekerState.html
index d23dbe5..a10d13a 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.FastDiffSeekerState.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.FastDiffSeekerState.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.html
 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.html
index 9e88df7..2cd1425 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/FastDiffDeltaEncoder.html
@@ -120,6 +120,6 @@
 
 
 
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
index a694809..7161108 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDecodingContext.html
@@ -345,6 +345,6 @@
 
 
 
-Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+Copyright © 2007–2018 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/69506d41/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
 
b/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlockDefaultDecodingContext.html
index 337ccf5..79b047f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/encoding/class-use/HFileBlo

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
index bbd91b8..4f76302 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
@@ -56,1641 +56,1753 @@
 048import 
java.util.concurrent.atomic.AtomicBoolean;
 049import 
java.util.concurrent.atomic.AtomicInteger;
 050import 
java.util.concurrent.atomic.AtomicLong;
-051
-052import 
org.apache.hadoop.conf.Configuration;
-053import 
org.apache.hadoop.hbase.CallQueueTooBigException;
-054import 
org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.HRegionInfo;
-058import 
org.apache.hadoop.hbase.HRegionLocation;
-059import 
org.apache.hadoop.hbase.RegionLocations;
-060import 
org.apache.hadoop.hbase.ServerName;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import 
org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-069import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-070import 
org.apache.hadoop.hbase.util.Bytes;
-071import 
org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import 
org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class, 
MediumTests.class})
-084public class TestAsyncProcess {
-085  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086  
withLookingForStuckThread(true).build();
-087  private static final Logger LOG = 
LoggerFactory.getLogger(TestAsyncProcess.class);
-088  private static final TableName 
DUMMY_TABLE =
-089  TableName.valueOf("DUMMY_TABLE");
-090  private static final byte[] 
DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091  private static final byte[] 
DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092  private static final byte[] 
DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093  private static final byte[] FAILS = 
Bytes.toBytes("FAILS");
-094  private static final Configuration CONF 
= new Configuration();
-095  private static final 
ConnectionConfiguration CONNECTION_CONFIG =
-096  new 
ConnectionConfiguration(CONF);
-097  private static final ServerName sn = 
ServerName.valueOf("s1,1,1");
-098  private static final ServerName sn2 = 
ServerName.valueOf("s2,2,2");
-099  private static final ServerName sn3 = 
ServerName.valueOf("s3,3,3");
-100  private static final HRegionInfo hri1 
=
-101  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102  private static final HRegionInfo hri2 
=
-103  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104  private static final HRegionInfo hri3 
=
-105  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106  private static final HRegionLocation 
loc1 = new HRegionLocation(hri1, sn);
-107  private static final HRegionLocation 
loc2 = new HRegionLocation(hri2, sn);
-108  private static final HRegionLocation 
loc3 = new HRegionLocation(hri3, sn2);
-109
-110  // Replica stuff
-111  private static final RegionInfo hri1r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112  private static final RegionInfo hri1r2 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113  private static final RegionInfo hri2r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114  private static final RegionLocations 
hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115  new HRegionLocation(hri1r1, sn2), 
new HRegionLocation(hri1r2, sn3));
-116  private static final RegionLocations 
hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117  new HRegionLocation(hri2r1, 
sn3));
-118  private static final RegionLocations 
hrls3 =
-119  new RegionLocations(new 
HRegionLocati
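
For readers unfamiliar with the fixture conventions above, ServerName strings encode host, port and startcode. A minimal sketch using the public client types (values are the dummy ones from this test):

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;

public class FixtureExample {
  public static void main(String[] args) {
    // "s1,1,1" follows the host,port,startcode convention used by sn above.
    ServerName sn = ServerName.valueOf("s1,1,1");
    TableName table = TableName.valueOf("DUMMY_TABLE");
    System.out.println(sn.getHostname() + ":" + sn.getPort() + " -> " + table);
  }
}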

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/master/ClusterSchemaService.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/ClusterSchemaService.html 
b/devapidocs/org/apache/hadoop/hbase/master/ClusterSchemaService.html
index 85fc67d..0418f9d 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/ClusterSchemaService.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/ClusterSchemaService.html
@@ -95,7 +95,7 @@
 
 
 All Superinterfaces:
-ClusterSchema, 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service
+ClusterSchema, 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service
 
 
 All Known Implementing Classes:
@@ -105,7 +105,7 @@
 
 @InterfaceAudience.Private
 public interface ClusterSchemaService
-extends ClusterSchema, 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service
+extends ClusterSchema, 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service
 Mixes in ClusterSchema and Service
 
 
@@ -120,11 +120,11 @@ extends 
-
+
 
 
-Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service
-org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service.Listener,
 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service.State
+Nested classes/interfaces inherited from 
interface org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service
+org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service.Listener,
 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service.State
 
 
 
@@ -157,10 +157,10 @@ extends createNamespace,
 deleteNamespace,
 getNamespace,
 getNamespaces,
 getTableNamespaceManager,
 modifyNamespace
 
 
-
+
 
 
-Methods inherited from 
interface org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service
+Methods inherited from 
interface org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service
 addListener, awaitRunning, awaitRunning, awaitTerminated, 
awaitTerminated, failureCause, isRunning, startAsync, state, 
stopAsync
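
The Service methods listed above come from Guava's lifecycle API, which HBase consumes through the relocated org.apache.hbase.thirdparty package. A minimal sketch of that lifecycle against plain Guava (the relocation only changes the package prefix):

import com.google.common.util.concurrent.AbstractService;
import com.google.common.util.concurrent.Service;

public class ServiceLifecycleExample {
  // Minimal AbstractService subclass; ClusterSchemaServiceImpl extends the
  // same base class via the relocated thirdparty package.
  static class DemoService extends AbstractService {
    @Override protected void doStart() { notifyStarted(); }
    @Override protected void doStop()  { notifyStopped(); }
  }

  public static void main(String[] args) {
    Service service = new DemoService();
    service.startAsync().awaitRunning();   // NEW -> STARTING -> RUNNING
    System.out.println(service.state());   // RUNNING
    service.stopAsync().awaitTerminated(); // STOPPING -> TERMINATED
  }
}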
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/63d6f712/devapidocs/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.html 
b/devapidocs/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.html
index d648c61..31bcb59 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.html
@@ -100,7 +100,7 @@ var activeTableTab = "activeTableTab";
 http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">java.lang.Object
 
 
-org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.AbstractService
+org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService
 
 
 org.apache.hadoop.hbase.master.ClusterSchemaServiceImpl
@@ -114,13 +114,13 @@ var activeTableTab = "activeTableTab";
 
 
 All Implemented Interfaces:
-ClusterSchema, ClusterSchemaService, 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service
+ClusterSchema, ClusterSchemaService, 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service
 
 
 
 @InterfaceAudience.Private
 class ClusterSchemaServiceImpl
-extends 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.AbstractService
+extends 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService
 implements ClusterSchemaService
 
 
@@ -135,11 +135,11 @@ implements 
-
+
 
 
-Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service
-org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service.Listener,
 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.Service.State
+Nested classes/interfaces inherited from 
interface org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service
+org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service.Listener,
 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.Service.State
 
 
 
@@ -264,10 +264,10 @@ implements 
-
+
 
 
-Methods inherited from 
class org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.AbstractService
+Methods inherited from 
class org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService
 addListener, awaitRunning, awaitRunning, awaitTerminated, 
awaitTerminated, failureCause, isRunning, notifyFailed, notifyStarted, 
notifyStopped, startAsync, state, stopAsync, toString
 
 
@@ -278,10 +278,10 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#c

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d449e87f/devapidocs/org/apache/hadoop/hbase/http/HttpServer.QuotingInputFilter.RequestQuoter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/http/HttpServer.QuotingInputFilter.RequestQuoter.html
 
b/devapidocs/org/apache/hadoop/hbase/http/HttpServer.QuotingInputFilter.RequestQuoter.html
index 99506e3..2271f7b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/http/HttpServer.QuotingInputFilter.RequestQuoter.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/http/HttpServer.QuotingInputFilter.RequestQuoter.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class HttpServer.QuotingInputFilter.RequestQuoter
+public static class HttpServer.QuotingInputFilter.RequestQuoter
 extends javax.servlet.http.HttpServletRequestWrapper
 
 
@@ -273,7 +273,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 rawRequest
-private final javax.servlet.http.HttpServletRequest rawRequest
+private final javax.servlet.http.HttpServletRequest rawRequest
 
 
 
@@ -290,7 +290,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 RequestQuoter
-public RequestQuoter(javax.servlet.http.HttpServletRequest rawRequest)
+public RequestQuoter(javax.servlet.http.HttpServletRequest rawRequest)
 
 
 
@@ -307,7 +307,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 getParameterNames
-public http://docs.oracle.com/javase/8/docs/api/java/util/Enumeration.html?is-external=true";
 title="class or interface in java.util">EnumerationString> getParameterNames()
+public http://docs.oracle.com/javase/8/docs/api/java/util/Enumeration.html?is-external=true";
 title="class or interface in java.util">EnumerationString> getParameterNames()
 Return the set of parameter names, quoting each name.
 
 Specified by:
@@ -323,7 +323,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 getParameter
-public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getParameter(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
+public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getParameter(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 Unquote the name and quote the value.
 
 Specified by:
@@ -339,7 +339,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 getParameterValues
-public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] getParameterValues(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
+public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] getParameterValues(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String name)
 
 Specified by:
 getParameterValues in 
interface javax.servlet.ServletRequest
@@ -354,7 +354,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 getParameterMap
-public http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[]> getParameterMap()
+public http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[]> getParameterMap()
 
 Specified by:
 getParameterMap in 
interface javax.servlet.ServletRequest
@@ -369,7 +369,7 @@ extends javax.servlet.http.HttpServletRequestWrapper
 
 
 getRequestURL
-public http://docs.oracle.com/javase/8/docs/api/java/lang/StringBuffer.html?is-external=true";
 title="class or interface in java.lang">StringBuffer getRequestURL()
+public http://docs.oracle.com/javase/8/docs/api/java/lang/StringBuffer.html?is-external=true";
 title="class or interface in java

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d2b28a1a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
index f8eace7..66b6656 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
@@ -27,2569 +27,2540 @@
 019 */
 020package org.apache.hadoop.hbase;
 021
-022import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
-023import static 
org.apache.hadoop.hbase.util.Bytes.len;
-024
-025import java.io.DataInput;
-026import java.io.DataOutput;
-027import java.io.IOException;
-028import java.io.OutputStream;
-029import java.nio.ByteBuffer;
-030import java.util.ArrayList;
-031import java.util.Arrays;
-032import java.util.HashMap;
-033import java.util.Iterator;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Optional;
-037
-038import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
-039import 
org.apache.hadoop.hbase.util.Bytes;
-040import 
org.apache.hadoop.hbase.util.ClassSize;
-041import 
org.apache.hadoop.io.RawComparator;
-042import 
org.apache.yetus.audience.InterfaceAudience;
-043import org.slf4j.Logger;
-044import org.slf4j.LoggerFactory;
-045
-046import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-047
-048/**
-049 * An HBase Key/Value. This is the 
fundamental HBase Type.
+022import static 
org.apache.hadoop.hbase.util.Bytes.len;
+023
+024import java.io.DataInput;
+025import java.io.DataOutput;
+026import java.io.IOException;
+027import java.io.OutputStream;
+028import java.nio.ByteBuffer;
+029import java.util.ArrayList;
+030import java.util.Arrays;
+031import java.util.HashMap;
+032import java.util.Iterator;
+033import java.util.List;
+034import java.util.Map;
+035import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
+036import 
org.apache.hadoop.hbase.util.Bytes;
+037import 
org.apache.hadoop.hbase.util.ClassSize;
+038import 
org.apache.hadoop.io.RawComparator;
+039import 
org.apache.yetus.audience.InterfaceAudience;
+040import org.slf4j.Logger;
+041import org.slf4j.LoggerFactory;
+042
+043import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+044
+045/**
+046 * An HBase Key/Value. This is the fundamental HBase Type.
+047 * <p>
+048 * HBase applications and users should use the Cell interface and avoid directly using KeyValue and
+049 * member functions not defined in Cell.
 050 * <p>
-051 * HBase applications and users should use the Cell interface and avoid directly using KeyValue and
-052 * member functions not defined in Cell.
-053 * <p>
-054 * If being used client-side, the primary methods to access individual fields are
-055 * {@link #getRowArray()}, {@link #getFamilyArray()}, {@link #getQualifierArray()},
-056 * {@link #getTimestamp()}, and {@link #getValueArray()}. These methods allocate new byte arrays
-057 * and return copies. Avoid their use server-side.
-058 * <p>
-059 * Instances of this class are immutable. They do not implement Comparable but Comparators are
-060 * provided. Comparators change with context, whether user table or a catalog table comparison. Its
-061 * critical you use the appropriate comparator. There are Comparators for normal HFiles, Meta's
-062 * Hfiles, and bloom filter keys.
-063 * <p>
-064 * KeyValue wraps a byte array and takes offsets and lengths into passed array at where to start
-065 * interpreting the content as KeyValue. The KeyValue format inside a byte array is:
-066 * <keylength> <valuelength> <key> <value> Key is further
-067 * decomposed as: <rowlength> <row> <columnfamilylength>
-068 * <columnfamily> <columnqualifier>
-069 * <timestamp> <keytype> The rowlength maximum is
-070 * Short.MAX_SIZE, column family length maximum is Byte.MAX_SIZE, and
-071 * column qualifier + key length must be < Integer.MAX_SIZE. The column does not
-072 * contain the family/qualifier delimiter, {@link #COLUMN_FAMILY_DELIMITER}
-073 * KeyValue can optionally contain Tags. When it contains tags, it is added in the byte array after
-074 * the value part. The format for this part is: <tagslength><tagsbytes>.
-075 * tagslength maximum is Short.MAX_SIZE. The tagsbytes
-076 * contain one or more tags where as each tag is of the form
-077 * <taglength><tagtype><tagbytes>. tagtype is one byte
-078 * and taglength maximum is Short.MAX_SIZE and it includes 1 byte type
-079 * length and actual tag bytes length.
-080 */
-081@InterfaceAudience.Private
-082public class KeyValue implemen
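
A minimal sketch of reading the framing described in the comment above, assuming big-endian ints and shorts as HBase's Bytes utility writes them (an illustrative helper, not HBase's parser):

import java.nio.ByteBuffer;

public class KeyValueLayoutExample {
  // Reads the documented framing: a 4-byte key length, a 4-byte value
  // length, then the key, which itself starts with a 2-byte row length.
  static void describe(byte[] serialized, int offset) {
    ByteBuffer buf = ByteBuffer.wrap(serialized, offset, serialized.length - offset);
    int keyLength = buf.getInt();
    int valueLength = buf.getInt();
    short rowLength = buf.getShort();
    System.out.printf("key=%d bytes, value=%d bytes, row=%d bytes%n",
        keyLength, valueLength, rowLength);
  }
}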


[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b618ac40/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
new file mode 100644
index 000..8b6f080
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.CellWrapper.html
@@ -0,0 +1,1040 @@
+http://www.w3.org/TR/html4/loose.dtd";>
+
+
+Source code
+
+
+
+
+001/*
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018
+019package org.apache.hadoop.hbase.client;
+020
+021import static 
org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
+022
+023import java.io.IOException;
+024import java.nio.ByteBuffer;
+025import java.util.ArrayList;
+026import java.util.Arrays;
+027import java.util.HashMap;
+028import java.util.Iterator;
+029import java.util.List;
+030import java.util.Map;
+031import java.util.NavigableMap;
+032import java.util.Optional;
+033import java.util.TreeMap;
+034import java.util.UUID;
+035import java.util.stream.Collectors;
+036import 
org.apache.hadoop.hbase.ArrayBackedTag;
+037import org.apache.hadoop.hbase.Cell;
+038import 
org.apache.hadoop.hbase.CellScannable;
+039import 
org.apache.hadoop.hbase.CellScanner;
+040import 
org.apache.hadoop.hbase.CellUtil;
+041import 
org.apache.hadoop.hbase.ExtendedCell;
+042import 
org.apache.hadoop.hbase.HConstants;
+043import 
org.apache.hadoop.hbase.KeyValue;
+044import 
org.apache.hadoop.hbase.PrivateCellUtil;
+045import org.apache.hadoop.hbase.RawCell;
+046import org.apache.hadoop.hbase.Tag;
+047import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
+048import 
org.apache.hadoop.hbase.io.HeapSize;
+049import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+050import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+051import 
org.apache.hadoop.hbase.security.access.AccessControlConstants;
+052import 
org.apache.hadoop.hbase.security.access.AccessControlUtil;
+053import 
org.apache.hadoop.hbase.security.access.Permission;
+054import 
org.apache.hadoop.hbase.security.visibility.CellVisibility;
+055import 
org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
+056import 
org.apache.hadoop.hbase.util.Bytes;
+057import 
org.apache.hadoop.hbase.util.ClassSize;
+058import 
org.apache.yetus.audience.InterfaceAudience;
+059
+060import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+061import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
+062import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
+063import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+064import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataInput;
+065import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteArrayDataOutput;
+066import 
org.apache.hadoop.hbase.shaded.com.google.common.io.ByteStreams;
+067
+068@InterfaceAudience.Public
+069public abstract class Mutation extends 
OperationWithAttributes implements Row, CellScannable,
+070HeapSize {
+071  public static final long 
MUTATION_OVERHEAD = ClassSize.align(
+072  // This
+073  ClassSize.OBJECT +
+074  // row + 
OperationWithAttributes.attributes
+075  2 * ClassSize.REFERENCE +
+076  // Timestamp
+077  1 * Bytes.SIZEOF_LONG +
+078  // durability
+079  ClassSize.REFERENCE +
+080  // familyMap
+081  ClassSize.REFERENCE +
+082  // familyMap
+083  ClassSize.TREEMAP +
+084  // priority
+085  ClassSize.INTEGER
+086  );
+087
+088  /**
+089   * The attribute for storing the list 
of clusters that have consumed the change.
+090   */
+091  private static final String 
CONSUMED_CLUSTER_IDS = "_cs.id";
+092
+093  /**
+094   * The attribute for storing TTL for 
the result of the mutation.
+095   */
+096  private static final String 
OP_ATTRIBUTE_TTL = "_ttl";
+097
+098  private static final String 
RETURN_RESULTS = "_rr_";
+099
+100  // TODO: row should be final
+101  protected byte [] row
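The MUTATION_OVERHEAD constant at the top of this fragment shows HBase's HeapSize bookkeeping: fixed per-instance costs are summed from ClassSize primitives and rounded up with ClassSize.align(). A minimal sketch of the same arithmetic, assuming a hypothetical class (none of these names come from the diff) that owns two references, one long, and a TreeMap:

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.ClassSize;

    public final class OverheadEstimate {
      // Object header + two reference fields + one long + an owned TreeMap,
      // aligned to the JVM's 8-byte boundary, mirroring MUTATION_OVERHEAD above.
      public static final long FIXED_OVERHEAD = ClassSize.align(
          ClassSize.OBJECT
          + 2 * ClassSize.REFERENCE
          + Bytes.SIZEOF_LONG
          + ClassSize.TREEMAP);

      public static void main(String[] args) {
        System.out.println("estimated fixed overhead: " + FIXED_OVERHEAD + " bytes");
      }
    }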

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/7c0589c0/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
index 6fecbc9..2accda0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ProcedureFuture.html
@@ -34,4140 +34,4141 @@
 026import 
java.nio.charset.StandardCharsets;
 027import java.util.ArrayList;
 028import java.util.Arrays;
-029import java.util.Collection;
-030import java.util.EnumSet;
-031import java.util.HashMap;
-032import java.util.Iterator;
-033import java.util.LinkedList;
-034import java.util.List;
-035import java.util.Map;
-036import java.util.Set;
-037import java.util.concurrent.Callable;
-038import 
java.util.concurrent.ExecutionException;
-039import java.util.concurrent.Future;
-040import java.util.concurrent.TimeUnit;
-041import 
java.util.concurrent.TimeoutException;
-042import 
java.util.concurrent.atomic.AtomicInteger;
-043import 
java.util.concurrent.atomic.AtomicReference;
-044import java.util.regex.Pattern;
-045import java.util.stream.Collectors;
-046import java.util.stream.Stream;
-047import 
org.apache.hadoop.conf.Configuration;
-048import 
org.apache.hadoop.hbase.Abortable;
-049import 
org.apache.hadoop.hbase.CacheEvictionStats;
-050import 
org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
-051import 
org.apache.hadoop.hbase.ClusterMetrics.Option;
-052import 
org.apache.hadoop.hbase.ClusterStatus;
-053import 
org.apache.hadoop.hbase.DoNotRetryIOException;
-054import 
org.apache.hadoop.hbase.HBaseConfiguration;
-055import 
org.apache.hadoop.hbase.HConstants;
-056import 
org.apache.hadoop.hbase.HRegionInfo;
-057import 
org.apache.hadoop.hbase.HRegionLocation;
-058import 
org.apache.hadoop.hbase.HTableDescriptor;
-059import 
org.apache.hadoop.hbase.MasterNotRunningException;
-060import 
org.apache.hadoop.hbase.MetaTableAccessor;
-061import 
org.apache.hadoop.hbase.NamespaceDescriptor;
-062import 
org.apache.hadoop.hbase.NamespaceNotFoundException;
-063import 
org.apache.hadoop.hbase.NotServingRegionException;
-064import 
org.apache.hadoop.hbase.RegionLoad;
-065import 
org.apache.hadoop.hbase.RegionLocations;
-066import 
org.apache.hadoop.hbase.ServerName;
-067import 
org.apache.hadoop.hbase.TableExistsException;
-068import 
org.apache.hadoop.hbase.TableName;
-069import 
org.apache.hadoop.hbase.TableNotDisabledException;
-070import 
org.apache.hadoop.hbase.TableNotFoundException;
-071import 
org.apache.hadoop.hbase.UnknownRegionException;
-072import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
-073import 
org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
-074import 
org.apache.hadoop.hbase.client.replication.TableCFs;
-075import 
org.apache.hadoop.hbase.client.security.SecurityCapability;
-076import 
org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-077import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-078import 
org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-079import 
org.apache.hadoop.hbase.ipc.HBaseRpcController;
-080import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-081import 
org.apache.hadoop.hbase.quotas.QuotaFilter;
-082import 
org.apache.hadoop.hbase.quotas.QuotaRetriever;
-083import 
org.apache.hadoop.hbase.quotas.QuotaSettings;
-084import 
org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-085import 
org.apache.hadoop.hbase.replication.ReplicationException;
-086import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-087import 
org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-088import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-089import 
org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
-090import 
org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-091import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
-092import 
org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
-093import 
org.apache.hadoop.hbase.util.Addressing;
-094import 
org.apache.hadoop.hbase.util.Bytes;
-095import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-096import 
org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-097import 
org.apache.hadoop.hbase.util.Pair;
-098import 
org.apache.hadoop.ipc.RemoteException;
-099import 
org.apache.hadoop.util.StringUtils;
-100import 
org.apache.yetus.audience.InterfaceAudience;
-101import 
org.apache.yetus.audience.InterfaceStability;
-102import org.slf4j.Logger;
-103import org.slf4j.LoggerFactory;
-104
-105import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-106import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-107import 
org.apache.h

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4b2cc17/devapidocs/deprecated-list.html
--
diff --git a/devapidocs/deprecated-list.html b/devapidocs/deprecated-list.html
index b7892a6..4ecd3a9 100644
--- a/devapidocs/deprecated-list.html
+++ b/devapidocs/deprecated-list.html
@@ -137,64 +137,76 @@
 
 
 
-org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity
+org.apache.hadoop.hbase.ClusterStatus
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ Use ClusterMetrics 
instead.
+
 
 
+org.apache.hadoop.hbase.coprocessor.CoprocessorServiceBackwardCompatiblity
+
+
 org.apache.hadoop.hbase.util.Counter
 use http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/LongAdder.html?is-external=true";
 title="class or interface in 
java.util.concurrent.atomic">LongAdder instead.
 
 
-
+
 org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter
 Deprecated in 2.0. See 
HBASE-13347
 
 
-
+
 org.apache.hadoop.hbase.HColumnDescriptor
 
-
+
 org.apache.hadoop.hbase.HRegionInfo
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  use RegionInfoBuilder to build RegionInfo.
 
 
-
+
 org.apache.hadoop.hbase.HTableDescriptor
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
  Use TableDescriptorBuilder to 
build HTableDescriptor.
 
 
-
+
 org.apache.hadoop.hbase.client.ImmutableHColumnDescriptor
 
-
+
 org.apache.hadoop.hbase.client.ImmutableHRegionInfo
 
-
+
 org.apache.hadoop.hbase.client.ImmutableHTableDescriptor
 
-
+
 org.apache.hadoop.hbase.KeyValue.KVComparator
 : Use CellComparatorImpl. Deprecated for 
hbase 2.0, remove for hbase 3.0.
 
 
-
+
 org.apache.hadoop.hbase.KeyValue.MetaComparator
 : CellComparatorImpl.META_COMPARATOR
 to be used. Deprecated for hbase 2.0, remove for hbase 3.0.
 
 
-
+
 org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles
 As of release 2.0.0, this 
will be removed in HBase 3.0.0. Use
  LoadIncrementalHFiles 
instead.
 
 
-
+
 org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem
 As of release 2.0.0, this 
will be removed in HBase 3.0.0. Use
  LoadIncrementalHFiles.LoadQueueItem
 instead.
 
 
+
+org.apache.hadoop.hbase.RegionLoad
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ Use RegionMetrics 
instead.
+
+
 
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin
 use Admin instead.
@@ -206,16 +218,22 @@
 
 
 
+org.apache.hadoop.hbase.ServerLoad
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ Use ServerMetrics 
instead.
+
+
+
 org.apache.hadoop.hbase.client.UnmodifyableHRegionInfo
 As of release 2.0.0, this 
will be removed in HBase 3.0.0.
 
 
-
+
 org.apache.hadoop.hbase.zookeeper.ZKLeaderManager
 Not used
 
 
-
+
 org.apache.hadoop.hbase.zookeeper.ZKUtil.NodeAndData
 Unused
 
@@ -298,24 +316,30 @@
 
 
 
-org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner.END
+org.apache.hadoop.hbase.ServerLoad.EMPTY_SERVERLOAD
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ Use ServerMetricsBuilder.of(ServerName)
 instead.
+
 
 
+org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner.END
+
+
 org.apache.hadoop.hbase.HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY
 This config option is 
deprecated. Will be removed at later releases after 0.96.
 
 
-
+
 org.apache.hadoop.hbase.mapreduce.TableSplit.LOG
 LOG variable would be made 
private. fix in hbase 3.0
 
 
-
+
 org.apache.hadoop.hbase.KeyValue.META_COMPARATOR
 Use CellComparatorImpl.META_COMPARATOR
 instead. Deprecated for hbase 2.0, remove for hbase 3.0.
 
 
-
+
 org.apache.hadoop.hbase.metrics.BaseSourceImpl.metricsRegistry
 Use 
hbase-metrics/hbase-metrics-api module interfaces for new metrics.
  Defining BaseSources for new metric groups (WAL, RPC, etc) is not needed 
anymore, however,
@@ -323,55 +347,55 @@
  MetricRegistry instance together with the 
HBaseMetrics2HadoopMetricsAdapter.
 
 
-
+
 org.apache.hadoop.hbase.http.HttpServer.Builder.name
 
-
+
 org.apache.hadoop.hbase.HConstants.OLDEST_TIMESTAMP
 Should not be public since 
hbase-1.3.0. For internal use only. Move internal to
Scanners flagged as special timestamp value never to be returned as 
timestamp on a Cell.
 
 
-
+
 org.apache.hadoop.hbase.http.HttpServer.Builder.port
 
-
+
 org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY_DEPRECATED
 
-
+
 org.apache.hadoop.hbase.client.Scan.SCAN_ATTRIBUTES_METRICS_DATA
 
-
+
 org.apache.hadoop.hbase.client.Scan.SCAN_ATTRIBUTES_METRICS_ENABLE
 since 1.0.0. Use Scan.setScanMetricsEnabled(boolean)
 
 
-
+
 org.apache.hadoop.hbase.regionserver.RSRpcServices.SCANNER_ALREADY_CLOSED
 
-
+
 org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationGlobalSourceSource.shippedKBsCounter
 
-
+
 org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceSourceImpl.shippedKBsKey
 
-
+
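Most entries above name their replacement directly (ClusterStatus to ClusterMetrics, RegionLoad to RegionMetrics, ServerLoad to ServerMetrics, Counter to LongAdder). A hedged migration sketch for the Counter entry; the surrounding class and method names are illustrative:

    import java.util.concurrent.atomic.LongAdder;

    public class EventStats {
      // Before: org.apache.hadoop.hbase.util.Counter counter = new Counter();
      private final LongAdder counter = new LongAdder();

      void onEvent() {
        counter.increment();  // same low-contention increment Counter offered
      }

      long total() {
        return counter.sum(); // equivalent of Counter.get()
      }
    }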

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/505bbb2e/devapidocs/org/apache/hadoop/hbase/KeyValue.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/KeyValue.html 
b/devapidocs/org/apache/hadoop/hbase/KeyValue.html
index d8cfb05..628bd79 100644
--- a/devapidocs/org/apache/hadoop/hbase/KeyValue.html
+++ b/devapidocs/org/apache/hadoop/hbase/KeyValue.html
@@ -118,7 +118,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class KeyValue
+public class KeyValue
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements ExtendedCell
 An HBase Key/Value. This is the fundamental HBase Type.
@@ -286,7 +286,7 @@ implements length 
 
 
-private static 
org.apache.commons.logging.Log
+private static org.slf4j.Logger
 LOG 
 
 
@@ -1167,7 +1167,7 @@ implements 
 
 EMPTY_ARRAY_LIST
-private static final http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayList EMPTY_ARRAY_LIST
+private static final http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayList EMPTY_ARRAY_LIST
 
 
 
@@ -1176,7 +1176,7 @@ implements 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.slf4j.Logger LOG
 
 
 
@@ -1185,7 +1185,7 @@ implements 
 
 FIXED_OVERHEAD
-public static final int FIXED_OVERHEAD
+public static final int FIXED_OVERHEAD
 
 
 
@@ -1194,7 +1194,7 @@ implements 
 
 COLUMN_FAMILY_DELIMITER
-public static final char COLUMN_FAMILY_DELIMITER
+public static final char COLUMN_FAMILY_DELIMITER
 Colon character in UTF-8
 
 See Also:
@@ -1208,7 +1208,7 @@ implements 
 
 COLUMN_FAMILY_DELIM_ARRAY
-public static final byte[] COLUMN_FAMILY_DELIM_ARRAY
+public static final byte[] COLUMN_FAMILY_DELIM_ARRAY
 
 
 
@@ -1218,7 +1218,7 @@ implements 
 COMPARATOR
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
-public static final KeyValue.KVComparator COMPARATOR
+public static final KeyValue.KVComparator COMPARATOR
 Deprecated. Use CellComparator.getInstance()
 instead. Deprecated for hbase 2.0, remove for hbase 3.0.
 Comparator for plain key/values; i.e. non-catalog table 
key/values. Works on Key portion
  of KeyValue only.
@@ -1231,7 +1231,7 @@ public static final 
 META_COMPARATOR
 http://docs.oracle.com/javase/8/docs/api/java/lang/Deprecated.html?is-external=true";
 title="class or interface in java.lang">@Deprecated
-public static final KeyValue.KVComparator META_COMPARATOR
+public static final KeyValue.KVComparator META_COMPARATOR
 Deprecated. Use CellComparatorImpl.META_COMPARATOR
 instead. Deprecated for hbase 2.0, remove for hbase 3.0.
 A KeyValue.KVComparator 
for hbase:meta catalog table
  KeyValues.
@@ -1243,7 +1243,7 @@ public static final 
 
 KEY_LENGTH_SIZE
-public static final int KEY_LENGTH_SIZE
+public static final int KEY_LENGTH_SIZE
 Size of the key length field in bytes
 
 See Also:
@@ -1257,7 +1257,7 @@ public static final 
 
 TYPE_SIZE
-public static final int TYPE_SIZE
+public static final int TYPE_SIZE
 Size of the key type field in bytes
 
 See Also:
@@ -1271,7 +1271,7 @@ public static final 
 
 ROW_LENGTH_SIZE
-public static final int ROW_LENGTH_SIZE
+public static final int ROW_LENGTH_SIZE
 Size of the row length field in bytes
 
 See Also:
@@ -1285,7 +1285,7 @@ public static final 
 
 FAMILY_LENGTH_SIZE
-public static final int FAMILY_LENGTH_SIZE
+public static final int FAMILY_LENGTH_SIZE
 Size of the family length field in bytes
 
 See Also:
@@ -1299,7 +1299,7 @@ public static final 
 
 TIMESTAMP_SIZE
-public static final int TIMESTAMP_SIZE
+public static final int TIMESTAMP_SIZE
 Size of the timestamp field in bytes
 
 See Also:
@@ -1313,7 +1313,7 @@ public static final 
 
 TIMESTAMP_TYPE_SIZE
-public static final int TIMESTAMP_TYPE_SIZE
+public static final int TIMESTAMP_TYPE_SIZE
 
 See Also:
 Constant
 Field Values
@@ -1326,7 +1326,7 @@ public static final 
 
 KEY_INFRASTRUCTURE_SIZE
-public static final int KEY_INFRASTRUCTURE_SIZE
+public static final int KEY_INFRASTRUCTURE_SIZE
 
 See Also:
 Constant
 Field Values
@@ -1339,7 +1339,7 @@ public static final 
 
 ROW_OFFSET
-public static final int ROW_OFFSET
+public static final int ROW_OFFSET
 
 See Also:
 Constant
 Field Values
@@ -1352,7 +1352,7 @@ public static final 
 
 ROW_KEY_OFFSET
-public static final int ROW_KEY_OFFSET
+public static final int ROW_KEY_OFFSET
 
 See Also:
 Constant
 Field Values
@@ -1365,7 +1365,7 @@ public static final 
 
 KEYVALUE_INFRASTRUCTURE_SIZE
-public static final int KEYVALUE_INFRASTRUCTURE_SIZE
+public static final int KEYVALUE_INFRASTRUCTURE_SIZE
 
 See Also:
 Constant
 Field Values
@@ -1378,7 +1378,7 @@ public static final 
 
 TAGS_L
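The size constants documented above pin down the serialized key layout: a 2-byte row length, the row bytes, a 1-byte family length, the family and qualifier bytes, an 8-byte timestamp, and a 1-byte type, which is why KEY_INFRASTRUCTURE_SIZE works out to 12. A sketch of that arithmetic; the helper class is illustrative, not part of KeyValue:

    final class KeyValueMath {
      /** Key-section length implied by the constants above. */
      static int keyLength(int rowLen, int familyLen, int qualifierLen) {
        // ROW_LENGTH_SIZE (2) + row + FAMILY_LENGTH_SIZE (1) + family
        // + qualifier + TIMESTAMP_SIZE (8) + TYPE_SIZE (1)
        return 2 + rowLen + 1 + familyLen + qualifierLen + 8 + 1;
      }
    }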

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d0f1a9f6/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
index d8e6410..cfd 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – 
Project Dependencies
 
@@ -3491,7 +3491,7 @@ built on Jackson JSON processor
 No
 
 hbase-annotations-3.0.0-SNAPSHOT-tests.jar
-14.7 kB
+14.8 kB
 -
 -
 -
@@ -4157,7 +4157,7 @@ built on Jackson JSON processor
 Sealed
 
 155
-86.7 MB
+86.8 MB
 42399
 38304
 1576
@@ -4194,7 +4194,7 @@ built on Jackson JSON processor
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-19
+  Last Published: 
2017-12-20
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d0f1a9f6/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
index 63585e0..fc22c30 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – 
Reactor Dependency Convergence
 
@@ -829,7 +829,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-19
+  Last Published: 
2017-12-20
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d0f1a9f6/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
index 60860e1..f77800b 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – 
Dependency Information
 
@@ -147,7 +147,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-19
+  Last Published: 
2017-12-20
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d0f1a9f6/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
index da28e62..c212684 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – 
Project Dependency Management
 
@@ -810,7 +810,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-19
+  Last Published: 
2017-12-20
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d0f1a9f6/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/index.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hb

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dad9a249/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
index 7b68289..372d0ee 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class HMobStore
+public class HMobStore
 extends HStore
 The store implementation to save MOBs (medium objects), it 
extends the HStore.
  When a descriptor of a column family has the value "IS_MOB", it means this 
column family
@@ -501,7 +501,7 @@ extends 
 
 LOG
-private static final org.apache.commons.logging.Log LOG
+private static final org.apache.commons.logging.Log LOG
 
 
 
@@ -510,7 +510,7 @@ extends 
 
 mobCacheConfig
-private MobCacheConfig mobCacheConfig
+private MobCacheConfig mobCacheConfig
 
 
 
@@ -519,7 +519,7 @@ extends 
 
 homePath
-private org.apache.hadoop.fs.Path homePath
+private org.apache.hadoop.fs.Path homePath
 
 
 
@@ -528,7 +528,7 @@ extends 
 
 mobFamilyPath
-private org.apache.hadoop.fs.Path mobFamilyPath
+private org.apache.hadoop.fs.Path mobFamilyPath
 
 
 
@@ -537,7 +537,7 @@ extends 
 
 cellsCountCompactedToMob
-private volatile long cellsCountCompactedToMob
+private volatile long cellsCountCompactedToMob
 
 
 
@@ -546,7 +546,7 @@ extends 
 
 cellsCountCompactedFromMob
-private volatile long cellsCountCompactedFromMob
+private volatile long cellsCountCompactedFromMob
 
 
 
@@ -555,7 +555,7 @@ extends 
 
 cellsSizeCompactedToMob
-private volatile long cellsSizeCompactedToMob
+private volatile long cellsSizeCompactedToMob
 
 
 
@@ -564,7 +564,7 @@ extends 
 
 cellsSizeCompactedFromMob
-private volatile long cellsSizeCompactedFromMob
+private volatile long cellsSizeCompactedFromMob
 
 
 
@@ -573,7 +573,7 @@ extends 
 
 mobFlushCount
-private volatile long mobFlushCount
+private volatile long mobFlushCount
 
 
 
@@ -582,7 +582,7 @@ extends 
 
 mobFlushedCellsCount
-private volatile long mobFlushedCellsCount
+private volatile long mobFlushedCellsCount
 
 
 
@@ -591,7 +591,7 @@ extends 
 
 mobFlushedCellsSize
-private volatile long mobFlushedCellsSize
+private volatile long mobFlushedCellsSize
 
 
 
@@ -600,7 +600,7 @@ extends 
 
 mobScanCellsCount
-private volatile long mobScanCellsCount
+private volatile long mobScanCellsCount
 
 
 
@@ -609,7 +609,7 @@ extends 
 
 mobScanCellsSize
-private volatile long mobScanCellsSize
+private volatile long mobScanCellsSize
 
 
 
@@ -618,7 +618,7 @@ extends 
 
 family
-private ColumnFamilyDescriptor 
family
+private ColumnFamilyDescriptor 
family
 
 
 
@@ -627,7 +627,7 @@ extends 
 
 map
-private http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List> map
+private http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List> map
 
 
 
@@ -636,7 +636,7 @@ extends 
 
 keyLock
-private final IdLock keyLock
+private final IdLock keyLock
 
 
 
@@ -645,7 +645,7 @@ extends 
 
 refCellTags
-private final byte[] refCellTags
+private final byte[] refCellTags
 
 
 
@@ -662,7 +662,7 @@ extends 
 
 HMobStore
-public HMobStore(HRegion region,
+public HMobStore(HRegion region,
  ColumnFamilyDescriptor family,
  org.apache.hadoop.conf.Configuration confParam)
   throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
@@ -686,7 +686,7 @@ extends 
 
 createCacheConf
-protected void createCacheConf(ColumnFamilyDescriptor family)
+protected void createCacheConf(ColumnFamilyDescriptor family)
 Creates the mob cache config.
 
 Overrides:
@@ -702,7 +702,7 @@ extends 
 
 getConfiguration
-public org.apache.hadoop.conf.Configuration getConfiguration()
+public org.apache.hadoop.conf.Configuration getConfiguration()
 Gets current config.
 
 
@@ -712,7 +712,7 @@ extends 
 
 createScanner
-protected KeyValueScanner createScanner(Scan scan,
+protected KeyValueScanner createScanner(Scan scan,
 ScanInfo scanInfo,
 

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.html 
b/apidocs/src-html/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.html
index 940a0dd..590fb1c 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/io/encoding/DataBlockEncoding.html
@@ -27,8 +27,8 @@
 019import java.io.IOException;
 020import java.io.OutputStream;
 021
-022import 
org.apache.yetus.audience.InterfaceAudience;
-023import 
org.apache.hadoop.hbase.util.Bytes;
+022import 
org.apache.hadoop.hbase.util.Bytes;
+023import 
org.apache.yetus.audience.InterfaceAudience;
 024
 025/**
 026 * Provide access to all data block 
encoding algorithms. All of the algorithms
@@ -180,19 +180,16 @@
 172return algorithm;
 173  }
 174
-175  protected static DataBlockEncoder 
createEncoder(String fullyQualifiedClassName){
-176  try {
-177return 
(DataBlockEncoder)Class.forName(fullyQualifiedClassName).newInstance();
-178  } catch (InstantiationException e) 
{
-179throw new RuntimeException(e);
-180  } catch (IllegalAccessException e) 
{
-181throw new RuntimeException(e);
-182  } catch (ClassNotFoundException e) 
{
-183throw new 
IllegalArgumentException(e);
-184  }
-185  }
-186
-187}
+175  protected static DataBlockEncoder 
createEncoder(String fullyQualifiedClassName) {
+176try {
+177  return (DataBlockEncoder) 
Class.forName(fullyQualifiedClassName).getDeclaredConstructor()
+178  .newInstance();
+179} catch (Exception e) {
+180  throw new RuntimeException(e);
+181}
+182  }
+183
+184}
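The rewritten hunk above also swaps the deprecated Class.newInstance(), which rethrows undeclared checked exceptions straight from the constructor, for getDeclaredConstructor().newInstance(), which surfaces them wrapped in InvocationTargetException. A standalone sketch of the same pattern; the generic helper is illustrative:

    final class ReflectiveFactory {
      static <T> T instantiate(String className, Class<T> type) {
        try {
          return type.cast(Class.forName(className)
              .getDeclaredConstructor()  // requires a public no-arg constructor
              .newInstance());           // constructor failures arrive wrapped
        } catch (Exception e) {
          throw new RuntimeException(e); // mirrors the catch-all in createEncoder
        }
      }
    }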
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c4c0cfa5/apidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaRetriever.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaRetriever.html 
b/apidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaRetriever.html
index 8bfb749..d28d8dc 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaRetriever.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/quotas/QuotaRetriever.html
@@ -93,95 +93,96 @@
 085}
 086  }
 087
-088  public void close() throws IOException 
{
-089if (this.table != null) {
-090  this.table.close();
-091  this.table = null;
-092}
-093// Null out the connection on close() 
even if we didn't explicitly close it
-094// to maintain typical semantics.
-095if (isManagedConnection) {
-096  if (this.connection != null) {
-097this.connection.close();
-098  }
-099}
-100this.connection = null;
-101  }
-102
-103  public QuotaSettings next() throws 
IOException {
-104if (cache.isEmpty()) {
-105  Result result = scanner.next();
-106  if (result == null) {
-107return null;
-108  }
-109  
QuotaTableUtil.parseResultToCollection(result, cache);
-110}
-111return cache.poll();
-112  }
-113
-114  @Override
-115  public Iterator 
iterator() {
-116return new Iter();
-117  }
-118
-119  private class Iter implements 
Iterator {
-120QuotaSettings cache;
-121
-122public Iter() {
-123  try {
-124cache = 
QuotaRetriever.this.next();
-125  } catch (IOException e) {
-126
LOG.warn(StringUtils.stringifyException(e));
-127  }
-128}
-129
-130@Override
-131public boolean hasNext() {
-132  return cache != null;
-133}
-134
-135@Override
-136public QuotaSettings next() {
-137  QuotaSettings result = cache;
-138  try {
-139cache = 
QuotaRetriever.this.next();
-140  } catch (IOException e) {
-141
LOG.warn(StringUtils.stringifyException(e));
-142  }
-143  return result;
-144}
-145
-146@Override
-147public void remove() {
-148  throw new 
RuntimeException("remove() not supported");
-149}
-150  }
-151
-152  /**
-153   * Open a QuotaRetriever with no 
filter, all the quota settings will be returned.
-154   * @param conf Configuration object to 
use.
-155   * @return the QuotaRetriever
-156   * @throws IOException if a remote or 
network exception occurs
-157   */
-158  public static QuotaRetriever open(final 
Configuration conf) throws IOException {
-159return open(conf, null);
-160  }
-161
-162  /**
-163   * Open a QuotaRetriever with the 
specified filter.
-164   * @param conf Configuration object to 
use.
-165   * @param filter the QuotaFilter
-166   * @return the QuotaRetriever
-167   * @throws IOException if a remote or 
network exception occurs
-168   */
-169  public static QuotaRetriever open(final 
Configuration conf, final QuotaFilter filter)
-170  throws IOException {
-171   
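QuotaRetriever is both Closeable and Iterable, so callers normally combine the open(...) factories above with try-with-resources. A hedged usage sketch, assuming the Configuration points at a reachable cluster:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.quotas.QuotaRetriever;
    import org.apache.hadoop.hbase.quotas.QuotaSettings;

    public class ListQuotas {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (QuotaRetriever quotas = QuotaRetriever.open(conf)) {
          for (QuotaSettings quota : quotas) { // Iter pulls rows via next()
            System.out.println(quota);
          }
        }
      }
    }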

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/071f974b/devapidocs/src-html/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.WriteExampleCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.WriteExampleCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.WriteExampleCallable.html
index 0b8baa8..c77170b 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.WriteExampleCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.WriteExampleCallable.html
@@ -30,308 +30,325 @@
 022import org.apache.commons.logging.Log;
 023import 
org.apache.commons.logging.LogFactory;
 024import 
org.apache.hadoop.conf.Configured;
-025import 
org.apache.hadoop.hbase.TableName;
-026import 
org.apache.hadoop.hbase.client.Connection;
-027import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-028import 
org.apache.hadoop.hbase.client.Put;
-029import 
org.apache.hadoop.hbase.client.RegionLocator;
-030import 
org.apache.hadoop.hbase.client.Result;
-031import 
org.apache.hadoop.hbase.client.ResultScanner;
-032import 
org.apache.hadoop.hbase.client.Scan;
-033import 
org.apache.hadoop.hbase.client.Table;
-034import 
org.apache.hadoop.hbase.filter.KeyOnlyFilter;
-035import 
org.apache.hadoop.hbase.util.Bytes;
-036import org.apache.hadoop.util.Tool;
-037import 
org.apache.hadoop.util.ToolRunner;
-038
-039import java.io.IOException;
-040import java.util.ArrayList;
-041import java.util.List;
-042import java.util.concurrent.Callable;
-043import 
java.util.concurrent.ExecutorService;
-044import java.util.concurrent.Executors;
-045import 
java.util.concurrent.ForkJoinPool;
-046import java.util.concurrent.Future;
-047import 
java.util.concurrent.ThreadFactory;
-048import 
java.util.concurrent.ThreadLocalRandom;
-049import java.util.concurrent.TimeUnit;
-050
-051
-052/**
-053 * Example on how to use HBase's {@link 
Connection} and {@link Table} in a
-054 * multi-threaded environment. Each table 
is a light weight object
-055 * that is created and thrown away. 
Connections are heavy weight objects
-056 * that hold on to zookeeper connections, 
async processes, and other state.
-057 *
-058 * 
-059 * Usage:
-060 * bin/hbase 
org.apache.hadoop.hbase.client.example.MultiThreadedClientExample testTableName 
50
-061 * 
-062 *
-063 * <p>
-064 * The table should already be created before running the command.
-065 * This example expects one column family named d.
-066 * </p>
-067 * <p>
-068 * This is meant to show different operations that are likely to be
-069 * done in a real world application. These operations are:
-070 * </p>
-071 *
-072 * <ul>
-073 *   <li>
-074 *     30% of all operations performed are batch writes.
-075 *     30 puts are created and sent out at a time.
-076 *     The response for all puts is waited on.
-077 *   </li>
-078 *   <li>
-079 *     20% of all operations are single writes.
-080 *     A single put is sent out and the response is waited for.
-081 *   </li>
-082 *   <li>
-083 *     50% of all operations are scans.
-084 *     These scans start at a random place and scan up to 100 rows.
-085 *   </li>
-086 * </ul>
-087 *
-088 */
-089public class MultiThreadedClientExample extends Configured implements Tool {
-090  private static final Log LOG = LogFactory.getLog(MultiThreadedClientExample.class);
-091  private static final int DEFAULT_NUM_OPERATIONS = 50;
-092
-093  /**
-094   * The name of the column family.
-095   *
-096   * d for default.
-097   */
-098  private static final byte[] FAMILY = Bytes.toBytes("d");
-099
-100  /**
-101   * For the example we're just using one qualifier.
-102   */
-103  private static final byte[] QUAL = Bytes.toBytes("test");
-104
-105  private final ExecutorService internalPool;
-106
-107  private final int threads;
-108
-109  public MultiThreadedClientExample() throws IOException {
-110    // Base number of threads.
-111    // This represents the number of threads you application has
-112    // that can be interacting with an hbase client.
-113    this.threads = Runtime.getRuntime().availableProcessors() * 4;
-114
-115    // Daemon threads are great for things that get shut down.
-116    ThreadFactory threadFactory = new ThreadFactoryBuilder()
-117        .setDaemon(true).setNameFormat("internal-pol-%d").build();
-118
-119
-120    this.internalPool = Executors.newFixedThreadPool(threads, threadFactory);
-121  }
+025import org.apache.hadoop.hbase.CellBuilder;
+026import org.apache.hadoop.hbase.CellBuilderFactory;
+027import org.apache.hadoop.hbase.CellBuilderType;
+028import org.apache.hadoop.hbase.TableName;
+029import org.apache.hadoop.hbase.client.Connection;
+030import org.apache.hadoop.hbase.client.ConnectionFactory;
+031import org.apache.h
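The 30/20/50 mix in the Javadoc above comes down to one bounded random draw per operation. A hedged sketch of that dispatch, using ThreadLocalRandom as the class itself does; the three Runnables stand in for the callables the file defines:

    import java.util.concurrent.ThreadLocalRandom;

    final class OperationPicker {
      /** Run one operation according to the documented 30/20/50 split. */
      static void runOne(Runnable batchWrite, Runnable singleWrite, Runnable scan) {
        int roll = ThreadLocalRandom.current().nextInt(100);
        if (roll < 30) {
          batchWrite.run();   // 30%: batch of 30 puts, wait on the whole batch
        } else if (roll < 50) {
          singleWrite.run();  // 20%: one put, wait for its response
        } else {
          scan.run();         // 50%: scan up to 100 rows from a random start
        }
      }
    }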

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/dc4e5c85/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
index 7c59e27..c904c56 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.ThrowableAbortable.html
@@ -119,4048 +119,4054 @@
 111import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 112import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 113import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest;
-114import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-115import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-116import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-117import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-118import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
-119import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse;
-120import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest;
-121import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-122import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-123import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-124import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-125import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-126import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-127import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-128import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-129import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
-130import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-131import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-132import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-133import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-134import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-135import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ClearDeadServersRequest;
-136import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-137import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-138import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-139import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-140import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-141import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-142import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-143import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-144import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-145import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-147import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-148import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-149import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-150import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-151import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-152import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-153import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-154import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-155import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
-156import 
org.apache.had

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/rest/client/class-use/RemoteHTable.CheckAndMutateBuilderImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/rest/client/class-use/RemoteHTable.CheckAndMutateBuilderImpl.html
 
b/devapidocs/org/apache/hadoop/hbase/rest/client/class-use/RemoteHTable.CheckAndMutateBuilderImpl.html
new file mode 100644
index 0000000..5dd3247
--- /dev/null
+++ 
b/devapidocs/org/apache/hadoop/hbase/rest/client/class-use/RemoteHTable.CheckAndMutateBuilderImpl.html
@@ -0,0 +1,125 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+
+
+
+
+Uses of Class 
org.apache.hadoop.hbase.rest.client.RemoteHTable.CheckAndMutateBuilderImpl 
(Apache HBase 3.0.0-SNAPSHOT API)
+
+
+
+
+
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+
+Uses of 
Classorg.apache.hadoop.hbase.rest.client.RemoteHTable.CheckAndMutateBuilderImpl
+
+No usage of 
org.apache.hadoop.hbase.rest.client.RemoteHTable.CheckAndMutateBuilderImpl
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+Prev
+Next
+
+
+Frames
+No Frames
+
+
+All Classes
+
+
+
+
+
+
+
+
+
+Copyright © 2007–2017 https://www.apache.org/";>The Apache Software Foundation. All rights 
reserved.
+
+

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/rest/client/package-summary.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/rest/client/package-summary.html 
b/devapidocs/org/apache/hadoop/hbase/rest/client/package-summary.html
index abc6251..c61f5ef 100644
--- a/devapidocs/org/apache/hadoop/hbase/rest/client/package-summary.html
+++ b/devapidocs/org/apache/hadoop/hbase/rest/client/package-summary.html
@@ -106,7 +106,7 @@
 HTable interface to remote tables accessed via REST 
gateway
 
 
-
+
 Response
 
 The HTTP result code, response headers, and body of a HTTP 
response.

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/rest/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/rest/client/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/rest/client/package-tree.html
index 925ead5..3cb8c57 100644
--- a/devapidocs/org/apache/hadoop/hbase/rest/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/rest/client/package-tree.html
@@ -85,6 +85,7 @@
 org.apache.hadoop.hbase.rest.client.Cluster
 org.apache.hadoop.hbase.rest.client.RemoteAdmin
 org.apache.hadoop.hbase.rest.client.RemoteHTable (implements 
org.apache.hadoop.hbase.client.Table)
+org.apache.hadoop.hbase.rest.client.RemoteHTable.CheckAndMutateBuilderImpl 
(implements org.apache.hadoop.hbase.client.Table.CheckAndMutateBuilder)
 org.apache.hadoop.hbase.rest.client.RemoteHTable.Scanner (implements 
org.apache.hadoop.hbase.client.ResultScanner)
 org.apache.hadoop.hbase.rest.client.RemoteHTable.Scanner.Iter (implements 
java.util.http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator)
 org.apache.hadoop.hbase.rest.client.Response
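The new RemoteHTable.CheckAndMutateBuilderImpl listed above gives the REST client the same fluent Table.CheckAndMutateBuilder contract the native client exposes. A hedged caller-side sketch; the table handle, row, and column names are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class CheckAndPut {
      /** Write the cell only if it does not exist yet; true if applied. */
      static boolean putIfAbsent(Table table) throws IOException {
        byte[] row = Bytes.toBytes("row1");
        byte[] family = Bytes.toBytes("d");
        byte[] qualifier = Bytes.toBytes("q");
        return table.checkAndMutate(row, family)
            .qualifier(qualifier)
            .ifNotExists() // condition evaluated server-side
            .thenPut(new Put(row).addColumn(family, qualifier, Bytes.toBytes("v")));
      }
    }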

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/4abd958d/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
index dc2f127..91776e4 100644
--- a/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/rest/model/package-tree.html
@@ -110,8 +110,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=tru

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/e23b49ba/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
index ea38a87..f988e84 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":9,"i2":10,"i3":9,"i4":9,"i5":10,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":9};
+var methods = 
{"i0":10,"i1":9,"i2":10,"i3":9,"i4":9,"i5":10,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -194,16 +194,6 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
-CACHE_DATA_IN_L1
-Key for cache data into L1 if cache is set up with more 
than one tier.
-
-
-
-private static Bytes
-CACHE_DATA_IN_L1_BYTES 
-
-
-static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 CACHE_DATA_ON_WRITE 
 
 
@@ -281,260 +271,254 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static boolean
-DEFAULT_CACHE_DATA_IN_L1
-Default setting for whether to cache data blocks in L1 
tier.
-
-
-
-static boolean
 DEFAULT_CACHE_DATA_ON_WRITE
 Default setting for whether to cache data blocks on write 
if block caching
  is enabled.
 
 
-
+
 static boolean
 DEFAULT_CACHE_INDEX_ON_WRITE
 Default setting for whether to cache index blocks on write 
if block caching
  is enabled.
 
 
-
+
 static boolean
 DEFAULT_COMPRESS_TAGS
 Default compress tags along with any type of 
DataBlockEncoding.
 
 
-
+
 static Compression.Algorithm
 DEFAULT_COMPRESSION
 Default compression type.
 
 
-
+
 static DataBlockEncoding
 DEFAULT_DATA_BLOCK_ENCODING
 Default data block encoding algorithm.
 
 
-
+
 static short
 DEFAULT_DFS_REPLICATION 
 
-
+
 static boolean
 DEFAULT_EVICT_BLOCKS_ON_CLOSE
 Default setting for whether to evict cached blocks from the 
blockcache on
  close.
 
 
-
+
 static boolean
 DEFAULT_IN_MEMORY
 Default setting for whether to try and serve this column 
family from memory
  or not.
 
 
-
+
 static KeepDeletedCells
 DEFAULT_KEEP_DELETED
 Default setting for preventing deleted from being collected 
immediately.
 
 
-
+
 static int
 DEFAULT_MAX_VERSIONS
 Default number of versions of a record to keep.
 
 
-
+
 static int
 DEFAULT_MIN_VERSIONS
 Default is not to keep a minimum of versions.
 
 
-
+
 private static boolean
 DEFAULT_MOB 
 
-
+
 static MobCompactPartitionPolicy
 DEFAULT_MOB_COMPACT_PARTITION_POLICY 
 
-
+
 static long
 DEFAULT_MOB_THRESHOLD 
 
-
+
 static boolean
 DEFAULT_NEW_VERSION_BEHAVIOR 
 
-
+
 static boolean
 DEFAULT_PREFETCH_BLOCKS_ON_OPEN 
 
-
+
 static int
 DEFAULT_REPLICATION_SCOPE
 Default scope.
 
 
-
+
 static int
 DEFAULT_TTL
 Default time to live of cell contents.
 
 
-
+
 private static http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String>
 DEFAULT_VALUES 
 
-
+
 private ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor
 desc 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 DFS_REPLICATION 
 
-
+
 private static Bytes
 DFS_REPLICATION_BYTES 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 ENCRYPTION 
 
-
+
 private static Bytes
 ENCRYPTION_BYTES 
 
-
+
 static http://docs.oracle.com/javase/8
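Each DEFAULT_* constant in the table above is a builder fallback, so callers override only the handful of settings they care about. A hedged sketch of typical use; the family name and both overrides are illustrative:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyDefaults {
      static final ColumnFamilyDescriptor CF = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("info"))
          .setMaxVersions(3)     // instead of DEFAULT_MAX_VERSIONS
          .setTimeToLive(86400)  // instead of DEFAULT_TTL, in seconds
          .build();
    }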

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d171b896/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/dependency-convergence.html 
b/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
index a626f4f..e56371c 100644
--- a/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
+++ b/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes – Reactor Dependency 
Convergence
 
@@ -488,22 +488,22 @@
 3.4.10
 
 
-org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT+- org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|  +- org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)|  +- org.apache.zookeeper:zookeeper:jar:3.4.10:compile|  +- org.apache.hadoop:hadoop-common:jar:2.7.4:compile|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)|  +- org.apache.hadoop:hadoop-auth:jar:2.7.4:compile|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)|  \- org.apache.hadoop:hadoop-client:jar:2.7.4:compile| \- org.apache.had
 oop:hadoop-mapreduce-client-app:jar:2.7.4:compile|\- org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile|   \- org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-mapreduce:test-jar:tests:3.0.0-SNAPSHOT:test|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test - 
version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-testing-util:jar:3.0.0-SNAP
 SHOT:test|  +- org.apache.hbase:hbase-server:test-jar:tests:3.0.0-SNAPSHOT:test|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test
 - version managed from 3.4.6; omitted for duplicate)|  \- org.apache.hadoop:hadoop-minicluster:jar:2.7.4:test| +- org.apache.hadoop:hadoop-common:test-jar:tests:2.7.4:test| |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test
 - version managed from 3.4.6; omitted for duplicate)| \- org.apache.hadoop:hadoop-yarn-server-tests:test-jar:tests:2.7.4:test|\- org.apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.4:test|   \- (org.apache.zookeeper:zookeeper:jar:3.4.10:test
 - version managed from 3.4.6; omitted for dupli
 cate)+- org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)\- org.apache.hbase:hbase-rsgroup:jar:3.0.0-SNAPSHOT:compile   \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile 
- version managed from 3.4.6
 ; omitted for duplicate)
-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT+- org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT:compile|  +- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)|  \- org.apache.hadoop:hadoop-auth:jar:2.7.4:compile| \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)+- org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|  +- org.apache.hbase:hbase-zookeeper:jar:3.0.0-SNAPSHOT:compile|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for duplicate)|  +- org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT:compile|  |  \- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile
 - version managed from 3.4.6; omitted for dup
 licate)|  +- (org.apache.zookeeper:zookeeper:jar:3.4.10:compile - 
version managed from 3.4.6; omitted for duplicate)|  \- org.apache.hadoop:hadoop-client:jar:2.7.4:compile| \- org.apache.hadoop:hadoop-mapreduce-client-app:jar:2.7.4:compile|\- org.apache.hadoop:hadoop-mapreduce-client-common:jar:2.7.4:compile|   \- org.apache.hadoop:hadoop-yarn-server-common:jar:2.7.4:compile|  \- (org.apache.zookeeper:zookeeper:jar:3.4.10

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/c54c242b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.Task.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.Task.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.Task.html
new file mode 100644
index 0000000..6c7ef2a
--- /dev/null
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.Task.html
@@ -0,0 +1,419 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one
+003 * or more contributor license 
agreements.  See the NOTICE file
+004 * distributed with this work for 
additional information
+005 * regarding copyright ownership.  The 
ASF licenses this file
+006 * to you under the Apache License, 
Version 2.0 (the
+007 * "License"); you may not use this file 
except in compliance
+008 * with the License.  You may obtain a 
copy of the License at
+009 *
+010 * 
http://www.apache.org/licenses/LICENSE-2.0
+011 *
+012 * Unless required by applicable law or 
agreed to in writing, software
+013 * distributed under the License is 
distributed on an "AS IS" BASIS,
+014 * WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.
+015 * See the License for the specific 
language governing permissions and
+016 * limitations under the License.
+017 */
+018package 
org.apache.hadoop.hbase.zookeeper;
+019
+020import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_ZK_SESSION_TIMEOUT;
+021import static 
org.apache.hadoop.hbase.HConstants.ZK_SESSION_TIMEOUT;
+022
+023import java.io.Closeable;
+024import java.io.IOException;
+025import java.util.Arrays;
+026import java.util.EnumSet;
+027import 
java.util.concurrent.CompletableFuture;
+028import java.util.concurrent.DelayQueue;
+029import java.util.concurrent.Delayed;
+030import java.util.concurrent.TimeUnit;
+031import 
java.util.concurrent.atomic.AtomicBoolean;
+032
+033import org.apache.commons.logging.Log;
+034import 
org.apache.commons.logging.LogFactory;
+035import 
org.apache.hadoop.conf.Configuration;
+036import 
org.apache.yetus.audience.InterfaceAudience;
+037import 
org.apache.zookeeper.KeeperException;
+038import 
org.apache.zookeeper.KeeperException.Code;
+039import org.apache.zookeeper.ZooKeeper;
+040import org.apache.zookeeper.data.Stat;
+041
+042import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+043
+044/**
+045 * A very simple read only zookeeper 
implementation without watcher support.
+046 */
+047@InterfaceAudience.Private
+048public final class ReadOnlyZKClient 
implements Closeable {
+049
+050  private static final Log LOG = 
LogFactory.getLog(ReadOnlyZKClient.class);
+051
+052  public static final String 
RECOVERY_RETRY = "zookeeper.recovery.retry";
+053
+054  private static final int 
DEFAULT_RECOVERY_RETRY = 30;
+055
+056  public static final String 
RECOVERY_RETRY_INTERVAL_MILLIS =
+057  
"zookeeper.recovery.retry.intervalmill";
+058
+059  private static final int 
DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS = 1000;
+060
+061  public static final String 
KEEPALIVE_MILLIS = "zookeeper.keep-alive.time";
+062
+063  private static final int 
DEFAULT_KEEPALIVE_MILLIS = 6;
+064
+065  private static final 
EnumSet FAIL_FAST_CODES = EnumSet.of(Code.NOAUTH, 
Code.AUTHFAILED);
+066
+067  private final String connectString;
+068
+069  private final int sessionTimeoutMs;
+070
+071  private final int maxRetries;
+072
+073  private final int retryIntervalMs;
+074
+075  private final int keepAliveTimeMs;
+076
+077  private static abstract class Task 
implements Delayed {
+078
+079protected long time = 
System.nanoTime();
+080
+081public boolean needZk() {
+082  return false;
+083}
+084
+085public void exec(ZooKeeper zk) {
+086}
+087
+088public void connectFailed(IOException 
e) {
+089}
+090
+091public void closed(IOException e) {
+092}
+093
+094@Override
+095public int compareTo(Delayed o) {
+096  Task that = (Task) o;
+097  int c = Long.compare(time, 
that.time);
+098  if (c != 0) {
+099return c;
+100  }
+101  return 
Integer.compare(System.identityHashCode(this), 
System.identityHashCode(that));
+102}
+103
+104@Override
+105public long getDelay(TimeUnit unit) 
{
+106  return unit.convert(time - 
System.nanoTime(), TimeUnit.NANOSECONDS);
+107}
+108  }
+109
+110  private static final Task CLOSE = new 
Task() {
+111  };
+112
+113  private final DelayQueue<Task> tasks = new DelayQueue<>();
+114
+115  private final AtomicBoolean closed = 
new AtomicBoolean(false);
+116
+117  private ZooKeeper zookeeper;
+118
+119  private String getId() {
+120return String.format("0x%08x", 
System.identityHashCode(this));
+121  }
+122
+123  public ReadOnlyZKClient(Configuration 
conf) {
+
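The Task hierarchy above is the heart of this client: every operation is queued as a Delayed task so one background thread can serve reads, retries, and the keep-alive close. Below is a minimal, self-contained sketch of that DelayQueue pattern (hypothetical names; not the HBase class itself):

import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

public class DelayedTaskSketch {

  abstract static class Task implements Delayed {
    // Absolute deadline in nanos; DelayQueue releases the task once it passes.
    long deadline = System.nanoTime();

    abstract void exec();

    @Override
    public int compareTo(Delayed o) {
      Task that = (Task) o;
      int c = Long.compare(deadline, that.deadline);
      if (c != 0) {
        return c;
      }
      // Same trick as in ReadOnlyZKClient above: break ties by identity so
      // distinct tasks with equal deadlines never compare as equal.
      return Integer.compare(System.identityHashCode(this), System.identityHashCode(that));
    }

    @Override
    public long getDelay(TimeUnit unit) {
      return unit.convert(deadline - System.nanoTime(), TimeUnit.NANOSECONDS);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    DelayQueue<Task> queue = new DelayQueue<>();
    Task t = new Task() {
      @Override
      void exec() {
        System.out.println("executed after the delay elapsed");
      }
    };
    t.deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(100);
    queue.add(t);
    queue.take().exec(); // take() blocks until getDelay() <= 0
  }
}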

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713d773f/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
index 25e368d..d0f781f 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose.html
@@ -25,798 +25,798 @@
 017 */
 018package 
org.apache.hadoop.hbase.io.asyncfs;
 019
-020import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
-021import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
-022import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
-023import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
-024import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
-025import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+020import static 
org.apache.hadoop.fs.CreateFlag.CREATE;
+021import static 
org.apache.hadoop.fs.CreateFlag.OVERWRITE;
+022import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor;
+023import static 
org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.trySaslNegotiate;
+024import static 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelOption.CONNECT_TIMEOUT_MILLIS;
+025import static 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleState.READER_IDLE;
 026import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 027import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
 028import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 029import static 
org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage.PIPELINE_SETUP_CREATE;
 030
-031import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
-032import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableMap;
-033import 
com.google.protobuf.CodedOutputStream;
-034
-035import 
org.apache.hadoop.hbase.shaded.io.netty.bootstrap.Bootstrap;
-036import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBuf;
-037import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufAllocator;
-038import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.ByteBufOutputStream;
-039import 
org.apache.hadoop.hbase.shaded.io.netty.buffer.PooledByteBufAllocator;
-040import 
org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
-041import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFuture;
-042import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelFutureListener;
-043import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandler;
-044import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelHandlerContext;
-045import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelInitializer;
-046import 
org.apache.hadoop.hbase.shaded.io.netty.channel.ChannelPipeline;
-047import 
org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
-048import 
org.apache.hadoop.hbase.shaded.io.netty.channel.SimpleChannelInboundHandler;
-049import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufDecoder;
-050import 
org.apache.hadoop.hbase.shaded.io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder;
-051import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateEvent;
-052import 
org.apache.hadoop.hbase.shaded.io.netty.handler.timeout.IdleStateHandler;
-053import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Future;
-054import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.FutureListener;
-055import 
org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.Promise;
-056
-057import java.io.IOException;
-058import 
java.lang.reflect.InvocationTargetException;
-059import java.lang.reflect.Method;
-060import java.util.ArrayList;
-061import java.util.EnumSet;
-062import java.util.List;
-063import java.util.concurrent.TimeUnit;
-064
-065import org.apache.commons.logging.Log;
-066import 
org.apache.commons.logging.LogFactory;
-067import 
org.apache.hadoop.conf.Configuration;
-068import 
org.apache.hadoop.crypto.CryptoProtocolVersion;
-069import 
org.apache.hadoop.crypto.Encryptor;
-070import org.apache.hadoop.fs.CreateFlag;
-071import org.apache.hadoop.fs.FileSystem;
-072import 
org.apache.hadoop.fs.FileSystemLinkResolver;
-073import org.apache.hadoop.fs.Path;
-074import 
org.apache.hadoop.fs.UnresolvedLinkException;
-075import 
org.apache.hadoop.fs.permiss

[13/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/fd365a2b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
index d438f22..7c59e27 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.DeleteColumnFamilyFuture.html
@@ -1290,8 +1290,8 @@
 1282   CompactType 
compactType) throws IOException {
 1283switch (compactType) {
 1284  case MOB:
-1285
compact(this.connection.getAdminForMaster(), getMobRegionInfo(tableName), 
major,
-1286  columnFamily);
+1285
compact(this.connection.getAdminForMaster(), 
RegionInfo.createMobRegionInfo(tableName),
+1286major, columnFamily);
 1287break;
 1288  case NORMAL:
 1289checkTableExists(tableName);
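Both sides of this hunk build the same synthetic region descriptor for mob compaction; the change only moves the construction from a private helper (removed further down in this diff) into RegionInfo.createMobRegionInfo. A sketch of what that helper did, following the removed lines, with imports filled in as an assumption:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobRegionInfoSketch {
  // Mirrors the removed private helper: a placeholder RegionInfo for the
  // table's mob data, keyed by the ".mob" start key with region id 0.
  static RegionInfo mobRegionInfo(TableName tableName) {
    return RegionInfoBuilder.newBuilder(tableName)
        .setStartKey(Bytes.toBytes(".mob"))
        .setRegionId(0)
        .build();
  }

  public static void main(String[] args) {
    System.out.println(mobRegionInfo(TableName.valueOf("t1")));
  }
}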
@@ -3248,7 +3248,7 @@
 3240  new Callable<AdminProtos.GetRegionInfoResponse.CompactionState>() {
 3241@Override
 3242public 
AdminProtos.GetRegionInfoResponse.CompactionState call() throws Exception {
-3243  RegionInfo info = 
getMobRegionInfo(tableName);
+3243  RegionInfo info = 
RegionInfo.createMobRegionInfo(tableName);
 3244  GetRegionInfoRequest 
request =
 3245
RequestConverter.buildGetRegionInfoRequest(info.getRegionName(), true);
 3246  GetRegionInfoResponse 
response = masterAdmin.getRegionInfo(rpcController, request);
@@ -3312,7 +3312,7 @@
 3304}
 3305break;
 3306  default:
-3307throw new 
IllegalArgumentException("Unknowne compactType: " + compactType);
+3307throw new 
IllegalArgumentException("Unknown compactType: " + compactType);
 3308}
 3309if (state != null) {
 3310  return 
ProtobufUtil.createCompactionState(state);
@@ -3847,325 +3847,320 @@
 3839});
 3840  }
 3841
-3842  private RegionInfo 
getMobRegionInfo(TableName tableName) {
-3843return 
RegionInfoBuilder.newBuilder(tableName).setStartKey(Bytes.toBytes(".mob")).setRegionId(0)
-3844.build();
-3845  }
-3846
-3847  private RpcControllerFactory 
getRpcControllerFactory() {
-3848return this.rpcControllerFactory;
-3849  }
-3850
-3851  @Override
-3852  public void addReplicationPeer(String 
peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-3853  throws IOException {
-3854executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3855  @Override
-3856  protected Void rpcCall() throws 
Exception {
-3857
master.addReplicationPeer(getRpcController(),
-3858  
RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, 
enabled));
-3859return null;
-3860  }
-3861});
-3862  }
-3863
-3864  @Override
-3865  public void 
removeReplicationPeer(String peerId) throws IOException {
-3866executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3867  @Override
-3868  protected Void rpcCall() throws 
Exception {
-3869
master.removeReplicationPeer(getRpcController(),
-3870  
RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-3871return null;
-3872  }
-3873});
-3874  }
-3875
-3876  @Override
-3877  public void 
enableReplicationPeer(final String peerId) throws IOException {
-3878executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3879  @Override
-3880  protected Void rpcCall() throws 
Exception {
-3881
master.enableReplicationPeer(getRpcController(),
-3882  
RequestConverter.buildEnableReplicationPeerRequest(peerId));
-3883return null;
-3884  }
-3885});
-3886  }
-3887
-3888  @Override
-3889  public void 
disableReplicationPeer(final String peerId) throws IOException {
-3890executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-3891  @Override
-3892  protected Void rpcCall() throws 
Exception {
-3893
master.disableReplicationPeer(getRpcController(),
-3894  
RequestConverter.buildDisableReplicationPeerRequest(peerId));
-3895return null;
-3896  }
-3897});
-3898  }
-3899
-3900  @Override
-3901  public ReplicationPeerConfig 
getReplicationPeerConfig(final String peerId) throws IOException {
-3902return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(),
-3903getRpcControllerFactory()) {
-3904  @Override
-3905  protected ReplicationPeerConfig 
rpcCall() throws Exception {
-3906GetReplicationPeerConfigResponse 
response = master.getReplicationPeerConfig(
-3907  getRpcCont
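Every replication-peer method removed above follows one template: wrap a single master RPC in an anonymous MasterCallable and hand it to executeCallable. A stripped-down sketch of that template-method shape (hypothetical names, not the real MasterCallable API, which also manages connections and RPC controllers):

import java.io.IOException;

public class RpcTemplateSketch {

  // The abstract parent owns the boilerplate; subclasses supply one RPC.
  abstract static class MasterCall<V> {
    final V call() throws Exception {
      // In the real class: acquire connection, set up the RPC controller, etc.
      return rpcCall();
    }

    protected abstract V rpcCall() throws Exception;
  }

  static <V> V execute(MasterCall<V> callable) throws IOException {
    try {
      return callable.call();
    } catch (Exception e) {
      // The real executeCallable likewise surfaces failures as IOException.
      throw new IOException(e);
    }
  }

  public static void main(String[] args) throws IOException {
    // Each admin operation becomes a tiny anonymous subclass, as in the diff.
    execute(new MasterCall<Void>() {
      @Override
      protected Void rpcCall() {
        System.out.println("addReplicationPeer RPC would run here");
        return null;
      }
    });
  }
}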
